- Created T026-practical-test task.yaml for MVP smoke testing
- Added k8shost-server to flake.nix (packages, apps, overlays)
- Staged all workspace directories for nix flake build
- Updated flake.nix shellHook to include k8shost

Resolves: T026.S1 blocker (R8 - nix submodule visibility)
//! Integration tests for LightningSTOR server
//!
//! Run with: cargo test -p lightningstor-server --test integration -- --ignored
//! Requires: LIGHTNINGSTOR_TEST=1 environment variable

use bytes::Bytes;
use lightningstor_server::metadata::MetadataStore;
use lightningstor_storage::{LocalFsBackend, StorageBackend};
use lightningstor_types::{Bucket, BucketName, Object, ObjectKey};
use std::sync::Arc;
use tempfile::TempDir;
/// Test helper to create a test environment
struct TestEnv {
    storage: Arc<LocalFsBackend>,
    metadata: Arc<MetadataStore>,
    _temp_dir: TempDir,
}

impl TestEnv {
    async fn new() -> Self {
        let temp_dir = TempDir::new().expect("Failed to create temp dir");
        let data_path = temp_dir.path().to_str().unwrap();

        let storage = Arc::new(
            LocalFsBackend::new(data_path)
                .await
                .expect("Failed to create storage backend"),
        );

        // Use in-memory metadata store for testing (no ChainFire required)
        let metadata = Arc::new(MetadataStore::new_in_memory());

        Self {
            storage,
            metadata,
            _temp_dir: temp_dir,
        }
    }
}
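
/// Compute the MD5-based ETag for a payload, as expected by `Object::new`.
///
/// Factored out of the tests below, which previously repeated this
/// hash-then-convert sequence inline. Uses only items already imported by
/// this file (`md5::{Digest, Md5}` and `lightningstor_types::ETag::from_md5`).
fn compute_etag(content: &[u8]) -> lightningstor_types::ETag {
    use md5::{Digest, Md5};
    let mut hasher = Md5::new();
    hasher.update(content);
    let hash_array: [u8; 16] = hasher.finalize().into();
    lightningstor_types::ETag::from_md5(&hash_array)
}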

// =============================================================================
// gRPC-style Flow Tests (using services directly)
// =============================================================================

#[tokio::test]
#[ignore = "Integration test - run with LIGHTNINGSTOR_TEST=1"]
async fn test_bucket_lifecycle() {
    let env = TestEnv::new().await;

    let org_id = "test-org";
    let project_id = "test-project";
    let bucket_name = "test-bucket";

    // Create bucket
    let bucket_name_obj = BucketName::new(bucket_name).expect("Invalid bucket name");
    let bucket = Bucket::new(bucket_name_obj, org_id, project_id, "us-east-1");

    env.metadata
        .save_bucket(&bucket)
        .await
        .expect("Failed to save bucket");

    // Verify bucket exists
    let loaded = env
        .metadata
        .load_bucket(org_id, project_id, bucket_name)
        .await
        .expect("Failed to load bucket")
        .expect("Bucket not found");

    assert_eq!(loaded.name.as_str(), bucket_name);
    assert_eq!(loaded.org_id, org_id);
    assert_eq!(loaded.project_id, project_id);

    // List buckets
    let buckets = env
        .metadata
        .list_buckets(org_id, None)
        .await
        .expect("Failed to list buckets");

    assert_eq!(buckets.len(), 1);
    assert_eq!(buckets[0].name.as_str(), bucket_name);

    // Delete bucket
    env.metadata
        .delete_bucket(&loaded)
        .await
        .expect("Failed to delete bucket");

    // Verify bucket is gone
    let deleted = env
        .metadata
        .load_bucket(org_id, project_id, bucket_name)
        .await
        .expect("Failed to check bucket");

    assert!(deleted.is_none(), "Bucket should be deleted");

    println!("✓ Bucket lifecycle test passed");
}

#[tokio::test]
#[ignore = "Integration test - run with LIGHTNINGSTOR_TEST=1"]
async fn test_object_lifecycle() {
    let env = TestEnv::new().await;

    let org_id = "test-org";
    let project_id = "test-project";
    let bucket_name = "test-bucket";
    let object_key = "test/object.txt";
    let object_content = b"Hello, LightningSTOR!";

    // Create bucket first
    let bucket_name_obj = BucketName::new(bucket_name).expect("Invalid bucket name");
    let bucket = Bucket::new(bucket_name_obj, org_id, project_id, "us-east-1");
    env.metadata
        .save_bucket(&bucket)
        .await
        .expect("Failed to save bucket");

    // Create object
    let object_key_obj = ObjectKey::new(object_key).expect("Invalid object key");

    // Calculate ETag (via the compute_etag helper above)
    let etag = compute_etag(object_content);

    let object = Object::new(
        bucket.id.to_string(),
        object_key_obj,
        etag.clone(),
        object_content.len() as u64,
        Some("text/plain".to_string()),
    );

    // Store object data
    env.storage
        .put_object(&object.id, Bytes::from(object_content.to_vec()))
        .await
        .expect("Failed to store object data");

    // Save object metadata
    env.metadata
        .save_object(&object)
        .await
        .expect("Failed to save object metadata");

    // Verify object exists
    let loaded = env
        .metadata
        .load_object(&bucket.id, object_key, None)
        .await
        .expect("Failed to load object")
        .expect("Object not found");

    assert_eq!(loaded.key.as_str(), object_key);
    assert_eq!(loaded.size, object_content.len() as u64);
    assert_eq!(loaded.etag.as_str(), etag.as_str());

    // Get object data
    let data = env
        .storage
        .get_object(&loaded.id)
        .await
        .expect("Failed to get object data");

    assert_eq!(data.as_ref(), object_content);

    // List objects
    let objects = env
        .metadata
        .list_objects(&bucket.id, "", 1000)
        .await
        .expect("Failed to list objects");

    assert_eq!(objects.len(), 1);
    assert_eq!(objects[0].key.as_str(), object_key);

    // Delete object
    env.storage
        .delete_object(&loaded.id)
        .await
        .expect("Failed to delete object data");

    env.metadata
        .delete_object(&bucket.id, object_key, None)
        .await
        .expect("Failed to delete object metadata");

    // Verify object is gone
    let deleted = env
        .metadata
        .load_object(&bucket.id, object_key, None)
        .await
        .expect("Failed to check object");

    assert!(deleted.is_none(), "Object should be deleted");

    // Cleanup bucket
    env.metadata
        .delete_bucket(&bucket)
        .await
        .expect("Failed to delete bucket");

    println!("✓ Object lifecycle test passed");
}

#[tokio::test]
#[ignore = "Integration test - run with LIGHTNINGSTOR_TEST=1"]
async fn test_full_crud_cycle() {
    let env = TestEnv::new().await;

    println!("Starting full CRUD cycle test...");

    let org_id = "crud-org";
    let project_id = "crud-project";

    // 1. Create multiple buckets
    for i in 1..=3 {
        let name = format!("bucket-{:03}", i);
        let bucket_name = BucketName::new(&name).unwrap();
        let bucket = Bucket::new(bucket_name, org_id, project_id, "us-west-2");
        env.metadata.save_bucket(&bucket).await.unwrap();
        println!(" Created bucket: {}", name);
    }

    // 2. Verify all buckets exist
    let buckets = env.metadata.list_buckets(org_id, None).await.unwrap();
    assert_eq!(buckets.len(), 3);
    println!(" Verified {} buckets exist", buckets.len());

    // 3. Add objects to first bucket
    let bucket = &buckets[0];
    let test_objects = vec![
        ("docs/readme.md", "# README\nThis is a test."),
        ("docs/guide.md", "# Guide\nStep by step instructions."),
        ("images/logo.png", "PNG_BINARY_DATA_PLACEHOLDER"),
        ("data/config.json", r#"{"key": "value"}"#),
    ];

    for (key, content) in &test_objects {
        let object_key = ObjectKey::new(*key).unwrap();
        let etag = compute_etag(content.as_bytes());

        let object = Object::new(
            bucket.id.to_string(),
            object_key,
            etag,
            content.len() as u64,
            Some("text/plain".to_string()),
        );

        env.storage
            .put_object(&object.id, Bytes::from(content.as_bytes().to_vec()))
            .await
            .unwrap();
        env.metadata.save_object(&object).await.unwrap();
        println!(" Created object: {}", key);
    }

    // 4. List all objects
    let objects = env.metadata.list_objects(&bucket.id, "", 1000).await.unwrap();
    assert_eq!(objects.len(), test_objects.len());
    println!(" Verified {} objects exist", objects.len());

    // 5. List with prefix filter
    let docs = env.metadata.list_objects(&bucket.id, "docs/", 1000).await.unwrap();
    assert_eq!(docs.len(), 2);
    println!(" Prefix filter 'docs/' returned {} objects", docs.len());

    // 6. Read back each object and verify content
    for (key, expected_content) in &test_objects {
        let obj = env
            .metadata
            .load_object(&bucket.id, key, None)
            .await
            .unwrap()
            .expect("Object not found");

        let data = env.storage.get_object(&obj.id).await.unwrap();
        assert_eq!(data.as_ref(), expected_content.as_bytes());
        println!(" Verified content of: {}", key);
    }

    // 7. Delete all objects
    for obj in &objects {
        env.storage.delete_object(&obj.id).await.unwrap();
        env.metadata
            .delete_object(&bucket.id, obj.key.as_str(), None)
            .await
            .unwrap();
    }
    println!(" Deleted all objects");

    // 8. Verify objects are gone
    let remaining = env.metadata.list_objects(&bucket.id, "", 1000).await.unwrap();
    assert_eq!(remaining.len(), 0);
    println!(" Verified all objects deleted");

    // 9. Delete all buckets
    for bucket in &buckets {
        env.metadata.delete_bucket(bucket).await.unwrap();
    }
    println!(" Deleted all buckets");

    // 10. Verify buckets are gone
    let remaining_buckets = env.metadata.list_buckets(org_id, None).await.unwrap();
    assert_eq!(remaining_buckets.len(), 0);
    println!(" Verified all buckets deleted");

    println!("✓ Full CRUD cycle test passed");
}

// =============================================================================
// S3 HTTP Flow Tests (would require running server)
// =============================================================================

#[tokio::test]
#[ignore = "S3 HTTP test - requires running server"]
async fn test_s3_http_bucket_operations() {
    // This test would require:
    // 1. Starting the server in the background
    // 2. Making HTTP requests via reqwest
    // 3. Verifying responses

    // For now, we rely on manual testing with curl (a reqwest-based sketch
    // of the same flow follows this test):
    //   curl -X PUT http://localhost:9001/test-bucket
    //   curl http://localhost:9001/
    //   curl -X DELETE http://localhost:9001/test-bucket

    println!("S3 HTTP tests require a running server - use curl for manual testing");
}
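
// A sketch of how the curl flow above could be automated. Assumptions (not
// currently part of this crate): a server already listening on
// http://localhost:9001, and a `reqwest` dev-dependency. The body assertion
// is illustrative; a real test would parse the S3 XML listing.
#[tokio::test]
#[ignore = "S3 HTTP test - requires running server"]
async fn test_s3_http_bucket_operations_sketch() {
    let base = "http://localhost:9001";
    let client = reqwest::Client::new();

    // PUT /test-bucket -> create the bucket
    let resp = client
        .put(format!("{base}/test-bucket"))
        .send()
        .await
        .expect("PUT bucket failed");
    assert!(resp.status().is_success());

    // GET / -> list buckets; the listing should mention the new bucket
    let body = client
        .get(format!("{base}/"))
        .send()
        .await
        .expect("GET bucket list failed")
        .text()
        .await
        .expect("Failed to read list body");
    assert!(body.contains("test-bucket"));

    // DELETE /test-bucket -> remove the bucket
    let resp = client
        .delete(format!("{base}/test-bucket"))
        .send()
        .await
        .expect("DELETE bucket failed");
    assert!(resp.status().is_success());
}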

#[tokio::test]
#[ignore = "S3 HTTP test - requires running server"]
async fn test_s3_http_object_operations() {
    // Manual testing commands (a reqwest-based sketch of the same flow
    // follows this test):
    //   curl -X PUT http://localhost:9001/test-bucket
    //   curl -X PUT -d "Hello World" http://localhost:9001/test-bucket/hello.txt
    //   curl http://localhost:9001/test-bucket/hello.txt
    //   curl -I http://localhost:9001/test-bucket/hello.txt
    //   curl http://localhost:9001/test-bucket?prefix=
    //   curl -X DELETE http://localhost:9001/test-bucket/hello.txt
    //   curl -X DELETE http://localhost:9001/test-bucket

    println!("S3 HTTP tests require a running server - use curl for manual testing");
}
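
// A sketch automating the object flow above, under the same assumptions as
// the bucket sketch (live server on http://localhost:9001, `reqwest`
// dev-dependency).
#[tokio::test]
#[ignore = "S3 HTTP test - requires running server"]
async fn test_s3_http_object_operations_sketch() {
    let base = "http://localhost:9001";
    let client = reqwest::Client::new();

    // Create the bucket, then PUT an object into it
    client
        .put(format!("{base}/test-bucket"))
        .send()
        .await
        .expect("PUT bucket failed");
    let resp = client
        .put(format!("{base}/test-bucket/hello.txt"))
        .body("Hello World")
        .send()
        .await
        .expect("PUT object failed");
    assert!(resp.status().is_success());

    // GET the object back and verify the payload round-trips
    let body = client
        .get(format!("{base}/test-bucket/hello.txt"))
        .send()
        .await
        .expect("GET object failed")
        .text()
        .await
        .expect("Failed to read object body");
    assert_eq!(body, "Hello World");

    // Clean up: object first, then bucket
    client
        .delete(format!("{base}/test-bucket/hello.txt"))
        .send()
        .await
        .expect("DELETE object failed");
    client
        .delete(format!("{base}/test-bucket"))
        .send()
        .await
        .expect("DELETE bucket failed");
}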