- netboot-base.nix with SSH key auth - Launch scripts for node01/02/03 - Node configuration.nix and disko.nix - Nix modules for first-boot automation 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Sonnet 4.5 <noreply@anthropic.com>
263 lines
7.9 KiB
Rust
263 lines
7.9 KiB
Rust
//! Integration tests for Metricstor
//!
//! These tests verify end-to-end functionality of the metrics storage system.
//! Tests cover the full ingestion → storage → query pipeline.
//!
//! # Test Categories
//!
//! - **Ingestion**: Remote write protocol, compression, validation
//! - **Query**: PromQL execution, result formatting
//! - **Storage**: Persistence, compaction, retention
//! - **API**: gRPC and HTTP endpoints
//! - **Security**: mTLS authentication and authorization
//!
//! # Implementation Status
//!
//! This is a placeholder file. Full test suite will be implemented in S6.

#[cfg(test)]
mod tests {
    use metricstor_api::prometheus::{Label, Sample, TimeSeries, WriteRequest};
    use metricstor_api::metricstor::{InstantQueryRequest, RangeQueryRequest};

    /// Helper: build a `WriteRequest` containing a single series
    /// (`test_metric{job="test"}`) with one sample (value 42.0 at
    /// timestamp 1234567890000).
    fn create_test_write_request() -> WriteRequest {
        WriteRequest {
            timeseries: vec![TimeSeries {
                labels: vec![
                    Label {
                        name: "__name__".to_string(),
                        value: "test_metric".to_string(),
                    },
                    Label {
                        name: "job".to_string(),
                        value: "test".to_string(),
                    },
                ],
                samples: vec![Sample {
                    value: 42.0,
                    timestamp: 1234567890000,
                }],
            }],
        }
    }

    /// The helper produces exactly one series with two labels and one sample.
    #[test]
    fn test_create_write_request() {
        let request = create_test_write_request();
        assert_eq!(request.timeseries.len(), 1);
        assert_eq!(request.timeseries[0].labels.len(), 2);
        assert_eq!(request.timeseries[0].samples.len(), 1);
    }

    /// An `InstantQueryRequest` can be constructed and retains its query string.
    #[test]
    fn test_create_instant_query() {
        let query = InstantQueryRequest {
            query: "test_metric{job='test'}".to_string(),
            time: 1234567890000,
            timeout: 5000,
        };
        assert_eq!(query.query, "test_metric{job='test'}");
    }

    /// A `RangeQueryRequest` can be constructed and retains its time bounds.
    #[test]
    fn test_create_range_query() {
        let query = RangeQueryRequest {
            query: "rate(test_metric[5m])".to_string(),
            start: 1234567890000,
            end: 1234571490000,
            step: 60000,
            timeout: 10000,
        };
        assert_eq!(query.start, 1234567890000);
        assert_eq!(query.end, 1234571490000);
    }

    // Query API Integration Tests (S4)

    /// A freshly created `QueryService` starts with empty series storage.
    #[tokio::test]
    async fn test_query_service_creation() {
        use metricstor_server::query::QueryService;

        let service = QueryService::new();
        assert!(service.storage().read().await.series.is_empty());
    }

    /// An instant query against empty storage succeeds and returns an
    /// empty "vector" result.
    #[tokio::test]
    async fn test_instant_query_empty_storage() {
        use metricstor_server::query::QueryService;

        let service = QueryService::new();
        let result = service.execute_instant_query("up", 1000).await;
        assert!(result.is_ok());
        let query_result = result.unwrap();
        assert_eq!(query_result.result_type, "vector");
        assert!(query_result.result.is_empty());
    }

    /// A range query against empty storage succeeds.
    #[tokio::test]
    async fn test_range_query_empty_storage() {
        use metricstor_server::query::QueryService;

        let service = QueryService::new();
        let result = service
            .execute_range_query("up", 1000, 2000, 100)
            .await;
        assert!(result.is_ok());
    }

    /// After inserting one series, an instant query for its metric name
    /// returns that series with the expected (timestamp, value) pair.
    #[tokio::test]
    async fn test_instant_query_with_data() {
        use metricstor_server::query::QueryService;
        use metricstor_types::{Label, Sample, SeriesId, TimeSeries};

        let service = QueryService::new();

        // Add test data (write lock is dropped at end of scope so the
        // query below can take the read lock)
        {
            let mut storage = service.storage().write().await;
            let series = TimeSeries {
                id: SeriesId(1),
                labels: vec![
                    Label::new("__name__", "test_metric"),
                    Label::new("job", "test_job"),
                ],
                samples: vec![Sample::new(1000, 42.0)],
            };
            storage.upsert_series(series);
        }

        // Query the data
        let result = service.execute_instant_query("test_metric", 1000).await;
        assert!(result.is_ok());
        let query_result = result.unwrap();
        assert_eq!(query_result.result.len(), 1);
        assert_eq!(query_result.result[0].value, Some((1000, 42.0)));
    }

    /// `label_values` returns the distinct values stored for a label name.
    #[tokio::test]
    async fn test_label_values_query() {
        use metricstor_server::query::QueryService;
        use metricstor_types::{Label, SeriesId, TimeSeries};

        let service = QueryService::new();

        // Add test data with labels
        {
            let mut storage = service.storage().write().await;
            let series = TimeSeries {
                id: SeriesId(1),
                labels: vec![
                    Label::new("__name__", "test_metric"),
                    Label::new("environment", "production"),
                    Label::new("job", "api"),
                ],
                samples: vec![],
            };
            storage.upsert_series(series);
        }

        // Query label values
        {
            let storage = service.storage().read().await;
            let values = storage.label_values("environment");
            assert_eq!(values.len(), 1);
            assert!(values.contains(&"production".to_string()));
        }
    }

    // PromQL Parsing Tests

    /// Bare metric-name selectors parse successfully.
    #[test]
    fn test_promql_simple_selector() {
        use promql_parser::parser::Parser;

        let queries = vec![
            "up",
            "http_requests_total",
            "node_cpu_seconds_total",
        ];

        for query in queries {
            let result = Parser::new(query).parse();
            assert!(result.is_ok(), "Failed to parse: {}", query);
        }
    }

    /// Label matchers — exact, multi-label, and regex — parse successfully.
    #[test]
    fn test_promql_label_selector() {
        use promql_parser::parser::Parser;

        let queries = vec![
            "http_requests_total{method=\"GET\"}",
            "http_requests_total{method=\"GET\",status=\"200\"}",
            "http_requests_total{job=~\"api.*\"}",
        ];

        for query in queries {
            let result = Parser::new(query).parse();
            assert!(result.is_ok(), "Failed to parse: {}", query);
        }
    }

    /// Standard aggregation operators parse successfully.
    #[test]
    fn test_promql_aggregation() {
        use promql_parser::parser::Parser;

        let queries = vec![
            "sum(http_requests_total)",
            "avg(http_requests_total)",
            "min(http_requests_total)",
            "max(http_requests_total)",
            "count(http_requests_total)",
        ];

        for query in queries {
            let result = Parser::new(query).parse();
            assert!(result.is_ok(), "Failed to parse: {}", query);
        }
    }

    /// Counter-rate functions over range vectors parse successfully.
    #[test]
    fn test_promql_rate_function() {
        use promql_parser::parser::Parser;

        let queries = vec![
            "rate(http_requests_total[5m])",
            "irate(http_requests_total[5m])",
            "increase(http_requests_total[1h])",
        ];

        for query in queries {
            let result = Parser::new(query).parse();
            assert!(result.is_ok(), "Failed to parse: {}", query);
        }
    }

    /// Bare range selectors with various durations parse successfully.
    #[test]
    fn test_promql_range_selector() {
        use promql_parser::parser::Parser;

        let queries = vec![
            "http_requests_total[5m]",
            "http_requests_total[1h]",
            "http_requests_total[24h]",
        ];

        for query in queries {
            let result = Parser::new(query).parse();
            assert!(result.is_ok(), "Failed to parse: {}", query);
        }
    }

    // TODO (S6): Add more integration tests
    // - [ ] Test HTTP endpoints with Axum test client
    // - [ ] Test mTLS authentication
    // - [ ] Test storage persistence
    // - [ ] Test compaction
    // - [ ] Test retention enforcement
    // - [ ] Test error handling (invalid queries, timeouts)
    // - [ ] Test concurrent writes and queries
    // - [ ] Test backpressure handling
    // - [ ] Test Grafana compatibility
}
|