// Commit context: Created T026-practical-test task.yaml for MVP smoke testing;
// added k8shost-server to flake.nix (packages, apps, overlays); staged all
// workspace directories for nix flake build; updated flake.nix shellHook to
// include k8shost. Resolves: T026.S1 blocker (R8 - nix submodule visibility)
//! DNS Metadata storage using ChainFire, FlareDB, or in-memory store
|
|
|
|
use chainfire_client::Client as ChainFireClient;
|
|
use dashmap::DashMap;
|
|
use flaredb_client::RdbClient;
|
|
use flashdns_types::{cidr_to_arpa, Record, RecordId, RecordType, ReverseZone, Zone, ZoneId};
|
|
use std::sync::Arc;
|
|
use tokio::sync::Mutex;
|
|
|
|
/// Result type for metadata operations
///
/// Convenience alias so signatures in this module can write `Result<T>`
/// instead of `std::result::Result<T, MetadataError>`.
pub type Result<T> = std::result::Result<T, MetadataError>;
|
|
|
|
/// Metadata operation error
///
/// Unified error type across all storage backends; backend-specific errors
/// are flattened to strings so callers do not depend on backend crates.
#[derive(Debug, thiserror::Error)]
pub enum MetadataError {
    /// Underlying backend (ChainFire/FlareDB) failure, including connect errors.
    #[error("Storage error: {0}")]
    Storage(String),
    /// JSON (de)serialization of a Zone/Record/ReverseZone failed.
    #[error("Serialization error: {0}")]
    Serialization(String),
    /// Requested entity does not exist.
    #[error("Not found: {0}")]
    NotFound(String),
    /// Caller supplied an invalid value (e.g. a CIDR that cannot be converted).
    #[error("Invalid argument: {0}")]
    InvalidArgument(String),
}
|
|
|
|
/// Storage backend enum
///
/// Selected at construction time. Remote clients are wrapped in
/// `Arc<Mutex<..>>` because their methods take `&mut self`; the in-memory
/// map uses `DashMap` and needs no extra locking.
enum StorageBackend {
    /// ChainFire KV client.
    ChainFire(Arc<Mutex<ChainFireClient>>),
    /// FlareDB raw-KV client.
    FlareDB(Arc<Mutex<RdbClient>>),
    /// Process-local map, used by tests via `new_in_memory`.
    InMemory(Arc<DashMap<String, String>>),
}
|
|
|
|
/// DNS Metadata store for zones and records
///
/// Persists zones, records, and reverse zones as JSON values under
/// hierarchical string keys (see the key-builder helpers in the impl).
pub struct DnsMetadataStore {
    backend: StorageBackend,
}
|
|
|
|
impl DnsMetadataStore {
|
|
/// Create a new metadata store with ChainFire backend
|
|
pub async fn new(endpoint: Option<String>) -> Result<Self> {
|
|
let endpoint = endpoint.unwrap_or_else(|| {
|
|
std::env::var("FLASHDNS_CHAINFIRE_ENDPOINT")
|
|
.unwrap_or_else(|_| "http://127.0.0.1:50051".to_string())
|
|
});
|
|
|
|
let client = ChainFireClient::connect(&endpoint)
|
|
.await
|
|
.map_err(|e| MetadataError::Storage(format!("Failed to connect to ChainFire: {}", e)))?;
|
|
|
|
Ok(Self {
|
|
backend: StorageBackend::ChainFire(Arc::new(Mutex::new(client))),
|
|
})
|
|
}
|
|
|
|
/// Create a new metadata store with FlareDB backend
|
|
pub async fn new_flaredb(endpoint: Option<String>) -> Result<Self> {
|
|
let endpoint = endpoint.unwrap_or_else(|| {
|
|
std::env::var("FLASHDNS_FLAREDB_ENDPOINT")
|
|
.unwrap_or_else(|_| "127.0.0.1:2379".to_string())
|
|
});
|
|
|
|
// FlareDB client needs both server and PD address
|
|
// For now, we use the same endpoint for both (PD address)
|
|
let client = RdbClient::connect_with_pd_namespace(
|
|
endpoint.clone(),
|
|
endpoint.clone(),
|
|
"flashdns",
|
|
)
|
|
.await
|
|
.map_err(|e| MetadataError::Storage(format!(
|
|
"Failed to connect to FlareDB: {}", e
|
|
)))?;
|
|
|
|
Ok(Self {
|
|
backend: StorageBackend::FlareDB(Arc::new(Mutex::new(client))),
|
|
})
|
|
}
|
|
|
|
/// Create a new in-memory metadata store (for testing)
|
|
pub fn new_in_memory() -> Self {
|
|
Self {
|
|
backend: StorageBackend::InMemory(Arc::new(DashMap::new())),
|
|
}
|
|
}
|
|
|
|
// =========================================================================
|
|
// Internal storage helpers
|
|
// =========================================================================
|
|
|
|
async fn put(&self, key: &str, value: &str) -> Result<()> {
|
|
match &self.backend {
|
|
StorageBackend::ChainFire(client) => {
|
|
let mut c = client.lock().await;
|
|
c.put_str(key, value)
|
|
.await
|
|
.map_err(|e| MetadataError::Storage(format!("ChainFire put failed: {}", e)))?;
|
|
}
|
|
StorageBackend::FlareDB(client) => {
|
|
let mut c = client.lock().await;
|
|
c.raw_put(key.as_bytes().to_vec(), value.as_bytes().to_vec())
|
|
.await
|
|
.map_err(|e| MetadataError::Storage(format!("FlareDB put failed: {}", e)))?;
|
|
}
|
|
StorageBackend::InMemory(map) => {
|
|
map.insert(key.to_string(), value.to_string());
|
|
}
|
|
}
|
|
Ok(())
|
|
}
|
|
|
|
async fn get(&self, key: &str) -> Result<Option<String>> {
|
|
match &self.backend {
|
|
StorageBackend::ChainFire(client) => {
|
|
let mut c = client.lock().await;
|
|
c.get_str(key)
|
|
.await
|
|
.map_err(|e| MetadataError::Storage(format!("ChainFire get failed: {}", e)))
|
|
}
|
|
StorageBackend::FlareDB(client) => {
|
|
let mut c = client.lock().await;
|
|
let result = c.raw_get(key.as_bytes().to_vec())
|
|
.await
|
|
.map_err(|e| MetadataError::Storage(format!("FlareDB get failed: {}", e)))?;
|
|
Ok(result.map(|bytes| String::from_utf8_lossy(&bytes).to_string()))
|
|
}
|
|
StorageBackend::InMemory(map) => Ok(map.get(key).map(|v| v.value().clone())),
|
|
}
|
|
}
|
|
|
|
async fn delete_key(&self, key: &str) -> Result<()> {
|
|
match &self.backend {
|
|
StorageBackend::ChainFire(client) => {
|
|
let mut c = client.lock().await;
|
|
c.delete(key)
|
|
.await
|
|
.map_err(|e| MetadataError::Storage(format!("ChainFire delete failed: {}", e)))?;
|
|
}
|
|
StorageBackend::FlareDB(client) => {
|
|
let mut c = client.lock().await;
|
|
c.raw_delete(key.as_bytes().to_vec())
|
|
.await
|
|
.map_err(|e| MetadataError::Storage(format!("FlareDB delete failed: {}", e)))?;
|
|
}
|
|
StorageBackend::InMemory(map) => {
|
|
map.remove(key);
|
|
}
|
|
}
|
|
Ok(())
|
|
}
|
|
|
|
async fn get_prefix(&self, prefix: &str) -> Result<Vec<(String, String)>> {
|
|
match &self.backend {
|
|
StorageBackend::ChainFire(client) => {
|
|
let mut c = client.lock().await;
|
|
let items = c
|
|
.get_prefix(prefix)
|
|
.await
|
|
.map_err(|e| MetadataError::Storage(format!("ChainFire get_prefix failed: {}", e)))?;
|
|
Ok(items
|
|
.into_iter()
|
|
.map(|(k, v)| {
|
|
(
|
|
String::from_utf8_lossy(&k).to_string(),
|
|
String::from_utf8_lossy(&v).to_string(),
|
|
)
|
|
})
|
|
.collect())
|
|
}
|
|
StorageBackend::FlareDB(client) => {
|
|
let mut c = client.lock().await;
|
|
|
|
// Calculate end_key by incrementing the last byte of prefix
|
|
let mut end_key = prefix.as_bytes().to_vec();
|
|
if let Some(last) = end_key.last_mut() {
|
|
if *last == 0xff {
|
|
// If last byte is 0xff, append a 0x00
|
|
end_key.push(0x00);
|
|
} else {
|
|
*last += 1;
|
|
}
|
|
} else {
|
|
// Empty prefix - scan everything
|
|
end_key.push(0xff);
|
|
}
|
|
|
|
let mut results = Vec::new();
|
|
let mut start_key = prefix.as_bytes().to_vec();
|
|
|
|
// Pagination loop to get all results
|
|
loop {
|
|
let (keys, values, next) = c.raw_scan(
|
|
start_key.clone(),
|
|
end_key.clone(),
|
|
1000, // Batch size
|
|
)
|
|
.await
|
|
.map_err(|e| MetadataError::Storage(format!("FlareDB scan failed: {}", e)))?;
|
|
|
|
// Convert and add results
|
|
for (k, v) in keys.iter().zip(values.iter()) {
|
|
results.push((
|
|
String::from_utf8_lossy(k).to_string(),
|
|
String::from_utf8_lossy(v).to_string(),
|
|
));
|
|
}
|
|
|
|
// Check if there are more results
|
|
if let Some(next_key) = next {
|
|
start_key = next_key;
|
|
} else {
|
|
break;
|
|
}
|
|
}
|
|
|
|
Ok(results)
|
|
}
|
|
StorageBackend::InMemory(map) => {
|
|
let mut results = Vec::new();
|
|
for entry in map.iter() {
|
|
if entry.key().starts_with(prefix) {
|
|
results.push((entry.key().clone(), entry.value().clone()));
|
|
}
|
|
}
|
|
Ok(results)
|
|
}
|
|
}
|
|
}
|
|
|
|
// =========================================================================
|
|
// Key builders
|
|
// =========================================================================
|
|
|
|
/// Key for a zone, namespaced by tenant: `/flashdns/zones/{org}/{project}/{zone}`.
// NOTE(review): components are interpolated verbatim — an org/project/zone
// name containing '/' would alter the key hierarchy; assumed validated
// upstream — TODO confirm.
fn zone_key(org_id: &str, project_id: &str, zone_name: &str) -> String {
    format!("/flashdns/zones/{}/{}/{}", org_id, project_id, zone_name)
}

/// Secondary index: maps a zone ID to its primary `zone_key`.
fn zone_id_key(zone_id: &ZoneId) -> String {
    format!("/flashdns/zone_ids/{}", zone_id)
}

/// Key for a record: `/flashdns/records/{zone_id}/{name}/{type}`.
fn record_key(zone_id: &ZoneId, record_name: &str, record_type: RecordType) -> String {
    format!("/flashdns/records/{}/{}/{}", zone_id, record_name, record_type)
}

/// Prefix covering every record key of one zone. The trailing '/' stops a
/// zone ID from prefix-matching a longer ID.
fn record_prefix(zone_id: &ZoneId) -> String {
    format!("/flashdns/records/{}/", zone_id)
}

/// Secondary index: maps a record ID to its primary `record_key`.
fn record_id_key(record_id: &RecordId) -> String {
    format!("/flashdns/record_ids/{}", record_id)
}
|
|
|
|
// =========================================================================
|
|
// Zone operations
|
|
// =========================================================================
|
|
|
|
/// Save zone metadata
|
|
pub async fn save_zone(&self, zone: &Zone) -> Result<()> {
|
|
let key = Self::zone_key(&zone.org_id, &zone.project_id, zone.name.as_str());
|
|
let value = serde_json::to_string(zone)
|
|
.map_err(|e| MetadataError::Serialization(format!("Failed to serialize zone: {}", e)))?;
|
|
|
|
self.put(&key, &value).await?;
|
|
|
|
// Also save zone ID mapping
|
|
let id_key = Self::zone_id_key(&zone.id);
|
|
self.put(&id_key, &key).await?;
|
|
|
|
Ok(())
|
|
}
|
|
|
|
/// Load zone by name
|
|
pub async fn load_zone(
|
|
&self,
|
|
org_id: &str,
|
|
project_id: &str,
|
|
zone_name: &str,
|
|
) -> Result<Option<Zone>> {
|
|
let key = Self::zone_key(org_id, project_id, zone_name);
|
|
|
|
if let Some(value) = self.get(&key).await? {
|
|
let zone: Zone = serde_json::from_str(&value)
|
|
.map_err(|e| MetadataError::Serialization(format!("Failed to deserialize zone: {}", e)))?;
|
|
Ok(Some(zone))
|
|
} else {
|
|
Ok(None)
|
|
}
|
|
}
|
|
|
|
/// Load zone by ID
|
|
pub async fn load_zone_by_id(&self, zone_id: &ZoneId) -> Result<Option<Zone>> {
|
|
let id_key = Self::zone_id_key(zone_id);
|
|
|
|
if let Some(zone_key) = self.get(&id_key).await? {
|
|
if let Some(value) = self.get(&zone_key).await? {
|
|
let zone: Zone = serde_json::from_str(&value)
|
|
.map_err(|e| MetadataError::Serialization(format!("Failed to deserialize zone: {}", e)))?;
|
|
Ok(Some(zone))
|
|
} else {
|
|
Ok(None)
|
|
}
|
|
} else {
|
|
Ok(None)
|
|
}
|
|
}
|
|
|
|
/// Delete zone (cascade delete all records)
|
|
pub async fn delete_zone(&self, zone: &Zone) -> Result<()> {
|
|
// First, delete all records in the zone (cascade delete)
|
|
self.delete_zone_records(&zone.id).await?;
|
|
|
|
// Then delete the zone metadata
|
|
let key = Self::zone_key(&zone.org_id, &zone.project_id, zone.name.as_str());
|
|
let id_key = Self::zone_id_key(&zone.id);
|
|
|
|
self.delete_key(&key).await?;
|
|
self.delete_key(&id_key).await?;
|
|
|
|
Ok(())
|
|
}
|
|
|
|
/// List zones for a tenant
|
|
pub async fn list_zones(&self, org_id: &str, project_id: Option<&str>) -> Result<Vec<Zone>> {
|
|
let prefix = if let Some(project_id) = project_id {
|
|
format!("/flashdns/zones/{}/{}/", org_id, project_id)
|
|
} else {
|
|
format!("/flashdns/zones/{}/", org_id)
|
|
};
|
|
|
|
let items = self.get_prefix(&prefix).await?;
|
|
|
|
let mut zones = Vec::new();
|
|
for (_, value) in items {
|
|
if let Ok(zone) = serde_json::from_str::<Zone>(&value) {
|
|
zones.push(zone);
|
|
}
|
|
}
|
|
|
|
// Sort by name for consistent ordering
|
|
zones.sort_by(|a, b| a.name.as_str().cmp(b.name.as_str()));
|
|
|
|
Ok(zones)
|
|
}
|
|
|
|
// =========================================================================
|
|
// Record operations
|
|
// =========================================================================
|
|
|
|
/// Save record
|
|
pub async fn save_record(&self, record: &Record) -> Result<()> {
|
|
let key = Self::record_key(&record.zone_id, &record.name, record.record_type);
|
|
let value = serde_json::to_string(record)
|
|
.map_err(|e| MetadataError::Serialization(format!("Failed to serialize record: {}", e)))?;
|
|
|
|
self.put(&key, &value).await?;
|
|
|
|
// Also save record ID mapping
|
|
let id_key = Self::record_id_key(&record.id);
|
|
self.put(&id_key, &key).await?;
|
|
|
|
Ok(())
|
|
}
|
|
|
|
/// Load record by name and type
|
|
pub async fn load_record(
|
|
&self,
|
|
zone_id: &ZoneId,
|
|
record_name: &str,
|
|
record_type: RecordType,
|
|
) -> Result<Option<Record>> {
|
|
let key = Self::record_key(zone_id, record_name, record_type);
|
|
|
|
if let Some(value) = self.get(&key).await? {
|
|
let record: Record = serde_json::from_str(&value)
|
|
.map_err(|e| MetadataError::Serialization(format!("Failed to deserialize record: {}", e)))?;
|
|
Ok(Some(record))
|
|
} else {
|
|
Ok(None)
|
|
}
|
|
}
|
|
|
|
/// Load record by ID
|
|
pub async fn load_record_by_id(&self, record_id: &RecordId) -> Result<Option<Record>> {
|
|
let id_key = Self::record_id_key(record_id);
|
|
|
|
if let Some(record_key) = self.get(&id_key).await? {
|
|
if let Some(value) = self.get(&record_key).await? {
|
|
let record: Record = serde_json::from_str(&value)
|
|
.map_err(|e| MetadataError::Serialization(format!("Failed to deserialize record: {}", e)))?;
|
|
Ok(Some(record))
|
|
} else {
|
|
Ok(None)
|
|
}
|
|
} else {
|
|
Ok(None)
|
|
}
|
|
}
|
|
|
|
/// Delete record
|
|
pub async fn delete_record(&self, record: &Record) -> Result<()> {
|
|
let key = Self::record_key(&record.zone_id, &record.name, record.record_type);
|
|
let id_key = Self::record_id_key(&record.id);
|
|
|
|
self.delete_key(&key).await?;
|
|
self.delete_key(&id_key).await?;
|
|
|
|
Ok(())
|
|
}
|
|
|
|
/// List records for a zone
|
|
pub async fn list_records(&self, zone_id: &ZoneId) -> Result<Vec<Record>> {
|
|
let prefix = Self::record_prefix(zone_id);
|
|
|
|
let items = self.get_prefix(&prefix).await?;
|
|
|
|
let mut records = Vec::new();
|
|
for (_, value) in items {
|
|
if let Ok(record) = serde_json::from_str::<Record>(&value) {
|
|
records.push(record);
|
|
}
|
|
}
|
|
|
|
// Sort by name then type for consistent ordering
|
|
records.sort_by(|a, b| {
|
|
a.name
|
|
.cmp(&b.name)
|
|
.then(a.record_type.type_code().cmp(&b.record_type.type_code()))
|
|
});
|
|
|
|
Ok(records)
|
|
}
|
|
|
|
/// List records by name (all types)
|
|
pub async fn list_records_by_name(&self, zone_id: &ZoneId, name: &str) -> Result<Vec<Record>> {
|
|
let prefix = format!("/flashdns/records/{}/{}/", zone_id, name);
|
|
|
|
let items = self.get_prefix(&prefix).await?;
|
|
|
|
let mut records = Vec::new();
|
|
for (_, value) in items {
|
|
if let Ok(record) = serde_json::from_str::<Record>(&value) {
|
|
records.push(record);
|
|
}
|
|
}
|
|
|
|
Ok(records)
|
|
}
|
|
|
|
/// Delete all records for a zone
|
|
pub async fn delete_zone_records(&self, zone_id: &ZoneId) -> Result<()> {
|
|
let records = self.list_records(zone_id).await?;
|
|
for record in records {
|
|
self.delete_record(&record).await?;
|
|
}
|
|
Ok(())
|
|
}
|
|
|
|
// =========================================================================
|
|
// Reverse Zone operations
|
|
// =========================================================================
|
|
|
|
/// Create a reverse zone
|
|
pub async fn create_reverse_zone(&self, mut zone: ReverseZone) -> Result<ReverseZone> {
|
|
// Generate arpa zone from CIDR
|
|
zone.arpa_zone = cidr_to_arpa(&zone.cidr)
|
|
.map_err(|e| MetadataError::InvalidArgument(format!("Failed to generate arpa zone: {}", e)))?;
|
|
|
|
let zone_key = format!(
|
|
"/flashdns/reverse_zones/{}/{}/{}",
|
|
zone.org_id,
|
|
zone.project_id.as_deref().unwrap_or("global"),
|
|
zone.id
|
|
);
|
|
let cidr_index_key = format!("/flashdns/reverse_zones/by-cidr/{}", normalize_cidr(&zone.cidr));
|
|
|
|
let value = serde_json::to_string(&zone)
|
|
.map_err(|e| MetadataError::Serialization(format!("Failed to serialize reverse zone: {}", e)))?;
|
|
|
|
self.put(&zone_key, &value).await?;
|
|
self.put(&cidr_index_key, &zone.id).await?;
|
|
|
|
Ok(zone)
|
|
}
|
|
|
|
/// Get a reverse zone by ID
|
|
pub async fn get_reverse_zone(&self, zone_id: &str) -> Result<Option<ReverseZone>> {
|
|
// Need to scan for the zone since we don't know org_id/project_id
|
|
let prefix = "/flashdns/reverse_zones/";
|
|
let results = self.get_prefix(prefix).await?;
|
|
|
|
for (key, value) in results {
|
|
if key.ends_with(&format!("/{}", zone_id)) {
|
|
let zone: ReverseZone = serde_json::from_str(&value)
|
|
.map_err(|e| MetadataError::Serialization(format!("Failed to deserialize reverse zone: {}", e)))?;
|
|
return Ok(Some(zone));
|
|
}
|
|
}
|
|
Ok(None)
|
|
}
|
|
|
|
/// Delete a reverse zone
|
|
pub async fn delete_reverse_zone(&self, zone: &ReverseZone) -> Result<()> {
|
|
let zone_key = format!(
|
|
"/flashdns/reverse_zones/{}/{}/{}",
|
|
zone.org_id,
|
|
zone.project_id.as_deref().unwrap_or("global"),
|
|
zone.id
|
|
);
|
|
let cidr_index_key = format!("/flashdns/reverse_zones/by-cidr/{}", normalize_cidr(&zone.cidr));
|
|
|
|
self.delete_key(&zone_key).await?;
|
|
self.delete_key(&cidr_index_key).await?;
|
|
|
|
Ok(())
|
|
}
|
|
|
|
/// List reverse zones for an organization
|
|
pub async fn list_reverse_zones(&self, org_id: &str, project_id: Option<&str>) -> Result<Vec<ReverseZone>> {
|
|
let prefix = format!(
|
|
"/flashdns/reverse_zones/{}/{}/",
|
|
org_id,
|
|
project_id.unwrap_or("global")
|
|
);
|
|
let results = self.get_prefix(&prefix).await?;
|
|
|
|
let mut zones = Vec::new();
|
|
for (_, value) in results {
|
|
if let Ok(zone) = serde_json::from_str::<ReverseZone>(&value) {
|
|
zones.push(zone);
|
|
}
|
|
}
|
|
Ok(zones)
|
|
}
|
|
}
|
|
|
|
/// Normalize CIDR for use as key (replace / with _, . with -, : with -)
///
/// NOTE(review): '.' and ':' both map to '-', so distinct IPv4/IPv6-style
/// strings could in theory normalize identically; changing the mapping
/// would invalidate keys already stored, so it is kept as-is.
fn normalize_cidr(cidr: &str) -> String {
    cidr.chars()
        .map(|c| match c {
            '/' => '_',
            '.' | ':' => '-',
            other => other,
        })
        .collect()
}
|
|
|
|
#[cfg(test)]
mod tests {
    //! In-memory smoke tests covering full CRUD round-trips for zones and
    //! records (save, load by name/ID, list, delete).
    use super::*;
    use flashdns_types::{RecordData, ZoneName};

    #[tokio::test]
    async fn test_zone_crud() {
        let store = DnsMetadataStore::new_in_memory();

        // NOTE(review): the lookups below use "example.com." — ZoneName::new
        // appears to canonicalize to a trailing dot; confirm against
        // flashdns_types.
        let zone_name = ZoneName::new("example.com").unwrap();
        let zone = Zone::new(zone_name, "test-org", "test-project");

        // Save
        store.save_zone(&zone).await.unwrap();

        // Load by name
        let loaded = store
            .load_zone("test-org", "test-project", "example.com.")
            .await
            .unwrap()
            .unwrap();
        assert_eq!(loaded.id, zone.id);

        // Load by ID
        let loaded_by_id = store.load_zone_by_id(&zone.id).await.unwrap().unwrap();
        assert_eq!(loaded_by_id.name.as_str(), "example.com.");

        // List
        let zones = store.list_zones("test-org", None).await.unwrap();
        assert_eq!(zones.len(), 1);

        // Delete
        store.delete_zone(&zone).await.unwrap();
        let deleted = store
            .load_zone("test-org", "test-project", "example.com.")
            .await
            .unwrap();
        assert!(deleted.is_none());
    }

    #[tokio::test]
    async fn test_record_crud() {
        let store = DnsMetadataStore::new_in_memory();

        let zone_name = ZoneName::new("example.com").unwrap();
        let zone = Zone::new(zone_name, "test-org", "test-project");
        store.save_zone(&zone).await.unwrap();

        // Create A record
        let record_data = RecordData::a_from_str("192.168.1.1").unwrap();
        let record = Record::new(zone.id, "www", record_data);

        // Save
        store.save_record(&record).await.unwrap();

        // Load
        let loaded = store
            .load_record(&zone.id, "www", RecordType::A)
            .await
            .unwrap()
            .unwrap();
        assert_eq!(loaded.id, record.id);

        // List
        let records = store.list_records(&zone.id).await.unwrap();
        assert_eq!(records.len(), 1);

        // Delete
        store.delete_record(&record).await.unwrap();
        let deleted = store
            .load_record(&zone.id, "www", RecordType::A)
            .await
            .unwrap();
        assert!(deleted.is_none());
    }
}
|