photoncloud-monorepo/deployer/crates/plasmacloud-reconciler/src/main.rs
centra 37f5479ab8
Some checks failed
Nix CI / filter (push) Failing after 1s
Nix CI / gate () (push) Has been skipped
Nix CI / gate (shared crates) (push) Has been skipped
Nix CI / build () (push) Has been skipped
Nix CI / ci-status (push) Failing after 1s
Add daemon scheduling for native services
2026-03-30 21:31:32 +09:00

1927 lines
61 KiB
Rust

use std::collections::{HashMap, HashSet};
use std::path::{Path, PathBuf};
use anyhow::{Context, Result};
use clap::{Parser, Subcommand};
use serde::Deserialize;
use tonic::transport::Channel;
use tracing::{info, warn};
use tracing_subscriber::EnvFilter;
use fiberlb_api::backend_service_client::BackendServiceClient;
use fiberlb_api::health_check_service_client::HealthCheckServiceClient;
use fiberlb_api::l7_policy_service_client::L7PolicyServiceClient;
use fiberlb_api::l7_rule_service_client::L7RuleServiceClient;
use fiberlb_api::listener_service_client::ListenerServiceClient;
use fiberlb_api::load_balancer_service_client::LoadBalancerServiceClient;
use fiberlb_api::pool_service_client::PoolServiceClient;
use fiberlb_api::{
    Backend, BackendAdminState, CreateBackendRequest, CreateHealthCheckRequest,
    CreateL7PolicyRequest, CreateL7RuleRequest, CreateListenerRequest,
    CreateLoadBalancerRequest, CreatePoolRequest, DeleteBackendRequest,
    DeleteHealthCheckRequest, DeleteL7PolicyRequest, DeleteL7RuleRequest,
    DeleteListenerRequest, DeleteLoadBalancerRequest, DeletePoolRequest, HealthCheck,
    HealthCheckType, HttpHealthConfig, L7CompareType, L7Policy, L7PolicyAction, L7Rule,
    L7RuleType, Listener, ListenerProtocol, LoadBalancer, Pool, PoolAlgorithm,
    PoolProtocol, SessionPersistence, TlsConfig, TlsVersion, UpdateBackendRequest,
    UpdateHealthCheckRequest, UpdateL7PolicyRequest, UpdateL7RuleRequest,
    UpdateListenerRequest, UpdateLoadBalancerRequest, UpdatePoolRequest,
};
use flashdns_api::RecordServiceClient;
use flashdns_api::ZoneServiceClient;
use flashdns_api::proto::{
    reverse_zone_service_client::ReverseZoneServiceClient,
    record_data, ARecord, AaaaRecord, CaaRecord, CnameRecord, CreateRecordRequest,
    CreateReverseZoneRequest, CreateZoneRequest, DeleteRecordRequest, DeleteReverseZoneRequest,
    DeleteZoneRequest, ListReverseZonesRequest, MxRecord, NsRecord, PtrRecord, RecordData,
    RecordInfo, ReverseZone, SrvRecord, TxtRecord, UpdateRecordRequest, UpdateZoneRequest,
    ZoneInfo,
};
mod hosts;
/// Top-level command-line arguments for the reconciler binary.
#[derive(Parser)]
#[command(author, version, about)]
struct Cli {
    /// Which subsystem to reconcile (LB, DNS, or host deployments).
    #[command(subcommand)]
    command: Command,
}
/// Reconciler subcommands, one per managed subsystem.
#[derive(Subcommand)]
enum Command {
    /// Apply FiberLB declarations
    Lb {
        /// Path to the JSON declaration file (deserialized into `LbConfig`).
        #[arg(long)]
        config: PathBuf,
        /// gRPC endpoint of the FiberLB control plane.
        #[arg(long)]
        endpoint: String,
        /// When set, delete remote objects not present in the declaration.
        #[arg(long, default_value_t = false)]
        prune: bool,
    },
    /// Apply FlashDNS declarations
    Dns {
        /// Path to the JSON declaration file (deserialized into `DnsConfig`).
        #[arg(long)]
        config: PathBuf,
        /// gRPC endpoint of the FlashDNS control plane.
        #[arg(long)]
        endpoint: String,
        /// When set, delete remote objects not present in the declaration.
        #[arg(long, default_value_t = false)]
        prune: bool,
    },
    /// Reconcile host deployments into per-node desired-system state
    Hosts(hosts::HostsCommand),
}
/// Root of a FiberLB declaration file (input to the `lb` subcommand).
#[derive(Debug, Deserialize)]
struct LbConfig {
    /// Load balancers to reconcile; an absent key deserializes to empty.
    #[serde(default)]
    load_balancers: Vec<LoadBalancerSpec>,
}
/// Desired state of a single load balancer and its child objects.
#[derive(Debug, Deserialize)]
struct LoadBalancerSpec {
    /// Name used to match against existing LBs within (org_id, project_id).
    name: String,
    org_id: String,
    /// Optional project scope; sent as empty string when absent.
    #[serde(default)]
    project_id: Option<String>,
    /// When set, the remote description is reconciled to this value.
    #[serde(default)]
    description: Option<String>,
    #[serde(default)]
    pools: Vec<PoolSpec>,
    #[serde(default)]
    listeners: Vec<ListenerSpec>,
}
/// Desired state of one backend pool.
#[derive(Debug, Deserialize)]
struct PoolSpec {
    /// Name used to match against existing pools of the load balancer.
    name: String,
    /// Balancing algorithm name; `None`/unknown falls back to round-robin.
    #[serde(default)]
    algorithm: Option<String>,
    /// Pool protocol name; `None`/unknown falls back to TCP.
    #[serde(default)]
    protocol: Option<String>,
    #[serde(default)]
    session_persistence: Option<SessionPersistenceSpec>,
    #[serde(default)]
    backends: Vec<BackendSpec>,
    #[serde(default)]
    health_checks: Vec<HealthCheckSpec>,
}
/// Session-persistence settings for a pool.
#[derive(Debug, Deserialize)]
struct SessionPersistenceSpec {
    /// Persistence kind (JSON key `type`); parsed elsewhere into the API enum.
    #[serde(rename = "type")]
    persistence_type: String,
    /// Cookie name, relevant for cookie-based persistence.
    #[serde(default)]
    cookie_name: Option<String>,
    #[serde(default)]
    timeout_seconds: Option<u32>,
}
/// Desired state of one backend within a pool.
#[derive(Debug, Deserialize)]
struct BackendSpec {
    /// Name used to match against existing backends of the pool.
    name: String,
    /// Target address; drift is warned about, not updated.
    address: String,
    /// Target port; drift is warned about, not updated.
    port: u32,
    /// Relative weight; defaults to 1 when absent.
    #[serde(default)]
    weight: Option<u32>,
    /// "enabled" / "disabled" / "drain"; only reconciled when set.
    #[serde(default)]
    admin_state: Option<String>,
}
/// Desired state of one health check attached to a pool.
#[derive(Debug, Deserialize)]
struct HealthCheckSpec {
    /// Name used to match against existing checks of the pool.
    name: String,
    /// Check kind (JSON key `type`); type drift is warned about, not updated.
    #[serde(rename = "type")]
    check_type: String,
    /// Probe interval; defaults to 10 seconds when absent.
    #[serde(default)]
    interval_seconds: Option<u32>,
    /// Probe timeout; defaults to 5 seconds when absent.
    #[serde(default)]
    timeout_seconds: Option<u32>,
    /// Consecutive successes before healthy; defaults to 2.
    #[serde(default)]
    healthy_threshold: Option<u32>,
    /// Consecutive failures before unhealthy; defaults to 2.
    #[serde(default)]
    unhealthy_threshold: Option<u32>,
    /// HTTP probe details, relevant for http/https checks.
    #[serde(default)]
    http: Option<HttpHealthSpec>,
    /// Defaults to true when absent.
    #[serde(default)]
    enabled: Option<bool>,
}
/// HTTP-specific probe configuration for a health check.
#[derive(Debug, Deserialize)]
struct HttpHealthSpec {
    /// HTTP method; defaults to "GET" when absent.
    #[serde(default)]
    method: Option<String>,
    /// Request path; defaults to "/" when absent.
    #[serde(default)]
    path: Option<String>,
    /// Status codes considered healthy; defaults to empty.
    #[serde(default)]
    expected_codes: Option<Vec<u32>>,
    /// Host header to send; defaults to empty.
    #[serde(default)]
    host: Option<String>,
}
/// Desired state of one listener on a load balancer.
#[derive(Debug, Deserialize)]
struct ListenerSpec {
    /// Name used to match against existing listeners of the load balancer.
    name: String,
    /// Listener protocol name; `None`/unknown falls back to TCP.
    #[serde(default)]
    protocol: Option<String>,
    /// Listening port; drift is warned about, not updated.
    port: u32,
    /// Name of a PoolSpec declared on the same load balancer.
    default_pool: String,
    #[serde(default)]
    tls: Option<TlsSpec>,
    /// Defaults to 0 when absent.
    #[serde(default)]
    connection_limit: Option<u32>,
    /// Defaults to true when absent.
    #[serde(default)]
    enabled: Option<bool>,
    /// L7 policies; only meaningful on L7-capable protocols (warned otherwise).
    #[serde(default)]
    l7_policies: Vec<L7PolicySpec>,
}
/// TLS termination settings for a listener.
#[derive(Debug, Deserialize)]
struct TlsSpec {
    /// Reference to a certificate managed outside this config.
    certificate_id: String,
    /// "tls_1_3" selects TLS 1.3; anything else (or absent) means TLS 1.2.
    #[serde(default)]
    min_version: Option<String>,
    #[serde(default)]
    cipher_suites: Vec<String>,
}
/// Desired state of one L7 policy on a listener.
#[derive(Debug, Deserialize)]
struct L7PolicySpec {
    /// Name used to match against existing policies of the listener.
    name: String,
    /// Evaluation position; defaults to 1 when absent.
    #[serde(default)]
    position: Option<u32>,
    /// "redirect_to_url" / "reject"; anything else means redirect-to-pool.
    action: String,
    /// Required (warned when missing) for redirect_to_url.
    #[serde(default)]
    redirect_url: Option<String>,
    /// Pool name; required (warned when missing) for redirect_to_pool.
    #[serde(default)]
    redirect_pool: Option<String>,
    #[serde(default)]
    redirect_http_status_code: Option<u32>,
    /// Defaults to true when absent.
    #[serde(default)]
    enabled: Option<bool>,
    #[serde(default)]
    rules: Vec<L7RuleSpec>,
}
/// Desired state of one (nameless) L7 rule within a policy.
///
/// Rules are matched to remote ones by (type, value, key), so those three
/// fields form the rule's identity for reconciliation.
#[derive(Debug, Deserialize)]
struct L7RuleSpec {
    /// Rule kind (JSON key `type`); unknown values fall back to `path`.
    #[serde(rename = "type")]
    rule_type: String,
    /// Comparison operator; `None`/unknown falls back to equal-to.
    #[serde(default)]
    compare_type: Option<String>,
    value: String,
    /// Header/cookie key where applicable; empty strings are treated as absent.
    #[serde(default)]
    key: Option<String>,
    /// Defaults to false when absent.
    #[serde(default)]
    invert: Option<bool>,
}
/// Root of a FlashDNS declaration file (input to the `dns` subcommand).
#[derive(Debug, Deserialize)]
struct DnsConfig {
    #[serde(default)]
    zones: Vec<ZoneSpec>,
    #[serde(default)]
    reverse_zones: Vec<ReverseZoneSpec>,
}
/// Desired state of one forward DNS zone and its records.
#[derive(Debug, Deserialize)]
struct ZoneSpec {
    /// Zone name (e.g. a domain).
    name: String,
    org_id: String,
    #[serde(default)]
    project_id: Option<String>,
    /// Primary nameserver for the zone's SOA.
    #[serde(default)]
    primary_ns: Option<String>,
    /// Administrative contact for the zone's SOA.
    #[serde(default)]
    admin_email: Option<String>,
    // SOA timer fields (seconds); optional, defaults applied elsewhere.
    #[serde(default)]
    refresh: Option<u32>,
    #[serde(default)]
    retry: Option<u32>,
    #[serde(default)]
    expire: Option<u32>,
    #[serde(default)]
    minimum: Option<u32>,
    #[serde(default)]
    records: Vec<RecordSpec>,
}
/// Desired state of one DNS record inside a zone.
#[derive(Debug, Deserialize)]
struct RecordSpec {
    name: String,
    /// Record type label (e.g. "A", "MX"); interpreted elsewhere.
    record_type: String,
    #[serde(default)]
    ttl: Option<u32>,
    /// Free-form payload whose shape depends on `record_type`.
    data: serde_json::Value,
    #[serde(default)]
    enabled: Option<bool>,
}
/// Desired state of one reverse (PTR) zone.
#[derive(Debug, Deserialize)]
struct ReverseZoneSpec {
    org_id: String,
    #[serde(default)]
    project_id: Option<String>,
    /// Address range the reverse zone covers.
    cidr: String,
    /// Template used to generate PTR names for addresses in `cidr`.
    ptr_pattern: String,
    #[serde(default)]
    ttl: Option<u32>,
}
/// Entry point: set up tracing, parse CLI arguments, and dispatch to the
/// selected reconciliation subcommand.
#[tokio::main]
async fn main() -> Result<()> {
    // Base filter comes from RUST_LOG, with a default `info` directive added.
    tracing_subscriber::fmt()
        .with_env_filter(EnvFilter::from_default_env().add_directive("info".parse()?))
        .init();
    let cli = Cli::parse();
    match cli.command {
        Command::Lb {
            config,
            endpoint,
            prune,
        } => {
            let spec: LbConfig = read_json(&config).await?;
            reconcile_lb(spec, endpoint, prune).await?;
        }
        Command::Dns {
            config,
            endpoint,
            prune,
        } => {
            let spec: DnsConfig = read_json(&config).await?;
            reconcile_dns(spec, endpoint, prune).await?;
        }
        Command::Hosts(command) => {
            hosts::run(command).await?;
        }
    }
    Ok(())
}
async fn read_json<T: for<'a> Deserialize<'a>>(path: &PathBuf) -> Result<T> {
let contents = tokio::fs::read_to_string(path)
.await
.with_context(|| format!("failed to read {}", path.display()))?;
let config = serde_json::from_str(&contents)
.with_context(|| format!("failed to parse {}", path.display()))?;
Ok(config)
}
/// Reconcile the full FiberLB declaration against the service at `endpoint`.
///
/// Creates or updates every declared load balancer with its pools, backends,
/// health checks, listeners and L7 policies/rules. With `prune`, remote
/// objects absent from the spec are deleted — including whole load balancers
/// in every (org, project) scope the spec mentions.
async fn reconcile_lb(spec: LbConfig, endpoint: String, prune: bool) -> Result<()> {
    // One dedicated client per gRPC service, all against the same endpoint.
    let mut lb_client = LoadBalancerServiceClient::connect(endpoint.clone()).await?;
    let mut pool_client = PoolServiceClient::connect(endpoint.clone()).await?;
    let mut backend_client = BackendServiceClient::connect(endpoint.clone()).await?;
    let mut listener_client = ListenerServiceClient::connect(endpoint.clone()).await?;
    let mut policy_client = L7PolicyServiceClient::connect(endpoint.clone()).await?;
    let mut rule_client = L7RuleServiceClient::connect(endpoint.clone()).await?;
    let mut health_client = HealthCheckServiceClient::connect(endpoint).await?;
    // Declared LB names per (org, project) scope — consumed by the prune pass.
    // `lb_scope` presumably normalizes the optional project id; defined later
    // in this file.
    let mut desired_scopes: HashMap<(String, String), HashSet<String>> = HashMap::new();
    for lb_spec in &spec.load_balancers {
        let scope = lb_scope(&lb_spec.org_id, lb_spec.project_id.as_deref());
        desired_scopes
            .entry(scope)
            .or_default()
            .insert(lb_spec.name.clone());
    }
    for lb_spec in spec.load_balancers {
        let lb = ensure_load_balancer(&mut lb_client, &lb_spec).await?;
        let lb_id = lb.id.clone();
        // Pools first: listeners and policies resolve pool names via this map.
        let pool_ids = ensure_pools(
            &mut pool_client,
            &mut backend_client,
            &mut health_client,
            &lb_id,
            &lb_spec,
            prune,
        )
        .await?;
        ensure_listeners(
            &mut listener_client,
            &mut policy_client,
            &mut rule_client,
            &lb_id,
            &pool_ids,
            &lb_spec,
            prune,
        )
        .await?;
        if prune {
            // Pools are pruned only after listeners have been reconciled.
            prune_pools(
                &mut pool_client,
                &mut health_client,
                &lb_id,
                &lb_spec.pools,
            )
            .await?;
        }
    }
    if prune {
        prune_load_balancers(
            &mut lb_client,
            &mut listener_client,
            &mut policy_client,
            &mut pool_client,
            &mut health_client,
            &desired_scopes,
        )
        .await?;
    }
    Ok(())
}
/// Find-or-create the load balancer described by `spec`, matching by name
/// within its (org, project) scope.
///
/// On an existing LB only the description is reconciled; when the spec has
/// no description, or it already matches, the remote object is returned
/// unchanged.
async fn ensure_load_balancer(
    client: &mut LoadBalancerServiceClient<Channel>,
    spec: &LoadBalancerSpec,
) -> Result<LoadBalancer> {
    let mut existing = list_load_balancers(
        client,
        &spec.org_id,
        // Absent project id is sent as the empty string.
        spec.project_id.as_deref().unwrap_or(""),
    )
    .await?;
    if let Some(lb) = existing.iter_mut().find(|lb| lb.name == spec.name) {
        if let Some(description) = &spec.description {
            if lb.description != *description {
                info!("Updating load balancer {}", spec.name);
                let response = client
                    .update_load_balancer(UpdateLoadBalancerRequest {
                        id: lb.id.clone(),
                        // Name is resent unchanged alongside the new description.
                        name: spec.name.clone(),
                        description: description.clone(),
                    })
                    .await?
                    .into_inner();
                return response
                    .loadbalancer
                    .context("missing load balancer in update response");
            }
        }
        return Ok(lb.clone());
    }
    info!("Creating load balancer {}", spec.name);
    let response = client
        .create_load_balancer(CreateLoadBalancerRequest {
            name: spec.name.clone(),
            org_id: spec.org_id.clone(),
            project_id: spec.project_id.clone().unwrap_or_default(),
            description: spec.description.clone().unwrap_or_default(),
            // VIP left empty — presumably allocated server-side; TODO confirm.
            vip_address: String::new(),
        })
        .await?
        .into_inner();
    response
        .loadbalancer
        .context("missing load balancer in create response")
}
/// Reconcile the pools of one load balancer against its spec.
///
/// Returns a map of pool-spec name -> remote pool id, later used to resolve
/// listener and L7-policy pool references. Each pool's backends and health
/// checks are reconciled in the same pass.
async fn ensure_pools(
    pool_client: &mut PoolServiceClient<Channel>,
    backend_client: &mut BackendServiceClient<Channel>,
    health_client: &mut HealthCheckServiceClient<Channel>,
    lb_id: &str,
    spec: &LoadBalancerSpec,
    prune: bool,
) -> Result<HashMap<String, String>> {
    let mut pool_map = HashMap::new();
    let pools = list_pools(pool_client, lb_id).await?;
    for pool_spec in &spec.pools {
        let desired_algorithm = parse_pool_algorithm(pool_spec.algorithm.as_deref());
        let desired_protocol = parse_pool_protocol(pool_spec.protocol.as_deref());
        let desired_persistence = session_persistence_from_spec(pool_spec.session_persistence.as_ref());
        let pool = if let Some(existing) = pools.iter().find(|p| p.name == pool_spec.name) {
            // Zero/empty request fields presumably mean "leave unchanged"
            // server-side; only fields that differ are filled in. TODO confirm.
            let mut update = UpdatePoolRequest {
                id: existing.id.clone(),
                name: String::new(),
                algorithm: 0,
                session_persistence: None,
            };
            let mut should_update = false;
            if existing.algorithm != desired_algorithm as i32 {
                update.algorithm = desired_algorithm as i32;
                should_update = true;
            }
            if let Some(desired) = desired_persistence.clone() {
                if !session_persistence_eq(existing.session_persistence.as_ref(), Some(&desired)) {
                    update.session_persistence = Some(desired);
                    should_update = true;
                }
            }
            // Protocol drift is surfaced but cannot be reconciled.
            if existing.protocol != desired_protocol as i32 {
                warn!(
                    "Pool {} protocol mismatch (update not supported)",
                    pool_spec.name
                );
            }
            if should_update {
                info!("Updating pool {}", pool_spec.name);
                let response = pool_client.update_pool(update).await?.into_inner();
                response.pool.context("missing pool in update response")?
            } else {
                existing.clone()
            }
        } else {
            info!("Creating pool {}", pool_spec.name);
            let response = pool_client
                .create_pool(CreatePoolRequest {
                    name: pool_spec.name.clone(),
                    loadbalancer_id: lb_id.to_string(),
                    algorithm: desired_algorithm as i32,
                    protocol: desired_protocol as i32,
                    session_persistence: desired_persistence,
                })
                .await?
                .into_inner();
            response.pool.context("missing pool in create response")?
        };
        pool_map.insert(pool_spec.name.clone(), pool.id.clone());
        ensure_backends(backend_client, &pool.id, pool_spec, prune).await?;
        ensure_health_checks(health_client, &pool.id, pool_spec, prune).await?;
    }
    Ok(pool_map)
}
/// Reconcile the backends of one pool against its spec.
///
/// Backends are matched by name: weight and admin state are updated in
/// place, while address/port drift only produces a warning (updates are
/// not supported for those fields). With `prune`, backends not named in
/// the spec are deleted.
async fn ensure_backends(
    backend_client: &mut BackendServiceClient<Channel>,
    pool_id: &str,
    pool_spec: &PoolSpec,
    prune: bool,
) -> Result<()> {
    let backends = list_backends(backend_client, pool_id).await?;
    for backend_spec in &pool_spec.backends {
        if let Some(existing) = backends.iter().find(|b| b.name == backend_spec.name) {
            let desired_weight = backend_spec.weight.unwrap_or(1);
            let desired_admin_state = backend_spec
                .admin_state
                .as_deref()
                .map(parse_backend_admin_state);
            // Zeroed request fields presumably mean "leave unchanged"
            // server-side; only differing fields are filled in. TODO confirm.
            let mut update = UpdateBackendRequest {
                id: existing.id.clone(),
                name: String::new(),
                weight: 0,
                admin_state: 0,
            };
            let mut should_update = false;
            if existing.weight != desired_weight {
                update.weight = desired_weight;
                should_update = true;
            }
            // Admin state is only reconciled when the spec sets it explicitly.
            if let Some(admin_state) = desired_admin_state {
                if existing.admin_state != admin_state as i32 {
                    update.admin_state = admin_state as i32;
                    should_update = true;
                }
            }
            if existing.address != backend_spec.address || existing.port != backend_spec.port {
                warn!(
                    "Backend {} differs from desired spec (update not supported)",
                    backend_spec.name
                );
            }
            if should_update {
                info!("Updating backend {}", backend_spec.name);
                backend_client.update_backend(update).await?;
            }
            continue;
        }
        info!("Creating backend {}", backend_spec.name);
        // NOTE(review): the create request carries no admin_state, so a
        // non-default spec value is only applied on a later reconcile run
        // (when the update branch above matches) — confirm intended.
        backend_client
            .create_backend(CreateBackendRequest {
                name: backend_spec.name.clone(),
                pool_id: pool_id.to_string(),
                address: backend_spec.address.clone(),
                port: backend_spec.port,
                weight: backend_spec.weight.unwrap_or(1),
            })
            .await?;
    }
    if prune {
        let desired: HashSet<String> = pool_spec
            .backends
            .iter()
            .map(|backend| backend.name.clone())
            .collect();
        for backend in backends {
            if !desired.contains(&backend.name) {
                info!("Deleting backend {}", backend.name);
                backend_client
                    .delete_backend(DeleteBackendRequest { id: backend.id })
                    .await?;
            }
        }
    }
    Ok(())
}
async fn ensure_health_checks(
health_client: &mut HealthCheckServiceClient<Channel>,
pool_id: &str,
pool_spec: &PoolSpec,
prune: bool,
) -> Result<()> {
let checks = list_health_checks(health_client, pool_id).await?;
for check_spec in &pool_spec.health_checks {
if let Some(existing) = checks.iter().find(|hc| hc.name == check_spec.name) {
let desired_type = parse_health_check_type(&check_spec.check_type) as i32;
let desired_interval = check_spec.interval_seconds.unwrap_or(10);
let desired_timeout = check_spec.timeout_seconds.unwrap_or(5);
let desired_healthy = check_spec.healthy_threshold.unwrap_or(2);
let desired_unhealthy = check_spec.unhealthy_threshold.unwrap_or(2);
let desired_enabled = check_spec.enabled.unwrap_or(true);
let desired_http = check_spec.http.as_ref().map(|http| HttpHealthConfig {
method: http.method.clone().unwrap_or_else(|| "GET".to_string()),
path: http.path.clone().unwrap_or_else(|| "/".to_string()),
expected_codes: http.expected_codes.clone().unwrap_or_default(),
host: http.host.clone().unwrap_or_default(),
});
if existing.r#type != desired_type {
warn!(
"Health check {} type mismatch (update not supported)",
check_spec.name
);
continue;
}
let mut update = UpdateHealthCheckRequest {
id: existing.id.clone(),
name: String::new(),
interval_seconds: 0,
timeout_seconds: 0,
healthy_threshold: 0,
unhealthy_threshold: 0,
http_config: None,
enabled: desired_enabled,
};
let mut should_update = false;
if existing.interval_seconds != desired_interval {
update.interval_seconds = desired_interval;
should_update = true;
}
if existing.timeout_seconds != desired_timeout {
update.timeout_seconds = desired_timeout;
should_update = true;
}
if existing.healthy_threshold != desired_healthy {
update.healthy_threshold = desired_healthy;
should_update = true;
}
if existing.unhealthy_threshold != desired_unhealthy {
update.unhealthy_threshold = desired_unhealthy;
should_update = true;
}
if existing.enabled != desired_enabled {
should_update = true;
}
if let Some(desired_http) = desired_http {
if !http_config_eq(existing.http_config.as_ref(), Some(&desired_http)) {
update.http_config = Some(desired_http);
should_update = true;
}
} else if existing.http_config.is_some() {
warn!(
"Health check {} has HTTP config but spec does not (clear not supported)",
check_spec.name
);
}
if should_update {
info!("Updating health check {}", check_spec.name);
health_client.update_health_check(update).await?;
}
continue;
}
info!("Creating health check {}", check_spec.name);
let http_config = check_spec.http.as_ref().map(|http| HttpHealthConfig {
method: http.method.clone().unwrap_or_else(|| "GET".to_string()),
path: http.path.clone().unwrap_or_else(|| "/".to_string()),
expected_codes: http.expected_codes.clone().unwrap_or_default(),
host: http.host.clone().unwrap_or_default(),
});
health_client
.create_health_check(CreateHealthCheckRequest {
name: check_spec.name.clone(),
pool_id: pool_id.to_string(),
r#type: parse_health_check_type(&check_spec.check_type) as i32,
interval_seconds: check_spec.interval_seconds.unwrap_or(10),
timeout_seconds: check_spec.timeout_seconds.unwrap_or(5),
healthy_threshold: check_spec.healthy_threshold.unwrap_or(2),
unhealthy_threshold: check_spec.unhealthy_threshold.unwrap_or(2),
http_config,
})
.await?;
}
if prune {
let desired: HashSet<String> = pool_spec
.health_checks
.iter()
.map(|check| check.name.clone())
.collect();
for check in checks {
if !desired.contains(&check.name) {
info!("Deleting health check {}", check.name);
health_client
.delete_health_check(DeleteHealthCheckRequest { id: check.id })
.await?;
}
}
}
Ok(())
}
/// Reconcile the listeners of one load balancer against its spec.
///
/// Listeners are matched by name. Protocol/port drift is only warned about
/// (updates are not supported); default pool, connection limit, enabled
/// flag and TLS config are updated in place. L7 policies are reconciled
/// for every listener, and with `prune` undeclared listeners are removed.
async fn ensure_listeners(
    listener_client: &mut ListenerServiceClient<Channel>,
    policy_client: &mut L7PolicyServiceClient<Channel>,
    rule_client: &mut L7RuleServiceClient<Channel>,
    lb_id: &str,
    pool_ids: &HashMap<String, String>,
    spec: &LoadBalancerSpec,
    prune: bool,
) -> Result<()> {
    let listeners = list_listeners(listener_client, lb_id).await?;
    for listener_spec in &spec.listeners {
        // The default pool must be declared in the same load-balancer spec.
        let pool_id = pool_ids
            .get(&listener_spec.default_pool)
            .with_context(|| {
                format!(
                    "listener {} references unknown pool {}",
                    listener_spec.name, listener_spec.default_pool
                )
            })?
            .to_string();
        let desired_protocol = parse_listener_protocol(listener_spec.protocol.as_deref());
        let desired_tls = listener_spec
            .tls
            .as_ref()
            .map(|tls| TlsConfig {
                certificate_id: tls.certificate_id.clone(),
                min_version: parse_tls_version(tls.min_version.as_deref()) as i32,
                cipher_suites: tls.cipher_suites.clone(),
            });
        let desired_enabled = listener_spec.enabled.unwrap_or(true);
        let desired_connection_limit = listener_spec.connection_limit.unwrap_or(0);
        if let Some(existing) = listeners.iter().find(|l| l.name == listener_spec.name) {
            // Protocol/port cannot be changed; skip the rest of this
            // listener's reconciliation (including L7 policies) on drift.
            if existing.protocol != desired_protocol as i32 || existing.port != listener_spec.port {
                warn!(
                    "Listener {} protocol/port mismatch (update not supported)",
                    listener_spec.name
                );
                continue;
            }
            // Empty/zero request fields presumably mean "leave unchanged"
            // server-side; only differing fields are filled in. TODO confirm.
            let mut update = UpdateListenerRequest {
                id: existing.id.clone(),
                name: String::new(),
                default_pool_id: String::new(),
                tls_config: None,
                connection_limit: 0,
                enabled: desired_enabled,
            };
            let mut should_update = false;
            if existing.default_pool_id != pool_id {
                update.default_pool_id = pool_id.clone();
                should_update = true;
            }
            if existing.connection_limit != desired_connection_limit {
                update.connection_limit = desired_connection_limit;
                should_update = true;
            }
            // `enabled` is always carried on the request; a difference alone
            // is enough to send the update.
            if existing.enabled != desired_enabled {
                should_update = true;
            }
            // NOTE(review): when the spec drops TLS entirely, an existing
            // TLS config is left in place (never cleared) — confirm intended.
            if desired_tls.is_some() && existing.tls_config != desired_tls {
                update.tls_config = desired_tls;
                should_update = true;
            }
            if should_update {
                info!("Updating listener {}", listener_spec.name);
                listener_client.update_listener(update).await?;
            }
            // Policies on a non-L7 protocol are surfaced but still applied.
            if !listener_spec.l7_policies.is_empty()
                && !is_l7_listener(desired_protocol)
            {
                warn!(
                    "Listener {} is not L7 capable but has policies",
                    listener_spec.name
                );
            }
            ensure_l7_policies(
                policy_client,
                rule_client,
                &existing.id,
                pool_ids,
                listener_spec,
                prune,
            )
            .await?;
            continue;
        }
        info!("Creating listener {}", listener_spec.name);
        let created = listener_client
            .create_listener(CreateListenerRequest {
                name: listener_spec.name.clone(),
                loadbalancer_id: lb_id.to_string(),
                protocol: desired_protocol as i32,
                port: listener_spec.port,
                default_pool_id: pool_id,
                tls_config: desired_tls,
                connection_limit: desired_connection_limit,
            })
            .await?
            .into_inner()
            .listener
            .context("missing listener in create response")?;
        if !listener_spec.l7_policies.is_empty() && !is_l7_listener(desired_protocol) {
            warn!(
                "Listener {} is not L7 capable but has policies",
                listener_spec.name
            );
        }
        ensure_l7_policies(
            policy_client,
            rule_client,
            &created.id,
            pool_ids,
            listener_spec,
            prune,
        )
        .await?;
    }
    if prune {
        prune_listeners(listener_client, policy_client, lb_id, &spec.listeners).await?;
    }
    Ok(())
}
/// Reconcile the L7 policies of one listener against its spec.
///
/// Policies are matched by name. A freshly created policy is immediately
/// re-checked against the desired state and updated when needed — the
/// create request cannot express every field (e.g. `enabled` only exists
/// on the update request). Each policy's rules are reconciled afterwards;
/// with `prune`, undeclared policies are deleted.
async fn ensure_l7_policies(
    policy_client: &mut L7PolicyServiceClient<Channel>,
    rule_client: &mut L7RuleServiceClient<Channel>,
    listener_id: &str,
    pool_ids: &HashMap<String, String>,
    listener_spec: &ListenerSpec,
    prune: bool,
) -> Result<()> {
    let policies = list_l7_policies(policy_client, listener_id).await?;
    let mut desired_names = HashSet::new();
    for policy_spec in &listener_spec.l7_policies {
        desired_names.insert(policy_spec.name.clone());
        let desired_action = parse_l7_policy_action(&policy_spec.action);
        let desired_position = policy_spec.position.unwrap_or(1);
        let desired_enabled = policy_spec.enabled.unwrap_or(true);
        // Empty redirect URLs are treated the same as absent ones.
        let desired_redirect_url = policy_spec
            .redirect_url
            .as_ref()
            .filter(|value| !value.is_empty())
            .cloned();
        // Resolve the redirect pool name against pools declared on this LB.
        let desired_redirect_pool_id = match &policy_spec.redirect_pool {
            Some(pool_name) => Some(
                pool_ids
                    .get(pool_name)
                    .with_context(|| {
                        format!(
                            "l7 policy {} references unknown pool {}",
                            policy_spec.name, pool_name
                        )
                    })?
                    .to_string(),
            ),
            None => None,
        };
        let desired_status = policy_spec.redirect_http_status_code;
        // Misconfigured actions are warned about but still applied as-is.
        if matches!(desired_action, L7PolicyAction::RedirectToPool)
            && desired_redirect_pool_id.is_none()
        {
            warn!(
                "L7 policy {} action redirect_to_pool is missing redirect_pool",
                policy_spec.name
            );
        }
        if matches!(desired_action, L7PolicyAction::RedirectToUrl)
            && desired_redirect_url.is_none()
        {
            warn!(
                "L7 policy {} action redirect_to_url is missing redirect_url",
                policy_spec.name
            );
        }
        let (policy_id, needs_update) = if let Some(existing) =
            policies.iter().find(|p| p.name == policy_spec.name)
        {
            let matches = l7_policy_matches(
                existing,
                desired_action,
                desired_position,
                desired_enabled,
                desired_redirect_url.as_ref(),
                desired_redirect_pool_id.as_ref(),
                desired_status,
            );
            (existing.id.clone(), !matches)
        } else {
            let response = policy_client
                .create_l7_policy(CreateL7PolicyRequest {
                    listener_id: listener_id.to_string(),
                    name: policy_spec.name.clone(),
                    position: desired_position,
                    action: desired_action as i32,
                    redirect_url: desired_redirect_url.clone().unwrap_or_default(),
                    redirect_pool_id: desired_redirect_pool_id.clone().unwrap_or_default(),
                    redirect_http_status_code: desired_status.unwrap_or(0),
                })
                .await?
                .into_inner();
            let policy = response
                .l7_policy
                .context("missing l7 policy in create response")?;
            // Verify the created object matches the full desired state; if
            // not, the update below fixes the remainder.
            let matches = l7_policy_matches(
                &policy,
                desired_action,
                desired_position,
                desired_enabled,
                desired_redirect_url.as_ref(),
                desired_redirect_pool_id.as_ref(),
                desired_status,
            );
            (policy.id, !matches)
        };
        if needs_update {
            info!("Updating L7 policy {}", policy_spec.name);
            policy_client
                .update_l7_policy(UpdateL7PolicyRequest {
                    id: policy_id.clone(),
                    name: policy_spec.name.clone(),
                    position: desired_position,
                    action: desired_action as i32,
                    redirect_url: desired_redirect_url.clone().unwrap_or_default(),
                    redirect_pool_id: desired_redirect_pool_id.clone().unwrap_or_default(),
                    redirect_http_status_code: desired_status.unwrap_or(0),
                    enabled: desired_enabled,
                })
                .await?;
        }
        ensure_l7_rules(
            rule_client,
            &policy_id,
            policy_spec,
            prune,
        )
        .await?;
    }
    if prune {
        for policy in policies {
            if !desired_names.contains(&policy.name) {
                info!("Deleting L7 policy {}", policy.name);
                policy_client
                    .delete_l7_policy(DeleteL7PolicyRequest { id: policy.id })
                    .await?;
            }
        }
    }
    Ok(())
}
/// Reconcile the L7 rules of one policy against its spec.
///
/// Rules carry no names, so each spec rule claims the first not-yet-used
/// remote rule with the same (type, value, key) identity; compare-type and
/// invert are then updated in place. Unmatched spec rules are created.
/// With `prune`, remote rules claimed by no spec rule are deleted.
async fn ensure_l7_rules(
    rule_client: &mut L7RuleServiceClient<Channel>,
    policy_id: &str,
    policy_spec: &L7PolicySpec,
    prune: bool,
) -> Result<()> {
    let rules = list_l7_rules(rule_client, policy_id).await?;
    // Ids of remote rules already claimed by a spec rule — prevents two
    // identical spec rules from matching the same remote rule.
    let mut used_rules: HashSet<String> = HashSet::new();
    for rule_spec in &policy_spec.rules {
        let desired_rule_type = parse_l7_rule_type(&rule_spec.rule_type);
        let desired_compare_type =
            parse_l7_compare_type(rule_spec.compare_type.as_deref());
        let desired_value = rule_spec.value.clone();
        // Empty keys are treated the same as absent ones.
        let desired_key = rule_spec
            .key
            .as_ref()
            .filter(|value| !value.is_empty())
            .cloned();
        let desired_invert = rule_spec.invert.unwrap_or(false);
        if let Some(existing) = rules.iter().find(|rule| {
            !used_rules.contains(&rule.id)
                && l7_rule_key_matches(rule, desired_rule_type, &desired_value, desired_key.as_deref())
        }) {
            // Identity matched; only the mutable attributes may differ.
            let needs_update = existing.compare_type != desired_compare_type as i32
                || existing.invert != desired_invert;
            if needs_update {
                info!("Updating L7 rule {}", existing.id);
                rule_client
                    .update_l7_rule(UpdateL7RuleRequest {
                        id: existing.id.clone(),
                        rule_type: desired_rule_type as i32,
                        compare_type: desired_compare_type as i32,
                        value: desired_value.clone(),
                        key: desired_key.clone().unwrap_or_default(),
                        invert: desired_invert,
                    })
                    .await?;
            }
            used_rules.insert(existing.id.clone());
            continue;
        }
        info!("Creating L7 rule for policy {}", policy_spec.name);
        let response = rule_client
            .create_l7_rule(CreateL7RuleRequest {
                policy_id: policy_id.to_string(),
                rule_type: desired_rule_type as i32,
                compare_type: desired_compare_type as i32,
                value: desired_value.clone(),
                key: desired_key.clone().unwrap_or_default(),
                invert: desired_invert,
            })
            .await?
            .into_inner();
        // Mark the new rule as used so the prune pass below keeps it.
        if let Some(rule) = response.l7_rule {
            used_rules.insert(rule.id);
        }
    }
    if prune {
        for rule in rules {
            if !used_rules.contains(&rule.id) {
                info!("Deleting L7 rule {}", rule.id);
                rule_client
                    .delete_l7_rule(DeleteL7RuleRequest { id: rule.id })
                    .await?;
            }
        }
    }
    Ok(())
}
/// Delete listeners on `lb_id` whose names do not appear in `specs`.
///
/// Each doomed listener has its L7 policies removed first, then the
/// listener itself is deleted.
async fn prune_listeners(
    listener_client: &mut ListenerServiceClient<Channel>,
    policy_client: &mut L7PolicyServiceClient<Channel>,
    lb_id: &str,
    specs: &[ListenerSpec],
) -> Result<()> {
    let keep: HashSet<&str> = specs.iter().map(|spec| spec.name.as_str()).collect();
    for listener in list_listeners(listener_client, lb_id).await? {
        if keep.contains(listener.name.as_str()) {
            continue;
        }
        info!("Deleting listener {}", listener.name);
        delete_listener_policies(policy_client, &listener.id).await?;
        listener_client
            .delete_listener(DeleteListenerRequest { id: listener.id })
            .await?;
    }
    Ok(())
}
/// Delete pools on `lb_id` whose names do not appear in `specs`.
///
/// Each doomed pool has its health checks removed first, then the pool
/// itself is deleted.
async fn prune_pools(
    pool_client: &mut PoolServiceClient<Channel>,
    health_client: &mut HealthCheckServiceClient<Channel>,
    lb_id: &str,
    specs: &[PoolSpec],
) -> Result<()> {
    let keep: HashSet<&str> = specs.iter().map(|spec| spec.name.as_str()).collect();
    for pool in list_pools(pool_client, lb_id).await? {
        if keep.contains(pool.name.as_str()) {
            continue;
        }
        info!("Deleting pool {}", pool.name);
        for check in list_health_checks(health_client, &pool.id).await? {
            health_client
                .delete_health_check(DeleteHealthCheckRequest { id: check.id })
                .await?;
        }
        pool_client
            .delete_pool(DeletePoolRequest { id: pool.id })
            .await?;
    }
    Ok(())
}
/// Delete load balancers that exist in a declared (org, project) scope but
/// are not named in the spec.
///
/// Before deleting each LB, its listeners' L7 policies and its pools'
/// health checks are removed explicitly. Listeners, pools and backends are
/// not deleted individually here — presumably the LB delete cascades over
/// them server-side; TODO confirm.
async fn prune_load_balancers(
    lb_client: &mut LoadBalancerServiceClient<Channel>,
    listener_client: &mut ListenerServiceClient<Channel>,
    policy_client: &mut L7PolicyServiceClient<Channel>,
    pool_client: &mut PoolServiceClient<Channel>,
    health_client: &mut HealthCheckServiceClient<Channel>,
    desired_scopes: &HashMap<(String, String), HashSet<String>>,
) -> Result<()> {
    // Only scopes mentioned in the spec are inspected; LBs in other scopes
    // are never touched.
    for ((org_id, project_id), desired_names) in desired_scopes {
        let lbs = list_load_balancers(lb_client, org_id, project_id).await?;
        for lb in lbs {
            if !desired_names.contains(&lb.name) {
                info!("Deleting load balancer {}", lb.name);
                let listeners = list_listeners(listener_client, &lb.id).await?;
                for listener in listeners {
                    delete_listener_policies(policy_client, &listener.id).await?;
                }
                let pools = list_pools(pool_client, &lb.id).await?;
                for pool in pools {
                    let checks = list_health_checks(health_client, &pool.id).await?;
                    for check in checks {
                        health_client
                            .delete_health_check(DeleteHealthCheckRequest { id: check.id })
                            .await?;
                    }
                }
                lb_client
                    .delete_load_balancer(DeleteLoadBalancerRequest { id: lb.id })
                    .await?;
            }
        }
    }
    Ok(())
}
/// Remove every L7 policy currently attached to `listener_id`.
async fn delete_listener_policies(
    policy_client: &mut L7PolicyServiceClient<Channel>,
    listener_id: &str,
) -> Result<()> {
    for policy in list_l7_policies(policy_client, listener_id).await? {
        info!("Deleting L7 policy {}", policy.name);
        let request = DeleteL7PolicyRequest { id: policy.id };
        policy_client.delete_l7_policy(request).await?;
    }
    Ok(())
}
/// Fetch load balancers for an (org, project) scope — single page of up to
/// 1000 entries; paging tokens are not followed.
async fn list_load_balancers(
    client: &mut LoadBalancerServiceClient<Channel>,
    org_id: &str,
    project_id: &str,
) -> Result<Vec<LoadBalancer>> {
    let request = fiberlb_api::ListLoadBalancersRequest {
        org_id: org_id.to_string(),
        project_id: project_id.to_string(),
        page_size: 1000,
        page_token: String::new(),
    };
    let response = client.list_load_balancers(request).await?;
    Ok(response.into_inner().loadbalancers)
}
/// Fetch pools of one load balancer — single page of up to 1000 entries;
/// paging tokens are not followed.
async fn list_pools(
    client: &mut PoolServiceClient<Channel>,
    lb_id: &str,
) -> Result<Vec<Pool>> {
    let request = fiberlb_api::ListPoolsRequest {
        loadbalancer_id: lb_id.to_string(),
        page_size: 1000,
        page_token: String::new(),
    };
    let response = client.list_pools(request).await?;
    Ok(response.into_inner().pools)
}
/// Fetch backends of one pool — single page of up to 1000 entries; paging
/// tokens are not followed.
async fn list_backends(
    client: &mut BackendServiceClient<Channel>,
    pool_id: &str,
) -> Result<Vec<Backend>> {
    let request = fiberlb_api::ListBackendsRequest {
        pool_id: pool_id.to_string(),
        page_size: 1000,
        page_token: String::new(),
    };
    let response = client.list_backends(request).await?;
    Ok(response.into_inner().backends)
}
/// Fetch listeners of one load balancer — single page of up to 1000
/// entries; paging tokens are not followed.
async fn list_listeners(
    client: &mut ListenerServiceClient<Channel>,
    lb_id: &str,
) -> Result<Vec<Listener>> {
    let request = fiberlb_api::ListListenersRequest {
        loadbalancer_id: lb_id.to_string(),
        page_size: 1000,
        page_token: String::new(),
    };
    let response = client.list_listeners(request).await?;
    Ok(response.into_inner().listeners)
}
/// Fetch health checks of one pool — single page of up to 1000 entries;
/// paging tokens are not followed.
async fn list_health_checks(
    client: &mut HealthCheckServiceClient<Channel>,
    pool_id: &str,
) -> Result<Vec<HealthCheck>> {
    let request = fiberlb_api::ListHealthChecksRequest {
        pool_id: pool_id.to_string(),
        page_size: 1000,
        page_token: String::new(),
    };
    let response = client.list_health_checks(request).await?;
    Ok(response.into_inner().health_checks)
}
/// Fetch L7 policies of one listener — single page of up to 1000 entries;
/// paging tokens are not followed.
async fn list_l7_policies(
    client: &mut L7PolicyServiceClient<Channel>,
    listener_id: &str,
) -> Result<Vec<L7Policy>> {
    let request = fiberlb_api::ListL7PoliciesRequest {
        listener_id: listener_id.to_string(),
        page_size: 1000,
        page_token: String::new(),
    };
    let response = client.list_l7_policies(request).await?;
    Ok(response.into_inner().l7_policies)
}
/// Fetch L7 rules of one policy — single page of up to 1000 entries;
/// paging tokens are not followed.
async fn list_l7_rules(
    client: &mut L7RuleServiceClient<Channel>,
    policy_id: &str,
) -> Result<Vec<L7Rule>> {
    let request = fiberlb_api::ListL7RulesRequest {
        policy_id: policy_id.to_string(),
        page_size: 1000,
        page_token: String::new(),
    };
    let response = client.list_l7_rules(request).await?;
    Ok(response.into_inner().l7_rules)
}
/// Map a spec algorithm string onto the API enum. `None` defaults to
/// "round_robin"; unrecognized values also fall back to round-robin.
fn parse_pool_algorithm(value: Option<&str>) -> PoolAlgorithm {
    let normalized = normalize_name(value.unwrap_or("round_robin"));
    match normalized.as_str() {
        "random" => PoolAlgorithm::Random,
        "maglev" => PoolAlgorithm::Maglev,
        "ip_hash" => PoolAlgorithm::IpHash,
        "least_connections" => PoolAlgorithm::LeastConnections,
        "weighted_round_robin" => PoolAlgorithm::WeightedRoundRobin,
        _ => PoolAlgorithm::RoundRobin,
    }
}
/// Map a spec protocol string onto the API enum. `None` and unrecognized
/// values both resolve to TCP.
fn parse_pool_protocol(value: Option<&str>) -> PoolProtocol {
    let normalized = normalize_name(value.unwrap_or("tcp"));
    match normalized.as_str() {
        "http" => PoolProtocol::Http,
        "https" => PoolProtocol::Https,
        "udp" => PoolProtocol::Udp,
        _ => PoolProtocol::Tcp,
    }
}
/// Map a free-form protocol label onto `ListenerProtocol`; defaults to TCP.
fn parse_listener_protocol(value: Option<&str>) -> ListenerProtocol {
    let label = normalize_name(value.unwrap_or("tcp"));
    match label.as_str() {
        "udp" => ListenerProtocol::Udp,
        "http" => ListenerProtocol::Http,
        "https" => ListenerProtocol::Https,
        "terminated_https" => ListenerProtocol::TerminatedHttps,
        _ => ListenerProtocol::Tcp,
    }
}
/// Map a free-form health-check label onto `HealthCheckType`; defaults to TCP.
fn parse_health_check_type(value: &str) -> HealthCheckType {
    let label = normalize_name(value);
    match label.as_str() {
        "http" => HealthCheckType::Http,
        "https" => HealthCheckType::Https,
        "udp" => HealthCheckType::Udp,
        "ping" => HealthCheckType::Ping,
        _ => HealthCheckType::Tcp,
    }
}
/// Parse a minimum TLS version label; anything other than "tls_1_3" means TLS 1.2.
fn parse_tls_version(value: Option<&str>) -> TlsVersion {
    let label = normalize_name(value.unwrap_or("tls_1_2"));
    if label == "tls_1_3" {
        TlsVersion::Tls13
    } else {
        TlsVersion::Tls12
    }
}
/// Map a free-form admin-state label onto `BackendAdminState`; defaults to enabled.
fn parse_backend_admin_state(value: &str) -> BackendAdminState {
    let label = normalize_name(value);
    match label.as_str() {
        "disabled" => BackendAdminState::Disabled,
        "drain" => BackendAdminState::Drain,
        _ => BackendAdminState::Enabled,
    }
}
/// Map a free-form action label onto `L7PolicyAction`; defaults to redirect-to-pool.
fn parse_l7_policy_action(value: &str) -> L7PolicyAction {
    let label = normalize_name(value);
    match label.as_str() {
        "redirect_to_url" => L7PolicyAction::RedirectToUrl,
        "reject" => L7PolicyAction::Reject,
        _ => L7PolicyAction::RedirectToPool,
    }
}
/// Map a free-form rule-type label onto `L7RuleType`; unknown labels become Path.
fn parse_l7_rule_type(value: &str) -> L7RuleType {
    let label = normalize_name(value);
    match label.as_str() {
        "host_name" => L7RuleType::HostName,
        "path" => L7RuleType::Path,
        "file_type" => L7RuleType::FileType,
        "header" => L7RuleType::Header,
        "cookie" => L7RuleType::Cookie,
        "ssl_conn_has_sni" => L7RuleType::SslConnHasSni,
        _ => L7RuleType::Path,
    }
}
/// Map a free-form compare-type label onto `L7CompareType`; defaults to equal-to.
fn parse_l7_compare_type(value: Option<&str>) -> L7CompareType {
    let label = normalize_name(value.unwrap_or("equal_to"));
    match label.as_str() {
        "regex" => L7CompareType::Regex,
        "starts_with" => L7CompareType::StartsWith,
        "ends_with" => L7CompareType::EndsWith,
        "contains" => L7CompareType::Contains,
        _ => L7CompareType::EqualTo,
    }
}
/// True for HTTP-family listener protocols, which can carry L7 policies.
fn is_l7_listener(protocol: ListenerProtocol) -> bool {
    protocol == ListenerProtocol::Http
        || protocol == ListenerProtocol::Https
        || protocol == ListenerProtocol::TerminatedHttps
}
/// Convert an optional persistence spec into the wire-format `SessionPersistence`.
/// Unknown persistence types fall back to source-IP affinity; missing cookie
/// name / timeout default to empty string / 0.
fn session_persistence_from_spec(spec: Option<&SessionPersistenceSpec>) -> Option<SessionPersistence> {
    spec.map(|s| {
        let kind = match normalize_name(&s.persistence_type).as_str() {
            "cookie" => fiberlb_api::PersistenceType::Cookie,
            "app_cookie" => fiberlb_api::PersistenceType::AppCookie,
            _ => fiberlb_api::PersistenceType::SourceIp,
        };
        SessionPersistence {
            r#type: kind as i32,
            cookie_name: s.cookie_name.clone().unwrap_or_default(),
            timeout_seconds: s.timeout_seconds.unwrap_or(0),
        }
    })
}
/// Field-wise equality of two optional session-persistence configs.
/// A present/absent mismatch is never equal.
fn session_persistence_eq(
    existing: Option<&SessionPersistence>,
    desired: Option<&SessionPersistence>,
) -> bool {
    match (existing, desired) {
        (Some(a), Some(b)) => {
            a.r#type == b.r#type
                && a.cookie_name == b.cookie_name
                && a.timeout_seconds == b.timeout_seconds
        }
        (None, None) => true,
        _ => false,
    }
}
/// Field-wise equality of two optional HTTP health-check configs.
/// A present/absent mismatch is never equal.
fn http_config_eq(
    existing: Option<&HttpHealthConfig>,
    desired: Option<&HttpHealthConfig>,
) -> bool {
    match (existing, desired) {
        (Some(a), Some(b)) => {
            a.method == b.method
                && a.path == b.path
                && a.expected_codes == b.expected_codes
                && a.host == b.host
        }
        (None, None) => true,
        _ => false,
    }
}
/// Canonicalize a user-supplied label: trim, lowercase, and turn '-' into '_'
/// so hyphenated and underscored spellings compare equal.
fn normalize_name(value: &str) -> String {
    let lowered = value.trim().to_lowercase();
    lowered.replace('-', "_")
}
/// Treat empty/whitespace-only strings as "unset"; otherwise return the trimmed value.
fn normalize_optional_string(value: &str) -> Option<String> {
    let trimmed = value.trim();
    (!trimmed.is_empty()).then(|| trimmed.to_string())
}
/// Treat zero as "unset" (the protobuf default); any other value is significant.
fn normalize_optional_u32(value: u32) -> Option<u32> {
    (value != 0).then_some(value)
}
/// True when an existing L7 policy already reflects the desired settings.
///
/// Empty strings and zero status codes on the existing policy count as "unset"
/// (via the `normalize_optional_*` helpers) so they compare equal to `None`.
fn l7_policy_matches(
    existing: &L7Policy,
    desired_action: L7PolicyAction,
    desired_position: u32,
    desired_enabled: bool,
    desired_redirect_url: Option<&String>,
    desired_redirect_pool_id: Option<&String>,
    desired_status: Option<u32>,
) -> bool {
    if existing.action != desired_action as i32
        || existing.position != desired_position
        || existing.enabled != desired_enabled
    {
        return false;
    }
    let url_ok = normalize_optional_string(&existing.redirect_url).as_deref()
        == desired_redirect_url.map(String::as_str);
    let pool_ok = normalize_optional_string(&existing.redirect_pool_id).as_deref()
        == desired_redirect_pool_id.map(String::as_str);
    let status_ok = normalize_optional_u32(existing.redirect_http_status_code) == desired_status;
    url_ok && pool_ok && status_ok
}
/// True when `rule` has the same identity (type, value, optional key) as the
/// desired rule. An empty stored key counts as "unset".
fn l7_rule_key_matches(
    rule: &L7Rule,
    desired_rule_type: L7RuleType,
    desired_value: &str,
    desired_key: Option<&str>,
) -> bool {
    let type_ok = rule.rule_type == desired_rule_type as i32;
    let value_ok = rule.value == desired_value;
    let key_ok = normalize_optional_string(&rule.key).as_deref() == desired_key;
    type_ok && value_ok && key_ok
}
/// Build the (org, project) scope key for load-balancer resources.
/// A missing project becomes the empty string.
fn lb_scope(org_id: &str, project_id: Option<&str>) -> (String, String) {
    let project = project_id.map(str::to_string).unwrap_or_default();
    (org_id.to_string(), project)
}
/// Build the (org, project) scope key for DNS resources.
/// A missing project becomes the empty string.
fn dns_scope(org_id: &str, project_id: Option<&str>) -> (String, String) {
    let project = project_id.map(str::to_string).unwrap_or_default();
    (org_id.to_string(), project)
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Hyphenated and underscored spellings must both be accepted.
    #[test]
    fn test_parse_l7_policy_action() {
        assert_eq!(
            parse_l7_policy_action("redirect_to_url"),
            L7PolicyAction::RedirectToUrl
        );
        assert_eq!(parse_l7_policy_action("reject"), L7PolicyAction::Reject);
        assert_eq!(
            parse_l7_policy_action("redirect-to-pool"),
            L7PolicyAction::RedirectToPool
        );
    }

    /// Rule and compare types parse known labels; compare defaults to EqualTo.
    #[test]
    fn test_parse_l7_rule_type_and_compare() {
        assert_eq!(parse_l7_rule_type("host_name"), L7RuleType::HostName);
        assert_eq!(
            parse_l7_rule_type("ssl_conn_has_sni"),
            L7RuleType::SslConnHasSni
        );
        assert_eq!(
            parse_l7_compare_type(Some("starts_with")),
            L7CompareType::StartsWith
        );
        assert_eq!(parse_l7_compare_type(None), L7CompareType::EqualTo);
    }

    /// HTTP-family protocols carry L7 policies; raw TCP does not.
    #[test]
    fn test_is_l7_listener() {
        assert!(is_l7_listener(ListenerProtocol::Http));
        assert!(is_l7_listener(ListenerProtocol::Https));
        assert!(is_l7_listener(ListenerProtocol::TerminatedHttps));
        assert!(!is_l7_listener(ListenerProtocol::Tcp));
    }

    /// Rule identity compares type + value (+ optional key).
    #[test]
    fn test_l7_rule_key_matches() {
        let sample_rule = L7Rule {
            id: "rule-1".to_string(),
            policy_id: "policy-1".to_string(),
            rule_type: L7RuleType::Path as i32,
            compare_type: L7CompareType::EqualTo as i32,
            value: "/v1".to_string(),
            key: String::new(),
            invert: false,
            created_at: 0,
            updated_at: 0,
        };
        assert!(l7_rule_key_matches(&sample_rule, L7RuleType::Path, "/v1", None));
        assert!(!l7_rule_key_matches(&sample_rule, L7RuleType::Path, "/v2", None));
    }

    /// Policy match is sensitive to position; unset fields compare as None.
    #[test]
    fn test_l7_policy_matches() {
        let existing_policy = L7Policy {
            id: "policy-1".to_string(),
            listener_id: "listener-1".to_string(),
            name: "policy".to_string(),
            position: 1,
            action: L7PolicyAction::RedirectToPool as i32,
            redirect_url: String::new(),
            redirect_pool_id: "pool-1".to_string(),
            redirect_http_status_code: 0,
            enabled: true,
            created_at: 0,
            updated_at: 0,
        };
        let target_pool = "pool-1".to_string();
        assert!(l7_policy_matches(
            &existing_policy,
            L7PolicyAction::RedirectToPool,
            1,
            true,
            None,
            Some(&target_pool),
            None
        ));
        // Same policy at a different desired position must not match.
        assert!(!l7_policy_matches(
            &existing_policy,
            L7PolicyAction::RedirectToPool,
            2,
            true,
            None,
            Some(&target_pool),
            None
        ));
    }
}
/// Reconcile DNS state (zones, their records, and reverse zones) against `spec`.
///
/// With `prune` set, zones and records not present in the spec are deleted
/// within the (org, project) scopes the spec mentions.
async fn reconcile_dns(spec: DnsConfig, endpoint: String, prune: bool) -> Result<()> {
    let mut zone_client = ZoneServiceClient::connect(endpoint.clone()).await?;
    let mut record_client = RecordServiceClient::connect(endpoint.clone()).await?;
    let mut reverse_client = ReverseZoneServiceClient::connect(endpoint).await?;

    // Track desired zone names per scope (consumed by prune_zones below) while
    // reconciling each zone and its records in spec order.
    let mut desired_scopes: HashMap<(String, String), HashSet<String>> = HashMap::new();
    for zone_spec in &spec.zones {
        desired_scopes
            .entry(dns_scope(&zone_spec.org_id, zone_spec.project_id.as_deref()))
            .or_default()
            .insert(zone_spec.name.clone());
        let zone = ensure_zone(&mut zone_client, zone_spec).await?;
        ensure_records(&mut record_client, &zone, zone_spec, prune).await?;
    }
    if prune {
        prune_zones(&mut zone_client, &desired_scopes).await?;
    }
    ensure_reverse_zones(&mut reverse_client, &spec.reverse_zones, prune).await?;
    Ok(())
}
/// Ensure a DNS zone matching `spec` exists, returning the server-side zone.
///
/// If a zone with the same name already exists in the spec's scope, an update
/// is issued whenever the spec carries any SOA-style override (refresh, retry,
/// expire, minimum, primary NS, admin email); otherwise the existing zone is
/// returned untouched. NOTE(review): the update is sent without diffing against
/// current values, so reconciles re-send identical settings each run.
async fn ensure_zone(
    client: &mut ZoneServiceClient<Channel>,
    spec: &ZoneSpec,
) -> Result<ZoneInfo> {
    let scoped = list_zones(
        client,
        &spec.org_id,
        spec.project_id.as_deref().unwrap_or(""),
        Some(&spec.name),
    )
    .await?;
    match scoped.into_iter().find(|zone| zone.name == spec.name) {
        Some(zone) => {
            let update = UpdateZoneRequest {
                id: zone.id.clone(),
                refresh: spec.refresh,
                retry: spec.retry,
                expire: spec.expire,
                minimum: spec.minimum,
                primary_ns: spec.primary_ns.clone(),
                admin_email: spec.admin_email.clone(),
            };
            let has_overrides = update.refresh.is_some()
                || update.retry.is_some()
                || update.expire.is_some()
                || update.minimum.is_some()
                || update.primary_ns.is_some()
                || update.admin_email.is_some();
            if !has_overrides {
                return Ok(zone);
            }
            info!("Updating zone {}", spec.name);
            let reply = client.update_zone(update).await?.into_inner();
            reply.zone.context("missing zone in update response")
        }
        None => {
            info!("Creating zone {}", spec.name);
            let reply = client
                .create_zone(CreateZoneRequest {
                    name: spec.name.clone(),
                    org_id: spec.org_id.clone(),
                    project_id: spec.project_id.clone().unwrap_or_default(),
                    primary_ns: spec.primary_ns.clone().unwrap_or_default(),
                    admin_email: spec.admin_email.clone().unwrap_or_default(),
                })
                .await?
                .into_inner();
            reply.zone.context("missing zone in create response")
        }
    }
}
/// Reconcile the records of `zone` against `spec.records`.
///
/// Existing records are indexed by (name, normalized type); each desired
/// record updates its match in place or is created. With `prune`, records
/// whose (name, type) pair is absent from the spec are deleted.
async fn ensure_records(
    client: &mut RecordServiceClient<Channel>,
    zone: &ZoneInfo,
    spec: &ZoneSpec,
    prune: bool,
) -> Result<()> {
    let records = list_records(client, &zone.id).await?;
    // NOTE(review): if the zone holds several records with the same name/type
    // (e.g. round-robin A records), later entries overwrite earlier ones in
    // this map, so only the last is updated or pruned — confirm whether
    // duplicates can occur in practice.
    let mut existing: HashMap<(String, String), RecordInfo> = HashMap::new();
    for record in records {
        existing.insert((record.name.clone(), normalize_name(&record.record_type)), record);
    }
    for record_spec in &spec.records {
        let key = (record_spec.name.clone(), normalize_name(&record_spec.record_type));
        let data = record_data_from_spec(&record_spec.record_type, &record_spec.data)?;
        // Default TTL of 300s applies only on the create path; updates pass the
        // spec's Option through unchanged.
        let ttl = record_spec.ttl.unwrap_or(300);
        if let Some(existing_record) = existing.get(&key) {
            // Update is issued unconditionally — no diff against current data.
            info!("Updating record {} {}", record_spec.record_type, record_spec.name);
            client
                .update_record(UpdateRecordRequest {
                    id: existing_record.id.clone(),
                    ttl: record_spec.ttl,
                    data: Some(data),
                    enabled: record_spec.enabled,
                })
                .await?;
            continue;
        }
        info!("Creating record {} {}", record_spec.record_type, record_spec.name);
        client
            .create_record(CreateRecordRequest {
                zone_id: zone.id.clone(),
                name: record_spec.name.clone(),
                record_type: record_spec.record_type.clone(),
                ttl,
                data: Some(data),
            })
            .await?;
    }
    if prune {
        // Delete any existing record whose (name, type) key the spec no longer lists.
        let desired: HashSet<(String, String)> = spec
            .records
            .iter()
            .map(|record| {
                (
                    record.name.clone(),
                    normalize_name(&record.record_type),
                )
            })
            .collect();
        for (key, record) in existing {
            if !desired.contains(&key) {
                info!("Deleting record {} {}", record.record_type, record.name);
                client
                    .delete_record(DeleteRecordRequest { id: record.id })
                    .await?;
            }
        }
    }
    Ok(())
}
/// Delete zones not named in `desired_scopes`, scope by scope.
///
/// Only scopes that appear in the map are inspected; zones in other
/// (org, project) scopes are left alone. Deletion is forced.
async fn prune_zones(
    client: &mut ZoneServiceClient<Channel>,
    desired_scopes: &HashMap<(String, String), HashSet<String>>,
) -> Result<()> {
    for ((org_id, project_id), keep) in desired_scopes {
        for zone in list_zones(client, org_id, project_id, None).await? {
            if keep.contains(&zone.name) {
                continue;
            }
            info!("Deleting zone {}", zone.name);
            client
                .delete_zone(DeleteZoneRequest {
                    id: zone.id,
                    force: true,
                })
                .await?;
        }
    }
    Ok(())
}
/// Reconcile reverse (PTR) zones against `specs`, grouped per (org, project)
/// scope so each scope is listed once. There is no update RPC in use here:
/// a zone whose PTR pattern or TTL drifted is deleted and recreated. With
/// `prune`, reverse zones whose CIDR is absent from the scope's specs are deleted.
async fn ensure_reverse_zones(
    client: &mut ReverseZoneServiceClient<Channel>,
    specs: &[ReverseZoneSpec],
    prune: bool,
) -> Result<()> {
    let mut scopes: HashMap<(String, String), Vec<&ReverseZoneSpec>> = HashMap::new();
    for spec in specs {
        scopes
            .entry(dns_scope(&spec.org_id, spec.project_id.as_deref()))
            .or_default()
            .push(spec);
    }
    for ((org_id, project_id), scoped_specs) in scopes {
        // Empty project string means "no project filter".
        let project_filter = (!project_id.is_empty()).then(|| project_id.as_str());
        let existing = list_reverse_zones(client, &org_id, project_filter).await?;
        for spec in &scoped_specs {
            let desired_ttl = spec.ttl.unwrap_or(3600);
            // Shared request builder for both the recreate and create paths.
            let build_create = || CreateReverseZoneRequest {
                org_id: spec.org_id.clone(),
                project_id: spec.project_id.clone(),
                cidr: spec.cidr.clone(),
                ptr_pattern: spec.ptr_pattern.clone(),
                ttl: desired_ttl,
            };
            match existing.iter().find(|zone| zone.cidr == spec.cidr) {
                Some(zone) => {
                    if zone.ptr_pattern != spec.ptr_pattern || zone.ttl != desired_ttl {
                        info!("Recreating reverse zone {}", spec.cidr);
                        client
                            .delete_reverse_zone(DeleteReverseZoneRequest {
                                zone_id: zone.id.clone(),
                            })
                            .await?;
                        client.create_reverse_zone(build_create()).await?;
                    }
                }
                None => {
                    info!("Creating reverse zone {}", spec.cidr);
                    client.create_reverse_zone(build_create()).await?;
                }
            }
        }
        if prune {
            let desired_cidrs: HashSet<&str> =
                scoped_specs.iter().map(|spec| spec.cidr.as_str()).collect();
            for zone in existing {
                if !desired_cidrs.contains(zone.cidr.as_str()) {
                    info!("Deleting reverse zone {}", zone.cidr);
                    client
                        .delete_reverse_zone(DeleteReverseZoneRequest { zone_id: zone.id })
                        .await?;
                }
            }
        }
    }
    Ok(())
}
/// List zones for an (org, project) scope, optionally filtered by name.
/// Single page of up to 1000 entries.
async fn list_zones(
    client: &mut ZoneServiceClient<Channel>,
    org_id: &str,
    project_id: &str,
    name_filter: Option<&str>,
) -> Result<Vec<ZoneInfo>> {
    let request = flashdns_api::proto::ListZonesRequest {
        org_id: org_id.to_string(),
        project_id: project_id.to_string(),
        name_filter: name_filter.unwrap_or_default().to_string(),
        page_size: 1000,
        page_token: String::new(),
    };
    let reply = client.list_zones(request).await?.into_inner();
    Ok(reply.zones)
}
/// List reverse zones for an org, optionally narrowed to one project.
async fn list_reverse_zones(
    client: &mut ReverseZoneServiceClient<Channel>,
    org_id: &str,
    project_id: Option<&str>,
) -> Result<Vec<ReverseZone>> {
    let request = ListReverseZonesRequest {
        org_id: org_id.to_string(),
        project_id: project_id.map(str::to_string),
    };
    let reply = client.list_reverse_zones(request).await?.into_inner();
    Ok(reply.zones)
}
/// List all records of a zone, unfiltered. Single page of up to 1000 entries.
async fn list_records(
    client: &mut RecordServiceClient<Channel>,
    zone_id: &str,
) -> Result<Vec<RecordInfo>> {
    let request = flashdns_api::proto::ListRecordsRequest {
        zone_id: zone_id.to_string(),
        name_filter: String::new(),
        type_filter: String::new(),
        page_size: 1000,
        page_token: String::new(),
    };
    let reply = client.list_records(request).await?.into_inner();
    Ok(reply.records)
}
fn record_data_from_spec(record_type: &str, data: &serde_json::Value) -> Result<RecordData> {
let record_type = normalize_name(record_type);
let map = data
.as_object()
.context("record data must be an object")?;
let record_data = match record_type.as_str() {
"a" => record_data::Data::A(ARecord {
address: get_string(map, "address")?,
}),
"aaaa" => record_data::Data::Aaaa(AaaaRecord {
address: get_string(map, "address")?,
}),
"cname" => record_data::Data::Cname(CnameRecord {
target: get_string(map, "target")?,
}),
"mx" => record_data::Data::Mx(MxRecord {
preference: get_u32(map, "preference")?,
exchange: get_string(map, "exchange")?,
}),
"txt" => record_data::Data::Txt(TxtRecord {
text: get_string(map, "text")?,
}),
"srv" => record_data::Data::Srv(SrvRecord {
priority: get_u32(map, "priority")?,
weight: get_u32(map, "weight")?,
port: get_u32(map, "port")?,
target: get_string(map, "target")?,
}),
"ns" => record_data::Data::Ns(NsRecord {
nameserver: get_string(map, "nameserver")?,
}),
"ptr" => record_data::Data::Ptr(PtrRecord {
target: get_string(map, "target")?,
}),
"caa" => record_data::Data::Caa(CaaRecord {
flags: get_u32(map, "flags")?,
tag: get_string(map, "tag")?,
value: get_string(map, "value")?,
}),
_ => return Err(anyhow::anyhow!("unsupported record type {}", record_type)),
};
Ok(RecordData {
data: Some(record_data),
})
}
/// Fetch a required string field from a JSON record-data object.
/// Missing keys and non-string values both yield a "missing" error.
fn get_string(map: &serde_json::Map<String, serde_json::Value>, key: &str) -> Result<String> {
    map.get(key)
        .and_then(serde_json::Value::as_str)
        .map(str::to_string)
        .with_context(|| format!("record data missing {}", key))
}
/// Fetch a required unsigned-integer field from a JSON record-data object.
///
/// Missing keys and non-integer values yield a "missing" error. Values that
/// do not fit in a `u32` now error explicitly — the previous `as u32` cast
/// silently truncated out-of-range numbers.
fn get_u32(map: &serde_json::Map<String, serde_json::Value>, key: &str) -> Result<u32> {
    let raw = map
        .get(key)
        .and_then(|value| value.as_u64())
        .with_context(|| format!("record data missing {}", key))?;
    u32::try_from(raw).with_context(|| format!("record data field {} does not fit in u32", key))
}