From aba1b73d5beb31505b7549d45e70e4694959254a Mon Sep 17 00:00:00 2001
From: centra
Date: Tue, 31 Mar 2026 10:03:40 +0900
Subject: [PATCH] Tighten cluster builds and add record-set DNS publication
---
.cargo/config.toml | 2 +
.github/workflows/nix.yml | 11 +-
Makefile | 6 +-
deployer/crates/deployer-types/src/lib.rs | 10 +
.../crates/fleet-scheduler/src/publish.rs | 321 +++++++++++++-----
flake.nix | 290 +++++++++++++---
.../crates/flashdns-server/src/metadata.rs | 136 ++++++--
iam/crates/iam-server/src/rest.rs | 6 +-
.../crates/nightlight-server/src/query.rs | 42 +++
.../crates/nightlight-server/src/storage.rs | 1 +
nix-nos/lib/cluster-config-lib.nix | 2 +-
nix/ci/flake.nix | 70 +++-
nix/ci/workspaces.json | 1 -
nix/test-cluster/common.nix | 8 +
nix/test-cluster/node01.nix | 1 +
nix/test-cluster/node02.nix | 1 +
nix/test-cluster/node03.nix | 1 +
nix/test-cluster/run-cluster.sh | 74 +++-
nix/test-cluster/storage-node01.nix | 1 +
nix/test-cluster/storage-node02.nix | 1 +
nix/test-cluster/storage-node03.nix | 1 +
scripts/ci_changed_workspaces.py | 140 +++++++-
22 files changed, 925 insertions(+), 201 deletions(-)
create mode 100644 .cargo/config.toml
diff --git a/.cargo/config.toml b/.cargo/config.toml
new file mode 100644
index 0000000..013d047
--- /dev/null
+++ b/.cargo/config.toml
@@ -0,0 +1,2 @@
+[build]
+target-dir = "work/cargo-target"
diff --git a/.github/workflows/nix.yml b/.github/workflows/nix.yml
index 9f87315..1770d79 100644
--- a/.github/workflows/nix.yml
+++ b/.github/workflows/nix.yml
@@ -17,6 +17,7 @@ jobs:
any_changed: ${{ steps.detect.outputs.any_changed }}
build_changed: ${{ steps.detect.outputs.build_changed }}
global_changed: ${{ steps.detect.outputs.global_changed }}
+ shared_crates: ${{ steps.detect.outputs.shared_crates }}
shared_crates_changed: ${{ steps.detect.outputs.shared_crates_changed }}
steps:
- uses: actions/checkout@v4
@@ -81,15 +82,19 @@ jobs:
needs: filter
if: ${{ needs.filter.outputs.shared_crates_changed == 'true' }}
runs-on: ubuntu-latest
- name: gate (shared crates)
+ strategy:
+ fail-fast: false
+ matrix:
+ crate: ${{ fromJSON(needs.filter.outputs.shared_crates) }}
+ name: gate (shared crate: ${{ matrix.crate }})
steps:
- uses: actions/checkout@v4
- uses: DeterminateSystems/nix-installer-action@v11
- uses: DeterminateSystems/magic-nix-cache-action@v8
- - name: Run Shared Crates Gate
+ - name: Run Shared Crate Gate
run: |
- nix run ./nix/ci#gate-ci -- --shared-crates --tier 0 --no-logs
+ nix run ./nix/ci#gate-ci -- --shared-crate ${{ matrix.crate }} --tier 0 --no-logs
# Build server packages (tier 1+)
build:
diff --git a/Makefile b/Makefile
index 6f994ac..a880eba 100644
--- a/Makefile
+++ b/Makefile
@@ -3,9 +3,11 @@
.PHONY: all build cluster-up cluster-down cluster-status cluster-validate cluster-smoke cluster-matrix cluster-bench-storage clean
-# Build all services (using Nix)
+PACKAGE ?= default
+
+# Build a single package by default; set PACKAGE=default to build the full bundle.
build:
- nix build .#packages.x86_64-linux.default
+ nix build .#$(PACKAGE)
# Cluster Management
cluster-up:
diff --git a/deployer/crates/deployer-types/src/lib.rs b/deployer/crates/deployer-types/src/lib.rs
index 0f7bf75..ae5c125 100644
--- a/deployer/crates/deployer-types/src/lib.rs
+++ b/deployer/crates/deployer-types/src/lib.rs
@@ -496,6 +496,8 @@ pub enum DnsPublishMode {
LoadBalancer,
/// Publish the first healthy instance IP directly.
Direct,
+ /// Publish all healthy instance IPs directly as a DNS record set.
+ DirectMulti,
}
/// Desired DNS publication for a service.
@@ -894,9 +896,15 @@ pub struct PublishedLoadBalancerState {
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct PublishedDnsRecordState {
pub zone_id: String,
+ #[serde(default)]
pub record_id: String,
+ #[serde(default)]
+ pub record_ids: Vec<String>,
pub fqdn: String,
+ #[serde(default)]
pub value: String,
+ #[serde(default)]
+ pub values: Vec<String>,
}
/// Observed publication state stored separately from ServiceSpec desired state.
@@ -1215,8 +1223,10 @@ mod tests {
dns: Some(PublishedDnsRecordState {
zone_id: "zone-1".to_string(),
record_id: "record-1".to_string(),
+ record_ids: vec!["record-1".to_string()],
fqdn: "api.test.cluster.local".to_string(),
value: "10.0.0.50".to_string(),
+ values: vec!["10.0.0.50".to_string()],
}),
observed_at: None,
};
diff --git a/deployer/crates/fleet-scheduler/src/publish.rs b/deployer/crates/fleet-scheduler/src/publish.rs
index be41e0f..9012585 100644
--- a/deployer/crates/fleet-scheduler/src/publish.rs
+++ b/deployer/crates/fleet-scheduler/src/publish.rs
@@ -1,4 +1,4 @@
-use std::collections::{HashMap, HashSet};
+use std::collections::{BTreeMap, HashMap, HashSet};
use anyhow::{Context, Result};
use chainfire_client::Client;
@@ -354,24 +354,33 @@ impl PublicationController {
return Ok(existing.cloned());
};
- let Some(value) = desired_dns_value(spec, healthy_instances, load_balancer) else {
+ let desired_values = desired_dns_values(spec, healthy_instances, load_balancer);
+ if desired_values.is_empty() {
if let Some(existing) = existing {
self.cleanup_dns(auth_token, existing).await?;
}
return Ok(None);
- };
+ }
let zone_name = normalize_zone_name(&spec.zone);
let record_name = record_name_for_service(spec, service);
let fqdn = format!("{}.{}", record_name, zone_name);
+ let primary_value = desired_values.first().cloned().unwrap_or_default();
if self.config.dry_run {
- info!(service = %service.name, fqdn = %fqdn, value = %value, "would reconcile native DNS record");
+ info!(
+ service = %service.name,
+ fqdn = %fqdn,
+ values = ?desired_values,
+ "would reconcile native DNS record set"
+ );
return Ok(existing.cloned().or(Some(PublishedDnsRecordState {
zone_id: String::new(),
record_id: String::new(),
+ record_ids: Vec::new(),
fqdn,
- value,
+ value: primary_value,
+ values: desired_values,
})));
}
@@ -380,22 +389,28 @@ impl PublicationController {
let zone =
ensure_zone(&mut zone_client, auth_token, &zone_name, org_id, project_id).await?;
- let record = ensure_record(
+ let records = ensure_records(
&mut record_client,
auth_token,
existing,
&zone.id,
&record_name,
spec.ttl,
- &value,
+ &desired_values,
)
.await?;
+ let record_ids = records
+ .iter()
+ .map(|record| record.id.clone())
+ .collect::<Vec<_>>();
Ok(Some(PublishedDnsRecordState {
zone_id: zone.id,
- record_id: record.id,
+ record_id: record_ids.first().cloned().unwrap_or_default(),
+ record_ids,
fqdn,
- value,
+ value: primary_value,
+ values: desired_values,
}))
}
@@ -408,18 +423,25 @@ impl PublicationController {
return Ok(());
};
let mut record_client = RecordServiceClient::connect(endpoint.clone()).await?;
- match record_client
- .delete_record(authorized_request(
- DeleteRecordRequest {
- id: dns_state.record_id.clone(),
- },
- auth_token,
- ))
- .await
- {
- Ok(_) => {}
- Err(status) if status.code() == Code::NotFound => {}
- Err(status) => return Err(status.into()),
+ let mut record_ids = dns_state.record_ids.clone();
+ if record_ids.is_empty() && !dns_state.record_id.is_empty() {
+ record_ids.push(dns_state.record_id.clone());
+ }
+ record_ids.sort();
+ record_ids.dedup();
+
+ for record_id in record_ids {
+ match record_client
+ .delete_record(authorized_request(
+ DeleteRecordRequest { id: record_id },
+ auth_token,
+ ))
+ .await
+ {
+ Ok(_) => {}
+ Err(status) if status.code() == Code::NotFound => {}
+ Err(status) => return Err(status.into()),
+ }
}
Ok(())
}
@@ -812,15 +834,15 @@ async fn ensure_zone(
.context("FlashDNS returned empty CreateZone response")?)
}
-async fn ensure_record(
+async fn ensure_records(
client: &mut RecordServiceClient,
auth_token: &str,
existing: Option<&PublishedDnsRecordState>,
zone_id: &str,
name: &str,
ttl: u32,
- value: &str,
-) -> Result<RecordInfo> {
+ desired_values: &[String],
+) -> Result<Vec<RecordInfo>> {
let records = client
.list_records(authorized_request(
ListRecordsRequest {
@@ -839,73 +861,96 @@ async fn ensure_record(
let mut matching = records
.iter()
.filter(|record| {
- existing.map(|state| state.record_id.as_str()) == Some(record.id.as_str())
- || record.name == name
+ record.name == name
+ || existing.map(|state| state.record_id.as_str()) == Some(record.id.as_str())
+ || existing
+ .map(|state| state.record_ids.iter().any(|id| id == &record.id))
+ .unwrap_or(false)
})
.cloned()
.collect::<Vec<_>>();
+ matching.sort_by(|lhs, rhs| {
+ record_a_value(lhs)
+ .cmp(&record_a_value(rhs))
+ .then_with(|| lhs.id.cmp(&rhs.id))
+ });
- if let Some(record) = matching.first().cloned() {
- let record_value = record
- .data
- .as_ref()
- .and_then(|data| data.data.as_ref())
- .and_then(|data| match data {
- record_data::Data::A(record) => Some(record.address.clone()),
- _ => None,
- });
+ let mut records_by_value: BTreeMap<String, Vec<RecordInfo>> = BTreeMap::new();
+ for record in matching {
+ let Some(value) = record_a_value(&record) else {
+ continue;
+ };
+ records_by_value.entry(value).or_default().push(record);
+ }
- if record_value.as_deref() != Some(value) || record.ttl != ttl {
- let updated = client
- .update_record(authorized_request(
- UpdateRecordRequest {
- id: record.id.clone(),
- ttl: Some(ttl),
+ let mut ensured = Vec::new();
+ for desired_value in desired_values {
+ if let Some(record) = records_by_value.get_mut(desired_value).and_then(|records| {
+ if records.is_empty() {
+ None
+ } else {
+ Some(records.remove(0))
+ }
+ }) {
+ if record.ttl != ttl || !record.enabled {
+ let updated = client
+ .update_record(authorized_request(
+ UpdateRecordRequest {
+ id: record.id.clone(),
+ ttl: Some(ttl),
+ data: Some(RecordData {
+ data: Some(record_data::Data::A(ARecord {
+ address: desired_value.to_string(),
+ })),
+ }),
+ enabled: Some(true),
+ },
+ auth_token,
+ ))
+ .await?
+ .into_inner()
+ .record
+ .context("FlashDNS returned empty UpdateRecord response")?;
+ ensured.push(updated);
+ } else {
+ ensured.push(record);
+ }
+ continue;
+ }
+
+ ensured.push(
+ client
+ .create_record(authorized_request(
+ CreateRecordRequest {
+ zone_id: zone_id.to_string(),
+ name: name.to_string(),
+ record_type: "A".to_string(),
+ ttl,
data: Some(RecordData {
data: Some(record_data::Data::A(ARecord {
- address: value.to_string(),
+ address: desired_value.to_string(),
})),
}),
- enabled: Some(true),
},
auth_token,
))
.await?
.into_inner()
.record
- .context("FlashDNS returned empty UpdateRecord response")?;
- matching.remove(0);
- for extra in matching {
- delete_record(client, auth_token, &extra.id).await?;
- }
- return Ok(updated);
- }
-
- for extra in matching.into_iter().skip(1) {
- delete_record(client, auth_token, &extra.id).await?;
- }
- return Ok(record);
+ .context("FlashDNS returned empty CreateRecord response")?,
+ );
}
- Ok(client
- .create_record(authorized_request(
- CreateRecordRequest {
- zone_id: zone_id.to_string(),
- name: name.to_string(),
- record_type: "A".to_string(),
- ttl,
- data: Some(RecordData {
- data: Some(record_data::Data::A(ARecord {
- address: value.to_string(),
- })),
- }),
- },
- auth_token,
- ))
- .await?
- .into_inner()
- .record
- .context("FlashDNS returned empty CreateRecord response")?)
+ for extra in records_by_value.into_values().flatten() {
+ delete_record(client, auth_token, &extra.id).await?;
+ }
+
+ ensured.sort_by(|lhs, rhs| {
+ record_a_value(lhs)
+ .cmp(&record_a_value(rhs))
+ .then_with(|| lhs.id.cmp(&rhs.id))
+ });
+ Ok(ensured)
}
async fn delete_record(
@@ -940,23 +985,54 @@ fn resolve_target_port(service: &ServiceSpec, spec: &LoadBalancerPublicationSpec
.or_else(|| service.ports.as_ref().and_then(|ports| ports.grpc))
}
-fn desired_dns_value(
+fn record_a_value(record: &RecordInfo) -> Option<String> {
+ record
+ .data
+ .as_ref()
+ .and_then(|data| data.data.as_ref())
+ .and_then(|data| match data {
+ record_data::Data::A(record) => Some(record.address.clone()),
+ _ => None,
+ })
+}
+
+fn normalize_dns_values(values: impl IntoIterator<Item = String>) -> Vec<String> {
+ let mut values = values
+ .into_iter()
+ .map(|value| value.trim().to_string())
+ .filter(|value| !value.is_empty())
+ .collect::<Vec<_>>();
+ values.sort();
+ values.dedup();
+ values
+}
+
+fn desired_dns_values(
spec: &DnsPublicationSpec,
healthy_instances: &[ServiceInstanceSpec],
load_balancer: Option<&PublishedLoadBalancerState>,
-) -> Option<String> {
+) -> Vec<String> {
match spec.mode {
- DnsPublishMode::LoadBalancer => load_balancer
- .and_then(|state| state.vip_address.clone())
- .filter(|value| !value.is_empty() && value != "0.0.0.0")
- .or_else(|| {
- healthy_instances
- .first()
- .map(|instance| instance.ip.clone())
- }),
- DnsPublishMode::Direct => healthy_instances
- .first()
- .map(|instance| instance.ip.clone()),
+ DnsPublishMode::LoadBalancer => normalize_dns_values(
+ load_balancer
+ .and_then(|state| state.vip_address.clone())
+ .filter(|value| !value.is_empty() && value != "0.0.0.0")
+ .or_else(|| {
+ healthy_instances
+ .first()
+ .map(|instance| instance.ip.clone())
+ })
+ .into_iter(),
+ ),
+ DnsPublishMode::Direct => normalize_dns_values(
+ healthy_instances
+ .first()
+ .map(|instance| instance.ip.clone())
+ .into_iter(),
+ ),
+ DnsPublishMode::DirectMulti => {
+ normalize_dns_values(healthy_instances.iter().map(|instance| instance.ip.clone()))
+ }
}
}
@@ -1153,7 +1229,7 @@ mod tests {
}
#[test]
- fn test_dns_value_falls_back_to_healthy_instance_when_vip_missing() {
+ fn test_dns_values_fall_back_to_healthy_instance_when_vip_missing() {
let spec = DnsPublicationSpec {
zone: "native.cluster.test".to_string(),
name: Some("api".to_string()),
@@ -1178,8 +1254,73 @@ mod tests {
}];
assert_eq!(
- desired_dns_value(&spec, &instances, None).as_deref(),
- Some("10.0.0.11")
+ desired_dns_values(&spec, &instances, None),
+ vec!["10.0.0.11".to_string()]
+ );
+ }
+
+ #[test]
+ fn test_direct_multi_dns_publishes_all_healthy_instance_ips() {
+ let spec = DnsPublicationSpec {
+ zone: "native.cluster.test".to_string(),
+ name: Some("daemon".to_string()),
+ ttl: 60,
+ mode: DnsPublishMode::DirectMulti,
+ };
+ let instances = vec![
+ ServiceInstanceSpec {
+ instance_id: "daemon-node02".to_string(),
+ service: "daemon".to_string(),
+ node_id: "node02".to_string(),
+ ip: "10.0.0.12".to_string(),
+ port: 8080,
+ mesh_port: None,
+ version: None,
+ health_check: None,
+ process: None,
+ container: None,
+ managed_by: None,
+ state: Some("healthy".to_string()),
+ last_heartbeat: None,
+ observed_at: None,
+ },
+ ServiceInstanceSpec {
+ instance_id: "daemon-node01".to_string(),
+ service: "daemon".to_string(),
+ node_id: "node01".to_string(),
+ ip: "10.0.0.11".to_string(),
+ port: 8080,
+ mesh_port: None,
+ version: None,
+ health_check: None,
+ process: None,
+ container: None,
+ managed_by: None,
+ state: Some("healthy".to_string()),
+ last_heartbeat: None,
+ observed_at: None,
+ },
+ ServiceInstanceSpec {
+ instance_id: "daemon-node03".to_string(),
+ service: "daemon".to_string(),
+ node_id: "node03".to_string(),
+ ip: "10.0.0.11".to_string(),
+ port: 8080,
+ mesh_port: None,
+ version: None,
+ health_check: None,
+ process: None,
+ container: None,
+ managed_by: None,
+ state: Some("healthy".to_string()),
+ last_heartbeat: None,
+ observed_at: None,
+ },
+ ];
+
+ assert_eq!(
+ desired_dns_values(&spec, &instances, None),
+ vec!["10.0.0.11".to_string(), "10.0.0.12".to_string()]
);
}
diff --git a/flake.nix b/flake.nix
index 34aad00..3a42ff4 100644
--- a/flake.nix
+++ b/flake.nix
@@ -71,37 +71,160 @@
clusterPython = pkgs.python3.withPackages (ps: [ ps.python-snappy ]);
- # Keep package builds stable even when docs or archived assets change.
- repoSrc = pkgs.lib.cleanSourceWith {
- src = ./.;
- filter = path: type:
- let
- rel = pkgs.lib.removePrefix ((toString ./. ) + "/") (toString path);
- topLevel = builtins.head (pkgs.lib.splitString "/" rel);
- includedTopLevels = [
- "apigateway"
- "chainfire"
- "coronafs"
- "crates"
- "creditservice"
- "deployer"
- "fiberlb"
- "flashdns"
- "flaredb"
- "iam"
- "k8shost"
- "lightningstor"
- "mtls-agent"
- "nightlight"
- "plasmavmc"
- "prismnet"
- ];
- in
- rel == ""
- || builtins.elem rel [ "flake.nix" "flake.lock" ]
- || builtins.elem topLevel includedTopLevels;
+ # Keep Rust package builds stable without invalidating every package on
+ # unrelated workspace changes.
+ workspaceSourceRoots = {
+ chainfire = [ "chainfire" ];
+ flaredb = [ "flaredb" ];
+ iam = [
+ "apigateway"
+ "chainfire"
+ "creditservice"
+ "crates/photon-auth-client"
+ "crates/photon-config"
+ "crates/photon-runtime"
+ "crates/photon-state"
+ "flaredb"
+ "iam"
+ ];
+ coronafs = [ "coronafs" ];
+ plasmavmc = [
+ "apigateway"
+ "chainfire"
+ "creditservice"
+ "crates/photon-auth-client"
+ "crates/photon-config"
+ "crates/photon-runtime"
+ "crates/photon-state"
+ "flaredb"
+ "iam"
+ "lightningstor"
+ "plasmavmc"
+ "prismnet"
+ ];
+ prismnet = [
+ "apigateway"
+ "chainfire"
+ "creditservice"
+ "crates/photon-auth-client"
+ "crates/photon-config"
+ "crates/photon-runtime"
+ "crates/photon-state"
+ "flaredb"
+ "iam"
+ "prismnet"
+ ];
+ flashdns = [
+ "apigateway"
+ "chainfire"
+ "creditservice"
+ "crates/photon-auth-client"
+ "crates/photon-config"
+ "crates/photon-runtime"
+ "crates/photon-state"
+ "flashdns"
+ "flaredb"
+ "iam"
+ ];
+ fiberlb = [
+ "apigateway"
+ "chainfire"
+ "creditservice"
+ "crates/photon-auth-client"
+ "crates/photon-config"
+ "crates/photon-runtime"
+ "crates/photon-state"
+ "fiberlb"
+ "flaredb"
+ "iam"
+ ];
+ lightningstor = [
+ "apigateway"
+ "chainfire"
+ "creditservice"
+ "crates/photon-auth-client"
+ "crates/photon-config"
+ "crates/photon-runtime"
+ "crates/photon-state"
+ "flaredb"
+ "iam"
+ "lightningstor"
+ ];
+ nightlight = [ "nightlight" ];
+ creditservice = [
+ "apigateway"
+ "chainfire"
+ "creditservice"
+ "crates/photon-auth-client"
+ "crates/photon-config"
+ "crates/photon-runtime"
+ "crates/photon-state"
+ "flaredb"
+ "iam"
+ ];
+ apigateway = [
+ "apigateway"
+ "chainfire"
+ "creditservice"
+ "crates/photon-auth-client"
+ "crates/photon-config"
+ "crates/photon-runtime"
+ "crates/photon-state"
+ "flaredb"
+ "iam"
+ ];
+ k8shost = [
+ "apigateway"
+ "chainfire"
+ "creditservice"
+ "crates/photon-auth-client"
+ "crates/photon-config"
+ "crates/photon-runtime"
+ "crates/photon-state"
+ "fiberlb"
+ "flaredb"
+ "flashdns"
+ "iam"
+ "k8shost"
+ "lightningstor"
+ "plasmavmc"
+ "prismnet"
+ ];
+ deployer = [
+ "apigateway"
+ "chainfire"
+ "creditservice"
+ "crates/photon-auth-client"
+ "crates/photon-config"
+ "crates/photon-runtime"
+ "crates/photon-state"
+ "deployer"
+ "fiberlb"
+ "flaredb"
+ "flashdns"
+ "iam"
+ ];
};
+ mkWorkspaceSrc = workspaceSubdir:
+ let
+ sourceRoots = workspaceSourceRoots.${workspaceSubdir} or [ workspaceSubdir ];
+ in
+ pkgs.lib.cleanSourceWith {
+ src = ./.;
+ filter = path: type:
+ let
+ rel = pkgs.lib.removePrefix ((toString ./. ) + "/") (toString path);
+ in
+ rel == ""
+ || builtins.elem rel [ "flake.nix" "flake.lock" ]
+ || builtins.any (root:
+ rel == root
+ || pkgs.lib.hasPrefix "${root}/" rel
+ || pkgs.lib.hasPrefix "${rel}/" root
+ ) sourceRoots;
+ };
+
flakeBundleSrc = pkgs.lib.cleanSourceWith {
src = ./.;
filter = path: type:
@@ -440,12 +563,12 @@
# workspaceSubdir: subdirectory containing Cargo.toml (e.g., "chainfire")
# mainCrate: optional main crate name if different from workspace
# description: package description for meta
- # doCheck: whether to run tests during build (default: true)
- buildRustWorkspace = { name, workspaceSubdir, mainCrate ? null, description ? "", doCheck ? true }:
+ # doCheck: whether to run tests during build (default: false)
+ buildRustWorkspace = { name, workspaceSubdir, mainCrate ? null, description ? "", doCheck ? false }:
pkgs.rustPlatform.buildRustPackage ({
pname = name;
version = "0.1.0";
- src = repoSrc;
+ src = mkWorkspaceSrc workspaceSubdir;
cargoLock = {
lockFile = ./${workspaceSubdir}/Cargo.lock;
@@ -486,6 +609,43 @@
cargoBuildFlags = [ "-p" mainCrate ];
});
+ # Helper function to build multiple binaries from the same workspace in
+ # one cargo invocation. This is mainly used by the VM cluster builds so
+ # a single host build can satisfy several services from the same
+ # workspace.
+ buildRustWorkspaceBundle = { name, workspaceSubdir, crates, description ? "", doCheck ? false }:
+ pkgs.rustPlatform.buildRustPackage {
+ pname = name;
+ version = "0.1.0";
+ src = mkWorkspaceSrc workspaceSubdir;
+
+ cargoLock = {
+ lockFile = ./${workspaceSubdir}/Cargo.lock;
+ };
+
+ buildAndTestSubdir = workspaceSubdir;
+
+ postUnpack = ''
+ cp $sourceRoot/${workspaceSubdir}/Cargo.lock $sourceRoot/Cargo.lock
+ '';
+
+ nativeBuildInputs = commonNativeBuildInputs;
+ buildInputs = commonBuildInputs;
+
+ inherit (commonEnvVars) LIBCLANG_PATH PROTOC ROCKSDB_LIB_DIR;
+ inherit doCheck;
+
+ cargoBuildFlags = pkgs.lib.concatMap (crate: [ "-p" crate ]) crates;
+
+ meta = with pkgs.lib; {
+ description = description;
+ homepage = "https://github.com/yourorg/plasmacloud";
+ license = licenses.asl20;
+ maintainers = [ ];
+ platforms = platforms.linux;
+ };
+ };
+
in
{
# ======================================================================
@@ -658,6 +818,16 @@
description = "LightningStor distributed storage node daemon";
};
+ lightningstor-workspace = buildRustWorkspaceBundle {
+ name = "lightningstor-workspace";
+ workspaceSubdir = "lightningstor";
+ crates = [
+ "lightningstor-server"
+ "lightningstor-node"
+ ];
+ description = "Combined LightningStor server and node workspace build";
+ };
+
# --------------------------------------------------------------------
# NightLight: Prometheus-compatible Metrics Store
# --------------------------------------------------------------------
@@ -768,6 +938,20 @@
description = "Label-aware service scheduler for PhotonCloud bare-metal fleets";
};
+ deployer-workspace = buildRustWorkspaceBundle {
+ name = "deployer-workspace";
+ workspaceSubdir = "deployer";
+ crates = [
+ "deployer-server"
+ "deployer-ctl"
+ "node-agent"
+ "nix-agent"
+ "plasmacloud-reconciler"
+ "fleet-scheduler"
+ ];
+ description = "Combined deployer workspace build for cluster images and checks";
+ };
+
vmClusterDeployerState =
self.nixosConfigurations.node01.config.system.build.plasmacloudDeployerClusterState;
@@ -787,18 +971,12 @@
self.packages.${system}.prismnet-server
self.packages.${system}.flashdns-server
self.packages.${system}.fiberlb-server
- self.packages.${system}.lightningstor-server
- self.packages.${system}.lightningstor-node
+ self.packages.${system}.lightningstor-workspace
self.packages.${system}.nightlight-server
self.packages.${system}.creditservice-server
self.packages.${system}.apigateway-server
self.packages.${system}.k8shost-server
- self.packages.${system}.deployer-server
- self.packages.${system}.deployer-ctl
- self.packages.${system}.plasmacloud-reconciler
- self.packages.${system}.nix-agent
- self.packages.${system}.node-agent
- self.packages.${system}.fleet-scheduler
+ self.packages.${system}.deployer-workspace
self.packages.${system}.vmClusterDeployerState
];
};
@@ -970,9 +1148,9 @@
PHOTONCLOUD_CHAINFIRE_SERVER_BIN =
"${self.packages.${system}.chainfire-server}/bin/chainfire";
PHOTONCLOUD_DEPLOYER_SERVER_BIN =
- "${self.packages.${system}.deployer-server}/bin/deployer-server";
+ "${self.packages.${system}.deployer-workspace}/bin/deployer-server";
PHOTONCLOUD_DEPLOYER_CTL_BIN =
- "${self.packages.${system}.deployer-ctl}/bin/deployer-ctl";
+ "${self.packages.${system}.deployer-workspace}/bin/deployer-ctl";
} ''
export HOME="$TMPDIR/home"
mkdir -p "$HOME"
@@ -1007,9 +1185,9 @@
PHOTONCLOUD_CHAINFIRE_SERVER_BIN =
"${self.packages.${system}.chainfire-server}/bin/chainfire";
PHOTONCLOUD_DEPLOYER_CTL_BIN =
- "${self.packages.${system}.deployer-ctl}/bin/deployer-ctl";
+ "${self.packages.${system}.deployer-workspace}/bin/deployer-ctl";
PHOTONCLOUD_PLASMACLOUD_RECONCILER_BIN =
- "${self.packages.${system}.plasmacloud-reconciler}/bin/plasmacloud-reconciler";
+ "${self.packages.${system}.deployer-workspace}/bin/plasmacloud-reconciler";
} ''
export HOME="$TMPDIR/home"
mkdir -p "$HOME"
@@ -1044,11 +1222,11 @@
PHOTONCLOUD_CHAINFIRE_SERVER_BIN =
"${self.packages.${system}.chainfire-server}/bin/chainfire";
PHOTONCLOUD_DEPLOYER_CTL_BIN =
- "${self.packages.${system}.deployer-ctl}/bin/deployer-ctl";
+ "${self.packages.${system}.deployer-workspace}/bin/deployer-ctl";
PHOTONCLOUD_NODE_AGENT_BIN =
- "${self.packages.${system}.node-agent}/bin/node-agent";
+ "${self.packages.${system}.deployer-workspace}/bin/node-agent";
PHOTONCLOUD_FLEET_SCHEDULER_BIN =
- "${self.packages.${system}.fleet-scheduler}/bin/fleet-scheduler";
+ "${self.packages.${system}.deployer-workspace}/bin/fleet-scheduler";
} ''
export HOME="$TMPDIR/home"
mkdir -p "$HOME"
@@ -1229,19 +1407,21 @@
prismnet-server = self.packages.${final.system}.prismnet-server;
flashdns-server = self.packages.${final.system}.flashdns-server;
fiberlb-server = self.packages.${final.system}.fiberlb-server;
- lightningstor-server = self.packages.${final.system}.lightningstor-server;
- lightningstor-node = self.packages.${final.system}.lightningstor-node;
+ lightningstor-workspace = self.packages.${final.system}.lightningstor-workspace;
+ lightningstor-server = self.packages.${final.system}.lightningstor-workspace;
+ lightningstor-node = self.packages.${final.system}.lightningstor-workspace;
nightlight-server = self.packages.${final.system}.nightlight-server;
creditservice-server = self.packages.${final.system}.creditservice-server;
apigateway-server = self.packages.${final.system}.apigateway-server;
k8shost-server = self.packages.${final.system}.k8shost-server;
- deployer-server = self.packages.${final.system}.deployer-server;
- deployer-ctl = self.packages.${final.system}.deployer-ctl;
- plasmacloud-reconciler = self.packages.${final.system}.plasmacloud-reconciler;
+ deployer-workspace = self.packages.${final.system}.deployer-workspace;
+ deployer-server = self.packages.${final.system}.deployer-workspace;
+ deployer-ctl = self.packages.${final.system}.deployer-workspace;
+ plasmacloud-reconciler = self.packages.${final.system}.deployer-workspace;
plasmacloudFlakeBundle = self.packages.${final.system}.plasmacloudFlakeBundle;
- nix-agent = self.packages.${final.system}.nix-agent;
- node-agent = self.packages.${final.system}.node-agent;
- fleet-scheduler = self.packages.${final.system}.fleet-scheduler;
+ nix-agent = self.packages.${final.system}.deployer-workspace;
+ node-agent = self.packages.${final.system}.deployer-workspace;
+ fleet-scheduler = self.packages.${final.system}.deployer-workspace;
};
};
}
diff --git a/flashdns/crates/flashdns-server/src/metadata.rs b/flashdns/crates/flashdns-server/src/metadata.rs
index 3a5688d..fb9eff3 100644
--- a/flashdns/crates/flashdns-server/src/metadata.rs
+++ b/flashdns/crates/flashdns-server/src/metadata.rs
@@ -152,7 +152,9 @@ impl DnsMetadataStore {
)
.execute(pool)
.await
- .map_err(|e| MetadataError::Storage(format!("Failed to initialize Postgres schema: {}", e)))?;
+ .map_err(|e| {
+ MetadataError::Storage(format!("Failed to initialize Postgres schema: {}", e))
+ })?;
Ok(())
}
@@ -165,7 +167,9 @@ impl DnsMetadataStore {
)
.execute(pool)
.await
- .map_err(|e| MetadataError::Storage(format!("Failed to initialize SQLite schema: {}", e)))?;
+ .map_err(|e| {
+ MetadataError::Storage(format!("Failed to initialize SQLite schema: {}", e))
+ })?;
Ok(())
}
@@ -192,9 +196,7 @@ impl DnsMetadataStore {
.bind(value)
.execute(pool.as_ref())
.await
- .map_err(|e| {
- MetadataError::Storage(format!("Postgres put failed: {}", e))
- })?;
+ .map_err(|e| MetadataError::Storage(format!("Postgres put failed: {}", e)))?;
}
SqlStorageBackend::Sqlite(pool) => {
sqlx::query(
@@ -395,10 +397,15 @@ impl DnsMetadataStore {
format!("/flashdns/zone_ids/{}", zone_id)
}
- fn record_key(zone_id: &ZoneId, record_name: &str, record_type: RecordType) -> String {
+ fn record_key(
+ zone_id: &ZoneId,
+ record_name: &str,
+ record_type: RecordType,
+ record_id: &RecordId,
+ ) -> String {
format!(
- "/flashdns/records/{}/{}/{}",
- zone_id, record_name, record_type
+ "/flashdns/records/{}/{}/{}/{}",
+ zone_id, record_name, record_type, record_id
)
}
@@ -406,6 +413,20 @@ impl DnsMetadataStore {
format!("/flashdns/records/{}/", zone_id)
}
+ fn record_type_prefix(zone_id: &ZoneId, record_name: &str, record_type: RecordType) -> String {
+ format!(
+ "/flashdns/records/{}/{}/{}/",
+ zone_id, record_name, record_type
+ )
+ }
+
+ fn legacy_record_key(zone_id: &ZoneId, record_name: &str, record_type: RecordType) -> String {
+ format!(
+ "/flashdns/records/{}/{}/{}",
+ zone_id, record_name, record_type
+ )
+ }
+
fn record_id_key(record_id: &RecordId) -> String {
format!("/flashdns/record_ids/{}", record_id)
}
@@ -521,7 +542,18 @@ impl DnsMetadataStore {
/// Save record
pub async fn save_record(&self, record: &Record) -> Result<()> {
- let key = Self::record_key(&record.zone_id, &record.name, record.record_type);
+ let key = Self::record_key(
+ &record.zone_id,
+ &record.name,
+ record.record_type,
+ &record.id,
+ );
+ let id_key = Self::record_id_key(&record.id);
+ if let Some(existing_key) = self.get(&id_key).await? {
+ if existing_key != key {
+ self.delete_key(&existing_key).await?;
+ }
+ }
let value = serde_json::to_string(record).map_err(|e| {
MetadataError::Serialization(format!("Failed to serialize record: {}", e))
})?;
@@ -529,29 +561,40 @@ impl DnsMetadataStore {
self.put(&key, &value).await?;
// Also save record ID mapping
- let id_key = Self::record_id_key(&record.id);
self.put(&id_key, &key).await?;
Ok(())
}
- /// Load record by name and type
+ /// Load the first record by name and type, preserving compatibility with
+ /// older single-record keys.
pub async fn load_record(
&self,
zone_id: &ZoneId,
record_name: &str,
record_type: RecordType,
) -> Result