From b8ebd24d4e9b2dbe71e34ba09b77092dfa7dd43c Mon Sep 17 00:00:00 2001
From: centra
Date: Sat, 4 Apr 2026 16:33:03 +0900
Subject: [PATCH] =?UTF-8?q?nix-nos=E5=89=8A=E9=99=A4?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.github/workflows/nix.yml | 4 +-
CONTRIBUTING.md | 2 +-
Makefile | 2 +-
README.md | 6 +-
apigateway/Cargo.toml | 4 +-
baremetal/image-builder/build-images.sh | 32 +-
.../vm-cluster/legacy/alpine-ssh-setup.sh | 4 +-
.../vm-cluster/legacy/launch-node01-disk.sh | 2 +-
.../vm-cluster/legacy/launch-node01-dual.sh | 2 +-
.../legacy/launch-node01-from-disk.sh | 2 +-
.../vm-cluster/legacy/launch-node01-iso.sh | 4 +-
.../legacy/launch-node01-netboot.sh | 2 +-
.../vm-cluster/legacy/launch-node01-vde.sh | 2 +-
.../vm-cluster/legacy/launch-node02-alpine.sh | 2 +-
.../vm-cluster/legacy/launch-node02-disk.sh | 2 +-
.../legacy/launch-node02-from-disk.sh | 2 +-
.../vm-cluster/legacy/launch-node02-iso.sh | 2 +-
.../legacy/launch-node02-netboot.sh | 2 +-
.../legacy/launch-node02-recovery.sh | 2 +-
.../vm-cluster/legacy/launch-node02-vde.sh | 2 +-
.../vm-cluster/legacy/launch-node03-disk.sh | 2 +-
.../legacy/launch-node03-from-disk.sh | 2 +-
.../vm-cluster/legacy/launch-node03-iso.sh | 2 +-
.../legacy/launch-node03-netboot.sh | 2 +-
.../legacy/launch-node03-recovery.sh | 2 +-
.../vm-cluster/legacy/launch-node03-vde.sh | 2 +-
.../vm-cluster/legacy/pxe-server-setup.sh | 8 +-
.../vm-cluster/pxe-server/configuration.nix | 4 +-
bin/cloud-cli | 2 +-
.../assets/nixos/control-plane/netboot.ipxe | 4 +-
chainfire/baremetal/pxe-server/ipxe/boot.ipxe | 2 +-
.../baremetal/pxe-server/nixos-module.nix | 6 +-
chainfire/chainfire-client/src/metadata.rs | 2 +-
client-common/Cargo.toml | 4 +-
client-common/src/lib.rs | 2 +-
coronafs/Cargo.toml | 4 +-
coronafs/README.md | 2 +-
crates/photon-auth-client/Cargo.toml | 2 +-
crates/photon-config/Cargo.toml | 2 +-
crates/photon-runtime/Cargo.toml | 2 +-
crates/photon-state/Cargo.toml | 2 +-
creditservice/Cargo.toml | 6 +-
creditservice/README.md | 2 +-
deployer/Cargo.lock | 44 +-
deployer/Cargo.toml | 6 +-
deployer/crates/cert-authority/src/main.rs | 10 +-
deployer/crates/deployer-ctl/src/chainfire.rs | 53 +-
deployer/crates/deployer-ctl/src/main.rs | 12 +-
deployer/crates/deployer-server/src/admin.rs | 210 +-
.../deployer-server/src/bootstrap_assets.rs | 2 +-
.../crates/deployer-server/src/cloud_init.rs | 87 +-
deployer/crates/deployer-server/src/config.rs | 14 +-
deployer/crates/deployer-server/src/lib.rs | 2 +-
.../deployer-server/src/local_storage.rs | 68 +-
.../crates/deployer-server/src/phone_home.rs | 589 ++--
deployer/crates/deployer-server/src/state.rs | 4 +-
.../crates/deployer-server/src/storage.rs | 172 +-
deployer/crates/deployer-server/src/tls.rs | 2 +-
deployer/crates/deployer-types/src/lib.rs | 250 +-
deployer/crates/fleet-scheduler/src/main.rs | 6 +-
deployer/crates/nix-agent/src/main.rs | 26 +-
deployer/crates/node-agent/src/agent.rs | 12 +-
deployer/crates/node-agent/src/main.rs | 12 +-
.../Cargo.toml | 2 +-
.../src/auth.rs | 2 +-
.../src/hosts.rs | 6 +-
.../src/main.rs | 0
.../src/tenant_network.rs | 0
.../src/watcher.rs | 0
.../scripts/verify-deployer-bootstrap-e2e.sh | 64 +-
.../scripts/verify-fleet-scheduler-e2e.sh | 34 +-
deployer/scripts/verify-host-lifecycle-e2e.sh | 34 +-
docs/README.md | 2 +-
docs/component-matrix.md | 2 +-
docs/testing.md | 5 +-
flake.lock | 17 -
flake.nix | 2670 ++++++++---------
.../crates/k8shost-controllers/src/main.rs | 4 +-
.../k8shost-server/src/fiberlb_controller.rs | 4 +-
.../k8shost-server/src/flashdns_controller.rs | 8 +-
k8shost/crates/k8shost-server/src/main.rs | 2 +-
.../k8shost-server/src/services/deployment.rs | 6 +-
k8shost/crates/k8shost-types/src/lib.rs | 2 +-
mtls-agent/src/discovery.rs | 2 +-
mtls-agent/src/main.rs | 4 +-
nix-nos/modules/default.nix | 3 +-
nix-nos/modules/topology.nix | 68 -
nix/ci/flake.lock | 82 +-
nix/ci/flake.nix | 28 +-
nix/ci/workspaces.json | 3 +-
nix/images/netboot-all-in-one.nix | 4 +-
nix/images/netboot-control-plane.nix | 4 +-
nix/images/netboot-worker.nix | 2 +-
...plasmacloud-iso.nix => ultracloud-iso.nix} | 406 +--
.../lib/cluster-schema.nix | 315 +-
nix/modules/apigateway.nix | 2 +-
nix/modules/cluster-config-lib.nix | 2 +-
nix/modules/default.nix | 8 +-
nix/modules/deployer.nix | 12 +-
nix/modules/first-boot-automation.nix | 76 +-
nix/modules/fleet-scheduler.nix | 6 +-
nix/modules/install-target.nix | 10 +-
nix/modules/nix-agent.nix | 6 +-
.../nix-nos/cluster-config-generator.nix | 36 -
nix/modules/nix-nos/example-topology.nix | 94 -
nix/modules/nix-nos/topology.nix | 3 -
nix/modules/node-agent.nix | 8 +-
nix/modules/service-port-reservations.nix | 2 +-
...oud-cluster.nix => ultracloud-cluster.nix} | 97 +-
...oud-network.nix => ultracloud-network.nix} | 69 +-
...resources.nix => ultracloud-resources.nix} | 44 +-
...g.nix => ultracloud-tenant-networking.nix} | 24 +-
nix/nodes/vm-cluster/cluster.nix | 132 +-
nix/nodes/vm-cluster/common-disko.nix | 33 +
nix/nodes/vm-cluster/lib.nix | 240 ++
nix/nodes/vm-cluster/node01/configuration.nix | 47 +-
nix/nodes/vm-cluster/node01/disko.nix | 32 +-
nix/nodes/vm-cluster/node02/configuration.nix | 57 +-
nix/nodes/vm-cluster/node02/disko.nix | 32 +-
nix/nodes/vm-cluster/node03/configuration.nix | 57 +-
nix/nodes/vm-cluster/node03/disko.nix | 32 +-
nix/templates/iam-flaredb-minimal.nix | 4 +-
...d-3node-ha.nix => ultracloud-3node-ha.nix} | 4 +-
...le-node.nix => ultracloud-single-node.nix} | 6 +-
nix/test-cluster/README.md | 12 +-
nix/test-cluster/common.nix | 15 +-
nix/test-cluster/flake.lock | 81 +-
nix/test-cluster/flake.nix | 16 +-
nix/test-cluster/node01.nix | 10 +-
nix/test-cluster/node06.nix | 18 +-
nix/test-cluster/run-cluster.sh | 815 ++++-
nix/test-cluster/storage-node01.nix | 6 +-
nix/test-cluster/storage-node02.nix | 2 +-
nix/test-cluster/storage-node03.nix | 2 +-
nix/test-cluster/storage-node04.nix | 2 +-
nix/test-cluster/storage-node05.nix | 2 +-
nix/test-cluster/vm-bench-guest-image.nix | 4 +-
nix/test-cluster/vm-guest-image.nix | 1355 ++++++++-
nix/tests/deployer-vm-smoke.nix | 62 +-
...fiberlb-native-bgp-ecmp-drain-vm-smoke.nix | 19 +-
.../fiberlb-native-bgp-interop-vm-smoke.nix | 12 +-
.../fiberlb-native-bgp-multipath-vm-smoke.nix | 12 +-
nix/tests/fiberlb-native-bgp-vm-smoke.nix | 12 +-
nix/tests/first-boot-topology-vm-smoke.nix | 101 +-
...aremetal-maas-simplification-2026-04-04.md | 571 ++++
.../component-clarity-findings.md | 2 +-
.../deployment-architecture-findings.md | 14 +-
.../issue-register.md | 8 +-
plans/nix-nos-simplification-2026-04-04.md | 133 +
...deployment-scheduler-roadmap-2026-03-20.md | 8 +-
...=> ultracloud-design-patterns-analysis.md} | 10 +-
...sign.md => ultracloud-radical-redesign.md} | 12 +-
...racloud-standalone-integration-balance.md} | 46 +-
.../plasmavmc-server/src/volume_manager.rs | 2 +-
scripts/ci_changed_workspaces.py | 2 +-
scripts/rename_status.sh | 2 +-
156 files changed, 6077 insertions(+), 3982 deletions(-)
rename deployer/crates/{plasmacloud-reconciler => ultracloud-reconciler}/Cargo.toml (95%)
rename deployer/crates/{plasmacloud-reconciler => ultracloud-reconciler}/src/auth.rs (98%)
rename deployer/crates/{plasmacloud-reconciler => ultracloud-reconciler}/src/hosts.rs (99%)
rename deployer/crates/{plasmacloud-reconciler => ultracloud-reconciler}/src/main.rs (100%)
rename deployer/crates/{plasmacloud-reconciler => ultracloud-reconciler}/src/tenant_network.rs (100%)
rename deployer/crates/{plasmacloud-reconciler => ultracloud-reconciler}/src/watcher.rs (100%)
delete mode 100644 nix-nos/modules/topology.nix
rename nix/iso/{plasmacloud-iso.nix => ultracloud-iso.nix} (51%)
rename nix-nos/lib/cluster-config-lib.nix => nix/lib/cluster-schema.nix (87%)
delete mode 100644 nix/modules/nix-nos/cluster-config-generator.nix
delete mode 100644 nix/modules/nix-nos/example-topology.nix
delete mode 100644 nix/modules/nix-nos/topology.nix
rename nix/modules/{plasmacloud-cluster.nix => ultracloud-cluster.nix} (73%)
rename nix/modules/{plasmacloud-network.nix => ultracloud-network.nix} (62%)
rename nix/modules/{plasmacloud-resources.nix => ultracloud-resources.nix} (91%)
rename nix/modules/{plasmacloud-tenant-networking.nix => ultracloud-tenant-networking.nix} (91%)
create mode 100644 nix/nodes/vm-cluster/common-disko.nix
create mode 100644 nix/nodes/vm-cluster/lib.nix
rename nix/templates/{plasmacloud-3node-ha.nix => ultracloud-3node-ha.nix} (95%)
rename nix/templates/{plasmacloud-single-node.nix => ultracloud-single-node.nix} (90%)
create mode 100644 plans/baremetal-maas-simplification-2026-04-04.md
create mode 100644 plans/nix-nos-simplification-2026-04-04.md
rename plans/{photoncloud-design-patterns-analysis.md => ultracloud-design-patterns-analysis.md} (98%)
rename plans/{photoncloud-radical-redesign.md => ultracloud-radical-redesign.md} (98%)
rename plans/{photoncloud-standalone-integration-balance.md => ultracloud-standalone-integration-balance.md} (95%)
diff --git a/.github/workflows/nix.yml b/.github/workflows/nix.yml
index 1770d79..3b96a4d 100644
--- a/.github/workflows/nix.yml
+++ b/.github/workflows/nix.yml
@@ -59,7 +59,7 @@ jobs:
--github-output "$GITHUB_OUTPUT"
# Run CI gates for changed workspaces
- # Uses the provider-agnostic 'photoncloud-gate' defined in nix/ci/flake.nix
+ # Uses the provider-agnostic 'ultracloud-gate' defined in nix/ci/flake.nix
gate:
needs: filter
if: ${{ needs.filter.outputs.any_changed == 'true' }}
@@ -74,7 +74,7 @@ jobs:
- uses: DeterminateSystems/nix-installer-action@v11
- uses: DeterminateSystems/magic-nix-cache-action@v8
- - name: Run PhotonCloud Gate
+ - name: Run UltraCloud Gate
run: |
nix run ./nix/ci#gate-ci -- --workspace ${{ matrix.workspace }} --tier 0 --no-logs
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index c701f62..955467a 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -1,6 +1,6 @@
# Contributing
-PhotonCloud uses Nix as the primary development and validation entrypoint.
+UltraCloud uses Nix as the primary development and validation entrypoint.
## Setup
diff --git a/Makefile b/Makefile
index a880eba..ff3ab67 100644
--- a/Makefile
+++ b/Makefile
@@ -1,4 +1,4 @@
-# PhotonCloud Makefile
+# UltraCloud Makefile
# Unifies build and test commands
.PHONY: all build cluster-up cluster-down cluster-status cluster-validate cluster-smoke cluster-matrix cluster-bench-storage clean
diff --git a/README.md b/README.md
index c219b79..1a2f63a 100644
--- a/README.md
+++ b/README.md
@@ -1,6 +1,6 @@
-# PhotonCloud
+# UltraCloud
-PhotonCloud is a Nix-first cloud platform workspace that assembles a small control plane, network services, VM hosting, shared storage, object storage, and gateway services into one reproducible repository.
+UltraCloud is a Nix-first cloud platform workspace that assembles a small control plane, network services, VM hosting, shared storage, object storage, and gateway services into one reproducible repository.
The canonical local proof path is the six-node VM cluster under [`nix/test-cluster`](/home/centra/cloud/nix/test-cluster/README.md). It builds all guest images on the host, boots them as hardware-like QEMU nodes, and validates real multi-node behavior.
@@ -47,6 +47,6 @@ nix run ./nix/test-cluster#cluster -- fresh-smoke
## Scope
-PhotonCloud is centered on reproducible infrastructure behavior rather than polished end-user product surfaces. Some services, such as `creditservice`, are intentionally minimal reference implementations that prove integration points rather than full products.
+UltraCloud is centered on reproducible infrastructure behavior rather than polished end-user product surfaces. Some services, such as `creditservice`, are intentionally minimal reference implementations that prove integration points rather than full products.
Host-level NixOS rollout validation is also expected to stay reproducible: the `deployer-vm-smoke` VM test now proves that `nix-agent` can activate a prebuilt target system closure directly, without recompiling the stack inside the guest.
diff --git a/apigateway/Cargo.toml b/apigateway/Cargo.toml
index 6a59fbb..15283e9 100644
--- a/apigateway/Cargo.toml
+++ b/apigateway/Cargo.toml
@@ -10,8 +10,8 @@ version = "0.1.0"
edition = "2021"
license = "MIT OR Apache-2.0"
rust-version = "1.75"
-authors = ["PlasmaCloud Contributors"]
-repository = "https://github.com/yourorg/plasmacloud"
+authors = ["UltraCloud Contributors"]
+repository = "https://github.com/yourorg/ultracloud"
[workspace.dependencies]
# Internal crates
diff --git a/baremetal/image-builder/build-images.sh b/baremetal/image-builder/build-images.sh
index 8359e85..dd004cb 100755
--- a/baremetal/image-builder/build-images.sh
+++ b/baremetal/image-builder/build-images.sh
@@ -1,8 +1,8 @@
#!/usr/bin/env bash
# ==============================================================================
-# PlasmaCloud NixOS Netboot Image Builder
+# UltraCloud NixOS Netboot Image Builder
# ==============================================================================
-# This script builds netboot images for bare-metal provisioning of PlasmaCloud.
+# This script builds netboot images for bare-metal provisioning of UltraCloud.
#
# Usage:
# ./build-images.sh [--profile PROFILE] [--output-dir DIR] [--help]
@@ -61,7 +61,7 @@ print_error() {
print_banner() {
echo ""
echo "╔════════════════════════════════════════════════════════════════╗"
- echo "║ PlasmaCloud NixOS Netboot Image Builder ║"
+ echo "║ UltraCloud NixOS Netboot Image Builder ║"
echo "║ Building bare-metal provisioning images ║"
echo "╚════════════════════════════════════════════════════════════════╝"
echo ""
@@ -72,11 +72,11 @@ print_usage() {
cat << EOF
Usage: $0 [OPTIONS]
-Build NixOS netboot images for PlasmaCloud bare-metal provisioning.
+Build NixOS netboot images for UltraCloud bare-metal provisioning.
OPTIONS:
--profile PROFILE Build specific profile:
- - control-plane: All 8 PlasmaCloud services
+ - control-plane: All 8 UltraCloud services
- worker: Compute-focused services (PlasmaVMC, PrismNET)
- all-in-one: All services for single-node deployment
- all: Build all profiles (default)
@@ -107,9 +107,9 @@ OUTPUT:
- netboot.ipxe iPXE boot script
ENVIRONMENT:
- PLASMACLOUD_DEPLOYER_URL Optional deployer endpoint embedded into generated netboot.ipxe
- PLASMACLOUD_BOOTSTRAP_TOKEN Optional bootstrap token embedded into generated netboot.ipxe
- PLASMACLOUD_CA_CERT_URL Optional CA certificate URL embedded into generated netboot.ipxe
+ ULTRACLOUD_DEPLOYER_URL Optional deployer endpoint embedded into generated netboot.ipxe
+ ULTRACLOUD_BOOTSTRAP_TOKEN Optional bootstrap token embedded into generated netboot.ipxe
+ ULTRACLOUD_CA_CERT_URL Optional CA certificate URL embedded into generated netboot.ipxe
EOF
}
@@ -157,14 +157,14 @@ build_profile() {
fi
local deployer_kernel_args=""
- if [ -n "${PLASMACLOUD_DEPLOYER_URL:-}" ]; then
- deployer_kernel_args+=" plasmacloud.deployer_url=${PLASMACLOUD_DEPLOYER_URL}"
+ if [ -n "${ULTRACLOUD_DEPLOYER_URL:-}" ]; then
+ deployer_kernel_args+=" ultracloud.deployer_url=${ULTRACLOUD_DEPLOYER_URL}"
fi
- if [ -n "${PLASMACLOUD_BOOTSTRAP_TOKEN:-}" ]; then
- deployer_kernel_args+=" plasmacloud.bootstrap_token=${PLASMACLOUD_BOOTSTRAP_TOKEN}"
+ if [ -n "${ULTRACLOUD_BOOTSTRAP_TOKEN:-}" ]; then
+ deployer_kernel_args+=" ultracloud.bootstrap_token=${ULTRACLOUD_BOOTSTRAP_TOKEN}"
fi
- if [ -n "${PLASMACLOUD_CA_CERT_URL:-}" ]; then
- deployer_kernel_args+=" plasmacloud.ca_cert_url=${PLASMACLOUD_CA_CERT_URL}"
+ if [ -n "${ULTRACLOUD_CA_CERT_URL:-}" ]; then
+ deployer_kernel_args+=" ultracloud.ca_cert_url=${ULTRACLOUD_CA_CERT_URL}"
fi
# Generate iPXE boot script
@@ -172,14 +172,14 @@ build_profile() {
cat > "$profile_dir/netboot.ipxe" << EOF
#!ipxe
-# PlasmaCloud Netboot - $profile
+# UltraCloud Netboot - $profile
# Generated: $(date -u +"%Y-%m-%d %H:%M:%S UTC")
# Set variables
set boot-server \${boot-url}
# Display info
-echo Loading PlasmaCloud ($profile profile)...
+echo Loading UltraCloud ($profile profile)...
echo Kernel: bzImage
echo Initrd: initrd
echo
diff --git a/baremetal/vm-cluster/legacy/alpine-ssh-setup.sh b/baremetal/vm-cluster/legacy/alpine-ssh-setup.sh
index 568b06e..bf71d88 100755
--- a/baremetal/vm-cluster/legacy/alpine-ssh-setup.sh
+++ b/baremetal/vm-cluster/legacy/alpine-ssh-setup.sh
@@ -57,7 +57,7 @@ echo ""
sleep 2
echo "rc-service sshd restart" # Restart with new config
sleep 2
- echo "echo 'root:plasmacloud' | chpasswd" # Set root password
+ echo "echo 'root:ultracloud' | chpasswd" # Set root password
sleep 2
echo "ip addr show" # Show network info
sleep 2
@@ -72,7 +72,7 @@ echo ""
echo "=== SSH Setup Complete ==="
echo "SSH should now be accessible via:"
echo " ssh -p 2202 root@localhost"
-echo " Password: plasmacloud"
+echo " Password: ultracloud"
echo ""
echo "Test with: ssh -o StrictHostKeyChecking=no -p 2202 root@localhost 'echo SSH_OK'"
echo ""
diff --git a/baremetal/vm-cluster/legacy/launch-node01-disk.sh b/baremetal/vm-cluster/legacy/launch-node01-disk.sh
index a95cfa6..5058066 100755
--- a/baremetal/vm-cluster/legacy/launch-node01-disk.sh
+++ b/baremetal/vm-cluster/legacy/launch-node01-disk.sh
@@ -1,7 +1,7 @@
#!/usr/bin/env bash
set -euo pipefail
-# PlasmaCloud VM Cluster - Node 01 (Disk Boot)
+# UltraCloud VM Cluster - Node 01 (Disk Boot)
# Boots from installed NixOS on disk
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
diff --git a/baremetal/vm-cluster/legacy/launch-node01-dual.sh b/baremetal/vm-cluster/legacy/launch-node01-dual.sh
index 396a73d..f2c9cc3 100755
--- a/baremetal/vm-cluster/legacy/launch-node01-dual.sh
+++ b/baremetal/vm-cluster/legacy/launch-node01-dual.sh
@@ -1,7 +1,7 @@
#!/usr/bin/env bash
set -euo pipefail
-# PlasmaCloud VM Cluster - Node 01 (ISO Boot + Dual Networking)
+# UltraCloud VM Cluster - Node 01 (ISO Boot + Dual Networking)
# Features:
# - Multicast socket for inter-VM L2 communication (eth0)
# - SLIRP with SSH port forward for host access (eth1)
diff --git a/baremetal/vm-cluster/legacy/launch-node01-from-disk.sh b/baremetal/vm-cluster/legacy/launch-node01-from-disk.sh
index 3d867b3..b2e45b7 100755
--- a/baremetal/vm-cluster/legacy/launch-node01-from-disk.sh
+++ b/baremetal/vm-cluster/legacy/launch-node01-from-disk.sh
@@ -1,7 +1,7 @@
#!/usr/bin/env bash
set -euo pipefail
-# PlasmaCloud VM Cluster - Node 01 (Boot from installed NixOS on disk)
+# UltraCloud VM Cluster - Node 01 (Boot from installed NixOS on disk)
# UEFI boot with OVMF firmware
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
diff --git a/baremetal/vm-cluster/legacy/launch-node01-iso.sh b/baremetal/vm-cluster/legacy/launch-node01-iso.sh
index 6e06601..bd504fc 100755
--- a/baremetal/vm-cluster/legacy/launch-node01-iso.sh
+++ b/baremetal/vm-cluster/legacy/launch-node01-iso.sh
@@ -1,8 +1,8 @@
#!/usr/bin/env bash
set -euo pipefail
-# PlasmaCloud VM Cluster - Node 01 (ISO Boot)
-# Boots from PlasmaCloud ISO for manual NixOS installation
+# UltraCloud VM Cluster - Node 01 (ISO Boot)
+# Boots from UltraCloud ISO for manual NixOS installation
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
DISK="${SCRIPT_DIR}/node01.qcow2"
diff --git a/baremetal/vm-cluster/legacy/launch-node01-netboot.sh b/baremetal/vm-cluster/legacy/launch-node01-netboot.sh
index e8e5d53..915f707 100755
--- a/baremetal/vm-cluster/legacy/launch-node01-netboot.sh
+++ b/baremetal/vm-cluster/legacy/launch-node01-netboot.sh
@@ -1,7 +1,7 @@
#!/usr/bin/env bash
set -euo pipefail
-# PlasmaCloud VM Cluster - Node 01 (Netboot with SSH Key)
+# UltraCloud VM Cluster - Node 01 (Netboot with SSH Key)
# Features:
# - Direct kernel/initrd boot (no ISO required)
# - SSH key authentication baked in (no password setup needed)
diff --git a/baremetal/vm-cluster/legacy/launch-node01-vde.sh b/baremetal/vm-cluster/legacy/launch-node01-vde.sh
index b4c7553..e022c29 100755
--- a/baremetal/vm-cluster/legacy/launch-node01-vde.sh
+++ b/baremetal/vm-cluster/legacy/launch-node01-vde.sh
@@ -1,7 +1,7 @@
#!/usr/bin/env bash
set -euo pipefail
-# PlasmaCloud VM Cluster - Node 01 (VDE Networking)
+# UltraCloud VM Cluster - Node 01 (VDE Networking)
# Uses VDE switch instead of multicast sockets
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
diff --git a/baremetal/vm-cluster/legacy/launch-node02-alpine.sh b/baremetal/vm-cluster/legacy/launch-node02-alpine.sh
index 2d27a1a..c71ed8c 100755
--- a/baremetal/vm-cluster/legacy/launch-node02-alpine.sh
+++ b/baremetal/vm-cluster/legacy/launch-node02-alpine.sh
@@ -1,7 +1,7 @@
#!/usr/bin/env bash
set -euo pipefail
-# PlasmaCloud VM Cluster - Node 02 (Alpine Bootstrap)
+# UltraCloud VM Cluster - Node 02 (Alpine Bootstrap)
# Features:
# - Alpine virt ISO for automated SSH setup
# - Multicast socket for inter-VM L2 communication (eth0)
diff --git a/baremetal/vm-cluster/legacy/launch-node02-disk.sh b/baremetal/vm-cluster/legacy/launch-node02-disk.sh
index cbe51a5..ded4841 100755
--- a/baremetal/vm-cluster/legacy/launch-node02-disk.sh
+++ b/baremetal/vm-cluster/legacy/launch-node02-disk.sh
@@ -1,7 +1,7 @@
#!/usr/bin/env bash
set -euo pipefail
-# PlasmaCloud VM Cluster - Node 02 (Disk Boot)
+# UltraCloud VM Cluster - Node 02 (Disk Boot)
# Boots from installed NixOS on disk
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
diff --git a/baremetal/vm-cluster/legacy/launch-node02-from-disk.sh b/baremetal/vm-cluster/legacy/launch-node02-from-disk.sh
index d848380..bc9c375 100755
--- a/baremetal/vm-cluster/legacy/launch-node02-from-disk.sh
+++ b/baremetal/vm-cluster/legacy/launch-node02-from-disk.sh
@@ -1,7 +1,7 @@
#!/usr/bin/env bash
set -euo pipefail
-# PlasmaCloud VM Cluster - Node 02 (Boot from installed NixOS on disk)
+# UltraCloud VM Cluster - Node 02 (Boot from installed NixOS on disk)
# Boots from the NixOS installation created by nixos-anywhere
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
diff --git a/baremetal/vm-cluster/legacy/launch-node02-iso.sh b/baremetal/vm-cluster/legacy/launch-node02-iso.sh
index 20423c2..0ac3e62 100755
--- a/baremetal/vm-cluster/legacy/launch-node02-iso.sh
+++ b/baremetal/vm-cluster/legacy/launch-node02-iso.sh
@@ -1,7 +1,7 @@
#!/usr/bin/env bash
set -euo pipefail
-# PlasmaCloud VM Cluster - Node 02 (ISO Boot)
+# UltraCloud VM Cluster - Node 02 (ISO Boot)
# Boots from NixOS ISO for provisioning via nixos-anywhere
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
diff --git a/baremetal/vm-cluster/legacy/launch-node02-netboot.sh b/baremetal/vm-cluster/legacy/launch-node02-netboot.sh
index 76d4ddd..54b1245 100755
--- a/baremetal/vm-cluster/legacy/launch-node02-netboot.sh
+++ b/baremetal/vm-cluster/legacy/launch-node02-netboot.sh
@@ -1,7 +1,7 @@
#!/usr/bin/env bash
set -euo pipefail
-# PlasmaCloud VM Cluster - Node 01 (Netboot with SSH Key)
+# UltraCloud VM Cluster - Node 01 (Netboot with SSH Key)
# Features:
# - Direct kernel/initrd boot (no ISO required)
# - SSH key authentication baked in (no password setup needed)
diff --git a/baremetal/vm-cluster/legacy/launch-node02-recovery.sh b/baremetal/vm-cluster/legacy/launch-node02-recovery.sh
index 6d68c63..9b51b33 100755
--- a/baremetal/vm-cluster/legacy/launch-node02-recovery.sh
+++ b/baremetal/vm-cluster/legacy/launch-node02-recovery.sh
@@ -1,7 +1,7 @@
#!/usr/bin/env bash
set -euo pipefail
-# PlasmaCloud VM Cluster - Node 02 (Recovery Boot)
+# UltraCloud VM Cluster - Node 02 (Recovery Boot)
# Boots from disk using new kernel/initrd from nix store
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
diff --git a/baremetal/vm-cluster/legacy/launch-node02-vde.sh b/baremetal/vm-cluster/legacy/launch-node02-vde.sh
index 766e612..a1aae9e 100755
--- a/baremetal/vm-cluster/legacy/launch-node02-vde.sh
+++ b/baremetal/vm-cluster/legacy/launch-node02-vde.sh
@@ -1,7 +1,7 @@
#!/usr/bin/env bash
set -euo pipefail
-# PlasmaCloud VM Cluster - Node 02 (VDE Networking)
+# UltraCloud VM Cluster - Node 02 (VDE Networking)
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
DISK="${SCRIPT_DIR}/node02.qcow2"
diff --git a/baremetal/vm-cluster/legacy/launch-node03-disk.sh b/baremetal/vm-cluster/legacy/launch-node03-disk.sh
index fff89da..99d6e8e 100755
--- a/baremetal/vm-cluster/legacy/launch-node03-disk.sh
+++ b/baremetal/vm-cluster/legacy/launch-node03-disk.sh
@@ -1,7 +1,7 @@
#!/usr/bin/env bash
set -euo pipefail
-# PlasmaCloud VM Cluster - Node 03 (Disk Boot)
+# UltraCloud VM Cluster - Node 03 (Disk Boot)
# Boots from installed NixOS on disk
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
diff --git a/baremetal/vm-cluster/legacy/launch-node03-from-disk.sh b/baremetal/vm-cluster/legacy/launch-node03-from-disk.sh
index c3c0a47..6853c72 100755
--- a/baremetal/vm-cluster/legacy/launch-node03-from-disk.sh
+++ b/baremetal/vm-cluster/legacy/launch-node03-from-disk.sh
@@ -1,7 +1,7 @@
#!/usr/bin/env bash
set -euo pipefail
-# PlasmaCloud VM Cluster - Node 03 (Boot from installed NixOS on disk)
+# UltraCloud VM Cluster - Node 03 (Boot from installed NixOS on disk)
# Boots from the NixOS installation created by nixos-anywhere
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
diff --git a/baremetal/vm-cluster/legacy/launch-node03-iso.sh b/baremetal/vm-cluster/legacy/launch-node03-iso.sh
index ba46d33..5652606 100755
--- a/baremetal/vm-cluster/legacy/launch-node03-iso.sh
+++ b/baremetal/vm-cluster/legacy/launch-node03-iso.sh
@@ -1,7 +1,7 @@
#!/usr/bin/env bash
set -euo pipefail
-# PlasmaCloud VM Cluster - Node 03 (ISO Boot)
+# UltraCloud VM Cluster - Node 03 (ISO Boot)
# Boots from NixOS ISO for provisioning via nixos-anywhere
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
diff --git a/baremetal/vm-cluster/legacy/launch-node03-netboot.sh b/baremetal/vm-cluster/legacy/launch-node03-netboot.sh
index 801bf61..58263f8 100755
--- a/baremetal/vm-cluster/legacy/launch-node03-netboot.sh
+++ b/baremetal/vm-cluster/legacy/launch-node03-netboot.sh
@@ -1,7 +1,7 @@
#!/usr/bin/env bash
set -euo pipefail
-# PlasmaCloud VM Cluster - Node 01 (Netboot with SSH Key)
+# UltraCloud VM Cluster - Node 01 (Netboot with SSH Key)
# Features:
# - Direct kernel/initrd boot (no ISO required)
# - SSH key authentication baked in (no password setup needed)
diff --git a/baremetal/vm-cluster/legacy/launch-node03-recovery.sh b/baremetal/vm-cluster/legacy/launch-node03-recovery.sh
index 97198f9..5d605d7 100755
--- a/baremetal/vm-cluster/legacy/launch-node03-recovery.sh
+++ b/baremetal/vm-cluster/legacy/launch-node03-recovery.sh
@@ -1,7 +1,7 @@
#!/usr/bin/env bash
set -euo pipefail
-# PlasmaCloud VM Cluster - Node 03 (Recovery Boot)
+# UltraCloud VM Cluster - Node 03 (Recovery Boot)
# Boots from disk using new kernel/initrd from nix store
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
diff --git a/baremetal/vm-cluster/legacy/launch-node03-vde.sh b/baremetal/vm-cluster/legacy/launch-node03-vde.sh
index 0683bc9..17e94ea 100755
--- a/baremetal/vm-cluster/legacy/launch-node03-vde.sh
+++ b/baremetal/vm-cluster/legacy/launch-node03-vde.sh
@@ -1,7 +1,7 @@
#!/usr/bin/env bash
set -euo pipefail
-# PlasmaCloud VM Cluster - Node 03 (VDE Networking)
+# UltraCloud VM Cluster - Node 03 (VDE Networking)
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
DISK="${SCRIPT_DIR}/node03.qcow2"
diff --git a/baremetal/vm-cluster/legacy/pxe-server-setup.sh b/baremetal/vm-cluster/legacy/pxe-server-setup.sh
index 87c7a4c..5781758 100644
--- a/baremetal/vm-cluster/legacy/pxe-server-setup.sh
+++ b/baremetal/vm-cluster/legacy/pxe-server-setup.sh
@@ -5,7 +5,7 @@
set -e
-echo "=== PlasmaCloud PXE Server Setup ==="
+echo "=== UltraCloud PXE Server Setup ==="
echo "This script will:"
echo "1. Install Alpine Linux to disk"
echo "2. Configure static networking (192.168.100.1)"
@@ -61,7 +61,7 @@ chroot /mnt apk add --no-cache \
# 8. Configure dnsmasq in the new system
cat > /mnt/etc/dnsmasq.conf <<'EOF'
-# PlasmaCloud PXE Server dnsmasq configuration
+# UltraCloud PXE Server dnsmasq configuration
# Interface to listen on (multicast network)
interface=eth0
@@ -109,14 +109,14 @@ chroot /mnt rc-update add dnsmasq default
chroot /mnt rc-update add sshd default
# 13. Set root password (for SSH access)
-echo "root:plasmacloud" | chroot /mnt chpasswd
+echo "root:ultracloud" | chroot /mnt chpasswd
echo ""
echo "=== Installation Complete ==="
echo "System will reboot from disk"
echo "PXE server will be available at: 192.168.100.1"
echo "DHCP range: 192.168.100.100-150"
-echo "SSH: ssh root@192.168.100.1 (password: plasmacloud)"
+echo "SSH: ssh root@192.168.100.1 (password: ultracloud)"
echo ""
echo "Press Enter to reboot..."
read
diff --git a/baremetal/vm-cluster/pxe-server/configuration.nix b/baremetal/vm-cluster/pxe-server/configuration.nix
index 22d3e29..424f8e2 100644
--- a/baremetal/vm-cluster/pxe-server/configuration.nix
+++ b/baremetal/vm-cluster/pxe-server/configuration.nix
@@ -79,7 +79,7 @@
services.deployer = {
enable = true;
bindAddr = "0.0.0.0:8080";
- clusterId = "plasmacloud-vm-cluster";
+ clusterId = "ultracloud-vm-cluster";
requireChainfire = false;
allowUnauthenticated = true;
allowUnknownNodes = true;
@@ -87,7 +87,7 @@
};
# Root password (for SSH access)
- users.users.root.password = "plasmacloud";
+ users.users.root.password = "ultracloud";
# Packages
environment.systemPackages = with pkgs; [
diff --git a/bin/cloud-cli b/bin/cloud-cli
index e368204..1cb2b0a 100755
--- a/bin/cloud-cli
+++ b/bin/cloud-cli
@@ -86,7 +86,7 @@ def cmd_list_vms(args):
def main():
global DEFAULT_API_URL
- parser = argparse.ArgumentParser(description="PhotonCloud CLI")
+ parser = argparse.ArgumentParser(description="UltraCloud CLI")
parser.add_argument("--token", help="Auth token", default=os.environ.get("CLOUD_TOKEN"))
parser.add_argument("--url", help="API URL", default=DEFAULT_API_URL)
diff --git a/chainfire/baremetal/pxe-server/assets/nixos/control-plane/netboot.ipxe b/chainfire/baremetal/pxe-server/assets/nixos/control-plane/netboot.ipxe
index d2c5805..4cd821a 100644
--- a/chainfire/baremetal/pxe-server/assets/nixos/control-plane/netboot.ipxe
+++ b/chainfire/baremetal/pxe-server/assets/nixos/control-plane/netboot.ipxe
@@ -1,13 +1,13 @@
#!ipxe
-# PlasmaCloud Netboot - control-plane
+# UltraCloud Netboot - control-plane
# Generated: 2025-12-10 21:58:15 UTC
# Set variables
set boot-server ${boot-url}
# Display info
-echo Loading PlasmaCloud (control-plane profile)...
+echo Loading UltraCloud (control-plane profile)...
echo Kernel: bzImage
echo Initrd: initrd
echo
diff --git a/chainfire/baremetal/pxe-server/ipxe/boot.ipxe b/chainfire/baremetal/pxe-server/ipxe/boot.ipxe
index 28aa1ad..397b6ea 100644
--- a/chainfire/baremetal/pxe-server/ipxe/boot.ipxe
+++ b/chainfire/baremetal/pxe-server/ipxe/boot.ipxe
@@ -182,7 +182,7 @@ set kernel-params ${kernel-params} centra.profile=${profile}
set kernel-params ${kernel-params} centra.hostname=${hostname}
set kernel-params ${kernel-params} centra.mac=${mac}
set kernel-params ${kernel-params} centra.provisioning-server=${provisioning-server}
-set kernel-params ${kernel-params} plasmacloud.deployer_url=${deployer-url}
+set kernel-params ${kernel-params} ultracloud.deployer_url=${deployer-url}
set kernel-params ${kernel-params} console=tty0 console=ttyS0,115200n8
# For debugging, enable these:
diff --git a/chainfire/baremetal/pxe-server/nixos-module.nix b/chainfire/baremetal/pxe-server/nixos-module.nix
index f45f210..8334e74 100644
--- a/chainfire/baremetal/pxe-server/nixos-module.nix
+++ b/chainfire/baremetal/pxe-server/nixos-module.nix
@@ -146,9 +146,9 @@ let
set kernel-params ''${kernel-params} centra.hostname=''${hostname}
set kernel-params ''${kernel-params} centra.mac=''${mac}
set kernel-params ''${kernel-params} centra.provisioning-server=''${provisioning-server}
- set kernel-params ''${kernel-params} plasmacloud.deployer_url=''${deployer-url}
- ${optionalString (cfg.bootstrap.bootstrapToken != null) "set kernel-params ''${kernel-params} plasmacloud.bootstrap_token=${cfg.bootstrap.bootstrapToken}"}
- ${optionalString (cfg.bootstrap.caCertUrl != null) "set kernel-params ''${kernel-params} plasmacloud.ca_cert_url=${cfg.bootstrap.caCertUrl}"}
+ set kernel-params ''${kernel-params} ultracloud.deployer_url=''${deployer-url}
+ ${optionalString (cfg.bootstrap.bootstrapToken != null) "set kernel-params ''${kernel-params} ultracloud.bootstrap_token=${cfg.bootstrap.bootstrapToken}"}
+ ${optionalString (cfg.bootstrap.caCertUrl != null) "set kernel-params ''${kernel-params} ultracloud.ca_cert_url=${cfg.bootstrap.caCertUrl}"}
set kernel-params ''${kernel-params} console=tty0 console=ttyS0,115200n8
kernel ''${nixos-url}/bzImage ''${kernel-params} || goto failed
diff --git a/chainfire/chainfire-client/src/metadata.rs b/chainfire/chainfire-client/src/metadata.rs
index 8e1aad5..d160611 100644
--- a/chainfire/chainfire-client/src/metadata.rs
+++ b/chainfire/chainfire-client/src/metadata.rs
@@ -1,6 +1,6 @@
//! Metadata-oriented KV facade for Chainfire (and test backends).
//!
-//! This module exists to standardize how PhotonCloud services interact with
+//! This module exists to standardize how UltraCloud services interact with
//! control-plane metadata: versioned reads, CAS, prefix scans, etc.
use async_trait::async_trait;
diff --git a/client-common/Cargo.toml b/client-common/Cargo.toml
index d4b0318..339d171 100644
--- a/client-common/Cargo.toml
+++ b/client-common/Cargo.toml
@@ -2,9 +2,9 @@
name = "photocloud-client-common"
version = "0.1.0"
edition = "2021"
-authors = ["PhotonCloud"]
+authors = ["UltraCloud"]
license = "MIT OR Apache-2.0"
-description = "Shared client config types (endpoint/auth/retry) for PhotonCloud SDKs"
+description = "Shared client config types (endpoint/auth/retry) for UltraCloud SDKs"
[dependencies]
tonic = { version = "0.12", features = ["tls"] }
diff --git a/client-common/src/lib.rs b/client-common/src/lib.rs
index 8ca690f..f3901e3 100644
--- a/client-common/src/lib.rs
+++ b/client-common/src/lib.rs
@@ -1,4 +1,4 @@
-//! Shared client config types (endpoint/auth/retry) for PhotonCloud SDKs.
+//! Shared client config types (endpoint/auth/retry) for UltraCloud SDKs.
//!
//! Lightweight, type-only helpers to keep SDK crates consistent without
//! forcing a unified SDK dependency tree.
diff --git a/coronafs/Cargo.toml b/coronafs/Cargo.toml
index ae89bdd..6b93b47 100644
--- a/coronafs/Cargo.toml
+++ b/coronafs/Cargo.toml
@@ -9,8 +9,8 @@ version = "0.1.0"
edition = "2021"
license = "MIT OR Apache-2.0"
rust-version = "1.75"
-authors = ["PhotonCloud Contributors"]
-repository = "https://github.com/photoncloud/photoncloud"
+authors = ["UltraCloud Contributors"]
+repository = "https://github.com/ultracloud/ultracloud"
[workspace.dependencies]
axum = "0.8"
diff --git a/coronafs/README.md b/coronafs/README.md
index cc9189e..e5f9d9c 100644
--- a/coronafs/README.md
+++ b/coronafs/README.md
@@ -1,6 +1,6 @@
# CoronaFS
-CoronaFS is PhotonCloud's mutable VM-volume layer.
+CoronaFS is UltraCloud's mutable VM-volume layer.
Current implementation:
diff --git a/crates/photon-auth-client/Cargo.toml b/crates/photon-auth-client/Cargo.toml
index 5c6c416..0c048b8 100644
--- a/crates/photon-auth-client/Cargo.toml
+++ b/crates/photon-auth-client/Cargo.toml
@@ -3,7 +3,7 @@ name = "photon-auth-client"
version = "0.1.0"
edition = "2021"
license = "MIT OR Apache-2.0"
-description = "Shared IAM auth client wrapper for PhotonCloud services"
+description = "Shared IAM auth client wrapper for UltraCloud services"
[dependencies]
anyhow = "1.0"
diff --git a/crates/photon-config/Cargo.toml b/crates/photon-config/Cargo.toml
index 61de74e..54cf55d 100644
--- a/crates/photon-config/Cargo.toml
+++ b/crates/photon-config/Cargo.toml
@@ -3,7 +3,7 @@ name = "photon-config"
version = "0.1.0"
edition = "2021"
license = "MIT OR Apache-2.0"
-description = "Shared configuration loading helpers for PhotonCloud"
+description = "Shared configuration loading helpers for UltraCloud"
[dependencies]
anyhow = "1.0"
diff --git a/crates/photon-runtime/Cargo.toml b/crates/photon-runtime/Cargo.toml
index 6e0f9ab..229ec3f 100644
--- a/crates/photon-runtime/Cargo.toml
+++ b/crates/photon-runtime/Cargo.toml
@@ -3,7 +3,7 @@ name = "photon-runtime"
version = "0.1.0"
edition = "2021"
license = "MIT OR Apache-2.0"
-description = "Shared runtime helpers for PhotonCloud services"
+description = "Shared runtime helpers for UltraCloud services"
[dependencies]
anyhow = "1.0"
diff --git a/crates/photon-state/Cargo.toml b/crates/photon-state/Cargo.toml
index 34ba1b9..c34c2bb 100644
--- a/crates/photon-state/Cargo.toml
+++ b/crates/photon-state/Cargo.toml
@@ -3,7 +3,7 @@ name = "photon-state"
version = "0.1.0"
edition = "2021"
license = "MIT OR Apache-2.0"
-description = "Shared state backend types and validation for PhotonCloud services"
+description = "Shared state backend types and validation for UltraCloud services"
[dependencies]
anyhow = "1.0"
diff --git a/creditservice/Cargo.toml b/creditservice/Cargo.toml
index 59bad2b..7f9a769 100644
--- a/creditservice/Cargo.toml
+++ b/creditservice/Cargo.toml
@@ -13,8 +13,8 @@ version = "0.1.0"
edition = "2021"
license = "MIT OR Apache-2.0"
rust-version = "1.75"
-authors = ["PhotonCloud Contributors"]
-repository = "https://github.com/photoncloud/creditservice"
+authors = ["UltraCloud Contributors"]
+repository = "https://github.com/ultracloud/creditservice"
[workspace.dependencies]
# Internal crates
@@ -27,7 +27,7 @@ photon-config = { path = "../crates/photon-config" }
photon-runtime = { path = "../crates/photon-runtime" }
photon-state = { path = "../crates/photon-state" }
-# External dependencies (aligned with PhotonCloud stack)
+# External dependencies (aligned with UltraCloud stack)
tokio = { version = "1.40", features = ["full"] }
tokio-stream = "0.1"
futures = "0.3"
diff --git a/creditservice/README.md b/creditservice/README.md
index 9901ca3..ce33eec 100644
--- a/creditservice/README.md
+++ b/creditservice/README.md
@@ -1,6 +1,6 @@
# CreditService
-`creditservice` is a minimal reference service that proves PhotonCloud can integrate vendor-specific quota and credit control with platform auth and gateway admission.
+`creditservice` is a minimal reference service that proves UltraCloud can integrate vendor-specific quota and credit control with platform auth and gateway admission.
It is intentionally not a full billing product.
diff --git a/deployer/Cargo.lock b/deployer/Cargo.lock
index f1c60de..181a381 100644
--- a/deployer/Cargo.lock
+++ b/deployer/Cargo.lock
@@ -2028,28 +2028,6 @@ version = "0.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b4596b6d070b27117e987119b4dac604f3c58cfb0b191112e24771b2faeac1a6"
-[[package]]
-name = "plasmacloud-reconciler"
-version = "0.1.0"
-dependencies = [
- "anyhow",
- "chainfire-client",
- "chrono",
- "clap",
- "deployer-types",
- "fiberlb-api",
- "flashdns-api",
- "iam-client",
- "iam-types",
- "prismnet-api",
- "serde",
- "serde_json",
- "tokio",
- "tonic",
- "tracing",
- "tracing-subscriber",
-]
-
[[package]]
name = "polyval"
version = "0.6.2"
@@ -3404,6 +3382,28 @@ version = "1.19.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "562d481066bde0658276a35467c4af00bdc6ee726305698a55b86e61d7ad82bb"
+[[package]]
+name = "ultracloud-reconciler"
+version = "0.1.0"
+dependencies = [
+ "anyhow",
+ "chainfire-client",
+ "chrono",
+ "clap",
+ "deployer-types",
+ "fiberlb-api",
+ "flashdns-api",
+ "iam-client",
+ "iam-types",
+ "prismnet-api",
+ "serde",
+ "serde_json",
+ "tokio",
+ "tonic",
+ "tracing",
+ "tracing-subscriber",
+]
+
[[package]]
name = "unicode-bidi"
version = "0.3.18"
diff --git a/deployer/Cargo.toml b/deployer/Cargo.toml
index c35537b..27069bf 100644
--- a/deployer/Cargo.toml
+++ b/deployer/Cargo.toml
@@ -7,7 +7,7 @@ members = [
"crates/nix-agent",
"crates/cert-authority",
"crates/deployer-ctl",
- "crates/plasmacloud-reconciler",
+ "crates/ultracloud-reconciler",
"crates/fleet-scheduler",
]
@@ -15,9 +15,9 @@ members = [
version = "0.1.0"
edition = "2021"
rust-version = "1.75"
-authors = ["PhotonCloud Contributors"]
+authors = ["UltraCloud Contributors"]
license = "MIT OR Apache-2.0"
-repository = "https://github.com/centra/plasmacloud"
+repository = "https://github.com/centra/ultracloud"
[workspace.dependencies]
# Internal crates
diff --git a/deployer/crates/cert-authority/src/main.rs b/deployer/crates/cert-authority/src/main.rs
index f6215e8..5b87911 100644
--- a/deployer/crates/cert-authority/src/main.rs
+++ b/deployer/crates/cert-authority/src/main.rs
@@ -12,7 +12,7 @@ use serde::{Deserialize, Serialize};
use tracing::{info, warn};
use tracing_subscriber::EnvFilter;
-const DEFAULT_CLUSTER_NAMESPACE: &str = "photoncloud";
+const DEFAULT_CLUSTER_NAMESPACE: &str = "ultracloud";
const CERT_TTL_DAYS: u64 = 90;
const ROTATION_THRESHOLD_DAYS: u64 = 30;
@@ -111,12 +111,12 @@ async fn init_ca(cert_path: &PathBuf, key_path: &PathBuf) -> Result<()> {
.context("failed to generate CA key pair")?;
// CA証明書パラメータを設定
- let mut params = CertificateParams::new(vec!["PhotonCloud CA".to_string()])
+ let mut params = CertificateParams::new(vec!["UltraCloud CA".to_string()])
.context("failed to create certificate params")?;
let mut distinguished_name = DistinguishedName::new();
- distinguished_name.push(DnType::OrganizationName, "PhotonCloud");
- distinguished_name.push(DnType::CommonName, "PhotonCloud CA");
+ distinguished_name.push(DnType::OrganizationName, "UltraCloud");
+ distinguished_name.push(DnType::CommonName, "UltraCloud CA");
params.distinguished_name = distinguished_name;
params.is_ca = rcgen::IsCa::Ca(rcgen::BasicConstraints::Unconstrained);
params.key_usages = vec![
@@ -215,7 +215,7 @@ async fn issue_certificate(
ensure_dns_san(&mut csr_params.params, svc);
}
if csr_params.params.subject_alt_names.is_empty() {
- ensure_dns_san(&mut csr_params.params, "photoncloud-service");
+ ensure_dns_san(&mut csr_params.params, "ultracloud-service");
}
// CA署名証明書を生成(CSRの公開鍵を利用)
diff --git a/deployer/crates/deployer-ctl/src/chainfire.rs b/deployer/crates/deployer-ctl/src/chainfire.rs
index 19ea1f3..6420487 100644
--- a/deployer/crates/deployer-ctl/src/chainfire.rs
+++ b/deployer/crates/deployer-ctl/src/chainfire.rs
@@ -6,9 +6,10 @@ use anyhow::{Context, Result};
use chainfire_client::{Client, ClientError};
use chrono::Utc;
use deployer_types::{
- ClusterNodeRecord, ClusterStateSpec, CommissionState, DesiredSystemSpec, HostDeploymentSpec,
- HostDeploymentStatus, InstallPlan, InstallState, NodeConfig, NodeSpec, ObservedSystemState,
- PowerState, ServiceInstanceSpec, ServicePublicationState, ServiceSpec, ServiceStatusRecord,
+ BootstrapPlan, ClusterNodeRecord, ClusterStateSpec, CommissionState, DesiredSystemSpec,
+ HostDeploymentSpec, HostDeploymentStatus, InstallPlan, InstallState, NodeAssignment,
+ NodeConfig, NodeSpec, ObservedSystemState, PowerState, ServiceInstanceSpec,
+ ServicePublicationState, ServiceSpec, ServiceStatusRecord,
};
use serde::de::DeserializeOwned;
use serde_json::{json, Value};
@@ -32,10 +33,6 @@ fn deployer_node_config_key(deployer_namespace: &str, machine_id: &str) -> Vec Vec {
- format!("{}/nodes/mapping/{}", deployer_namespace, machine_id).into_bytes()
-}
-
fn key_node(cluster_namespace: &str, cluster_id: &str, node_id: &str) -> Vec {
format!(
"{}nodes/{}",
@@ -217,7 +214,8 @@ fn merge_install_plan(
}
fn node_config_from_spec(node: &NodeSpec) -> NodeConfig {
- NodeConfig {
+ let assignment = NodeAssignment {
+ node_id: node.node_id.clone(),
hostname: node.hostname.clone(),
role: node
.roles
@@ -225,15 +223,21 @@ fn node_config_from_spec(node: &NodeSpec) -> NodeConfig {
.cloned()
.unwrap_or_else(|| "worker".to_string()),
ip: node.ip.clone(),
- services: Vec::new(),
- ssh_authorized_keys: Vec::new(),
labels: node.labels.clone(),
pool: node.pool.clone(),
node_class: node.node_class.clone(),
failure_domain: node.failure_domain.clone(),
+ };
+ let bootstrap_plan = BootstrapPlan {
+ services: Vec::new(),
nix_profile: node.nix_profile.clone(),
install_plan: node.install_plan.clone(),
- }
+ };
+ NodeConfig::from_parts(
+ assignment,
+ bootstrap_plan,
+ deployer_types::BootstrapSecrets::default(),
+ )
}
fn desired_system_from_spec(node: &NodeSpec) -> Option {
@@ -340,7 +344,7 @@ fn resolve_nodes(spec: &ClusterStateSpec) -> Result> {
.or_insert_with(|| pool.to_string());
resolved
.labels
- .entry("pool.photoncloud.io/name".to_string())
+ .entry("pool.ultracloud.io/name".to_string())
.or_insert_with(|| pool.to_string());
}
@@ -351,7 +355,7 @@ fn resolve_nodes(spec: &ClusterStateSpec) -> Result> {
.or_insert_with(|| node_class.to_string());
resolved
.labels
- .entry("nodeclass.photoncloud.io/name".to_string())
+ .entry("nodeclass.ultracloud.io/name".to_string())
.or_insert_with(|| node_class.to_string());
}
@@ -590,13 +594,7 @@ pub async fn bootstrap_cluster(
serde_json::to_vec(&config)?,
)
.await?;
- client
- .put(
- &deployer_node_mapping_key(deployer_namespace, machine_id),
- node.node_id.as_bytes(),
- )
- .await?;
- info!(node_id = %node.node_id, machine_id = %machine_id, "seeded deployer bootstrap mapping");
+ info!(node_id = %node.node_id, machine_id = %machine_id, "seeded deployer bootstrap config");
}
}
@@ -710,12 +708,6 @@ pub async fn apply_cluster_state(
serde_json::to_vec(&config)?,
)
.await?;
- client
- .put(
- &deployer_node_mapping_key(deployer_namespace, machine_id),
- node.node_id.as_bytes(),
- )
- .await?;
}
}
@@ -1460,10 +1452,6 @@ async fn prune_cluster_state(
String::from_utf8_lossy(&deployer_node_config_key(deployer_namespace, machine_id))
.to_string(),
);
- desired_deployer_keys.insert(
- String::from_utf8_lossy(&deployer_node_mapping_key(deployer_namespace, machine_id))
- .to_string(),
- );
}
}
@@ -1607,7 +1595,7 @@ mod tests {
assert_eq!(node.labels.get("pool").map(String::as_str), Some("general"));
assert_eq!(
node.labels
- .get("nodeclass.photoncloud.io/name")
+ .get("nodeclass.ultracloud.io/name")
.map(String::as_str),
Some("worker-linux")
);
@@ -1665,7 +1653,7 @@ mod tests {
#[test]
fn test_is_prunable_key_keeps_observed_system() {
- let prefix = cluster_prefix("photoncloud", "test-cluster");
+ let prefix = cluster_prefix("ultracloud", "test-cluster");
assert!(is_prunable_key(&format!("{}nodes/node01", prefix), &prefix));
assert!(is_prunable_key(
&format!("{}nodes/node01/desired-system", prefix),
@@ -1707,5 +1695,4 @@ fn is_prunable_key(key: &str, prefix: &str) -> bool {
fn is_prunable_deployer_key(key: &str, deployer_namespace: &str) -> bool {
key.starts_with(&format!("{}/nodes/config/", deployer_namespace))
- || key.starts_with(&format!("{}/nodes/mapping/", deployer_namespace))
}
diff --git a/deployer/crates/deployer-ctl/src/main.rs b/deployer/crates/deployer-ctl/src/main.rs
index 0f870a4..40c4ca0 100644
--- a/deployer/crates/deployer-ctl/src/main.rs
+++ b/deployer/crates/deployer-ctl/src/main.rs
@@ -8,7 +8,7 @@ mod chainfire;
mod power;
mod remote;
-/// Deployer control CLI for PhotonCloud.
+/// Deployer control CLI for UltraCloud.
///
/// - 初回ブートストラップ時に Chainfire 上の Cluster/Node/Service 定義を作成
/// - 既存の Deployer クラスタに対して宣言的な設定を apply する
@@ -20,12 +20,12 @@ struct Cli {
#[arg(long, global = true, default_value = "http://127.0.0.1:7000")]
chainfire_endpoint: String,
- /// PhotonCloud Cluster ID (論理名)
+ /// UltraCloud Cluster ID (論理名)
#[arg(long, global = true)]
cluster_id: Option,
- /// PhotonCloud cluster namespace (default: photoncloud)
- #[arg(long, global = true, default_value = "photoncloud")]
+ /// UltraCloud cluster namespace (default: ultracloud)
+ #[arg(long, global = true, default_value = "ultracloud")]
cluster_namespace: String,
/// Deployer namespace used for machine_id -> NodeConfig bootstrap mappings
@@ -49,7 +49,7 @@ enum Command {
config: PathBuf,
},
- /// 宣言的な PhotonCloud クラスタ設定を Chainfire に apply する (GitOps 的に利用可能)
+ /// 宣言的な UltraCloud クラスタ設定を Chainfire に apply する (GitOps 的に利用可能)
Apply {
/// Cluster/Node/Service/Instance/MTLSPolicy を含むJSON/YAML
#[arg(long)]
@@ -60,7 +60,7 @@ enum Command {
prune: bool,
},
- /// Chainfire 上の PhotonCloud 関連キーをダンプする (デバッグ用途)
+ /// Chainfire 上の UltraCloud 関連キーをダンプする (デバッグ用途)
Dump {
/// ダンプ対象の prefix (未指定の場合は cluster-namespace を使用)
#[arg(long, default_value = "")]
diff --git a/deployer/crates/deployer-server/src/admin.rs b/deployer/crates/deployer-server/src/admin.rs
index c9cddfd..3a1510f 100644
--- a/deployer/crates/deployer-server/src/admin.rs
+++ b/deployer/crates/deployer-server/src/admin.rs
@@ -1,11 +1,8 @@
-//! Admin API endpoints for node management
-//!
-//! These endpoints allow administrators to pre-register nodes,
-//! list registered nodes, and manage node configurations.
+//! Admin API endpoints for node management.
use axum::{extract::State, http::HeaderMap, http::StatusCode, Json};
use chrono::{DateTime, Utc};
-use deployer_types::{InstallPlan, NodeConfig};
+use deployer_types::NodeConfig;
use serde::{Deserialize, Serialize};
use std::collections::HashSet;
use std::sync::Arc;
@@ -41,45 +38,16 @@ fn adjust_state_for_heartbeat(
state
}
-/// Pre-registration request payload
+/// Pre-registration request payload.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PreRegisterRequest {
/// Machine ID (from /etc/machine-id)
pub machine_id: String,
- /// Assigned node identifier
- pub node_id: String,
- /// Node role (control-plane, worker, storage, etc.)
- pub role: String,
- /// Optional: Node IP address
- #[serde(skip_serializing_if = "Option::is_none")]
- pub ip: Option,
- /// Optional: Services to run on this node
- #[serde(default)]
- pub services: Vec,
- /// Optional: SSH authorized keys for bootstrap access
- #[serde(default)]
- pub ssh_authorized_keys: Vec,
- /// Optional desired labels applied to the node
- #[serde(default)]
- pub labels: std::collections::HashMap,
- /// Optional pool assignment
- #[serde(default)]
- pub pool: Option,
- /// Optional node class assignment
- #[serde(default)]
- pub node_class: Option,
- /// Optional failure domain
- #[serde(default)]
- pub failure_domain: Option,
- /// Optional nix profile/flake attr
- #[serde(default)]
- pub nix_profile: Option,
- /// Optional explicit install plan for bootstrap installers.
- #[serde(default)]
- pub install_plan: Option,
+ /// Canonical bootstrap configuration that should be served back during phone-home.
+ pub node_config: NodeConfig,
}
-/// Pre-registration response payload
+/// Pre-registration response payload.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PreRegisterResponse {
pub success: bool,
@@ -89,14 +57,14 @@ pub struct PreRegisterResponse {
pub node_id: String,
}
-/// List nodes response payload
+/// List nodes response payload.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ListNodesResponse {
pub nodes: Vec,
pub total: usize,
}
-/// Node summary for listing
+/// Node summary for listing.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct NodeSummary {
pub node_id: String,
@@ -107,9 +75,6 @@ pub struct NodeSummary {
}
/// POST /api/v1/admin/nodes
-///
-/// Pre-register a machine mapping before it boots.
-/// This allows administrators to configure node assignments in advance.
pub async fn pre_register(
State(state): State>,
headers: HeaderMap,
@@ -117,42 +82,28 @@ pub async fn pre_register(
) -> Result, (StatusCode, String)> {
require_admin_auth(&state, &headers)?;
validate_identifier(&request.machine_id, "machine_id")?;
- validate_identifier(&request.node_id, "node_id")?;
- if let Some(ref ip) = request.ip {
- validate_ip(ip, "ip")?;
+ validate_identifier(&request.node_config.assignment.node_id, "node_id")?;
+ if !request.node_config.assignment.ip.is_empty() {
+ validate_ip(&request.node_config.assignment.ip, "ip")?;
}
+ let node_id = request.node_config.assignment.node_id.clone();
info!(
machine_id = %request.machine_id,
- node_id = %request.node_id,
- role = %request.role,
+ node_id = %node_id,
+ role = %request.node_config.assignment.role,
"Pre-registration request"
);
- let config = NodeConfig {
- hostname: request.node_id.clone(),
- role: request.role.clone(),
- ip: request.ip.clone().unwrap_or_default(),
- services: request.services.clone(),
- ssh_authorized_keys: request.ssh_authorized_keys.clone(),
- labels: request.labels.clone(),
- pool: request.pool.clone(),
- node_class: request.node_class.clone(),
- failure_domain: request.failure_domain.clone(),
- nix_profile: request.nix_profile.clone(),
- install_plan: request.install_plan.clone(),
- };
-
- // Conflict detection across configured backends
if let Some(local_storage) = &state.local_storage {
let storage = local_storage.lock().await;
- if let Some((existing_node, _)) = storage.get_node_config(&request.machine_id) {
- if existing_node != request.node_id {
+ if let Some(existing) = storage.get_node_config(&request.machine_id) {
+ if existing.assignment.node_id != node_id {
return Err((
StatusCode::CONFLICT,
format!(
"machine_id {} already mapped to {}",
- request.machine_id, existing_node
+ request.machine_id, existing.assignment.node_id
),
));
}
@@ -161,8 +112,8 @@ pub async fn pre_register(
if let Some(storage_mutex) = &state.storage {
let mut storage = storage_mutex.lock().await;
- if let Some(existing_node) = storage
- .get_node_mapping(&request.machine_id)
+ if let Some(existing) = storage
+ .get_node_config(&request.machine_id)
.await
.map_err(|e| {
(
@@ -171,12 +122,12 @@ pub async fn pre_register(
)
})?
{
- if existing_node != request.node_id {
+ if existing.assignment.node_id != node_id {
return Err((
StatusCode::CONFLICT,
format!(
"machine_id {} already mapped to {}",
- request.machine_id, existing_node
+ request.machine_id, existing.assignment.node_id
),
));
}
@@ -185,13 +136,13 @@ pub async fn pre_register(
{
let map = state.machine_configs.read().await;
- if let Some((existing_node, _)) = map.get(&request.machine_id) {
- if existing_node != &request.node_id {
+ if let Some(existing) = map.get(&request.machine_id) {
+ if existing.assignment.node_id != node_id {
return Err((
StatusCode::CONFLICT,
format!(
"machine_id {} already mapped to {}",
- request.machine_id, existing_node
+ request.machine_id, existing.assignment.node_id
),
));
}
@@ -201,7 +152,7 @@ pub async fn pre_register(
let mut stored_locally = false;
if let Some(local_storage) = &state.local_storage {
let mut storage = local_storage.lock().await;
- if let Err(e) = storage.register_node(&request.machine_id, &request.node_id, &config) {
+ if let Err(e) = storage.register_node(&request.machine_id, &request.node_config) {
error!(
machine_id = %request.machine_id,
error = %e,
@@ -211,30 +162,29 @@ pub async fn pre_register(
stored_locally = true;
info!(
machine_id = %request.machine_id,
- node_id = %request.node_id,
+ node_id = %node_id,
"Node pre-registered in local storage"
);
}
}
- // Try ChainFire storage
if let Some(storage_mutex) = &state.storage {
let mut storage = storage_mutex.lock().await;
match storage
- .register_node(&request.machine_id, &request.node_id, &config)
+ .register_node(&request.machine_id, &request.node_config)
.await
{
Ok(_) => {
info!(
machine_id = %request.machine_id,
- node_id = %request.node_id,
+ node_id = %node_id,
"Node pre-registered in ChainFire"
);
return Ok(Json(PreRegisterResponse {
success: true,
message: Some("Node pre-registered successfully".to_string()),
machine_id: request.machine_id,
- node_id: request.node_id,
+ node_id,
}));
}
Err(StorageError::Conflict(msg)) => {
@@ -256,15 +206,15 @@ pub async fn pre_register(
}
}
- // Fallback to in-memory storage
- state.machine_configs.write().await.insert(
- request.machine_id.clone(),
- (request.node_id.clone(), config),
- );
+ state
+ .machine_configs
+ .write()
+ .await
+ .insert(request.machine_id.clone(), request.node_config.clone());
debug!(
machine_id = %request.machine_id,
- node_id = %request.node_id,
+ node_id = %node_id,
"Node pre-registered in-memory (ChainFire unavailable)"
);
@@ -276,13 +226,11 @@ pub async fn pre_register(
"Node pre-registered (in-memory)".to_string()
}),
machine_id: request.machine_id,
- node_id: request.node_id,
+ node_id,
}))
}
/// GET /api/v1/admin/nodes
-///
-/// List all registered nodes.
pub async fn list_nodes(
State(state): State>,
headers: HeaderMap,
@@ -303,7 +251,6 @@ pub async fn list_nodes(
let cluster_namespace = state.config.cluster_namespace.trim();
let cluster_enabled = cluster_id.is_some() && !cluster_namespace.is_empty();
- // Prefer cluster node state from ChainFire (kept fresh by node-agent)
if cluster_enabled {
if let Some(storage_mutex) = &state.storage {
let mut storage = storage_mutex.lock().await;
@@ -340,7 +287,6 @@ pub async fn list_nodes(
}
}
- // Fallback to local cluster nodes if ChainFire data is unavailable or missing nodes
if cluster_enabled {
if let Some(local_storage) = &state.local_storage {
let storage = local_storage.lock().await;
@@ -371,7 +317,6 @@ pub async fn list_nodes(
}
}
- // Try ChainFire storage first
if let Some(storage_mutex) = &state.storage {
let mut storage = storage_mutex.lock().await;
match storage.list_nodes().await {
@@ -402,7 +347,6 @@ pub async fn list_nodes(
}
Err(e) => {
error!(error = %e, "Failed to list nodes from ChainFire");
- // Continue with in-memory fallback
}
}
}
@@ -434,7 +378,6 @@ pub async fn list_nodes(
}
}
- // Also include in-memory nodes (may have duplicates if ChainFire is available)
let in_memory = state.nodes.read().await;
for info in in_memory.values() {
if seen.contains(&info.id) {
@@ -459,20 +402,20 @@ pub async fn list_nodes(
seen.insert(info.id.clone());
}
- // Include pre-registered nodes that haven't phone-home yet (ChainFire)
if let Some(storage_mutex) = &state.storage {
let mut storage = storage_mutex.lock().await;
match storage.list_machine_configs().await {
Ok(configs) => {
- for (_machine_id, node_id, config) in configs {
+ for (_machine_id, config) in configs {
+ let node_id = config.assignment.node_id.clone();
if seen.contains(&node_id) {
continue;
}
nodes.push(NodeSummary {
node_id: node_id.clone(),
- hostname: config.hostname.clone(),
- ip: config.ip.clone(),
- role: config.role.clone(),
+ hostname: config.assignment.hostname.clone(),
+ ip: config.assignment.ip.clone(),
+ role: config.assignment.role.clone(),
state: "pre-registered".to_string(),
});
seen.insert(node_id);
@@ -484,38 +427,38 @@ pub async fn list_nodes(
}
}
- // Include pre-registered nodes from local storage
if let Some(local_storage) = &state.local_storage {
let storage = local_storage.lock().await;
- for (_machine_id, node_id, config) in storage.list_machine_configs() {
+ for (_machine_id, config) in storage.list_machine_configs() {
+ let node_id = config.assignment.node_id.clone();
if seen.contains(&node_id) {
continue;
}
nodes.push(NodeSummary {
node_id: node_id.clone(),
- hostname: config.hostname.clone(),
- ip: config.ip.clone(),
- role: config.role.clone(),
+ hostname: config.assignment.hostname.clone(),
+ ip: config.assignment.ip.clone(),
+ role: config.assignment.role.clone(),
state: "pre-registered".to_string(),
});
seen.insert(node_id);
}
}
- // Include pre-registered nodes from in-memory config map
let configs = state.machine_configs.read().await;
- for (_machine_id, (node_id, config)) in configs.iter() {
- if seen.contains(node_id) {
+ for config in configs.values() {
+ let node_id = config.assignment.node_id.clone();
+ if seen.contains(&node_id) {
continue;
}
nodes.push(NodeSummary {
node_id: node_id.clone(),
- hostname: config.hostname.clone(),
- ip: config.ip.clone(),
- role: config.role.clone(),
+ hostname: config.assignment.hostname.clone(),
+ ip: config.assignment.ip.clone(),
+ role: config.assignment.role.clone(),
state: "pre-registered".to_string(),
});
- seen.insert(node_id.clone());
+ seen.insert(node_id);
}
let total = nodes.len();
@@ -528,6 +471,7 @@ mod tests {
use crate::config::Config;
use crate::state::AppState;
use axum::http::HeaderMap;
+ use deployer_types::{BootstrapPlan, BootstrapSecrets, NodeAssignment};
fn test_headers() -> HeaderMap {
let mut headers = HeaderMap::new();
@@ -542,23 +486,39 @@ mod tests {
Arc::new(AppState::with_config(config))
}
+ fn test_node_config() -> NodeConfig {
+ NodeConfig::from_parts(
+ NodeAssignment {
+ node_id: "node-test".to_string(),
+ hostname: "node-test".to_string(),
+ role: "worker".to_string(),
+ ip: "10.0.1.50".to_string(),
+ labels: std::collections::HashMap::new(),
+ pool: None,
+ node_class: None,
+ failure_domain: None,
+ },
+ BootstrapPlan {
+ services: vec!["chainfire".to_string()],
+ nix_profile: None,
+ install_plan: None,
+ },
+ BootstrapSecrets {
+ ssh_authorized_keys: vec!["ssh-ed25519 AAAA... test".to_string()],
+ ssh_host_key: None,
+ tls_cert: None,
+ tls_key: None,
+ },
+ )
+ }
+
#[tokio::test]
async fn test_pre_register() {
let state = test_state();
let request = PreRegisterRequest {
machine_id: "new-machine-abc".to_string(),
- node_id: "node-test".to_string(),
- role: "worker".to_string(),
- ip: Some("10.0.1.50".to_string()),
- services: vec!["chainfire".to_string()],
- ssh_authorized_keys: vec!["ssh-ed25519 AAAA... test".to_string()],
- labels: std::collections::HashMap::new(),
- pool: None,
- node_class: None,
- failure_domain: None,
- nix_profile: None,
- install_plan: None,
+ node_config: test_node_config(),
};
let result =
@@ -570,12 +530,10 @@ mod tests {
assert_eq!(response.machine_id, "new-machine-abc");
assert_eq!(response.node_id, "node-test");
- // Verify stored in machine_configs
let configs = state.machine_configs.read().await;
- assert!(configs.contains_key("new-machine-abc"));
- let (node_id, config) = configs.get("new-machine-abc").unwrap();
- assert_eq!(node_id, "node-test");
- assert_eq!(config.role, "worker");
+ let config = configs.get("new-machine-abc").expect("stored config");
+ assert_eq!(config.assignment.node_id, "node-test");
+ assert_eq!(config.assignment.role, "worker");
}
#[tokio::test]
diff --git a/deployer/crates/deployer-server/src/bootstrap_assets.rs b/deployer/crates/deployer-server/src/bootstrap_assets.rs
index 1b6dc01..9f23f07 100644
--- a/deployer/crates/deployer-server/src/bootstrap_assets.rs
+++ b/deployer/crates/deployer-server/src/bootstrap_assets.rs
@@ -47,7 +47,7 @@ pub async fn flake_bundle(
),
(
header::CONTENT_DISPOSITION,
- HeaderValue::from_static("attachment; filename=\"plasmacloud-flake-bundle.tar.gz\""),
+ HeaderValue::from_static("attachment; filename=\"ultracloud-flake-bundle.tar.gz\""),
),
];
diff --git a/deployer/crates/deployer-server/src/cloud_init.rs b/deployer/crates/deployer-server/src/cloud_init.rs
index 592e02f..04587a9 100644
--- a/deployer/crates/deployer-server/src/cloud_init.rs
+++ b/deployer/crates/deployer-server/src/cloud_init.rs
@@ -20,7 +20,7 @@ pub async fn meta_data(
require_bootstrap_auth(&state, &headers)?;
validate_identifier(&machine_id, "machine_id")?;
- let Some((node_id, config)) = lookup_node_config(&state, &machine_id).await else {
+ let Some(config) = lookup_node_config(&state, &machine_id).await else {
return Err((
StatusCode::NOT_FOUND,
"machine-id not registered".to_string(),
@@ -29,7 +29,7 @@ pub async fn meta_data(
let body = format!(
"instance-id: {}\nlocal-hostname: {}\n",
- node_id, config.hostname
+ config.assignment.node_id, config.assignment.hostname
);
Ok(([(axum::http::header::CONTENT_TYPE, "text/plain")], body))
}
@@ -43,14 +43,14 @@ pub async fn user_data(
require_bootstrap_auth(&state, &headers)?;
validate_identifier(&machine_id, "machine_id")?;
- let Some((node_id, config)) = lookup_node_config(&state, &machine_id).await else {
+ let Some(config) = lookup_node_config(&state, &machine_id).await else {
return Err((
StatusCode::NOT_FOUND,
"machine-id not registered".to_string(),
));
};
- let body = render_user_data(&node_id, &config)
+ let body = render_user_data(&config)
.map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?;
Ok((
[(axum::http::header::CONTENT_TYPE, "text/cloud-config")],
@@ -80,9 +80,9 @@ fn indent_multiline(input: &str, indent: usize) -> String {
.join("\n")
}
-fn render_user_data(node_id: &str, config: &NodeConfig) -> anyhow::Result<String> {
+fn render_user_data(config: &NodeConfig) -> anyhow::Result<String> {
let node_config_json = serde_json::to_string_pretty(config)?;
- let ssh_keys = render_yaml_list(&config.ssh_authorized_keys, 2);
+ let ssh_keys = render_yaml_list(&config.bootstrap_secrets.ssh_authorized_keys, 2);
Ok(format!(
r#"#cloud-config
@@ -92,18 +92,18 @@ manage_etc_hosts: true
ssh_authorized_keys:
{ssh_keys}
write_files:
- - path: /etc/plasmacloud/node-id
+ - path: /etc/ultracloud/node-id
permissions: "0644"
content: |
{node_id_block}
- - path: /etc/plasmacloud/node-config.json
- permissions: "0644"
+ - path: /etc/ultracloud/node-config.json
+ permissions: "0600"
content: |
{node_config_block}
"#,
- hostname = config.hostname,
+ hostname = config.assignment.hostname,
ssh_keys = ssh_keys,
- node_id_block = indent_multiline(node_id, 6),
+ node_id_block = indent_multiline(&config.assignment.node_id, 6),
node_config_block = indent_multiline(&node_config_json, 6),
))
}
@@ -115,38 +115,52 @@ mod tests {
use crate::state::AppState;
use axum::body::Body;
use axum::http::Request;
- use deployer_types::InstallPlan;
+ use deployer_types::{BootstrapPlan, BootstrapSecrets, InstallPlan, NodeAssignment};
use tower::ServiceExt;
fn test_config() -> NodeConfig {
- NodeConfig {
- hostname: "node01".to_string(),
- role: "worker".to_string(),
- ip: "10.0.0.11".to_string(),
- services: vec!["prismnet".to_string()],
- ssh_authorized_keys: vec!["ssh-ed25519 AAAATEST test".to_string()],
- labels: std::collections::HashMap::from([("tier".to_string(), "general".to_string())]),
- pool: Some("general".to_string()),
- node_class: Some("worker".to_string()),
- failure_domain: Some("rack-a".to_string()),
- nix_profile: Some("profiles/worker".to_string()),
- install_plan: Some(InstallPlan {
- nixos_configuration: Some("worker-golden".to_string()),
- disko_config_path: Some("profiles/worker/disko.nix".to_string()),
- target_disk: Some("/dev/vda".to_string()),
- target_disk_by_id: None,
- }),
- }
+ NodeConfig::from_parts(
+ NodeAssignment {
+ node_id: "node01".to_string(),
+ hostname: "node01".to_string(),
+ role: "worker".to_string(),
+ ip: "10.0.0.11".to_string(),
+ labels: std::collections::HashMap::from([(
+ "tier".to_string(),
+ "general".to_string(),
+ )]),
+ pool: Some("general".to_string()),
+ node_class: Some("worker".to_string()),
+ failure_domain: Some("rack-a".to_string()),
+ },
+ BootstrapPlan {
+ services: vec!["prismnet".to_string()],
+ nix_profile: Some("profiles/worker".to_string()),
+ install_plan: Some(InstallPlan {
+ nixos_configuration: Some("worker-golden".to_string()),
+ disko_config_path: Some("profiles/worker/disko.nix".to_string()),
+ target_disk: Some("/dev/vda".to_string()),
+ target_disk_by_id: None,
+ }),
+ },
+ BootstrapSecrets {
+ ssh_authorized_keys: vec!["ssh-ed25519 AAAATEST test".to_string()],
+ ssh_host_key: None,
+ tls_cert: None,
+ tls_key: None,
+ },
+ )
}
#[test]
fn test_render_user_data_contains_node_config() {
- let rendered = render_user_data("node01", &test_config()).unwrap();
+ let rendered = render_user_data(&test_config()).unwrap();
assert!(rendered.contains("#cloud-config"));
assert!(rendered.contains("hostname: node01"));
- assert!(rendered.contains("/etc/plasmacloud/node-config.json"));
+ assert!(rendered.contains("/etc/ultracloud/node-config.json"));
assert!(rendered.contains("\"nix_profile\": \"profiles/worker\""));
assert!(rendered.contains("\"nixos_configuration\": \"worker-golden\""));
+ assert!(rendered.contains("\"node_id\": \"node01\""));
}
#[tokio::test]
@@ -154,10 +168,11 @@ mod tests {
let mut config = Config::default();
config.bootstrap_token = Some("test-token".to_string());
let state = Arc::new(AppState::with_config(config));
- state.machine_configs.write().await.insert(
- "machine-1".to_string(),
- ("node01".to_string(), test_config()),
- );
+ state
+ .machine_configs
+ .write()
+ .await
+ .insert("machine-1".to_string(), test_config());
let app = crate::build_router(state);
let response = app
diff --git a/deployer/crates/deployer-server/src/config.rs b/deployer/crates/deployer-server/src/config.rs
index a79ddc4..f42fe21 100644
--- a/deployer/crates/deployer-server/src/config.rs
+++ b/deployer/crates/deployer-server/src/config.rs
@@ -14,11 +14,11 @@ pub struct Config {
#[serde(default)]
pub chainfire: ChainFireConfig,
- /// PhotonCloud cluster ID (for writing desired state under photoncloud/clusters/...)
+ /// UltraCloud cluster ID (for writing desired state under ultracloud/clusters/...)
#[serde(default)]
pub cluster_id: Option<String>,
- /// Namespace prefix for PhotonCloud cluster state
+ /// Namespace prefix for UltraCloud cluster state
#[serde(default = "default_cluster_namespace")]
pub cluster_namespace: String,
@@ -30,7 +30,7 @@ pub struct Config {
#[serde(default = "default_local_state_path")]
pub local_state_path: Option<PathBuf>,
- /// Optional tar.gz bundle containing the PhotonCloud flake source tree for bootstrap installs
+ /// Optional tar.gz bundle containing the UltraCloud flake source tree for bootstrap installs
#[serde(default)]
pub bootstrap_flake_bundle_path: Option<PathBuf>,
@@ -168,7 +168,7 @@ fn default_chainfire_namespace() -> String {
}
fn default_cluster_namespace() -> String {
- "photoncloud".to_string()
+ "ultracloud".to_string()
}
fn default_heartbeat_timeout() -> u64 {
@@ -222,7 +222,7 @@ mod tests {
let config = Config::default();
assert_eq!(config.bind_addr.to_string(), "0.0.0.0:8080");
assert_eq!(config.chainfire.namespace, "deployer");
- assert_eq!(config.cluster_namespace, "photoncloud");
+ assert_eq!(config.cluster_namespace, "ultracloud");
assert!(config.cluster_id.is_none());
assert_eq!(config.heartbeat_timeout_secs, 300);
assert_eq!(
@@ -259,7 +259,7 @@ mod tests {
bind_addr = "127.0.0.1:18080"
cluster_id = "cluster-a"
allow_unauthenticated = true
- bootstrap_flake_bundle_path = "/tmp/plasmacloud-flake-bundle.tar.gz"
+ bootstrap_flake_bundle_path = "/tmp/ultracloud-flake-bundle.tar.gz"
[chainfire]
endpoints = ["http://10.0.0.1:2379"]
@@ -273,7 +273,7 @@ mod tests {
assert_eq!(config.cluster_id.as_deref(), Some("cluster-a"));
assert_eq!(
config.bootstrap_flake_bundle_path,
- Some(PathBuf::from("/tmp/plasmacloud-flake-bundle.tar.gz"))
+ Some(PathBuf::from("/tmp/ultracloud-flake-bundle.tar.gz"))
);
assert!(config.allow_unauthenticated);
assert_eq!(config.chainfire.namespace, "bootstrap");
diff --git a/deployer/crates/deployer-server/src/lib.rs b/deployer/crates/deployer-server/src/lib.rs
index a93a58b..c72a5d4 100644
--- a/deployer/crates/deployer-server/src/lib.rs
+++ b/deployer/crates/deployer-server/src/lib.rs
@@ -79,7 +79,7 @@ pub async fn run(config: Config) -> anyhow::Result<()> {
if state.config.cluster_id.is_none() {
tracing::warn!(
- "cluster_id not set; cluster node state won't be written to photoncloud/clusters"
+ "cluster_id not set; cluster node state won't be written to ultracloud/clusters"
);
}
diff --git a/deployer/crates/deployer-server/src/local_storage.rs b/deployer/crates/deployer-server/src/local_storage.rs
index 4ae1cb4..4c17c6f 100644
--- a/deployer/crates/deployer-server/src/local_storage.rs
+++ b/deployer/crates/deployer-server/src/local_storage.rs
@@ -19,7 +19,7 @@ use deployer_types::{NodeConfig, NodeInfo};
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
struct LocalState {
- machine_configs: HashMap<String, (String, NodeConfig)>,
+ machine_configs: HashMap<String, NodeConfig>,
nodes: HashMap<String, NodeInfo>,
cluster_nodes: HashMap,
ssh_host_keys: HashMap,
@@ -62,29 +62,23 @@ impl LocalStorage {
Ok(Self { state_path, state })
}
- pub fn register_node(
- &mut self,
- machine_id: &str,
- node_id: &str,
- config: &NodeConfig,
- ) -> Result<()> {
- if let Some((existing_id, _)) = self.state.machine_configs.get(machine_id) {
- if existing_id != node_id {
+ pub fn register_node(&mut self, machine_id: &str, config: &NodeConfig) -> Result<()> {
+ if let Some(existing) = self.state.machine_configs.get(machine_id) {
+ if existing.assignment.node_id != config.assignment.node_id {
anyhow::bail!(
"machine_id {} already mapped to {}",
machine_id,
- existing_id
+ existing.assignment.node_id
);
}
}
- self.state.machine_configs.insert(
- machine_id.to_string(),
- (node_id.to_string(), config.clone()),
- );
+ self.state
+ .machine_configs
+ .insert(machine_id.to_string(), config.clone());
self.save()
}
- pub fn get_node_config(&self, machine_id: &str) -> Option<(String, NodeConfig)> {
+ pub fn get_node_config(&self, machine_id: &str) -> Option<NodeConfig> {
self.state.machine_configs.get(machine_id).cloned()
}
@@ -103,13 +97,11 @@ impl LocalStorage {
self.state.nodes.values().cloned().collect()
}
- pub fn list_machine_configs(&self) -> Vec<(String, String, NodeConfig)> {
+ pub fn list_machine_configs(&self) -> Vec<(String, NodeConfig)> {
self.state
.machine_configs
.iter()
- .map(|(machine_id, (node_id, config))| {
- (machine_id.clone(), node_id.clone(), config.clone())
- })
+ .map(|(machine_id, config)| (machine_id.clone(), config.clone()))
.collect()
}
@@ -297,6 +289,7 @@ fn generate_ssh_host_key(node_id: &str, parent: Option<&Path>) -> Result
#[cfg(test)]
mod tests {
use super::*;
+ use deployer_types::{BootstrapPlan, BootstrapSecrets, NodeAssignment};
use std::collections::HashMap;
use std::fs;
@@ -315,22 +308,27 @@ mod tests {
let dir = temp_state_dir();
let mut storage = LocalStorage::open(&dir).expect("open storage");
- let config = NodeConfig {
- hostname: "node01".to_string(),
- role: "control-plane".to_string(),
- ip: "10.0.1.10".to_string(),
- services: vec!["chainfire".to_string()],
- ssh_authorized_keys: vec![],
- labels: HashMap::new(),
- pool: None,
- node_class: None,
- failure_domain: None,
- nix_profile: None,
- install_plan: None,
- };
+ let config = NodeConfig::from_parts(
+ NodeAssignment {
+ node_id: "node01".to_string(),
+ hostname: "node01".to_string(),
+ role: "control-plane".to_string(),
+ ip: "10.0.1.10".to_string(),
+ labels: HashMap::new(),
+ pool: None,
+ node_class: None,
+ failure_domain: None,
+ },
+ BootstrapPlan {
+ services: vec!["chainfire".to_string()],
+ nix_profile: None,
+ install_plan: None,
+ },
+ BootstrapSecrets::default(),
+ );
storage
- .register_node("machine-1", "node01", &config)
+ .register_node("machine-1", &config)
.expect("register node");
let node_info = NodeInfo {
@@ -351,8 +349,8 @@ mod tests {
let reopened = LocalStorage::open(&dir).expect("reopen storage");
let loaded = reopened.get_node_config("machine-1");
assert!(loaded.is_some());
- let (_, loaded_config) = loaded.unwrap();
- assert_eq!(loaded_config.hostname, "node01");
+ let loaded_config = loaded.unwrap();
+ assert_eq!(loaded_config.assignment.hostname, "node01");
let loaded_node = reopened.get_node_info("node01").expect("node info");
assert_eq!(loaded_node.hostname, "node01");
diff --git a/deployer/crates/deployer-server/src/phone_home.rs b/deployer/crates/deployer-server/src/phone_home.rs
index 317eee7..9a83e78 100644
--- a/deployer/crates/deployer-server/src/phone_home.rs
+++ b/deployer/crates/deployer-server/src/phone_home.rs
@@ -1,11 +1,12 @@
use axum::{extract::State, http::HeaderMap, http::StatusCode, Json};
use chrono::Utc;
use deployer_types::{
- CommissionState, EnrollmentRuleSpec, HardwareFacts, InstallPlan, InstallState,
- NodeClassSpec, NodeConfig, NodeInfo, NodePoolSpec, NodeState, PhoneHomeRequest,
- PhoneHomeResponse, PowerState,
+ BootstrapPlan, BootstrapSecrets, CommissionState, EnrollmentRuleSpec, HardwareFacts,
+ InstallPlan, InstallState, NodeAssignment, NodeClassSpec, NodeConfig, NodeInfo, NodePoolSpec,
+ NodeState, PhoneHomeRequest, PhoneHomeResponse, PowerState,
};
use sha2::{Digest, Sha256};
+use std::collections::HashMap;
use std::sync::Arc;
use tracing::{debug, error, info, warn};
@@ -22,7 +23,7 @@ fn merge_install_plan(
}
fn merge_hardware_summary_metadata(
- metadata: &mut std::collections::HashMap<String, String>,
+ metadata: &mut HashMap<String, String>,
hardware_facts: Option<&HardwareFacts>,
) {
let Some(hardware_facts) = hardware_facts else {
@@ -36,7 +37,10 @@ fn merge_hardware_summary_metadata(
metadata.insert("hardware.cpu_cores".to_string(), cpu_cores.to_string());
}
if let Some(memory_bytes) = hardware_facts.memory_bytes {
- metadata.insert("hardware.memory_bytes".to_string(), memory_bytes.to_string());
+ metadata.insert(
+ "hardware.memory_bytes".to_string(),
+ memory_bytes.to_string(),
+ );
}
metadata.insert(
"hardware.disk_count".to_string(),
@@ -47,7 +51,10 @@ fn merge_hardware_summary_metadata(
hardware_facts.nics.len().to_string(),
);
if let Some(architecture) = hardware_facts.architecture.as_deref() {
- metadata.insert("hardware.architecture".to_string(), architecture.to_string());
+ metadata.insert(
+ "hardware.architecture".to_string(),
+ architecture.to_string(),
+ );
}
}
@@ -60,14 +67,6 @@ fn inventory_hash(hardware_facts: Option<&HardwareFacts>) -> Option {
}
/// POST /api/v1/phone-home
-///
-/// Handles node registration during first boot.
-/// Nodes send their machine-id, and Deployer returns:
-/// - Node configuration (hostname, role, IP, services)
-/// - SSH host key
-/// - TLS certificates (optional)
-///
-/// Uses ChainFire storage when available, falls back to in-memory.
pub async fn phone_home(
State(state): State<Arc<AppState>>,
headers: HeaderMap,
@@ -87,18 +86,18 @@ pub async fn phone_home(
"Phone home request received"
);
- // Lookup node configuration (ChainFire or fallback)
- let (node_id, mut node_config) = match lookup_node_config(&state, &request.machine_id).await {
- Some((id, config)) => (id, config),
- None => {
- if let Some((id, config)) = resolve_enrollment_config(&state, &request).await? {
+ let mut node_config = match lookup_node_config(&state, &request.machine_id).await {
+ Some(config) => config,
+ None => match resolve_enrollment_config(&state, &request).await? {
+ Some(config) => {
info!(
machine_id = %request.machine_id,
- node_id = %id,
+ node_id = %config.assignment.node_id,
"Resolved unknown machine through enrollment rules"
);
- (id, config)
- } else {
+ config
+ }
+ None => {
if !state.config.allow_unknown_nodes {
warn!(
machine_id = %request.machine_id,
@@ -114,121 +113,95 @@ pub async fn phone_home(
machine_id = %request.machine_id,
"Unknown machine-id, assigning default configuration (unsafe)"
);
- // Assign default configuration for unknown machines (dev-only).
- // Prefer explicit node_id, then DHCP-provided hostname, then machine-id suffix.
- let node_id = request
- .node_id
- .as_ref()
- .map(|v| v.trim())
- .filter(|v| !v.is_empty())
- .map(|v| v.to_string())
- .or_else(|| {
- request
- .hostname
- .as_ref()
- .map(|v| v.trim())
- .filter(|v| !v.is_empty())
- .map(|v| v.to_string())
- })
- .unwrap_or_else(|| {
- let max_suffix_len = 128usize.saturating_sub("node-".len());
- let suffix_len = std::cmp::min(max_suffix_len, request.machine_id.len());
- format!("node-{}", &request.machine_id[..suffix_len])
- });
- let config = NodeConfig {
- hostname: node_id.clone(),
- role: "worker".to_string(),
- ip: request.ip.clone().unwrap_or_default(),
- services: vec![],
- ssh_authorized_keys: vec![],
- labels: std::collections::HashMap::new(),
- pool: None,
- node_class: None,
- failure_domain: request.metadata.get("failure_domain").cloned(),
- nix_profile: None,
- install_plan: None,
- };
- (node_id, config)
+ default_unknown_node_config(&request)
}
- }
+ },
};
- if let Some(request_ip) = request.ip.as_ref() {
- if !node_config.ip.is_empty() && node_config.ip != *request_ip {
- warn!(
- machine_id = %request.machine_id,
- requested_ip = %request_ip,
- expected_ip = %node_config.ip,
- "Node IP mismatch in phone-home"
- );
- return Err((StatusCode::BAD_REQUEST, "node ip mismatch".to_string()));
- }
- }
-
if let Some(requested_id) = request.node_id.as_ref() {
- if requested_id != &node_id {
+ if requested_id != &node_config.assignment.node_id {
warn!(
machine_id = %request.machine_id,
requested_id = %requested_id,
- expected_id = %node_id,
+ expected_id = %node_config.assignment.node_id,
"Node ID mismatch in phone-home"
);
return Err((StatusCode::BAD_REQUEST, "node_id mismatch".to_string()));
}
}
- if node_config.hostname.is_empty() {
- if let Some(hostname) = request.hostname.as_ref() {
- node_config.hostname = hostname.clone();
- } else {
- node_config.hostname = node_id.clone();
+ if node_config.assignment.hostname.is_empty() {
+ node_config.assignment.hostname = request
+ .hostname
+ .clone()
+ .filter(|value| !value.trim().is_empty())
+ .unwrap_or_else(|| node_config.assignment.node_id.clone());
+ }
+
+ if let Some(request_ip) = request.ip.as_ref() {
+ if !node_config.assignment.ip.is_empty() && node_config.assignment.ip != *request_ip {
+ warn!(
+ machine_id = %request.machine_id,
+ requested_ip = %request_ip,
+ expected_ip = %node_config.assignment.ip,
+ "Node IP mismatch in phone-home"
+ );
+ return Err((StatusCode::BAD_REQUEST, "node ip mismatch".to_string()));
}
}
- if node_config.ip.is_empty() {
+ if node_config.assignment.ip.is_empty() {
if let Some(ip) = request.ip.clone() {
- node_config.ip = ip;
+ node_config.assignment.ip = ip;
} else {
warn!(
machine_id = %request.machine_id,
- node_id = %node_id,
+ node_id = %node_config.assignment.node_id,
"Node config missing IP; refusing registration"
);
return Err((StatusCode::BAD_REQUEST, "node ip missing".to_string()));
}
}
- validate_ip(&node_config.ip, "node_config.ip")?;
+ validate_ip(&node_config.assignment.ip, "node_config.assignment.ip")?;
- // Ensure metadata contains authoritative role/service info
let mut metadata = request.metadata.clone();
- metadata.insert("role".to_string(), node_config.role.clone());
- metadata.insert("services".to_string(), node_config.services.join(","));
+ metadata.insert("role".to_string(), node_config.assignment.role.clone());
+ metadata.insert(
+ "services".to_string(),
+ node_config.bootstrap_plan.services.join(","),
+ );
merge_hardware_summary_metadata(&mut metadata, request.hardware_facts.as_ref());
- // Create NodeInfo for tracking
let node_info = NodeInfo {
- id: node_id.clone(),
+ id: node_config.assignment.node_id.clone(),
machine_id: Some(request.machine_id.clone()),
- hostname: node_config.hostname.clone(),
- ip: node_config.ip.clone(),
+ hostname: node_config.assignment.hostname.clone(),
+ ip: node_config.assignment.ip.clone(),
state: NodeState::Provisioning,
cluster_config_hash: request.cluster_config_hash.unwrap_or_default(),
last_heartbeat: Utc::now(),
metadata,
};
- // Persist config mapping for this machine (best-effort)
- if let Err(e) = persist_node_config(&state, &request.machine_id, &node_id, &node_config).await {
+ let mut response_config = node_config.clone();
+ response_config.bootstrap_secrets.ssh_host_key =
+ get_or_issue_ssh_host_key(&state, &node_info.id).await;
+
+ let (tls_cert, tls_key) =
+ get_or_issue_tls_material(&state, &node_info.id, &node_info.hostname, &node_info.ip).await;
+ response_config.bootstrap_secrets.tls_cert = tls_cert;
+ response_config.bootstrap_secrets.tls_key = tls_key;
+
+ if let Err(e) = persist_node_config(&state, &request.machine_id, &response_config).await {
warn!(
machine_id = %request.machine_id,
- node_id = %node_id,
+ node_id = %node_info.id,
error = %e,
"Failed to persist node configuration"
);
}
- // Store in ChainFire or in-memory
match store_node_info(&state, &node_info).await {
Ok(_) => {
let storage = if state.has_local_storage() {
@@ -241,7 +214,7 @@ pub async fn phone_home(
info!(
node_id = %node_info.id,
hostname = %node_info.hostname,
- role = %node_config.role,
+ role = %response_config.assignment.role,
storage = storage,
"Node registered successfully"
);
@@ -249,7 +222,7 @@ pub async fn phone_home(
if let Err(e) = store_cluster_node_if_configured(
&state,
&node_info,
- &node_config,
+ &response_config,
&request.machine_id,
request.hardware_facts.as_ref(),
)
@@ -262,66 +235,11 @@ pub async fn phone_home(
);
}
- let ssh_host_key = if let Some(local_storage) = &state.local_storage {
- let mut storage = local_storage.lock().await;
- match storage.get_or_generate_ssh_host_key(&node_info.id) {
- Ok(key) => Some(key),
- Err(e) => {
- warn!(error = %e, "Failed to generate ssh host key");
- None
- }
- }
- } else {
- None
- };
-
- let (tls_cert, tls_key) = if state.config.tls_self_signed
- || (state.config.tls_ca_cert_path.is_some()
- && state.config.tls_ca_key_path.is_some())
- {
- if let Some(local_storage) = &state.local_storage {
- let mut storage = local_storage.lock().await;
- match storage.get_or_generate_tls_cert(
- &node_info.id,
- &node_config.hostname,
- &node_config.ip,
- state.config.tls_ca_cert_path.as_deref(),
- state.config.tls_ca_key_path.as_deref(),
- ) {
- Ok((cert, key)) => (Some(cert), Some(key)),
- Err(e) => {
- warn!(error = %e, "Failed to issue node TLS certificate");
- (None, None)
- }
- }
- } else {
- match crate::tls::issue_node_cert(
- &node_info.id,
- &node_config.hostname,
- &node_config.ip,
- state.config.tls_ca_cert_path.as_deref(),
- state.config.tls_ca_key_path.as_deref(),
- ) {
- Ok((cert, key)) => (Some(cert), Some(key)),
- Err(e) => {
- warn!(error = %e, "Failed to issue node TLS certificate");
- (None, None)
- }
- }
- }
- } else {
- (None, None)
- };
-
Ok(Json(PhoneHomeResponse {
success: true,
message: Some(format!("Node {} registered successfully", node_info.id)),
- node_id: node_id.clone(),
state: NodeState::Provisioning,
- node_config: Some(node_config),
- ssh_host_key,
- tls_cert,
- tls_key,
+ node_config: response_config,
}))
}
Err(e) => {
@@ -339,39 +257,132 @@ pub async fn phone_home(
}
}
-/// Lookup node configuration by machine-id
-///
-/// Tries ChainFire first, then falls back to in-memory storage.
-pub(crate) async fn lookup_node_config(
+fn default_unknown_node_config(request: &PhoneHomeRequest) -> NodeConfig {
+ let node_id = request
+ .node_id
+ .as_ref()
+ .map(|v| v.trim())
+ .filter(|v| !v.is_empty())
+ .map(|v| v.to_string())
+ .or_else(|| {
+ request
+ .hostname
+ .as_ref()
+ .map(|v| v.trim())
+ .filter(|v| !v.is_empty())
+ .map(|v| v.to_string())
+ })
+ .unwrap_or_else(|| {
+ let max_suffix_len = 128usize.saturating_sub("node-".len());
+ let suffix_len = std::cmp::min(max_suffix_len, request.machine_id.len());
+ format!("node-{}", &request.machine_id[..suffix_len])
+ });
+
+ NodeConfig::from_parts(
+ NodeAssignment {
+ node_id: node_id.clone(),
+ hostname: request
+ .hostname
+ .clone()
+ .filter(|value| !value.trim().is_empty())
+ .unwrap_or_else(|| node_id.clone()),
+ role: "worker".to_string(),
+ ip: request.ip.clone().unwrap_or_default(),
+ labels: HashMap::new(),
+ pool: None,
+ node_class: None,
+ failure_domain: request.metadata.get("failure_domain").cloned(),
+ },
+ BootstrapPlan::default(),
+ BootstrapSecrets::default(),
+ )
+}
+
+async fn get_or_issue_ssh_host_key(state: &AppState, node_id: &str) -> Option<String> {
+ let Some(local_storage) = &state.local_storage else {
+ return None;
+ };
+
+ let mut storage = local_storage.lock().await;
+ match storage.get_or_generate_ssh_host_key(node_id) {
+ Ok(key) => Some(key),
+ Err(e) => {
+ warn!(error = %e, "Failed to generate ssh host key");
+ None
+ }
+ }
+}
+
+async fn get_or_issue_tls_material(
state: &AppState,
- machine_id: &str,
-) -> Option<(String, NodeConfig)> {
+ node_id: &str,
+ hostname: &str,
+ ip: &str,
+) -> (Option<String>, Option<String>) {
+ if !(state.config.tls_self_signed
+ || (state.config.tls_ca_cert_path.is_some() && state.config.tls_ca_key_path.is_some()))
+ {
+ return (None, None);
+ }
+
+ if let Some(local_storage) = &state.local_storage {
+ let mut storage = local_storage.lock().await;
+ match storage.get_or_generate_tls_cert(
+ node_id,
+ hostname,
+ ip,
+ state.config.tls_ca_cert_path.as_deref(),
+ state.config.tls_ca_key_path.as_deref(),
+ ) {
+ Ok((cert, key)) => (Some(cert), Some(key)),
+ Err(e) => {
+ warn!(error = %e, "Failed to issue node TLS certificate");
+ (None, None)
+ }
+ }
+ } else {
+ match crate::tls::issue_node_cert(
+ node_id,
+ hostname,
+ ip,
+ state.config.tls_ca_cert_path.as_deref(),
+ state.config.tls_ca_key_path.as_deref(),
+ ) {
+ Ok((cert, key)) => (Some(cert), Some(key)),
+ Err(e) => {
+ warn!(error = %e, "Failed to issue node TLS certificate");
+ (None, None)
+ }
+ }
+ }
+}
+
+/// Lookup node configuration by machine-id.
+pub(crate) async fn lookup_node_config(state: &AppState, machine_id: &str) -> Option<NodeConfig> {
debug!(machine_id = %machine_id, "Looking up node configuration");
- // Try local storage first
if let Some(local_storage) = &state.local_storage {
let storage = local_storage.lock().await;
- if let Some((node_id, config)) = storage.get_node_config(machine_id) {
+ if let Some(config) = storage.get_node_config(machine_id) {
debug!(
machine_id = %machine_id,
- node_id = %node_id,
+ node_id = %config.assignment.node_id,
"Found config in local storage"
);
- return Some((node_id, config));
+ return Some(config);
}
}
- // Try ChainFire storage first
if let Some(storage_mutex) = &state.storage {
let mut storage = storage_mutex.lock().await;
match storage.get_node_config(machine_id).await {
- Ok(Some((node_id, config))) => {
+ Ok(Some(config)) => {
debug!(
machine_id = %machine_id,
- node_id = %node_id,
+ node_id = %config.assignment.node_id,
"Found config in ChainFire"
);
- return Some((node_id, config));
+ return Some(config);
}
Ok(None) => {
debug!(machine_id = %machine_id, "Not found in ChainFire");
@@ -386,54 +397,56 @@ pub(crate) async fn lookup_node_config(
}
}
- // Fallback to in-memory storage
let configs = state.machine_configs.read().await;
- if let Some((node_id, config)) = configs.get(machine_id) {
+ if let Some(config) = configs.get(machine_id) {
debug!(
machine_id = %machine_id,
- node_id = %node_id,
+ node_id = %config.assignment.node_id,
"Found config in in-memory storage"
);
- return Some((node_id.clone(), config.clone()));
+ return Some(config.clone());
}
- // Hardcoded test mappings (for development/testing)
if state.config.allow_test_mappings {
match machine_id {
"test-machine-01" => {
- return Some((
- "node01".to_string(),
- NodeConfig {
+ return Some(NodeConfig::from_parts(
+ NodeAssignment {
+ node_id: "node01".to_string(),
hostname: "node01".to_string(),
role: "control-plane".to_string(),
ip: "10.0.1.10".to_string(),
- services: vec!["chainfire".to_string(), "flaredb".to_string()],
- ssh_authorized_keys: vec![],
- labels: std::collections::HashMap::new(),
+ labels: HashMap::new(),
pool: None,
node_class: None,
failure_domain: None,
+ },
+ BootstrapPlan {
+ services: vec!["chainfire".to_string(), "flaredb".to_string()],
nix_profile: None,
install_plan: None,
},
+ BootstrapSecrets::default(),
));
}
"test-machine-02" => {
- return Some((
- "node02".to_string(),
- NodeConfig {
+ return Some(NodeConfig::from_parts(
+ NodeAssignment {
+ node_id: "node02".to_string(),
hostname: "node02".to_string(),
role: "worker".to_string(),
ip: "10.0.1.11".to_string(),
- services: vec!["chainfire".to_string()],
- ssh_authorized_keys: vec![],
- labels: std::collections::HashMap::new(),
+ labels: HashMap::new(),
pool: None,
node_class: None,
failure_domain: None,
+ },
+ BootstrapPlan {
+ services: vec!["chainfire".to_string()],
nix_profile: None,
install_plan: None,
},
+ BootstrapSecrets::default(),
));
}
_ => {}
@@ -446,7 +459,7 @@ pub(crate) async fn lookup_node_config(
async fn resolve_enrollment_config(
state: &AppState,
request: &PhoneHomeRequest,
-) -> Result