nix-nos削除

This commit is contained in:
centra 2026-04-04 16:33:03 +09:00
parent 4ab47b1726
commit b8ebd24d4e
Signed by: centra
GPG key ID: 0C09689D20B25ACA
156 changed files with 6077 additions and 3982 deletions

View file

@ -59,7 +59,7 @@ jobs:
--github-output "$GITHUB_OUTPUT"
# Run CI gates for changed workspaces
# Uses the provider-agnostic 'photoncloud-gate' defined in nix/ci/flake.nix
# Uses the provider-agnostic 'ultracloud-gate' defined in nix/ci/flake.nix
gate:
needs: filter
if: ${{ needs.filter.outputs.any_changed == 'true' }}
@ -74,7 +74,7 @@ jobs:
- uses: DeterminateSystems/nix-installer-action@v11
- uses: DeterminateSystems/magic-nix-cache-action@v8
- name: Run PhotonCloud Gate
- name: Run UltraCloud Gate
run: |
nix run ./nix/ci#gate-ci -- --workspace ${{ matrix.workspace }} --tier 0 --no-logs

View file

@ -1,6 +1,6 @@
# Contributing
PhotonCloud uses Nix as the primary development and validation entrypoint.
UltraCloud uses Nix as the primary development and validation entrypoint.
## Setup

View file

@ -1,4 +1,4 @@
# PhotonCloud Makefile
# UltraCloud Makefile
# Unifies build and test commands
.PHONY: all build cluster-up cluster-down cluster-status cluster-validate cluster-smoke cluster-matrix cluster-bench-storage clean

View file

@ -1,6 +1,6 @@
# PhotonCloud
# UltraCloud
PhotonCloud is a Nix-first cloud platform workspace that assembles a small control plane, network services, VM hosting, shared storage, object storage, and gateway services into one reproducible repository.
UltraCloud is a Nix-first cloud platform workspace that assembles a small control plane, network services, VM hosting, shared storage, object storage, and gateway services into one reproducible repository.
The canonical local proof path is the six-node VM cluster under [`nix/test-cluster`](/home/centra/cloud/nix/test-cluster/README.md). It builds all guest images on the host, boots them as hardware-like QEMU nodes, and validates real multi-node behavior.
@ -47,6 +47,6 @@ nix run ./nix/test-cluster#cluster -- fresh-smoke
## Scope
PhotonCloud is centered on reproducible infrastructure behavior rather than polished end-user product surfaces. Some services, such as `creditservice`, are intentionally minimal reference implementations that prove integration points rather than full products.
UltraCloud is centered on reproducible infrastructure behavior rather than polished end-user product surfaces. Some services, such as `creditservice`, are intentionally minimal reference implementations that prove integration points rather than full products.
Host-level NixOS rollout validation is also expected to stay reproducible: the `deployer-vm-smoke` VM test now proves that `nix-agent` can activate a prebuilt target system closure directly, without recompiling the stack inside the guest.

View file

@ -10,8 +10,8 @@ version = "0.1.0"
edition = "2021"
license = "MIT OR Apache-2.0"
rust-version = "1.75"
authors = ["PlasmaCloud Contributors"]
repository = "https://github.com/yourorg/plasmacloud"
authors = ["UltraCloud Contributors"]
repository = "https://github.com/yourorg/ultracloud"
[workspace.dependencies]
# Internal crates

View file

@ -1,8 +1,8 @@
#!/usr/bin/env bash
# ==============================================================================
# PlasmaCloud NixOS Netboot Image Builder
# UltraCloud NixOS Netboot Image Builder
# ==============================================================================
# This script builds netboot images for bare-metal provisioning of PlasmaCloud.
# This script builds netboot images for bare-metal provisioning of UltraCloud.
#
# Usage:
# ./build-images.sh [--profile PROFILE] [--output-dir DIR] [--help]
@ -61,7 +61,7 @@ print_error() {
print_banner() {
echo ""
echo "╔════════════════════════════════════════════════════════════════╗"
echo "║ PlasmaCloud NixOS Netboot Image Builder ║"
echo "║ UltraCloud NixOS Netboot Image Builder ║"
echo "║ Building bare-metal provisioning images ║"
echo "╚════════════════════════════════════════════════════════════════╝"
echo ""
@ -72,11 +72,11 @@ print_usage() {
cat << EOF
Usage: $0 [OPTIONS]
Build NixOS netboot images for PlasmaCloud bare-metal provisioning.
Build NixOS netboot images for UltraCloud bare-metal provisioning.
OPTIONS:
--profile PROFILE Build specific profile:
- control-plane: All 8 PlasmaCloud services
- control-plane: All 8 UltraCloud services
- worker: Compute-focused services (PlasmaVMC, PrismNET)
- all-in-one: All services for single-node deployment
- all: Build all profiles (default)
@ -107,9 +107,9 @@ OUTPUT:
- netboot.ipxe iPXE boot script
ENVIRONMENT:
PLASMACLOUD_DEPLOYER_URL Optional deployer endpoint embedded into generated netboot.ipxe
PLASMACLOUD_BOOTSTRAP_TOKEN Optional bootstrap token embedded into generated netboot.ipxe
PLASMACLOUD_CA_CERT_URL Optional CA certificate URL embedded into generated netboot.ipxe
ULTRACLOUD_DEPLOYER_URL Optional deployer endpoint embedded into generated netboot.ipxe
ULTRACLOUD_BOOTSTRAP_TOKEN Optional bootstrap token embedded into generated netboot.ipxe
ULTRACLOUD_CA_CERT_URL Optional CA certificate URL embedded into generated netboot.ipxe
EOF
}
@ -157,14 +157,14 @@ build_profile() {
fi
local deployer_kernel_args=""
if [ -n "${PLASMACLOUD_DEPLOYER_URL:-}" ]; then
deployer_kernel_args+=" plasmacloud.deployer_url=${PLASMACLOUD_DEPLOYER_URL}"
if [ -n "${ULTRACLOUD_DEPLOYER_URL:-}" ]; then
deployer_kernel_args+=" ultracloud.deployer_url=${ULTRACLOUD_DEPLOYER_URL}"
fi
if [ -n "${PLASMACLOUD_BOOTSTRAP_TOKEN:-}" ]; then
deployer_kernel_args+=" plasmacloud.bootstrap_token=${PLASMACLOUD_BOOTSTRAP_TOKEN}"
if [ -n "${ULTRACLOUD_BOOTSTRAP_TOKEN:-}" ]; then
deployer_kernel_args+=" ultracloud.bootstrap_token=${ULTRACLOUD_BOOTSTRAP_TOKEN}"
fi
if [ -n "${PLASMACLOUD_CA_CERT_URL:-}" ]; then
deployer_kernel_args+=" plasmacloud.ca_cert_url=${PLASMACLOUD_CA_CERT_URL}"
if [ -n "${ULTRACLOUD_CA_CERT_URL:-}" ]; then
deployer_kernel_args+=" ultracloud.ca_cert_url=${ULTRACLOUD_CA_CERT_URL}"
fi
# Generate iPXE boot script
@ -172,14 +172,14 @@ build_profile() {
cat > "$profile_dir/netboot.ipxe" << EOF
#!ipxe
# PlasmaCloud Netboot - $profile
# UltraCloud Netboot - $profile
# Generated: $(date -u +"%Y-%m-%d %H:%M:%S UTC")
# Set variables
set boot-server \${boot-url}
# Display info
echo Loading PlasmaCloud ($profile profile)...
echo Loading UltraCloud ($profile profile)...
echo Kernel: bzImage
echo Initrd: initrd
echo

View file

@ -57,7 +57,7 @@ echo ""
sleep 2
echo "rc-service sshd restart" # Restart with new config
sleep 2
echo "echo 'root:plasmacloud' | chpasswd" # Set root password
echo "echo 'root:ultracloud' | chpasswd" # Set root password
sleep 2
echo "ip addr show" # Show network info
sleep 2
@ -72,7 +72,7 @@ echo ""
echo "=== SSH Setup Complete ==="
echo "SSH should now be accessible via:"
echo " ssh -p 2202 root@localhost"
echo " Password: plasmacloud"
echo " Password: ultracloud"
echo ""
echo "Test with: ssh -o StrictHostKeyChecking=no -p 2202 root@localhost 'echo SSH_OK'"
echo ""

View file

@ -1,7 +1,7 @@
#!/usr/bin/env bash
set -euo pipefail
# PlasmaCloud VM Cluster - Node 01 (Disk Boot)
# UltraCloud VM Cluster - Node 01 (Disk Boot)
# Boots from installed NixOS on disk
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"

View file

@ -1,7 +1,7 @@
#!/usr/bin/env bash
set -euo pipefail
# PlasmaCloud VM Cluster - Node 01 (ISO Boot + Dual Networking)
# UltraCloud VM Cluster - Node 01 (ISO Boot + Dual Networking)
# Features:
# - Multicast socket for inter-VM L2 communication (eth0)
# - SLIRP with SSH port forward for host access (eth1)

View file

@ -1,7 +1,7 @@
#!/usr/bin/env bash
set -euo pipefail
# PlasmaCloud VM Cluster - Node 01 (Boot from installed NixOS on disk)
# UltraCloud VM Cluster - Node 01 (Boot from installed NixOS on disk)
# UEFI boot with OVMF firmware
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"

View file

@ -1,8 +1,8 @@
#!/usr/bin/env bash
set -euo pipefail
# PlasmaCloud VM Cluster - Node 01 (ISO Boot)
# Boots from PlasmaCloud ISO for manual NixOS installation
# UltraCloud VM Cluster - Node 01 (ISO Boot)
# Boots from UltraCloud ISO for manual NixOS installation
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
DISK="${SCRIPT_DIR}/node01.qcow2"

View file

@ -1,7 +1,7 @@
#!/usr/bin/env bash
set -euo pipefail
# PlasmaCloud VM Cluster - Node 01 (Netboot with SSH Key)
# UltraCloud VM Cluster - Node 01 (Netboot with SSH Key)
# Features:
# - Direct kernel/initrd boot (no ISO required)
# - SSH key authentication baked in (no password setup needed)

View file

@ -1,7 +1,7 @@
#!/usr/bin/env bash
set -euo pipefail
# PlasmaCloud VM Cluster - Node 01 (VDE Networking)
# UltraCloud VM Cluster - Node 01 (VDE Networking)
# Uses VDE switch instead of multicast sockets
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"

View file

@ -1,7 +1,7 @@
#!/usr/bin/env bash
set -euo pipefail
# PlasmaCloud VM Cluster - Node 02 (Alpine Bootstrap)
# UltraCloud VM Cluster - Node 02 (Alpine Bootstrap)
# Features:
# - Alpine virt ISO for automated SSH setup
# - Multicast socket for inter-VM L2 communication (eth0)

View file

@ -1,7 +1,7 @@
#!/usr/bin/env bash
set -euo pipefail
# PlasmaCloud VM Cluster - Node 02 (Disk Boot)
# UltraCloud VM Cluster - Node 02 (Disk Boot)
# Boots from installed NixOS on disk
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"

View file

@ -1,7 +1,7 @@
#!/usr/bin/env bash
set -euo pipefail
# PlasmaCloud VM Cluster - Node 02 (Boot from installed NixOS on disk)
# UltraCloud VM Cluster - Node 02 (Boot from installed NixOS on disk)
# Boots from the NixOS installation created by nixos-anywhere
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"

View file

@ -1,7 +1,7 @@
#!/usr/bin/env bash
set -euo pipefail
# PlasmaCloud VM Cluster - Node 02 (ISO Boot)
# UltraCloud VM Cluster - Node 02 (ISO Boot)
# Boots from NixOS ISO for provisioning via nixos-anywhere
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"

View file

@ -1,7 +1,7 @@
#!/usr/bin/env bash
set -euo pipefail
# PlasmaCloud VM Cluster - Node 01 (Netboot with SSH Key)
# UltraCloud VM Cluster - Node 01 (Netboot with SSH Key)
# Features:
# - Direct kernel/initrd boot (no ISO required)
# - SSH key authentication baked in (no password setup needed)

View file

@ -1,7 +1,7 @@
#!/usr/bin/env bash
set -euo pipefail
# PlasmaCloud VM Cluster - Node 02 (Recovery Boot)
# UltraCloud VM Cluster - Node 02 (Recovery Boot)
# Boots from disk using new kernel/initrd from nix store
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"

View file

@ -1,7 +1,7 @@
#!/usr/bin/env bash
set -euo pipefail
# PlasmaCloud VM Cluster - Node 02 (VDE Networking)
# UltraCloud VM Cluster - Node 02 (VDE Networking)
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
DISK="${SCRIPT_DIR}/node02.qcow2"

View file

@ -1,7 +1,7 @@
#!/usr/bin/env bash
set -euo pipefail
# PlasmaCloud VM Cluster - Node 03 (Disk Boot)
# UltraCloud VM Cluster - Node 03 (Disk Boot)
# Boots from installed NixOS on disk
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"

View file

@ -1,7 +1,7 @@
#!/usr/bin/env bash
set -euo pipefail
# PlasmaCloud VM Cluster - Node 03 (Boot from installed NixOS on disk)
# UltraCloud VM Cluster - Node 03 (Boot from installed NixOS on disk)
# Boots from the NixOS installation created by nixos-anywhere
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"

View file

@ -1,7 +1,7 @@
#!/usr/bin/env bash
set -euo pipefail
# PlasmaCloud VM Cluster - Node 03 (ISO Boot)
# UltraCloud VM Cluster - Node 03 (ISO Boot)
# Boots from NixOS ISO for provisioning via nixos-anywhere
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"

View file

@ -1,7 +1,7 @@
#!/usr/bin/env bash
set -euo pipefail
# PlasmaCloud VM Cluster - Node 01 (Netboot with SSH Key)
# UltraCloud VM Cluster - Node 01 (Netboot with SSH Key)
# Features:
# - Direct kernel/initrd boot (no ISO required)
# - SSH key authentication baked in (no password setup needed)

View file

@ -1,7 +1,7 @@
#!/usr/bin/env bash
set -euo pipefail
# PlasmaCloud VM Cluster - Node 03 (Recovery Boot)
# UltraCloud VM Cluster - Node 03 (Recovery Boot)
# Boots from disk using new kernel/initrd from nix store
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"

View file

@ -1,7 +1,7 @@
#!/usr/bin/env bash
set -euo pipefail
# PlasmaCloud VM Cluster - Node 03 (VDE Networking)
# UltraCloud VM Cluster - Node 03 (VDE Networking)
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
DISK="${SCRIPT_DIR}/node03.qcow2"

View file

@ -5,7 +5,7 @@
set -e
echo "=== PlasmaCloud PXE Server Setup ==="
echo "=== UltraCloud PXE Server Setup ==="
echo "This script will:"
echo "1. Install Alpine Linux to disk"
echo "2. Configure static networking (192.168.100.1)"
@ -61,7 +61,7 @@ chroot /mnt apk add --no-cache \
# 8. Configure dnsmasq in the new system
cat > /mnt/etc/dnsmasq.conf <<'EOF'
# PlasmaCloud PXE Server dnsmasq configuration
# UltraCloud PXE Server dnsmasq configuration
# Interface to listen on (multicast network)
interface=eth0
@ -109,14 +109,14 @@ chroot /mnt rc-update add dnsmasq default
chroot /mnt rc-update add sshd default
# 13. Set root password (for SSH access)
echo "root:plasmacloud" | chroot /mnt chpasswd
echo "root:ultracloud" | chroot /mnt chpasswd
echo ""
echo "=== Installation Complete ==="
echo "System will reboot from disk"
echo "PXE server will be available at: 192.168.100.1"
echo "DHCP range: 192.168.100.100-150"
echo "SSH: ssh root@192.168.100.1 (password: plasmacloud)"
echo "SSH: ssh root@192.168.100.1 (password: ultracloud)"
echo ""
echo "Press Enter to reboot..."
read

View file

@ -79,7 +79,7 @@
services.deployer = {
enable = true;
bindAddr = "0.0.0.0:8080";
clusterId = "plasmacloud-vm-cluster";
clusterId = "ultracloud-vm-cluster";
requireChainfire = false;
allowUnauthenticated = true;
allowUnknownNodes = true;
@ -87,7 +87,7 @@
};
# Root password (for SSH access)
users.users.root.password = "plasmacloud";
users.users.root.password = "ultracloud";
# Packages
environment.systemPackages = with pkgs; [

View file

@ -86,7 +86,7 @@ def cmd_list_vms(args):
def main():
global DEFAULT_API_URL
parser = argparse.ArgumentParser(description="PhotonCloud CLI")
parser = argparse.ArgumentParser(description="UltraCloud CLI")
parser.add_argument("--token", help="Auth token", default=os.environ.get("CLOUD_TOKEN"))
parser.add_argument("--url", help="API URL", default=DEFAULT_API_URL)

View file

@ -1,13 +1,13 @@
#!ipxe
# PlasmaCloud Netboot - control-plane
# UltraCloud Netboot - control-plane
# Generated: 2025-12-10 21:58:15 UTC
# Set variables
set boot-server ${boot-url}
# Display info
echo Loading PlasmaCloud (control-plane profile)...
echo Loading UltraCloud (control-plane profile)...
echo Kernel: bzImage
echo Initrd: initrd
echo

View file

@ -182,7 +182,7 @@ set kernel-params ${kernel-params} centra.profile=${profile}
set kernel-params ${kernel-params} centra.hostname=${hostname}
set kernel-params ${kernel-params} centra.mac=${mac}
set kernel-params ${kernel-params} centra.provisioning-server=${provisioning-server}
set kernel-params ${kernel-params} plasmacloud.deployer_url=${deployer-url}
set kernel-params ${kernel-params} ultracloud.deployer_url=${deployer-url}
set kernel-params ${kernel-params} console=tty0 console=ttyS0,115200n8
# For debugging, enable these:

View file

@ -146,9 +146,9 @@ let
set kernel-params ''${kernel-params} centra.hostname=''${hostname}
set kernel-params ''${kernel-params} centra.mac=''${mac}
set kernel-params ''${kernel-params} centra.provisioning-server=''${provisioning-server}
set kernel-params ''${kernel-params} plasmacloud.deployer_url=''${deployer-url}
${optionalString (cfg.bootstrap.bootstrapToken != null) "set kernel-params ''${kernel-params} plasmacloud.bootstrap_token=${cfg.bootstrap.bootstrapToken}"}
${optionalString (cfg.bootstrap.caCertUrl != null) "set kernel-params ''${kernel-params} plasmacloud.ca_cert_url=${cfg.bootstrap.caCertUrl}"}
set kernel-params ''${kernel-params} ultracloud.deployer_url=''${deployer-url}
${optionalString (cfg.bootstrap.bootstrapToken != null) "set kernel-params ''${kernel-params} ultracloud.bootstrap_token=${cfg.bootstrap.bootstrapToken}"}
${optionalString (cfg.bootstrap.caCertUrl != null) "set kernel-params ''${kernel-params} ultracloud.ca_cert_url=${cfg.bootstrap.caCertUrl}"}
set kernel-params ''${kernel-params} console=tty0 console=ttyS0,115200n8
kernel ''${nixos-url}/bzImage ''${kernel-params} || goto failed

View file

@ -1,6 +1,6 @@
//! Metadata-oriented KV facade for Chainfire (and test backends).
//!
//! This module exists to standardize how PhotonCloud services interact with
//! This module exists to standardize how UltraCloud services interact with
//! control-plane metadata: versioned reads, CAS, prefix scans, etc.
use async_trait::async_trait;

View file

@ -2,9 +2,9 @@
name = "photocloud-client-common"
version = "0.1.0"
edition = "2021"
authors = ["PhotonCloud"]
authors = ["UltraCloud"]
license = "MIT OR Apache-2.0"
description = "Shared client config types (endpoint/auth/retry) for PhotonCloud SDKs"
description = "Shared client config types (endpoint/auth/retry) for UltraCloud SDKs"
[dependencies]
tonic = { version = "0.12", features = ["tls"] }

View file

@ -1,4 +1,4 @@
//! Shared client config types (endpoint/auth/retry) for PhotonCloud SDKs.
//! Shared client config types (endpoint/auth/retry) for UltraCloud SDKs.
//!
//! Lightweight, type-only helpers to keep SDK crates consistent without
//! forcing a unified SDK dependency tree.

View file

@ -9,8 +9,8 @@ version = "0.1.0"
edition = "2021"
license = "MIT OR Apache-2.0"
rust-version = "1.75"
authors = ["PhotonCloud Contributors"]
repository = "https://github.com/photoncloud/photoncloud"
authors = ["UltraCloud Contributors"]
repository = "https://github.com/ultracloud/ultracloud"
[workspace.dependencies]
axum = "0.8"

View file

@ -1,6 +1,6 @@
# CoronaFS
CoronaFS is PhotonCloud's mutable VM-volume layer.
CoronaFS is UltraCloud's mutable VM-volume layer.
Current implementation:

View file

@ -3,7 +3,7 @@ name = "photon-auth-client"
version = "0.1.0"
edition = "2021"
license = "MIT OR Apache-2.0"
description = "Shared IAM auth client wrapper for PhotonCloud services"
description = "Shared IAM auth client wrapper for UltraCloud services"
[dependencies]
anyhow = "1.0"

View file

@ -3,7 +3,7 @@ name = "photon-config"
version = "0.1.0"
edition = "2021"
license = "MIT OR Apache-2.0"
description = "Shared configuration loading helpers for PhotonCloud"
description = "Shared configuration loading helpers for UltraCloud"
[dependencies]
anyhow = "1.0"

View file

@ -3,7 +3,7 @@ name = "photon-runtime"
version = "0.1.0"
edition = "2021"
license = "MIT OR Apache-2.0"
description = "Shared runtime helpers for PhotonCloud services"
description = "Shared runtime helpers for UltraCloud services"
[dependencies]
anyhow = "1.0"

View file

@ -3,7 +3,7 @@ name = "photon-state"
version = "0.1.0"
edition = "2021"
license = "MIT OR Apache-2.0"
description = "Shared state backend types and validation for PhotonCloud services"
description = "Shared state backend types and validation for UltraCloud services"
[dependencies]
anyhow = "1.0"

View file

@ -13,8 +13,8 @@ version = "0.1.0"
edition = "2021"
license = "MIT OR Apache-2.0"
rust-version = "1.75"
authors = ["PhotonCloud Contributors"]
repository = "https://github.com/photoncloud/creditservice"
authors = ["UltraCloud Contributors"]
repository = "https://github.com/ultracloud/creditservice"
[workspace.dependencies]
# Internal crates
@ -27,7 +27,7 @@ photon-config = { path = "../crates/photon-config" }
photon-runtime = { path = "../crates/photon-runtime" }
photon-state = { path = "../crates/photon-state" }
# External dependencies (aligned with PhotonCloud stack)
# External dependencies (aligned with UltraCloud stack)
tokio = { version = "1.40", features = ["full"] }
tokio-stream = "0.1"
futures = "0.3"

View file

@ -1,6 +1,6 @@
# CreditService
`creditservice` is a minimal reference service that proves PhotonCloud can integrate vendor-specific quota and credit control with platform auth and gateway admission.
`creditservice` is a minimal reference service that proves UltraCloud can integrate vendor-specific quota and credit control with platform auth and gateway admission.
It is intentionally not a full billing product.

44
deployer/Cargo.lock generated
View file

@ -2028,28 +2028,6 @@ version = "0.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b4596b6d070b27117e987119b4dac604f3c58cfb0b191112e24771b2faeac1a6"
[[package]]
name = "plasmacloud-reconciler"
version = "0.1.0"
dependencies = [
"anyhow",
"chainfire-client",
"chrono",
"clap",
"deployer-types",
"fiberlb-api",
"flashdns-api",
"iam-client",
"iam-types",
"prismnet-api",
"serde",
"serde_json",
"tokio",
"tonic",
"tracing",
"tracing-subscriber",
]
[[package]]
name = "polyval"
version = "0.6.2"
@ -3404,6 +3382,28 @@ version = "1.19.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "562d481066bde0658276a35467c4af00bdc6ee726305698a55b86e61d7ad82bb"
[[package]]
name = "ultracloud-reconciler"
version = "0.1.0"
dependencies = [
"anyhow",
"chainfire-client",
"chrono",
"clap",
"deployer-types",
"fiberlb-api",
"flashdns-api",
"iam-client",
"iam-types",
"prismnet-api",
"serde",
"serde_json",
"tokio",
"tonic",
"tracing",
"tracing-subscriber",
]
[[package]]
name = "unicode-bidi"
version = "0.3.18"

View file

@ -7,7 +7,7 @@ members = [
"crates/nix-agent",
"crates/cert-authority",
"crates/deployer-ctl",
"crates/plasmacloud-reconciler",
"crates/ultracloud-reconciler",
"crates/fleet-scheduler",
]
@ -15,9 +15,9 @@ members = [
version = "0.1.0"
edition = "2021"
rust-version = "1.75"
authors = ["PhotonCloud Contributors"]
authors = ["UltraCloud Contributors"]
license = "MIT OR Apache-2.0"
repository = "https://github.com/centra/plasmacloud"
repository = "https://github.com/centra/ultracloud"
[workspace.dependencies]
# Internal crates

View file

@ -12,7 +12,7 @@ use serde::{Deserialize, Serialize};
use tracing::{info, warn};
use tracing_subscriber::EnvFilter;
const DEFAULT_CLUSTER_NAMESPACE: &str = "photoncloud";
const DEFAULT_CLUSTER_NAMESPACE: &str = "ultracloud";
const CERT_TTL_DAYS: u64 = 90;
const ROTATION_THRESHOLD_DAYS: u64 = 30;
@ -111,12 +111,12 @@ async fn init_ca(cert_path: &PathBuf, key_path: &PathBuf) -> Result<()> {
.context("failed to generate CA key pair")?;
// CA証明書パラメータを設定
let mut params = CertificateParams::new(vec!["PhotonCloud CA".to_string()])
let mut params = CertificateParams::new(vec!["UltraCloud CA".to_string()])
.context("failed to create certificate params")?;
let mut distinguished_name = DistinguishedName::new();
distinguished_name.push(DnType::OrganizationName, "PhotonCloud");
distinguished_name.push(DnType::CommonName, "PhotonCloud CA");
distinguished_name.push(DnType::OrganizationName, "UltraCloud");
distinguished_name.push(DnType::CommonName, "UltraCloud CA");
params.distinguished_name = distinguished_name;
params.is_ca = rcgen::IsCa::Ca(rcgen::BasicConstraints::Unconstrained);
params.key_usages = vec![
@ -215,7 +215,7 @@ async fn issue_certificate(
ensure_dns_san(&mut csr_params.params, svc);
}
if csr_params.params.subject_alt_names.is_empty() {
ensure_dns_san(&mut csr_params.params, "photoncloud-service");
ensure_dns_san(&mut csr_params.params, "ultracloud-service");
}
// CA署名証明書を生成CSRの公開鍵を利用

View file

@ -6,9 +6,10 @@ use anyhow::{Context, Result};
use chainfire_client::{Client, ClientError};
use chrono::Utc;
use deployer_types::{
ClusterNodeRecord, ClusterStateSpec, CommissionState, DesiredSystemSpec, HostDeploymentSpec,
HostDeploymentStatus, InstallPlan, InstallState, NodeConfig, NodeSpec, ObservedSystemState,
PowerState, ServiceInstanceSpec, ServicePublicationState, ServiceSpec, ServiceStatusRecord,
BootstrapPlan, ClusterNodeRecord, ClusterStateSpec, CommissionState, DesiredSystemSpec,
HostDeploymentSpec, HostDeploymentStatus, InstallPlan, InstallState, NodeAssignment,
NodeConfig, NodeSpec, ObservedSystemState, PowerState, ServiceInstanceSpec,
ServicePublicationState, ServiceSpec, ServiceStatusRecord,
};
use serde::de::DeserializeOwned;
use serde_json::{json, Value};
@ -32,10 +33,6 @@ fn deployer_node_config_key(deployer_namespace: &str, machine_id: &str) -> Vec<u
format!("{}/nodes/config/{}", deployer_namespace, machine_id).into_bytes()
}
fn deployer_node_mapping_key(deployer_namespace: &str, machine_id: &str) -> Vec<u8> {
format!("{}/nodes/mapping/{}", deployer_namespace, machine_id).into_bytes()
}
fn key_node(cluster_namespace: &str, cluster_id: &str, node_id: &str) -> Vec<u8> {
format!(
"{}nodes/{}",
@ -217,7 +214,8 @@ fn merge_install_plan(
}
fn node_config_from_spec(node: &NodeSpec) -> NodeConfig {
NodeConfig {
let assignment = NodeAssignment {
node_id: node.node_id.clone(),
hostname: node.hostname.clone(),
role: node
.roles
@ -225,15 +223,21 @@ fn node_config_from_spec(node: &NodeSpec) -> NodeConfig {
.cloned()
.unwrap_or_else(|| "worker".to_string()),
ip: node.ip.clone(),
services: Vec::new(),
ssh_authorized_keys: Vec::new(),
labels: node.labels.clone(),
pool: node.pool.clone(),
node_class: node.node_class.clone(),
failure_domain: node.failure_domain.clone(),
};
let bootstrap_plan = BootstrapPlan {
services: Vec::new(),
nix_profile: node.nix_profile.clone(),
install_plan: node.install_plan.clone(),
}
};
NodeConfig::from_parts(
assignment,
bootstrap_plan,
deployer_types::BootstrapSecrets::default(),
)
}
fn desired_system_from_spec(node: &NodeSpec) -> Option<DesiredSystemSpec> {
@ -340,7 +344,7 @@ fn resolve_nodes(spec: &ClusterStateSpec) -> Result<Vec<NodeSpec>> {
.or_insert_with(|| pool.to_string());
resolved
.labels
.entry("pool.photoncloud.io/name".to_string())
.entry("pool.ultracloud.io/name".to_string())
.or_insert_with(|| pool.to_string());
}
@ -351,7 +355,7 @@ fn resolve_nodes(spec: &ClusterStateSpec) -> Result<Vec<NodeSpec>> {
.or_insert_with(|| node_class.to_string());
resolved
.labels
.entry("nodeclass.photoncloud.io/name".to_string())
.entry("nodeclass.ultracloud.io/name".to_string())
.or_insert_with(|| node_class.to_string());
}
@ -590,13 +594,7 @@ pub async fn bootstrap_cluster(
serde_json::to_vec(&config)?,
)
.await?;
client
.put(
&deployer_node_mapping_key(deployer_namespace, machine_id),
node.node_id.as_bytes(),
)
.await?;
info!(node_id = %node.node_id, machine_id = %machine_id, "seeded deployer bootstrap mapping");
info!(node_id = %node.node_id, machine_id = %machine_id, "seeded deployer bootstrap config");
}
}
@ -710,12 +708,6 @@ pub async fn apply_cluster_state(
serde_json::to_vec(&config)?,
)
.await?;
client
.put(
&deployer_node_mapping_key(deployer_namespace, machine_id),
node.node_id.as_bytes(),
)
.await?;
}
}
@ -1460,10 +1452,6 @@ async fn prune_cluster_state(
String::from_utf8_lossy(&deployer_node_config_key(deployer_namespace, machine_id))
.to_string(),
);
desired_deployer_keys.insert(
String::from_utf8_lossy(&deployer_node_mapping_key(deployer_namespace, machine_id))
.to_string(),
);
}
}
@ -1607,7 +1595,7 @@ mod tests {
assert_eq!(node.labels.get("pool").map(String::as_str), Some("general"));
assert_eq!(
node.labels
.get("nodeclass.photoncloud.io/name")
.get("nodeclass.ultracloud.io/name")
.map(String::as_str),
Some("worker-linux")
);
@ -1665,7 +1653,7 @@ mod tests {
#[test]
fn test_is_prunable_key_keeps_observed_system() {
let prefix = cluster_prefix("photoncloud", "test-cluster");
let prefix = cluster_prefix("ultracloud", "test-cluster");
assert!(is_prunable_key(&format!("{}nodes/node01", prefix), &prefix));
assert!(is_prunable_key(
&format!("{}nodes/node01/desired-system", prefix),
@ -1707,5 +1695,4 @@ fn is_prunable_key(key: &str, prefix: &str) -> bool {
fn is_prunable_deployer_key(key: &str, deployer_namespace: &str) -> bool {
key.starts_with(&format!("{}/nodes/config/", deployer_namespace))
|| key.starts_with(&format!("{}/nodes/mapping/", deployer_namespace))
}

View file

@ -8,7 +8,7 @@ mod chainfire;
mod power;
mod remote;
/// Deployer control CLI for PhotonCloud.
/// Deployer control CLI for UltraCloud.
///
/// - 初回ブートストラップ時に Chainfire 上の Cluster/Node/Service 定義を作成
/// - 既存の Deployer クラスタに対して宣言的な設定を apply する
@ -20,12 +20,12 @@ struct Cli {
#[arg(long, global = true, default_value = "http://127.0.0.1:7000")]
chainfire_endpoint: String,
/// PhotonCloud Cluster ID (論理名)
/// UltraCloud Cluster ID (論理名)
#[arg(long, global = true)]
cluster_id: Option<String>,
/// PhotonCloud cluster namespace (default: photoncloud)
#[arg(long, global = true, default_value = "photoncloud")]
/// UltraCloud cluster namespace (default: ultracloud)
#[arg(long, global = true, default_value = "ultracloud")]
cluster_namespace: String,
/// Deployer namespace used for machine_id -> NodeConfig bootstrap mappings
@ -49,7 +49,7 @@ enum Command {
config: PathBuf,
},
/// 宣言的な PhotonCloud クラスタ設定を Chainfire に apply する (GitOps 的に利用可能)
/// 宣言的な UltraCloud クラスタ設定を Chainfire に apply する (GitOps 的に利用可能)
Apply {
/// Cluster/Node/Service/Instance/MTLSPolicy を含むJSON/YAML
#[arg(long)]
@ -60,7 +60,7 @@ enum Command {
prune: bool,
},
/// Chainfire 上の PhotonCloud 関連キーをダンプする (デバッグ用途)
/// Chainfire 上の UltraCloud 関連キーをダンプする (デバッグ用途)
Dump {
/// ダンプ対象の prefix (未指定の場合は cluster-namespace を使用)
#[arg(long, default_value = "")]

View file

@ -1,11 +1,8 @@
//! Admin API endpoints for node management
//!
//! These endpoints allow administrators to pre-register nodes,
//! list registered nodes, and manage node configurations.
//! Admin API endpoints for node management.
use axum::{extract::State, http::HeaderMap, http::StatusCode, Json};
use chrono::{DateTime, Utc};
use deployer_types::{InstallPlan, NodeConfig};
use deployer_types::NodeConfig;
use serde::{Deserialize, Serialize};
use std::collections::HashSet;
use std::sync::Arc;
@ -41,45 +38,16 @@ fn adjust_state_for_heartbeat(
state
}
/// Pre-registration request payload
/// Pre-registration request payload.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PreRegisterRequest {
/// Machine ID (from /etc/machine-id)
pub machine_id: String,
/// Assigned node identifier
pub node_id: String,
/// Node role (control-plane, worker, storage, etc.)
pub role: String,
/// Optional: Node IP address
#[serde(skip_serializing_if = "Option::is_none")]
pub ip: Option<String>,
/// Optional: Services to run on this node
#[serde(default)]
pub services: Vec<String>,
/// Optional: SSH authorized keys for bootstrap access
#[serde(default)]
pub ssh_authorized_keys: Vec<String>,
/// Optional desired labels applied to the node
#[serde(default)]
pub labels: std::collections::HashMap<String, String>,
/// Optional pool assignment
#[serde(default)]
pub pool: Option<String>,
/// Optional node class assignment
#[serde(default)]
pub node_class: Option<String>,
/// Optional failure domain
#[serde(default)]
pub failure_domain: Option<String>,
/// Optional nix profile/flake attr
#[serde(default)]
pub nix_profile: Option<String>,
/// Optional explicit install plan for bootstrap installers.
#[serde(default)]
pub install_plan: Option<InstallPlan>,
/// Canonical bootstrap configuration that should be served back during phone-home.
pub node_config: NodeConfig,
}
/// Pre-registration response payload
/// Pre-registration response payload.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PreRegisterResponse {
pub success: bool,
@ -89,14 +57,14 @@ pub struct PreRegisterResponse {
pub node_id: String,
}
/// List nodes response payload
/// List nodes response payload.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ListNodesResponse {
pub nodes: Vec<NodeSummary>,
pub total: usize,
}
/// Node summary for listing
/// Node summary for listing.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct NodeSummary {
pub node_id: String,
@ -107,9 +75,6 @@ pub struct NodeSummary {
}
/// POST /api/v1/admin/nodes
///
/// Pre-register a machine mapping before it boots.
/// This allows administrators to configure node assignments in advance.
pub async fn pre_register(
State(state): State<Arc<AppState>>,
headers: HeaderMap,
@ -117,42 +82,28 @@ pub async fn pre_register(
) -> Result<Json<PreRegisterResponse>, (StatusCode, String)> {
require_admin_auth(&state, &headers)?;
validate_identifier(&request.machine_id, "machine_id")?;
validate_identifier(&request.node_id, "node_id")?;
if let Some(ref ip) = request.ip {
validate_ip(ip, "ip")?;
validate_identifier(&request.node_config.assignment.node_id, "node_id")?;
if !request.node_config.assignment.ip.is_empty() {
validate_ip(&request.node_config.assignment.ip, "ip")?;
}
let node_id = request.node_config.assignment.node_id.clone();
info!(
machine_id = %request.machine_id,
node_id = %request.node_id,
role = %request.role,
node_id = %node_id,
role = %request.node_config.assignment.role,
"Pre-registration request"
);
let config = NodeConfig {
hostname: request.node_id.clone(),
role: request.role.clone(),
ip: request.ip.clone().unwrap_or_default(),
services: request.services.clone(),
ssh_authorized_keys: request.ssh_authorized_keys.clone(),
labels: request.labels.clone(),
pool: request.pool.clone(),
node_class: request.node_class.clone(),
failure_domain: request.failure_domain.clone(),
nix_profile: request.nix_profile.clone(),
install_plan: request.install_plan.clone(),
};
// Conflict detection across configured backends
if let Some(local_storage) = &state.local_storage {
let storage = local_storage.lock().await;
if let Some((existing_node, _)) = storage.get_node_config(&request.machine_id) {
if existing_node != request.node_id {
if let Some(existing) = storage.get_node_config(&request.machine_id) {
if existing.assignment.node_id != node_id {
return Err((
StatusCode::CONFLICT,
format!(
"machine_id {} already mapped to {}",
request.machine_id, existing_node
request.machine_id, existing.assignment.node_id
),
));
}
@ -161,8 +112,8 @@ pub async fn pre_register(
if let Some(storage_mutex) = &state.storage {
let mut storage = storage_mutex.lock().await;
if let Some(existing_node) = storage
.get_node_mapping(&request.machine_id)
if let Some(existing) = storage
.get_node_config(&request.machine_id)
.await
.map_err(|e| {
(
@ -171,12 +122,12 @@ pub async fn pre_register(
)
})?
{
if existing_node != request.node_id {
if existing.assignment.node_id != node_id {
return Err((
StatusCode::CONFLICT,
format!(
"machine_id {} already mapped to {}",
request.machine_id, existing_node
request.machine_id, existing.assignment.node_id
),
));
}
@ -185,13 +136,13 @@ pub async fn pre_register(
{
let map = state.machine_configs.read().await;
if let Some((existing_node, _)) = map.get(&request.machine_id) {
if existing_node != &request.node_id {
if let Some(existing) = map.get(&request.machine_id) {
if existing.assignment.node_id != node_id {
return Err((
StatusCode::CONFLICT,
format!(
"machine_id {} already mapped to {}",
request.machine_id, existing_node
request.machine_id, existing.assignment.node_id
),
));
}
@ -201,7 +152,7 @@ pub async fn pre_register(
let mut stored_locally = false;
if let Some(local_storage) = &state.local_storage {
let mut storage = local_storage.lock().await;
if let Err(e) = storage.register_node(&request.machine_id, &request.node_id, &config) {
if let Err(e) = storage.register_node(&request.machine_id, &request.node_config) {
error!(
machine_id = %request.machine_id,
error = %e,
@ -211,30 +162,29 @@ pub async fn pre_register(
stored_locally = true;
info!(
machine_id = %request.machine_id,
node_id = %request.node_id,
node_id = %node_id,
"Node pre-registered in local storage"
);
}
}
// Try ChainFire storage
if let Some(storage_mutex) = &state.storage {
let mut storage = storage_mutex.lock().await;
match storage
.register_node(&request.machine_id, &request.node_id, &config)
.register_node(&request.machine_id, &request.node_config)
.await
{
Ok(_) => {
info!(
machine_id = %request.machine_id,
node_id = %request.node_id,
node_id = %node_id,
"Node pre-registered in ChainFire"
);
return Ok(Json(PreRegisterResponse {
success: true,
message: Some("Node pre-registered successfully".to_string()),
machine_id: request.machine_id,
node_id: request.node_id,
node_id,
}));
}
Err(StorageError::Conflict(msg)) => {
@ -256,15 +206,15 @@ pub async fn pre_register(
}
}
// Fallback to in-memory storage
state.machine_configs.write().await.insert(
request.machine_id.clone(),
(request.node_id.clone(), config),
);
state
.machine_configs
.write()
.await
.insert(request.machine_id.clone(), request.node_config.clone());
debug!(
machine_id = %request.machine_id,
node_id = %request.node_id,
node_id = %node_id,
"Node pre-registered in-memory (ChainFire unavailable)"
);
@ -276,13 +226,11 @@ pub async fn pre_register(
"Node pre-registered (in-memory)".to_string()
}),
machine_id: request.machine_id,
node_id: request.node_id,
node_id,
}))
}
/// GET /api/v1/admin/nodes
///
/// List all registered nodes.
pub async fn list_nodes(
State(state): State<Arc<AppState>>,
headers: HeaderMap,
@ -303,7 +251,6 @@ pub async fn list_nodes(
let cluster_namespace = state.config.cluster_namespace.trim();
let cluster_enabled = cluster_id.is_some() && !cluster_namespace.is_empty();
// Prefer cluster node state from ChainFire (kept fresh by node-agent)
if cluster_enabled {
if let Some(storage_mutex) = &state.storage {
let mut storage = storage_mutex.lock().await;
@ -340,7 +287,6 @@ pub async fn list_nodes(
}
}
// Fallback to local cluster nodes if ChainFire data is unavailable or missing nodes
if cluster_enabled {
if let Some(local_storage) = &state.local_storage {
let storage = local_storage.lock().await;
@ -371,7 +317,6 @@ pub async fn list_nodes(
}
}
// Try ChainFire storage first
if let Some(storage_mutex) = &state.storage {
let mut storage = storage_mutex.lock().await;
match storage.list_nodes().await {
@ -402,7 +347,6 @@ pub async fn list_nodes(
}
Err(e) => {
error!(error = %e, "Failed to list nodes from ChainFire");
// Continue with in-memory fallback
}
}
}
@ -434,7 +378,6 @@ pub async fn list_nodes(
}
}
// Also include in-memory nodes (may have duplicates if ChainFire is available)
let in_memory = state.nodes.read().await;
for info in in_memory.values() {
if seen.contains(&info.id) {
@ -459,20 +402,20 @@ pub async fn list_nodes(
seen.insert(info.id.clone());
}
// Include pre-registered nodes that haven't phone-home yet (ChainFire)
if let Some(storage_mutex) = &state.storage {
let mut storage = storage_mutex.lock().await;
match storage.list_machine_configs().await {
Ok(configs) => {
for (_machine_id, node_id, config) in configs {
for (_machine_id, config) in configs {
let node_id = config.assignment.node_id.clone();
if seen.contains(&node_id) {
continue;
}
nodes.push(NodeSummary {
node_id: node_id.clone(),
hostname: config.hostname.clone(),
ip: config.ip.clone(),
role: config.role.clone(),
hostname: config.assignment.hostname.clone(),
ip: config.assignment.ip.clone(),
role: config.assignment.role.clone(),
state: "pre-registered".to_string(),
});
seen.insert(node_id);
@ -484,38 +427,38 @@ pub async fn list_nodes(
}
}
// Include pre-registered nodes from local storage
if let Some(local_storage) = &state.local_storage {
let storage = local_storage.lock().await;
for (_machine_id, node_id, config) in storage.list_machine_configs() {
for (_machine_id, config) in storage.list_machine_configs() {
let node_id = config.assignment.node_id.clone();
if seen.contains(&node_id) {
continue;
}
nodes.push(NodeSummary {
node_id: node_id.clone(),
hostname: config.hostname.clone(),
ip: config.ip.clone(),
role: config.role.clone(),
hostname: config.assignment.hostname.clone(),
ip: config.assignment.ip.clone(),
role: config.assignment.role.clone(),
state: "pre-registered".to_string(),
});
seen.insert(node_id);
}
}
// Include pre-registered nodes from in-memory config map
let configs = state.machine_configs.read().await;
for (_machine_id, (node_id, config)) in configs.iter() {
if seen.contains(node_id) {
for config in configs.values() {
let node_id = config.assignment.node_id.clone();
if seen.contains(&node_id) {
continue;
}
nodes.push(NodeSummary {
node_id: node_id.clone(),
hostname: config.hostname.clone(),
ip: config.ip.clone(),
role: config.role.clone(),
hostname: config.assignment.hostname.clone(),
ip: config.assignment.ip.clone(),
role: config.assignment.role.clone(),
state: "pre-registered".to_string(),
});
seen.insert(node_id.clone());
seen.insert(node_id);
}
let total = nodes.len();
@ -528,6 +471,7 @@ mod tests {
use crate::config::Config;
use crate::state::AppState;
use axum::http::HeaderMap;
use deployer_types::{BootstrapPlan, BootstrapSecrets, NodeAssignment};
fn test_headers() -> HeaderMap {
let mut headers = HeaderMap::new();
@ -542,23 +486,39 @@ mod tests {
Arc::new(AppState::with_config(config))
}
fn test_node_config() -> NodeConfig {
NodeConfig::from_parts(
NodeAssignment {
node_id: "node-test".to_string(),
hostname: "node-test".to_string(),
role: "worker".to_string(),
ip: "10.0.1.50".to_string(),
labels: std::collections::HashMap::new(),
pool: None,
node_class: None,
failure_domain: None,
},
BootstrapPlan {
services: vec!["chainfire".to_string()],
nix_profile: None,
install_plan: None,
},
BootstrapSecrets {
ssh_authorized_keys: vec!["ssh-ed25519 AAAA... test".to_string()],
ssh_host_key: None,
tls_cert: None,
tls_key: None,
},
)
}
#[tokio::test]
async fn test_pre_register() {
let state = test_state();
let request = PreRegisterRequest {
machine_id: "new-machine-abc".to_string(),
node_id: "node-test".to_string(),
role: "worker".to_string(),
ip: Some("10.0.1.50".to_string()),
services: vec!["chainfire".to_string()],
ssh_authorized_keys: vec!["ssh-ed25519 AAAA... test".to_string()],
labels: std::collections::HashMap::new(),
pool: None,
node_class: None,
failure_domain: None,
nix_profile: None,
install_plan: None,
node_config: test_node_config(),
};
let result =
@ -570,12 +530,10 @@ mod tests {
assert_eq!(response.machine_id, "new-machine-abc");
assert_eq!(response.node_id, "node-test");
// Verify stored in machine_configs
let configs = state.machine_configs.read().await;
assert!(configs.contains_key("new-machine-abc"));
let (node_id, config) = configs.get("new-machine-abc").unwrap();
assert_eq!(node_id, "node-test");
assert_eq!(config.role, "worker");
let config = configs.get("new-machine-abc").expect("stored config");
assert_eq!(config.assignment.node_id, "node-test");
assert_eq!(config.assignment.role, "worker");
}
#[tokio::test]

View file

@ -47,7 +47,7 @@ pub async fn flake_bundle(
),
(
header::CONTENT_DISPOSITION,
HeaderValue::from_static("attachment; filename=\"plasmacloud-flake-bundle.tar.gz\""),
HeaderValue::from_static("attachment; filename=\"ultracloud-flake-bundle.tar.gz\""),
),
];

View file

@ -20,7 +20,7 @@ pub async fn meta_data(
require_bootstrap_auth(&state, &headers)?;
validate_identifier(&machine_id, "machine_id")?;
let Some((node_id, config)) = lookup_node_config(&state, &machine_id).await else {
let Some(config) = lookup_node_config(&state, &machine_id).await else {
return Err((
StatusCode::NOT_FOUND,
"machine-id not registered".to_string(),
@ -29,7 +29,7 @@ pub async fn meta_data(
let body = format!(
"instance-id: {}\nlocal-hostname: {}\n",
node_id, config.hostname
config.assignment.node_id, config.assignment.hostname
);
Ok(([(axum::http::header::CONTENT_TYPE, "text/plain")], body))
}
@ -43,14 +43,14 @@ pub async fn user_data(
require_bootstrap_auth(&state, &headers)?;
validate_identifier(&machine_id, "machine_id")?;
let Some((node_id, config)) = lookup_node_config(&state, &machine_id).await else {
let Some(config) = lookup_node_config(&state, &machine_id).await else {
return Err((
StatusCode::NOT_FOUND,
"machine-id not registered".to_string(),
));
};
let body = render_user_data(&node_id, &config)
let body = render_user_data(&config)
.map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?;
Ok((
[(axum::http::header::CONTENT_TYPE, "text/cloud-config")],
@ -80,9 +80,9 @@ fn indent_multiline(input: &str, indent: usize) -> String {
.join("\n")
}
fn render_user_data(node_id: &str, config: &NodeConfig) -> anyhow::Result<String> {
fn render_user_data(config: &NodeConfig) -> anyhow::Result<String> {
let node_config_json = serde_json::to_string_pretty(config)?;
let ssh_keys = render_yaml_list(&config.ssh_authorized_keys, 2);
let ssh_keys = render_yaml_list(&config.bootstrap_secrets.ssh_authorized_keys, 2);
Ok(format!(
r#"#cloud-config
@ -92,18 +92,18 @@ manage_etc_hosts: true
ssh_authorized_keys:
{ssh_keys}
write_files:
- path: /etc/plasmacloud/node-id
- path: /etc/ultracloud/node-id
permissions: "0644"
content: |
{node_id_block}
- path: /etc/plasmacloud/node-config.json
permissions: "0644"
- path: /etc/ultracloud/node-config.json
permissions: "0600"
content: |
{node_config_block}
"#,
hostname = config.hostname,
hostname = config.assignment.hostname,
ssh_keys = ssh_keys,
node_id_block = indent_multiline(node_id, 6),
node_id_block = indent_multiline(&config.assignment.node_id, 6),
node_config_block = indent_multiline(&node_config_json, 6),
))
}
@ -115,20 +115,26 @@ mod tests {
use crate::state::AppState;
use axum::body::Body;
use axum::http::Request;
use deployer_types::InstallPlan;
use deployer_types::{BootstrapPlan, BootstrapSecrets, InstallPlan, NodeAssignment};
use tower::ServiceExt;
fn test_config() -> NodeConfig {
NodeConfig {
NodeConfig::from_parts(
NodeAssignment {
node_id: "node01".to_string(),
hostname: "node01".to_string(),
role: "worker".to_string(),
ip: "10.0.0.11".to_string(),
services: vec!["prismnet".to_string()],
ssh_authorized_keys: vec!["ssh-ed25519 AAAATEST test".to_string()],
labels: std::collections::HashMap::from([("tier".to_string(), "general".to_string())]),
labels: std::collections::HashMap::from([(
"tier".to_string(),
"general".to_string(),
)]),
pool: Some("general".to_string()),
node_class: Some("worker".to_string()),
failure_domain: Some("rack-a".to_string()),
},
BootstrapPlan {
services: vec!["prismnet".to_string()],
nix_profile: Some("profiles/worker".to_string()),
install_plan: Some(InstallPlan {
nixos_configuration: Some("worker-golden".to_string()),
@ -136,17 +142,25 @@ mod tests {
target_disk: Some("/dev/vda".to_string()),
target_disk_by_id: None,
}),
}
},
BootstrapSecrets {
ssh_authorized_keys: vec!["ssh-ed25519 AAAATEST test".to_string()],
ssh_host_key: None,
tls_cert: None,
tls_key: None,
},
)
}
#[test]
fn test_render_user_data_contains_node_config() {
let rendered = render_user_data("node01", &test_config()).unwrap();
let rendered = render_user_data(&test_config()).unwrap();
assert!(rendered.contains("#cloud-config"));
assert!(rendered.contains("hostname: node01"));
assert!(rendered.contains("/etc/plasmacloud/node-config.json"));
assert!(rendered.contains("/etc/ultracloud/node-config.json"));
assert!(rendered.contains("\"nix_profile\": \"profiles/worker\""));
assert!(rendered.contains("\"nixos_configuration\": \"worker-golden\""));
assert!(rendered.contains("\"node_id\": \"node01\""));
}
#[tokio::test]
@ -154,10 +168,11 @@ mod tests {
let mut config = Config::default();
config.bootstrap_token = Some("test-token".to_string());
let state = Arc::new(AppState::with_config(config));
state.machine_configs.write().await.insert(
"machine-1".to_string(),
("node01".to_string(), test_config()),
);
state
.machine_configs
.write()
.await
.insert("machine-1".to_string(), test_config());
let app = crate::build_router(state);
let response = app

View file

@ -14,11 +14,11 @@ pub struct Config {
#[serde(default)]
pub chainfire: ChainFireConfig,
/// PhotonCloud cluster ID (for writing desired state under photoncloud/clusters/...)
/// UltraCloud cluster ID (for writing desired state under ultracloud/clusters/...)
#[serde(default)]
pub cluster_id: Option<String>,
/// Namespace prefix for PhotonCloud cluster state
/// Namespace prefix for UltraCloud cluster state
#[serde(default = "default_cluster_namespace")]
pub cluster_namespace: String,
@ -30,7 +30,7 @@ pub struct Config {
#[serde(default = "default_local_state_path")]
pub local_state_path: Option<PathBuf>,
/// Optional tar.gz bundle containing the PhotonCloud flake source tree for bootstrap installs
/// Optional tar.gz bundle containing the UltraCloud flake source tree for bootstrap installs
#[serde(default)]
pub bootstrap_flake_bundle_path: Option<PathBuf>,
@ -168,7 +168,7 @@ fn default_chainfire_namespace() -> String {
}
fn default_cluster_namespace() -> String {
"photoncloud".to_string()
"ultracloud".to_string()
}
fn default_heartbeat_timeout() -> u64 {
@ -222,7 +222,7 @@ mod tests {
let config = Config::default();
assert_eq!(config.bind_addr.to_string(), "0.0.0.0:8080");
assert_eq!(config.chainfire.namespace, "deployer");
assert_eq!(config.cluster_namespace, "photoncloud");
assert_eq!(config.cluster_namespace, "ultracloud");
assert!(config.cluster_id.is_none());
assert_eq!(config.heartbeat_timeout_secs, 300);
assert_eq!(
@ -259,7 +259,7 @@ mod tests {
bind_addr = "127.0.0.1:18080"
cluster_id = "cluster-a"
allow_unauthenticated = true
bootstrap_flake_bundle_path = "/tmp/plasmacloud-flake-bundle.tar.gz"
bootstrap_flake_bundle_path = "/tmp/ultracloud-flake-bundle.tar.gz"
[chainfire]
endpoints = ["http://10.0.0.1:2379"]
@ -273,7 +273,7 @@ mod tests {
assert_eq!(config.cluster_id.as_deref(), Some("cluster-a"));
assert_eq!(
config.bootstrap_flake_bundle_path,
Some(PathBuf::from("/tmp/plasmacloud-flake-bundle.tar.gz"))
Some(PathBuf::from("/tmp/ultracloud-flake-bundle.tar.gz"))
);
assert!(config.allow_unauthenticated);
assert_eq!(config.chainfire.namespace, "bootstrap");

View file

@ -79,7 +79,7 @@ pub async fn run(config: Config) -> anyhow::Result<()> {
if state.config.cluster_id.is_none() {
tracing::warn!(
"cluster_id not set; cluster node state won't be written to photoncloud/clusters"
"cluster_id not set; cluster node state won't be written to ultracloud/clusters"
);
}

View file

@ -19,7 +19,7 @@ use deployer_types::{NodeConfig, NodeInfo};
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
struct LocalState {
machine_configs: HashMap<String, (String, NodeConfig)>,
machine_configs: HashMap<String, NodeConfig>,
nodes: HashMap<String, NodeInfo>,
cluster_nodes: HashMap<String, ClusterNodeRecord>,
ssh_host_keys: HashMap<String, String>,
@ -62,29 +62,23 @@ impl LocalStorage {
Ok(Self { state_path, state })
}
pub fn register_node(
&mut self,
machine_id: &str,
node_id: &str,
config: &NodeConfig,
) -> Result<()> {
if let Some((existing_id, _)) = self.state.machine_configs.get(machine_id) {
if existing_id != node_id {
pub fn register_node(&mut self, machine_id: &str, config: &NodeConfig) -> Result<()> {
if let Some(existing) = self.state.machine_configs.get(machine_id) {
if existing.assignment.node_id != config.assignment.node_id {
anyhow::bail!(
"machine_id {} already mapped to {}",
machine_id,
existing_id
existing.assignment.node_id
);
}
}
self.state.machine_configs.insert(
machine_id.to_string(),
(node_id.to_string(), config.clone()),
);
self.state
.machine_configs
.insert(machine_id.to_string(), config.clone());
self.save()
}
pub fn get_node_config(&self, machine_id: &str) -> Option<(String, NodeConfig)> {
pub fn get_node_config(&self, machine_id: &str) -> Option<NodeConfig> {
self.state.machine_configs.get(machine_id).cloned()
}
@ -103,13 +97,11 @@ impl LocalStorage {
self.state.nodes.values().cloned().collect()
}
pub fn list_machine_configs(&self) -> Vec<(String, String, NodeConfig)> {
pub fn list_machine_configs(&self) -> Vec<(String, NodeConfig)> {
self.state
.machine_configs
.iter()
.map(|(machine_id, (node_id, config))| {
(machine_id.clone(), node_id.clone(), config.clone())
})
.map(|(machine_id, config)| (machine_id.clone(), config.clone()))
.collect()
}
@ -297,6 +289,7 @@ fn generate_ssh_host_key(node_id: &str, parent: Option<&Path>) -> Result<String>
#[cfg(test)]
mod tests {
use super::*;
use deployer_types::{BootstrapPlan, BootstrapSecrets, NodeAssignment};
use std::collections::HashMap;
use std::fs;
@ -315,22 +308,27 @@ mod tests {
let dir = temp_state_dir();
let mut storage = LocalStorage::open(&dir).expect("open storage");
let config = NodeConfig {
let config = NodeConfig::from_parts(
NodeAssignment {
node_id: "node01".to_string(),
hostname: "node01".to_string(),
role: "control-plane".to_string(),
ip: "10.0.1.10".to_string(),
services: vec!["chainfire".to_string()],
ssh_authorized_keys: vec![],
labels: HashMap::new(),
pool: None,
node_class: None,
failure_domain: None,
},
BootstrapPlan {
services: vec!["chainfire".to_string()],
nix_profile: None,
install_plan: None,
};
},
BootstrapSecrets::default(),
);
storage
.register_node("machine-1", "node01", &config)
.register_node("machine-1", &config)
.expect("register node");
let node_info = NodeInfo {
@ -351,8 +349,8 @@ mod tests {
let reopened = LocalStorage::open(&dir).expect("reopen storage");
let loaded = reopened.get_node_config("machine-1");
assert!(loaded.is_some());
let (_, loaded_config) = loaded.unwrap();
assert_eq!(loaded_config.hostname, "node01");
let loaded_config = loaded.unwrap();
assert_eq!(loaded_config.assignment.hostname, "node01");
let loaded_node = reopened.get_node_info("node01").expect("node info");
assert_eq!(loaded_node.hostname, "node01");

View file

@ -1,11 +1,12 @@
use axum::{extract::State, http::HeaderMap, http::StatusCode, Json};
use chrono::Utc;
use deployer_types::{
CommissionState, EnrollmentRuleSpec, HardwareFacts, InstallPlan, InstallState,
NodeClassSpec, NodeConfig, NodeInfo, NodePoolSpec, NodeState, PhoneHomeRequest,
PhoneHomeResponse, PowerState,
BootstrapPlan, BootstrapSecrets, CommissionState, EnrollmentRuleSpec, HardwareFacts,
InstallPlan, InstallState, NodeAssignment, NodeClassSpec, NodeConfig, NodeInfo, NodePoolSpec,
NodeState, PhoneHomeRequest, PhoneHomeResponse, PowerState,
};
use sha2::{Digest, Sha256};
use std::collections::HashMap;
use std::sync::Arc;
use tracing::{debug, error, info, warn};
@ -22,7 +23,7 @@ fn merge_install_plan(
}
fn merge_hardware_summary_metadata(
metadata: &mut std::collections::HashMap<String, String>,
metadata: &mut HashMap<String, String>,
hardware_facts: Option<&HardwareFacts>,
) {
let Some(hardware_facts) = hardware_facts else {
@ -36,7 +37,10 @@ fn merge_hardware_summary_metadata(
metadata.insert("hardware.cpu_cores".to_string(), cpu_cores.to_string());
}
if let Some(memory_bytes) = hardware_facts.memory_bytes {
metadata.insert("hardware.memory_bytes".to_string(), memory_bytes.to_string());
metadata.insert(
"hardware.memory_bytes".to_string(),
memory_bytes.to_string(),
);
}
metadata.insert(
"hardware.disk_count".to_string(),
@ -47,7 +51,10 @@ fn merge_hardware_summary_metadata(
hardware_facts.nics.len().to_string(),
);
if let Some(architecture) = hardware_facts.architecture.as_deref() {
metadata.insert("hardware.architecture".to_string(), architecture.to_string());
metadata.insert(
"hardware.architecture".to_string(),
architecture.to_string(),
);
}
}
@ -60,14 +67,6 @@ fn inventory_hash(hardware_facts: Option<&HardwareFacts>) -> Option<String> {
}
/// POST /api/v1/phone-home
///
/// Handles node registration during first boot.
/// Nodes send their machine-id, and Deployer returns:
/// - Node configuration (hostname, role, IP, services)
/// - SSH host key
/// - TLS certificates (optional)
///
/// Uses ChainFire storage when available, falls back to in-memory.
pub async fn phone_home(
State(state): State<Arc<AppState>>,
headers: HeaderMap,
@ -87,18 +86,18 @@ pub async fn phone_home(
"Phone home request received"
);
// Lookup node configuration (ChainFire or fallback)
let (node_id, mut node_config) = match lookup_node_config(&state, &request.machine_id).await {
Some((id, config)) => (id, config),
None => {
if let Some((id, config)) = resolve_enrollment_config(&state, &request).await? {
let mut node_config = match lookup_node_config(&state, &request.machine_id).await {
Some(config) => config,
None => match resolve_enrollment_config(&state, &request).await? {
Some(config) => {
info!(
machine_id = %request.machine_id,
node_id = %id,
node_id = %config.assignment.node_id,
"Resolved unknown machine through enrollment rules"
);
(id, config)
} else {
config
}
None => {
if !state.config.allow_unknown_nodes {
warn!(
machine_id = %request.machine_id,
@ -114,8 +113,151 @@ pub async fn phone_home(
machine_id = %request.machine_id,
"Unknown machine-id, assigning default configuration (unsafe)"
);
// Assign default configuration for unknown machines (dev-only).
// Prefer explicit node_id, then DHCP-provided hostname, then machine-id suffix.
default_unknown_node_config(&request)
}
},
};
if let Some(requested_id) = request.node_id.as_ref() {
if requested_id != &node_config.assignment.node_id {
warn!(
machine_id = %request.machine_id,
requested_id = %requested_id,
expected_id = %node_config.assignment.node_id,
"Node ID mismatch in phone-home"
);
return Err((StatusCode::BAD_REQUEST, "node_id mismatch".to_string()));
}
}
if node_config.assignment.hostname.is_empty() {
node_config.assignment.hostname = request
.hostname
.clone()
.filter(|value| !value.trim().is_empty())
.unwrap_or_else(|| node_config.assignment.node_id.clone());
}
if let Some(request_ip) = request.ip.as_ref() {
if !node_config.assignment.ip.is_empty() && node_config.assignment.ip != *request_ip {
warn!(
machine_id = %request.machine_id,
requested_ip = %request_ip,
expected_ip = %node_config.assignment.ip,
"Node IP mismatch in phone-home"
);
return Err((StatusCode::BAD_REQUEST, "node ip mismatch".to_string()));
}
}
if node_config.assignment.ip.is_empty() {
if let Some(ip) = request.ip.clone() {
node_config.assignment.ip = ip;
} else {
warn!(
machine_id = %request.machine_id,
node_id = %node_config.assignment.node_id,
"Node config missing IP; refusing registration"
);
return Err((StatusCode::BAD_REQUEST, "node ip missing".to_string()));
}
}
validate_ip(&node_config.assignment.ip, "node_config.assignment.ip")?;
let mut metadata = request.metadata.clone();
metadata.insert("role".to_string(), node_config.assignment.role.clone());
metadata.insert(
"services".to_string(),
node_config.bootstrap_plan.services.join(","),
);
merge_hardware_summary_metadata(&mut metadata, request.hardware_facts.as_ref());
let node_info = NodeInfo {
id: node_config.assignment.node_id.clone(),
machine_id: Some(request.machine_id.clone()),
hostname: node_config.assignment.hostname.clone(),
ip: node_config.assignment.ip.clone(),
state: NodeState::Provisioning,
cluster_config_hash: request.cluster_config_hash.unwrap_or_default(),
last_heartbeat: Utc::now(),
metadata,
};
let mut response_config = node_config.clone();
response_config.bootstrap_secrets.ssh_host_key =
get_or_issue_ssh_host_key(&state, &node_info.id).await;
let (tls_cert, tls_key) =
get_or_issue_tls_material(&state, &node_info.id, &node_info.hostname, &node_info.ip).await;
response_config.bootstrap_secrets.tls_cert = tls_cert;
response_config.bootstrap_secrets.tls_key = tls_key;
if let Err(e) = persist_node_config(&state, &request.machine_id, &response_config).await {
warn!(
machine_id = %request.machine_id,
node_id = %node_info.id,
error = %e,
"Failed to persist node configuration"
);
}
match store_node_info(&state, &node_info).await {
Ok(_) => {
let storage = if state.has_local_storage() {
"local"
} else if state.has_storage() {
"chainfire"
} else {
"in-memory"
};
info!(
node_id = %node_info.id,
hostname = %node_info.hostname,
role = %response_config.assignment.role,
storage = storage,
"Node registered successfully"
);
if let Err(e) = store_cluster_node_if_configured(
&state,
&node_info,
&response_config,
&request.machine_id,
request.hardware_facts.as_ref(),
)
.await
{
warn!(
node_id = %node_info.id,
error = %e,
"Failed to store cluster node state"
);
}
Ok(Json(PhoneHomeResponse {
success: true,
message: Some(format!("Node {} registered successfully", node_info.id)),
state: NodeState::Provisioning,
node_config: response_config,
}))
}
Err(e) => {
error!(
machine_id = %request.machine_id,
error = %e,
"Failed to store node info"
);
Err((
StatusCode::INTERNAL_SERVER_ERROR,
format!("Failed to register node: {}", e),
))
}
}
}
fn default_unknown_node_config(request: &PhoneHomeRequest) -> NodeConfig {
let node_id = request
.node_id
.as_ref()
@ -135,156 +277,60 @@ pub async fn phone_home(
let suffix_len = std::cmp::min(max_suffix_len, request.machine_id.len());
format!("node-{}", &request.machine_id[..suffix_len])
});
let config = NodeConfig {
hostname: node_id.clone(),
NodeConfig::from_parts(
NodeAssignment {
node_id: node_id.clone(),
hostname: request
.hostname
.clone()
.filter(|value| !value.trim().is_empty())
.unwrap_or_else(|| node_id.clone()),
role: "worker".to_string(),
ip: request.ip.clone().unwrap_or_default(),
services: vec![],
ssh_authorized_keys: vec![],
labels: std::collections::HashMap::new(),
labels: HashMap::new(),
pool: None,
node_class: None,
failure_domain: request.metadata.get("failure_domain").cloned(),
nix_profile: None,
install_plan: None,
};
(node_id, config)
}
}
};
if let Some(request_ip) = request.ip.as_ref() {
if !node_config.ip.is_empty() && node_config.ip != *request_ip {
warn!(
machine_id = %request.machine_id,
requested_ip = %request_ip,
expected_ip = %node_config.ip,
"Node IP mismatch in phone-home"
);
return Err((StatusCode::BAD_REQUEST, "node ip mismatch".to_string()));
}
}
if let Some(requested_id) = request.node_id.as_ref() {
if requested_id != &node_id {
warn!(
machine_id = %request.machine_id,
requested_id = %requested_id,
expected_id = %node_id,
"Node ID mismatch in phone-home"
);
return Err((StatusCode::BAD_REQUEST, "node_id mismatch".to_string()));
}
}
if node_config.hostname.is_empty() {
if let Some(hostname) = request.hostname.as_ref() {
node_config.hostname = hostname.clone();
} else {
node_config.hostname = node_id.clone();
}
}
if node_config.ip.is_empty() {
if let Some(ip) = request.ip.clone() {
node_config.ip = ip;
} else {
warn!(
machine_id = %request.machine_id,
node_id = %node_id,
"Node config missing IP; refusing registration"
);
return Err((StatusCode::BAD_REQUEST, "node ip missing".to_string()));
}
}
validate_ip(&node_config.ip, "node_config.ip")?;
// Ensure metadata contains authoritative role/service info
let mut metadata = request.metadata.clone();
metadata.insert("role".to_string(), node_config.role.clone());
metadata.insert("services".to_string(), node_config.services.join(","));
merge_hardware_summary_metadata(&mut metadata, request.hardware_facts.as_ref());
// Create NodeInfo for tracking
let node_info = NodeInfo {
id: node_id.clone(),
machine_id: Some(request.machine_id.clone()),
hostname: node_config.hostname.clone(),
ip: node_config.ip.clone(),
state: NodeState::Provisioning,
cluster_config_hash: request.cluster_config_hash.unwrap_or_default(),
last_heartbeat: Utc::now(),
metadata,
};
// Persist config mapping for this machine (best-effort)
if let Err(e) = persist_node_config(&state, &request.machine_id, &node_id, &node_config).await {
warn!(
machine_id = %request.machine_id,
node_id = %node_id,
error = %e,
"Failed to persist node configuration"
);
}
// Store in ChainFire or in-memory
match store_node_info(&state, &node_info).await {
Ok(_) => {
let storage = if state.has_local_storage() {
"local"
} else if state.has_storage() {
"chainfire"
} else {
"in-memory"
};
info!(
node_id = %node_info.id,
hostname = %node_info.hostname,
role = %node_config.role,
storage = storage,
"Node registered successfully"
);
if let Err(e) = store_cluster_node_if_configured(
&state,
&node_info,
&node_config,
&request.machine_id,
request.hardware_facts.as_ref(),
},
BootstrapPlan::default(),
BootstrapSecrets::default(),
)
.await
{
warn!(
node_id = %node_info.id,
error = %e,
"Failed to store cluster node state"
);
}
let ssh_host_key = if let Some(local_storage) = &state.local_storage {
async fn get_or_issue_ssh_host_key(state: &AppState, node_id: &str) -> Option<String> {
let Some(local_storage) = &state.local_storage else {
return None;
};
let mut storage = local_storage.lock().await;
match storage.get_or_generate_ssh_host_key(&node_info.id) {
match storage.get_or_generate_ssh_host_key(node_id) {
Ok(key) => Some(key),
Err(e) => {
warn!(error = %e, "Failed to generate ssh host key");
None
}
}
} else {
None
};
}
let (tls_cert, tls_key) = if state.config.tls_self_signed
|| (state.config.tls_ca_cert_path.is_some()
&& state.config.tls_ca_key_path.is_some())
async fn get_or_issue_tls_material(
state: &AppState,
node_id: &str,
hostname: &str,
ip: &str,
) -> (Option<String>, Option<String>) {
if !(state.config.tls_self_signed
|| (state.config.tls_ca_cert_path.is_some() && state.config.tls_ca_key_path.is_some()))
{
return (None, None);
}
if let Some(local_storage) = &state.local_storage {
let mut storage = local_storage.lock().await;
match storage.get_or_generate_tls_cert(
&node_info.id,
&node_config.hostname,
&node_config.ip,
node_id,
hostname,
ip,
state.config.tls_ca_cert_path.as_deref(),
state.config.tls_ca_key_path.as_deref(),
) {
@ -296,9 +342,9 @@ pub async fn phone_home(
}
} else {
match crate::tls::issue_node_cert(
&node_info.id,
&node_config.hostname,
&node_config.ip,
node_id,
hostname,
ip,
state.config.tls_ca_cert_path.as_deref(),
state.config.tls_ca_key_path.as_deref(),
) {
@ -309,69 +355,34 @@ pub async fn phone_home(
}
}
}
} else {
(None, None)
};
Ok(Json(PhoneHomeResponse {
success: true,
message: Some(format!("Node {} registered successfully", node_info.id)),
node_id: node_id.clone(),
state: NodeState::Provisioning,
node_config: Some(node_config),
ssh_host_key,
tls_cert,
tls_key,
}))
}
Err(e) => {
error!(
machine_id = %request.machine_id,
error = %e,
"Failed to store node info"
);
Err((
StatusCode::INTERNAL_SERVER_ERROR,
format!("Failed to register node: {}", e),
))
}
}
}
/// Lookup node configuration by machine-id
///
/// Tries ChainFire first, then falls back to in-memory storage.
pub(crate) async fn lookup_node_config(
state: &AppState,
machine_id: &str,
) -> Option<(String, NodeConfig)> {
/// Lookup node configuration by machine-id.
pub(crate) async fn lookup_node_config(state: &AppState, machine_id: &str) -> Option<NodeConfig> {
debug!(machine_id = %machine_id, "Looking up node configuration");
// Try local storage first
if let Some(local_storage) = &state.local_storage {
let storage = local_storage.lock().await;
if let Some((node_id, config)) = storage.get_node_config(machine_id) {
if let Some(config) = storage.get_node_config(machine_id) {
debug!(
machine_id = %machine_id,
node_id = %node_id,
node_id = %config.assignment.node_id,
"Found config in local storage"
);
return Some((node_id, config));
return Some(config);
}
}
// Try ChainFire storage first
if let Some(storage_mutex) = &state.storage {
let mut storage = storage_mutex.lock().await;
match storage.get_node_config(machine_id).await {
Ok(Some((node_id, config))) => {
Ok(Some(config)) => {
debug!(
machine_id = %machine_id,
node_id = %node_id,
node_id = %config.assignment.node_id,
"Found config in ChainFire"
);
return Some((node_id, config));
return Some(config);
}
Ok(None) => {
debug!(machine_id = %machine_id, "Not found in ChainFire");
@ -386,54 +397,56 @@ pub(crate) async fn lookup_node_config(
}
}
// Fallback to in-memory storage
let configs = state.machine_configs.read().await;
if let Some((node_id, config)) = configs.get(machine_id) {
if let Some(config) = configs.get(machine_id) {
debug!(
machine_id = %machine_id,
node_id = %node_id,
node_id = %config.assignment.node_id,
"Found config in in-memory storage"
);
return Some((node_id.clone(), config.clone()));
return Some(config.clone());
}
// Hardcoded test mappings (for development/testing)
if state.config.allow_test_mappings {
match machine_id {
"test-machine-01" => {
return Some((
"node01".to_string(),
NodeConfig {
return Some(NodeConfig::from_parts(
NodeAssignment {
node_id: "node01".to_string(),
hostname: "node01".to_string(),
role: "control-plane".to_string(),
ip: "10.0.1.10".to_string(),
services: vec!["chainfire".to_string(), "flaredb".to_string()],
ssh_authorized_keys: vec![],
labels: std::collections::HashMap::new(),
labels: HashMap::new(),
pool: None,
node_class: None,
failure_domain: None,
},
BootstrapPlan {
services: vec!["chainfire".to_string(), "flaredb".to_string()],
nix_profile: None,
install_plan: None,
},
BootstrapSecrets::default(),
));
}
"test-machine-02" => {
return Some((
"node02".to_string(),
NodeConfig {
return Some(NodeConfig::from_parts(
NodeAssignment {
node_id: "node02".to_string(),
hostname: "node02".to_string(),
role: "worker".to_string(),
ip: "10.0.1.11".to_string(),
services: vec!["chainfire".to_string()],
ssh_authorized_keys: vec![],
labels: std::collections::HashMap::new(),
labels: HashMap::new(),
pool: None,
node_class: None,
failure_domain: None,
},
BootstrapPlan {
services: vec!["chainfire".to_string()],
nix_profile: None,
install_plan: None,
},
BootstrapSecrets::default(),
));
}
_ => {}
@ -446,7 +459,7 @@ pub(crate) async fn lookup_node_config(
async fn resolve_enrollment_config(
state: &AppState,
request: &PhoneHomeRequest,
) -> Result<Option<(String, NodeConfig)>, (StatusCode, String)> {
) -> Result<Option<NodeConfig>, (StatusCode, String)> {
let Some(cluster_id) = state.config.cluster_id.as_deref() else {
return Ok(None);
};
@ -547,7 +560,7 @@ fn build_node_config_from_rule(
request: &PhoneHomeRequest,
node_classes: &[NodeClassSpec],
pools: &[NodePoolSpec],
) -> (String, NodeConfig) {
) -> NodeConfig {
let requested_id = request
.node_id
.as_ref()
@ -589,7 +602,7 @@ fn build_node_config_from_rule(
.or_else(|| node_class_spec.and_then(|node_class| node_class.roles.first().cloned()))
.unwrap_or_else(|| "worker".to_string());
let mut labels = std::collections::HashMap::new();
let mut labels = HashMap::new();
if let Some(node_class) = node_class_spec {
labels.extend(node_class.labels.clone());
}
@ -618,9 +631,9 @@ fn build_node_config_from_rule(
.cloned()
.or_else(|| request.metadata.get("topology.kubernetes.io/zone").cloned());
(
node_id.clone(),
NodeConfig {
NodeConfig::from_parts(
NodeAssignment {
node_id: node_id.clone(),
hostname: request
.hostname
.clone()
@ -628,12 +641,13 @@ fn build_node_config_from_rule(
.unwrap_or_else(|| node_id.clone()),
role,
ip: request.ip.clone().unwrap_or_default(),
services: rule.services.clone(),
ssh_authorized_keys: rule.ssh_authorized_keys.clone(),
labels,
pool,
node_class,
failure_domain,
},
BootstrapPlan {
services: rule.services.clone(),
nix_profile: rule
.nix_profile
.clone()
@ -643,14 +657,18 @@ fn build_node_config_from_rule(
node_class_spec.and_then(|node_class| node_class.install_plan.as_ref()),
),
},
BootstrapSecrets {
ssh_authorized_keys: rule.ssh_authorized_keys.clone(),
ssh_host_key: None,
tls_cert: None,
tls_key: None,
},
)
}
/// Store NodeInfo in ChainFire or in-memory
async fn store_node_info(state: &AppState, node_info: &NodeInfo) -> anyhow::Result<()> {
let mut stored = false;
// Prefer local storage when configured.
if let Some(local_storage) = &state.local_storage {
let mut storage = local_storage.lock().await;
match storage.store_node_info(node_info) {
@ -664,7 +682,6 @@ async fn store_node_info(state: &AppState, node_info: &NodeInfo) -> anyhow::Resu
}
}
// Also try ChainFire if available.
if let Some(storage_mutex) = &state.storage {
let mut chainfire = storage_mutex.lock().await;
match chainfire.store_node_info(node_info).await {
@ -682,7 +699,6 @@ async fn store_node_info(state: &AppState, node_info: &NodeInfo) -> anyhow::Resu
return Ok(());
}
// Fallback to in-memory storage when all configured backends fail.
state
.nodes
.write()
@ -697,19 +713,17 @@ async fn store_node_info(state: &AppState, node_info: &NodeInfo) -> anyhow::Resu
Ok(())
}
/// Persist node config mapping in ChainFire and in-memory fallback
async fn persist_node_config(
state: &AppState,
machine_id: &str,
node_id: &str,
config: &NodeConfig,
) -> anyhow::Result<()> {
if let Some(local_storage) = &state.local_storage {
let mut storage = local_storage.lock().await;
if let Err(e) = storage.register_node(machine_id, node_id, config) {
if let Err(e) = storage.register_node(machine_id, config) {
warn!(
machine_id = %machine_id,
node_id = %node_id,
node_id = %config.assignment.node_id,
error = %e,
"Failed to persist node config to local storage"
);
@ -718,35 +732,29 @@ async fn persist_node_config(
if let Some(storage_mutex) = &state.storage {
let mut storage = storage_mutex.lock().await;
if let Err(e) = storage.register_node(machine_id, node_id, config).await {
if let Err(e) = storage.register_node(machine_id, config).await {
warn!(
machine_id = %machine_id,
node_id = %node_id,
node_id = %config.assignment.node_id,
error = %e,
"Failed to persist node config to ChainFire"
);
}
}
// Keep in-memory mapping in sync as a fallback cache
{
let mut map = state.machine_configs.write().await;
if let Some((existing_node, _)) = map.get(machine_id) {
if existing_node != node_id {
if let Some(existing) = map.get(machine_id) {
if existing.assignment.node_id != config.assignment.node_id {
warn!(
machine_id = %machine_id,
existing_node = %existing_node,
requested_node = %node_id,
existing_node = %existing.assignment.node_id,
requested_node = %config.assignment.node_id,
"Skipping in-memory mapping update due to conflict"
);
return Ok(());
}
}
map.insert(
machine_id.to_string(),
(node_id.to_string(), config.clone()),
);
}
map.insert(machine_id.to_string(), config.clone());
Ok(())
}
@ -774,7 +782,7 @@ async fn store_cluster_node_if_configured(
return Ok(());
}
let mut labels = node_config.labels.clone();
let mut labels = node_config.assignment.labels.clone();
for (key, value) in &node_info.metadata {
labels.insert(key.clone(), value.clone());
}
@ -782,8 +790,8 @@ async fn store_cluster_node_if_configured(
labels.remove("services");
let mut roles = Vec::new();
if !node_config.role.trim().is_empty() {
roles.push(node_config.role.clone());
if !node_config.assignment.role.trim().is_empty() {
roles.push(node_config.assignment.role.clone());
} else if let Some(role) = node_info.metadata.get("role") {
if !role.trim().is_empty() {
roles.push(role.clone());
@ -797,15 +805,19 @@ async fn store_cluster_node_if_configured(
hostname: node_info.hostname.clone(),
roles,
labels,
pool: node_config.pool.clone(),
node_class: node_config.node_class.clone(),
failure_domain: node_config.failure_domain.clone(),
nix_profile: node_config.nix_profile.clone(),
install_plan: node_config.install_plan.clone(),
pool: node_config.assignment.pool.clone(),
node_class: node_config.assignment.node_class.clone(),
failure_domain: node_config.assignment.failure_domain.clone(),
nix_profile: node_config.bootstrap_plan.nix_profile.clone(),
install_plan: node_config.bootstrap_plan.install_plan.clone(),
hardware_facts: hardware_facts.cloned(),
state: Some(format!("{:?}", node_info.state).to_lowercase()),
commission_state: hardware_facts.map(|_| CommissionState::Discovered),
install_state: node_config.install_plan.as_ref().map(|_| InstallState::Pending),
install_state: node_config
.bootstrap_plan
.install_plan
.as_ref()
.map(|_| InstallState::Pending),
commissioned_at: None,
last_inventory_hash: inventory_hash(hardware_facts),
power_state: node_info
@ -852,7 +864,6 @@ mod tests {
use crate::config::Config;
use crate::state::AppState;
use axum::http::HeaderMap;
use std::collections::HashMap;
fn test_headers() -> HeaderMap {
let mut headers = HeaderMap::new();
@ -866,27 +877,39 @@ mod tests {
Arc::new(AppState::with_config(config))
}
#[tokio::test]
async fn test_phone_home_known_machine() {
let state = test_state();
// Pre-register a machine
let config = NodeConfig {
hostname: "node01".to_string(),
role: "control-plane".to_string(),
ip: "10.0.1.10".to_string(),
services: vec!["chainfire".to_string(), "flaredb".to_string()],
ssh_authorized_keys: vec![],
fn test_node_config(node_id: &str, role: &str, ip: &str, services: Vec<&str>) -> NodeConfig {
NodeConfig::from_parts(
NodeAssignment {
node_id: node_id.to_string(),
hostname: node_id.to_string(),
role: role.to_string(),
ip: ip.to_string(),
labels: HashMap::new(),
pool: None,
node_class: None,
failure_domain: None,
},
BootstrapPlan {
services: services.into_iter().map(str::to_string).collect(),
nix_profile: None,
install_plan: None,
};
},
BootstrapSecrets::default(),
)
}
#[tokio::test]
async fn test_phone_home_known_machine() {
let state = test_state();
state.machine_configs.write().await.insert(
"test-machine-01".to_string(),
("node01".to_string(), config),
test_node_config(
"node01",
"control-plane",
"10.0.1.10",
vec!["chainfire", "flaredb"],
),
);
let request = PhoneHomeRequest {
@ -904,16 +927,14 @@ mod tests {
let response = result.unwrap().0;
assert!(response.success);
assert_eq!(response.node_id, "node01");
assert_eq!(response.state, NodeState::Provisioning);
assert!(response.node_config.is_some());
assert!(response.ssh_host_key.is_none());
assert_eq!(response.node_config.assignment.node_id, "node01");
assert_eq!(response.node_config.assignment.role, "control-plane");
assert_eq!(
response.node_config.bootstrap_plan.services,
vec!["chainfire".to_string(), "flaredb".to_string()]
);
let config = response.node_config.unwrap();
assert_eq!(config.hostname, "node01");
assert_eq!(config.role, "control-plane");
// Verify node was stored
let nodes = state.nodes.read().await;
assert!(nodes.contains_key("node01"));
}
@ -940,35 +961,18 @@ mod tests {
let response = result.unwrap().0;
assert!(response.success);
assert!(response.node_id.starts_with("node-"));
assert_eq!(response.state, NodeState::Provisioning);
assert!(response.node_config.is_some());
let config = response.node_config.unwrap();
assert_eq!(config.role, "worker"); // Default role
assert!(response.node_config.assignment.node_id.starts_with("node-"));
assert_eq!(response.node_config.assignment.role, "worker");
}
#[tokio::test]
async fn test_phone_home_with_preregistered_config() {
let state = test_state();
// Pre-register a machine
let config = NodeConfig {
hostname: "my-node".to_string(),
role: "storage".to_string(),
ip: "10.0.2.50".to_string(),
services: vec!["lightningstor".to_string()],
ssh_authorized_keys: vec![],
labels: HashMap::new(),
pool: None,
node_class: None,
failure_domain: None,
nix_profile: None,
install_plan: None,
};
state.machine_configs.write().await.insert(
"preregistered-123".to_string(),
("my-node".to_string(), config),
test_node_config("my-node", "storage", "10.0.2.50", vec!["lightningstor"]),
);
let request = PhoneHomeRequest {
@ -986,11 +990,9 @@ mod tests {
let response = result.unwrap().0;
assert!(response.success);
assert_eq!(response.node_id, "my-node");
let config = response.node_config.unwrap();
assert_eq!(config.role, "storage");
assert_eq!(config.ip, "10.0.2.50");
assert_eq!(response.node_config.assignment.node_id, "my-node");
assert_eq!(response.node_config.assignment.role, "storage");
assert_eq!(response.node_config.assignment.ip, "10.0.2.50");
}
#[test]
@ -1077,14 +1079,18 @@ mod tests {
labels: HashMap::from([("pool-kind".to_string(), "accelerated".to_string())]),
}];
let (node_id, config) = build_node_config_from_rule(&rule, &request, &node_classes, &pools);
let config = build_node_config_from_rule(&rule, &request, &node_classes, &pools);
assert_eq!(node_id, "gpu-dyn-01");
assert_eq!(config.role, "worker");
assert_eq!(config.pool.as_deref(), Some("gpu"));
assert_eq!(config.node_class.as_deref(), Some("gpu-worker"));
assert_eq!(config.nix_profile.as_deref(), Some("profiles/gpu-worker"));
assert_eq!(config.assignment.node_id, "gpu-dyn-01");
assert_eq!(config.assignment.role, "worker");
assert_eq!(config.assignment.pool.as_deref(), Some("gpu"));
assert_eq!(config.assignment.node_class.as_deref(), Some("gpu-worker"));
assert_eq!(
config.bootstrap_plan.nix_profile.as_deref(),
Some("profiles/gpu-worker")
);
let install_plan = config
.bootstrap_plan
.install_plan
.expect("install_plan should inherit from class");
assert_eq!(
@ -1095,15 +1101,30 @@ mod tests {
install_plan.disko_config_path.as_deref(),
Some("profiles/gpu-worker/disko.nix")
);
assert_eq!(config.labels.get("tier").map(String::as_str), Some("gpu"));
assert_eq!(
config.labels.get("pool-kind").map(String::as_str),
config.assignment.labels.get("tier").map(String::as_str),
Some("gpu")
);
assert_eq!(
config
.assignment
.labels
.get("pool-kind")
.map(String::as_str),
Some("accelerated")
);
assert_eq!(
config.labels.get("accelerator").map(String::as_str),
config
.assignment
.labels
.get("accelerator")
.map(String::as_str),
Some("nvidia")
);
assert_eq!(config.failure_domain.as_deref(), Some("rack-z"));
assert_eq!(config.assignment.failure_domain.as_deref(), Some("rack-z"));
assert_eq!(
config.bootstrap_secrets.ssh_authorized_keys,
vec!["ssh-ed25519 test".to_string()]
);
}
}

View file

@ -22,8 +22,8 @@ pub struct AppState {
/// Key: node_id, Value: NodeInfo
pub nodes: RwLock<HashMap<String, NodeInfo>>,
/// Fallback in-memory machine_id → (node_id, NodeConfig) mapping
pub machine_configs: RwLock<HashMap<String, (String, deployer_types::NodeConfig)>>,
/// Fallback in-memory machine_id → bootstrap node config mapping
pub machine_configs: RwLock<HashMap<String, deployer_types::NodeConfig>>,
}
impl AppState {

View file

@ -7,7 +7,6 @@ use chainfire_client::Client as ChainFireClient;
use deployer_types::{EnrollmentRuleSpec, NodeClassSpec, NodeConfig, NodeInfo, NodePoolSpec};
use serde::de::DeserializeOwned;
use serde::Serialize;
use std::collections::HashMap;
use thiserror::Error;
use tracing::{debug, error, warn};
@ -63,11 +62,6 @@ impl NodeStorage {
format!("{}/nodes/info/{}", self.namespace, node_id)
}
/// Key for machine_id → node_id mapping
fn mapping_key(&self, machine_id: &str) -> String {
format!("{}/nodes/mapping/{}", self.namespace, machine_id)
}
fn cluster_node_key(&self, cluster_namespace: &str, cluster_id: &str, node_id: &str) -> String {
format!(
"{}/clusters/{}/nodes/{}",
@ -118,81 +112,49 @@ impl NodeStorage {
pub async fn register_node(
&mut self,
machine_id: &str,
node_id: &str,
config: &NodeConfig,
) -> Result<(), StorageError> {
let config_key = self.config_key(machine_id);
let mapping_key = self.mapping_key(machine_id);
let config_json = serde_json::to_vec(config)?;
if let Some(existing) = self.client.get(&mapping_key).await? {
let existing_node = String::from_utf8_lossy(&existing).to_string();
if existing_node != node_id {
if let Some(existing) = self.client.get(&config_key).await? {
let existing_config: NodeConfig = serde_json::from_slice(&existing)?;
if existing_config.assignment.node_id != config.assignment.node_id {
return Err(StorageError::Conflict(format!(
"machine_id {} already mapped to {}",
machine_id, existing_node
machine_id, existing_config.assignment.node_id
)));
}
}
debug!(
machine_id = %machine_id,
node_id = %node_id,
node_id = %config.assignment.node_id,
key = %config_key,
"Registering node config in ChainFire"
);
// Store config
self.client.put(&config_key, &config_json).await?;
// Store machine_id → node_id mapping
self.client.put(&mapping_key, node_id.as_bytes()).await?;
Ok(())
}
/// Lookup node_id mapping by machine_id
pub async fn get_node_mapping(
&mut self,
machine_id: &str,
) -> Result<Option<String>, StorageError> {
let mapping_key = self.mapping_key(machine_id);
match self.client.get(&mapping_key).await? {
Some(bytes) => Ok(Some(String::from_utf8_lossy(&bytes).to_string())),
None => Ok(None),
}
}
/// Lookup node config by machine_id
pub async fn get_node_config(
&mut self,
machine_id: &str,
) -> Result<Option<(String, NodeConfig)>, StorageError> {
) -> Result<Option<NodeConfig>, StorageError> {
let config_key = self.config_key(machine_id);
let mapping_key = self.mapping_key(machine_id);
debug!(machine_id = %machine_id, key = %config_key, "Looking up node config");
// Get node_id mapping
let node_id = match self.client.get(&mapping_key).await? {
Some(bytes) => String::from_utf8_lossy(&bytes).to_string(),
None => {
debug!(machine_id = %machine_id, "No mapping found");
return Ok(None);
}
};
// Get config
match self.client.get(&config_key).await? {
Some(bytes) => {
let config: NodeConfig = serde_json::from_slice(&bytes)?;
Ok(Some((node_id, config)))
Ok(Some(config))
}
None => {
warn!(
machine_id = %machine_id,
"Mapping exists but config not found"
);
debug!(machine_id = %machine_id, "No config found");
Ok(None)
}
}
@ -213,7 +175,7 @@ impl NodeStorage {
Ok(())
}
/// Store cluster node state under photoncloud/clusters/{cluster_id}/nodes/{node_id}
/// Store cluster node state under ultracloud/clusters/{cluster_id}/nodes/{node_id}
pub async fn store_cluster_node<T: Serialize>(
&mut self,
cluster_namespace: &str,
@ -234,7 +196,7 @@ impl NodeStorage {
Ok(())
}
/// List cluster nodes under photoncloud/clusters/{cluster_id}/nodes/
/// List cluster nodes under ultracloud/clusters/{cluster_id}/nodes/
pub async fn list_cluster_nodes(
&mut self,
cluster_namespace: &str,
@ -298,43 +260,6 @@ impl NodeStorage {
}
}
/// Pre-register a machine mapping (admin API)
///
/// This allows administrators to pre-configure node assignments
/// before machines boot and phone home.
pub async fn pre_register(
&mut self,
machine_id: &str,
node_id: &str,
role: &str,
ip: Option<&str>,
services: Vec<String>,
ssh_authorized_keys: Vec<String>,
) -> Result<(), StorageError> {
let config = NodeConfig {
hostname: node_id.to_string(),
role: role.to_string(),
ip: ip.unwrap_or("").to_string(),
services,
ssh_authorized_keys,
labels: HashMap::new(),
pool: None,
node_class: None,
failure_domain: None,
nix_profile: None,
install_plan: None,
};
debug!(
machine_id = %machine_id,
node_id = %node_id,
role = %role,
"Pre-registering node"
);
self.register_node(machine_id, node_id, &config).await
}
/// List all registered nodes
pub async fn list_nodes(&mut self) -> Result<Vec<NodeInfo>, StorageError> {
let prefix = format!("{}/nodes/info/", self.namespace);
@ -354,44 +279,24 @@ impl NodeStorage {
Ok(nodes)
}
/// List all pre-registered machine configs (machine_id -> node_id, config)
/// List all pre-registered machine configs (machine_id -> config)
pub async fn list_machine_configs(
&mut self,
) -> Result<Vec<(String, String, NodeConfig)>, StorageError> {
) -> Result<Vec<(String, NodeConfig)>, StorageError> {
let config_prefix = format!("{}/nodes/config/", self.namespace);
let mapping_prefix = format!("{}/nodes/mapping/", self.namespace);
let configs = self.client.get_prefix(&config_prefix).await?;
let mappings = self.client.get_prefix(&mapping_prefix).await?;
let mut config_map: HashMap<String, NodeConfig> = HashMap::new();
let mut results = Vec::new();
for (key, value) in configs {
let key_str = String::from_utf8_lossy(&key);
if let Some(machine_id) = key_str.strip_prefix(&config_prefix) {
if let Ok(config) = serde_json::from_slice::<NodeConfig>(&value) {
config_map.insert(machine_id.to_string(), config);
results.push((machine_id.to_string(), config));
} else {
warn!(key = %key_str, "Failed to deserialize node config");
}
}
}
let mut mappings_map: HashMap<String, String> = HashMap::new();
for (key, value) in mappings {
let key_str = String::from_utf8_lossy(&key);
if let Some(machine_id) = key_str.strip_prefix(&mapping_prefix) {
let node_id = String::from_utf8_lossy(&value).to_string();
mappings_map.insert(machine_id.to_string(), node_id);
}
}
let mut results = Vec::new();
for (machine_id, node_id) in mappings_map {
if let Some(config) = config_map.get(&machine_id) {
results.push((machine_id.clone(), node_id.clone(), config.clone()));
}
}
Ok(results)
}
}
@ -399,6 +304,7 @@ impl NodeStorage {
#[cfg(test)]
mod tests {
use super::*;
use deployer_types::{BootstrapPlan, BootstrapSecrets, NodeAssignment};
// Note: Integration tests require a running ChainFire instance.
// These unit tests verify serialization and key generation.
@ -411,44 +317,50 @@ mod tests {
let node_id = "node01";
let config_key = format!("{}/nodes/config/{}", namespace, machine_id);
let mapping_key = format!("{}/nodes/mapping/{}", namespace, machine_id);
let info_key = format!("{}/nodes/info/{}", namespace, node_id);
assert_eq!(config_key, "deployer/nodes/config/abc123");
assert_eq!(mapping_key, "deployer/nodes/mapping/abc123");
assert_eq!(info_key, "deployer/nodes/info/node01");
let cluster_namespace = "photoncloud";
let cluster_namespace = "ultracloud";
let cluster_id = "cluster-a";
let cluster_key = format!(
"{}/clusters/{}/nodes/{}",
cluster_namespace, cluster_id, node_id
);
assert_eq!(cluster_key, "photoncloud/clusters/cluster-a/nodes/node01");
assert_eq!(cluster_key, "ultracloud/clusters/cluster-a/nodes/node01");
}
#[test]
fn test_node_config_serialization() {
let config = NodeConfig {
let config = NodeConfig::from_parts(
NodeAssignment {
node_id: "node01".to_string(),
hostname: "node01".to_string(),
role: "control-plane".to_string(),
ip: "10.0.1.10".to_string(),
services: vec!["chainfire".to_string(), "flaredb".to_string()],
ssh_authorized_keys: vec![],
labels: HashMap::new(),
labels: std::collections::HashMap::new(),
pool: None,
node_class: None,
failure_domain: None,
},
BootstrapPlan {
services: vec!["chainfire".to_string(), "flaredb".to_string()],
nix_profile: None,
install_plan: None,
};
},
BootstrapSecrets::default(),
);
let json = serde_json::to_vec(&config).unwrap();
let deserialized: NodeConfig = serde_json::from_slice(&json).unwrap();
assert_eq!(deserialized.hostname, "node01");
assert_eq!(deserialized.role, "control-plane");
assert_eq!(deserialized.services.len(), 2);
assert!(deserialized.ssh_authorized_keys.is_empty());
assert_eq!(deserialized.assignment.hostname, "node01");
assert_eq!(deserialized.assignment.role, "control-plane");
assert_eq!(deserialized.bootstrap_plan.services.len(), 2);
assert!(deserialized
.bootstrap_secrets
.ssh_authorized_keys
.is_empty());
}
}

View file

@ -18,7 +18,7 @@ pub fn issue_node_cert(
dns_names.push(hostname.to_string());
}
if dns_names.is_empty() {
dns_names.push("photoncloud-node".to_string());
dns_names.push("ultracloud-node".to_string());
}
let mut params =

View file

@ -149,6 +149,71 @@ impl InstallPlan {
}
}
/// Stable node assignment returned by bootstrap enrollment.
///
/// Carries the node's identity and placement only; installer details live
/// in `BootstrapPlan` and issued credentials in `BootstrapSecrets`.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Default)]
pub struct NodeAssignment {
    /// Stable node identifier (e.g. "node01").
    pub node_id: String,
    /// Hostname assigned to the node.
    pub hostname: String,
    /// Node role (e.g. "control-plane", "worker").
    pub role: String,
    /// Node IP address.
    pub ip: String,
    /// Labels applied at enrollment time; defaults to empty when absent.
    #[serde(default)]
    pub labels: HashMap<String, String>,
    /// Optional pool assignment; omitted from serialized output when `None`.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub pool: Option<String>,
    /// Optional node class assignment; omitted when `None`.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub node_class: Option<String>,
    /// Optional failure domain (e.g. a rack identifier); omitted when `None`.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub failure_domain: Option<String>,
}
/// Bootstrap plan describing how the installer should materialize the node.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Default)]
pub struct BootstrapPlan {
    /// Services to run on this node; defaults to empty when absent.
    #[serde(default)]
    pub services: Vec<String>,
    /// Optional Nix profile or flake attr to apply after bootstrap.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub nix_profile: Option<String>,
    /// Optional explicit install plan used by the bootstrap ISO/netboot path.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub install_plan: Option<InstallPlan>,
}
/// Bootstrap credentials and trust material issued for a node.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Default)]
pub struct BootstrapSecrets {
    /// SSH authorized keys for bootstrap access; defaults to empty.
    #[serde(default)]
    pub ssh_authorized_keys: Vec<String>,
    /// SSH host private key (ed25519); omitted when not issued.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub ssh_host_key: Option<String>,
    /// TLS certificate for node services; omitted when not issued.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub tls_cert: Option<String>,
    /// TLS private key for node services; omitted when not issued.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub tls_key: Option<String>,
}
/// Canonical bootstrap configuration for a node.
///
/// Aggregates the node's identity/placement (`assignment`), the installer
/// plan (`bootstrap_plan`), and issued credentials (`bootstrap_secrets`).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct NodeConfig {
    /// Stable identity and placement for the node.
    pub assignment: NodeAssignment,
    /// Installer/bootstrap plan; defaults when absent from serialized input.
    #[serde(default)]
    pub bootstrap_plan: BootstrapPlan,
    /// Issued credentials and trust material; defaults when absent.
    #[serde(default)]
    pub bootstrap_secrets: BootstrapSecrets,
}
impl NodeConfig {
pub fn from_parts(
assignment: NodeAssignment,
bootstrap_plan: BootstrapPlan,
bootstrap_secrets: BootstrapSecrets,
) -> Self {
Self {
assignment,
bootstrap_plan,
bootstrap_secrets,
}
}
}
/// Basic inventory record for a physical disk observed during commissioning.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Default)]
pub struct DiskFact {
@ -209,41 +274,6 @@ pub struct HardwareFacts {
pub dmi: Option<DmiFact>,
}
/// Node configuration returned by Deployer
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct NodeConfig {
/// Node hostname
pub hostname: String,
/// Node role (control-plane, worker)
pub role: String,
/// Node IP address
pub ip: String,
/// Services to run on this node
#[serde(default)]
pub services: Vec<String>,
/// SSH authorized keys for bootstrap access
#[serde(default)]
pub ssh_authorized_keys: Vec<String>,
/// Desired labels applied at enrollment time
#[serde(default)]
pub labels: HashMap<String, String>,
/// Optional pool assignment
#[serde(default, skip_serializing_if = "Option::is_none")]
pub pool: Option<String>,
/// Optional node class assignment
#[serde(default, skip_serializing_if = "Option::is_none")]
pub node_class: Option<String>,
/// Optional failure domain
#[serde(default, skip_serializing_if = "Option::is_none")]
pub failure_domain: Option<String>,
/// Optional Nix profile or flake attr to apply after bootstrap
#[serde(default, skip_serializing_if = "Option::is_none")]
pub nix_profile: Option<String>,
/// Optional explicit install plan used by the bootstrap ISO/netboot path.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub install_plan: Option<InstallPlan>,
}
/// Phone Home request payload (machine-id based)
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PhoneHomeRequest {
@ -277,22 +307,10 @@ pub struct PhoneHomeResponse {
/// Human-readable message
#[serde(skip_serializing_if = "Option::is_none")]
pub message: Option<String>,
/// Assigned node identifier
pub node_id: String,
/// Assigned node state
pub state: NodeState,
/// Node configuration (topology, services, etc.)
#[serde(skip_serializing_if = "Option::is_none")]
pub node_config: Option<NodeConfig>,
/// SSH host private key (ed25519)
#[serde(skip_serializing_if = "Option::is_none")]
pub ssh_host_key: Option<String>,
/// TLS certificate for node services
#[serde(skip_serializing_if = "Option::is_none")]
pub tls_cert: Option<String>,
/// TLS private key for node services
#[serde(skip_serializing_if = "Option::is_none")]
pub tls_key: Option<String>,
/// Canonical bootstrap configuration returned by the bootstrap API.
pub node_config: NodeConfig,
}
fn default_max_instances_per_node() -> u32 {
@ -537,7 +555,7 @@ pub struct LoadBalancerPublicationSpec {
pub pool_protocol: Option<String>,
}
/// Desired service publication through PhotonCloud network components.
/// Desired service publication through UltraCloud network components.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Default)]
pub struct ServicePublicationSpec {
#[serde(default)]
@ -581,7 +599,7 @@ impl Default for ServiceDependencySpec {
}
}
/// Cluster node record stored under photoncloud/clusters/{cluster_id}/nodes/{node_id}.
/// Cluster node record stored under ultracloud/clusters/{cluster_id}/nodes/{node_id}.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct ClusterNodeRecord {
pub node_id: String,
@ -679,7 +697,7 @@ pub struct DesiredSystemSpec {
pub drain_before_apply: Option<bool>,
}
/// Cluster metadata (PhotonCloud scope).
/// Cluster metadata (UltraCloud scope).
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct ClusterSpec {
pub cluster_id: String,
@ -993,7 +1011,7 @@ pub fn cluster_node_pool(node: &ClusterNodeRecord) -> Option<&str> {
.or_else(|| node.labels.get("pool").map(String::as_str))
.or_else(|| {
node.labels
.get("pool.photoncloud.io/name")
.get("pool.ultracloud.io/name")
.map(String::as_str)
})
}
@ -1004,7 +1022,7 @@ pub fn cluster_node_class(node: &ClusterNodeRecord) -> Option<&str> {
.or_else(|| node.labels.get("node_class").map(String::as_str))
.or_else(|| {
node.labels
.get("nodeclass.photoncloud.io/name")
.get("nodeclass.ultracloud.io/name")
.map(String::as_str)
})
}
@ -1477,16 +1495,19 @@ mod tests {
#[test]
fn test_phone_home_response_with_secrets() {
let node_config = NodeConfig {
let node_config = NodeConfig::from_parts(
NodeAssignment {
node_id: "node01".to_string(),
hostname: "node01".to_string(),
role: "control-plane".to_string(),
ip: "10.0.1.10".to_string(),
services: vec!["chainfire".to_string(), "flaredb".to_string()],
ssh_authorized_keys: vec![],
labels: HashMap::new(),
pool: None,
node_class: None,
failure_domain: None,
},
BootstrapPlan {
services: vec!["chainfire".to_string(), "flaredb".to_string()],
nix_profile: None,
install_plan: Some(InstallPlan {
nixos_configuration: Some("node01".to_string()),
@ -1494,34 +1515,95 @@ mod tests {
target_disk: Some("/dev/vda".to_string()),
target_disk_by_id: None,
}),
};
},
BootstrapSecrets {
ssh_authorized_keys: vec![],
ssh_host_key: Some("ssh-key-data".to_string()),
tls_cert: None,
tls_key: None,
},
);
let response = PhoneHomeResponse {
success: true,
message: Some("Node registered".to_string()),
node_id: "node01".to_string(),
state: NodeState::Provisioning,
node_config: Some(node_config),
ssh_host_key: Some("ssh-key-data".to_string()),
tls_cert: None,
tls_key: None,
node_config,
};
let json = serde_json::to_string(&response).unwrap();
let deserialized: PhoneHomeResponse = serde_json::from_str(&json).unwrap();
assert_eq!(deserialized.node_id, "node01");
assert_eq!(deserialized.state, NodeState::Provisioning);
assert!(deserialized.node_config.is_some());
assert!(deserialized.ssh_host_key.is_some());
assert_eq!(deserialized.node_config.assignment.node_id, "node01");
assert_eq!(
deserialized
.node_config
.bootstrap_secrets
.ssh_host_key
.as_deref(),
Some("ssh-key-data")
);
let install_plan = deserialized
.node_config
.bootstrap_plan
.install_plan
.as_ref()
.and_then(|config| config.install_plan.as_ref())
.expect("install_plan should round-trip");
assert_eq!(install_plan.nixos_configuration.as_deref(), Some("node01"));
assert_eq!(install_plan.target_disk.as_deref(), Some("/dev/vda"));
}
#[test]
fn test_node_config_roundtrip() {
let config = NodeConfig::from_parts(
NodeAssignment {
node_id: "node02".to_string(),
hostname: "node02".to_string(),
role: "worker".to_string(),
ip: "10.0.1.12".to_string(),
labels: HashMap::from([("tier".to_string(), "general".to_string())]),
pool: Some("general".to_string()),
node_class: Some("worker-linux".to_string()),
failure_domain: Some("rack-b".to_string()),
},
BootstrapPlan {
services: vec!["plasmavmc".to_string()],
nix_profile: Some("profiles/worker-linux".to_string()),
install_plan: Some(InstallPlan {
nixos_configuration: Some("worker-linux".to_string()),
disko_config_path: Some("profiles/worker-linux/disko.nix".to_string()),
target_disk: None,
target_disk_by_id: Some("/dev/disk/by-id/worker-default".to_string()),
}),
},
BootstrapSecrets {
ssh_authorized_keys: vec!["ssh-ed25519 AAAATEST test".to_string()],
ssh_host_key: Some("ssh-host-key".to_string()),
tls_cert: None,
tls_key: None,
},
);
let json = serde_json::to_string(&config).unwrap();
let decoded: NodeConfig = serde_json::from_str(&json).unwrap();
assert_eq!(decoded.assignment.hostname, "node02");
assert_eq!(decoded.assignment.role, "worker");
assert_eq!(decoded.assignment.pool.as_deref(), Some("general"));
assert_eq!(
decoded.bootstrap_plan.nix_profile.as_deref(),
Some("profiles/worker-linux")
);
assert_eq!(
decoded
.bootstrap_plan
.install_plan
.as_ref()
.and_then(|plan| plan.target_disk_by_id.as_deref()),
Some("/dev/disk/by-id/worker-default")
);
assert_eq!(decoded.bootstrap_secrets.ssh_authorized_keys.len(), 1);
}
#[test]
fn test_service_schedule_defaults() {
let schedule = ServiceScheduleSpec::default();
@ -1844,7 +1926,7 @@ mod tests {
let observed = ObservedSystemState {
node_id: "node01".to_string(),
nixos_configuration: Some("node01".to_string()),
flake_root: Some("/opt/plasmacloud-src".to_string()),
flake_root: Some("/opt/ultracloud-src".to_string()),
target_system: Some("/nix/store/system-node01".to_string()),
configured_system: Some("/nix/store/system-node01".to_string()),
current_system: Some("/nix/store/system-old".to_string()),
@ -1872,7 +1954,7 @@ mod tests {
deployment_id: Some("worker-rollout".to_string()),
nixos_configuration: Some("node01".to_string()),
target_system: Some("/nix/store/system-node01".to_string()),
flake_ref: Some("/opt/plasmacloud-src".to_string()),
flake_ref: Some("/opt/ultracloud-src".to_string()),
switch_action: Some("switch".to_string()),
health_check_command: vec!["systemctl".to_string(), "is-system-running".to_string()],
rollback_on_failure: Some(true),
@ -1906,7 +1988,7 @@ mod tests {
},
nixos_configuration: Some("worker-golden".to_string()),
target_system: Some("/nix/store/worker-golden".to_string()),
flake_ref: Some("/opt/plasmacloud-src".to_string()),
flake_ref: Some("/opt/ultracloud-src".to_string()),
batch_size: Some(1),
max_unavailable: Some(1),
health_check_command: vec!["true".to_string()],

View file

@ -46,12 +46,12 @@ fn instances_prefix(cluster_namespace: &str, cluster_id: &str) -> Vec<u8> {
}
#[derive(Debug, Parser)]
#[command(author, version, about = "PhotonCloud non-Kubernetes fleet scheduler")]
#[command(author, version, about = "UltraCloud non-Kubernetes fleet scheduler")]
struct Cli {
#[arg(long, default_value = "http://127.0.0.1:7000")]
chainfire_endpoint: String,
#[arg(long, default_value = "photoncloud")]
#[arg(long, default_value = "ultracloud")]
cluster_namespace: String,
#[arg(long)]
@ -1507,7 +1507,7 @@ mod tests {
fn test_scheduler() -> Scheduler {
Scheduler::new(Cli {
chainfire_endpoint: "http://127.0.0.1:7000".to_string(),
cluster_namespace: "photoncloud".to_string(),
cluster_namespace: "ultracloud".to_string(),
cluster_id: "test-cluster".to_string(),
interval_secs: 1,
heartbeat_timeout_secs: 300,

View file

@ -51,7 +51,7 @@ struct Cli {
#[arg(long, default_value = "http://127.0.0.1:7000")]
chainfire_endpoint: String,
#[arg(long, default_value = "photoncloud")]
#[arg(long, default_value = "ultracloud")]
cluster_namespace: String,
#[arg(long)]
@ -796,7 +796,7 @@ mod tests {
let resolved = resolve_desired_system(
&test_node(),
None,
"/opt/plasmacloud-src",
"/opt/ultracloud-src",
"switch",
&[],
true,
@ -804,7 +804,7 @@ mod tests {
.expect("desired system should resolve");
assert_eq!(resolved.nixos_configuration.as_deref(), Some("node01"));
assert_eq!(resolved.target_system, None);
assert_eq!(resolved.flake_ref, "/opt/plasmacloud-src");
assert_eq!(resolved.flake_ref, "/opt/ultracloud-src");
assert_eq!(resolved.switch_action, "switch");
assert!(resolved.rollback_on_failure);
}
@ -826,7 +826,7 @@ mod tests {
let resolved = resolve_desired_system(
&test_node(),
Some(&desired),
"/opt/plasmacloud-src",
"/opt/ultracloud-src",
"switch",
&[],
false,
@ -856,7 +856,7 @@ mod tests {
let resolved = resolve_desired_system(
&test_node(),
Some(&desired),
"/opt/plasmacloud-src",
"/opt/ultracloud-src",
"switch",
&[],
true,
@ -868,7 +868,7 @@ mod tests {
resolved.target_system.as_deref(),
Some("/nix/store/node01-next")
);
assert_eq!(resolved.flake_ref, "/opt/plasmacloud-src");
assert_eq!(resolved.flake_ref, "/opt/ultracloud-src");
}
#[test]
@ -888,14 +888,14 @@ mod tests {
let resolved = resolve_desired_system(
&test_node(),
Some(&desired),
"/opt/plasmacloud-src",
"/opt/ultracloud-src",
"switch",
&["systemctl".to_string(), "is-system-running".to_string()],
true,
)
.expect("desired system should resolve");
assert_eq!(resolved.flake_ref, "/opt/plasmacloud-src");
assert_eq!(resolved.flake_ref, "/opt/ultracloud-src");
assert_eq!(resolved.switch_action, "switch");
assert_eq!(
resolved.health_check_command,
@ -907,15 +907,15 @@ mod tests {
#[test]
fn target_flake_attr_is_rendered_from_root_and_configuration() {
assert_eq!(
target_flake_attr("/opt/plasmacloud-src", "node01"),
"/opt/plasmacloud-src#nixosConfigurations.node01.config.system.build.toplevel"
target_flake_attr("/opt/ultracloud-src", "node01"),
"/opt/ultracloud-src#nixosConfigurations.node01.config.system.build.toplevel"
);
}
#[test]
fn read_symlink_target_returns_none_for_missing_path() {
assert_eq!(
read_symlink_target("/tmp/photoncloud-nix-agent-missing-link"),
read_symlink_target("/tmp/ultracloud-nix-agent-missing-link"),
None
);
}
@ -925,7 +925,7 @@ mod tests {
let desired = ResolvedDesiredSystem {
nixos_configuration: Some("node01".to_string()),
target_system: None,
flake_ref: "/opt/plasmacloud-src".to_string(),
flake_ref: "/opt/ultracloud-src".to_string(),
switch_action: "boot".to_string(),
health_check_command: vec!["true".to_string()],
rollback_on_failure: true,
@ -948,7 +948,7 @@ mod tests {
let desired = ResolvedDesiredSystem {
nixos_configuration: Some("node01".to_string()),
target_system: None,
flake_ref: "/opt/plasmacloud-src".to_string(),
flake_ref: "/opt/ultracloud-src".to_string(),
switch_action: "boot".to_string(),
health_check_command: vec!["true".to_string()],
rollback_on_failure: true,

View file

@ -316,7 +316,7 @@ impl Agent {
warn!(error = %e, "failed to sync local service instances");
}
} else {
info!("local instance upsert disabled; skipping /etc/photoncloud/instances.json");
info!("local instance upsert disabled; skipping /etc/ultracloud/instances.json");
}
if self.apply {
@ -619,10 +619,10 @@ impl Agent {
Ok(())
}
/// ローカルファイル (/etc/photoncloud/instances.json) から ServiceInstance 定義を読み、
/// Chainfire 上の `photoncloud/clusters/{cluster_id}/instances/{service}/{instance_id}` に upsert する。
/// ローカルファイル (/etc/ultracloud/instances.json) から ServiceInstance 定義を読み、
/// Chainfire 上の `ultracloud/clusters/{cluster_id}/instances/{service}/{instance_id}` に upsert する。
async fn sync_local_instances(&self, client: &mut Client) -> Result<()> {
let path = PathBuf::from("/etc/photoncloud/instances.json");
let path = PathBuf::from("/etc/ultracloud/instances.json");
let contents = match fs::read_to_string(&path) {
Ok(c) => c,
Err(e) => {
@ -1139,14 +1139,14 @@ mod tests {
fn test_agent() -> Agent {
Agent::new(
"http://127.0.0.1:7000".to_string(),
"photoncloud".to_string(),
"ultracloud".to_string(),
"test-cluster".to_string(),
"node01".to_string(),
Duration::from_secs(1),
300,
false,
false,
PathBuf::from("/tmp/photoncloud-node-agent-tests"),
PathBuf::from("/tmp/ultracloud-node-agent-tests"),
)
}

View file

@ -9,9 +9,9 @@ mod agent;
mod process;
mod watcher;
/// PhotonCloud NodeAgent
/// UltraCloud NodeAgent
///
/// - Chainfire 上の `photoncloud/clusters/{cluster_id}/nodes/{node_id}` と
/// - Chainfire 上の `ultracloud/clusters/{cluster_id}/nodes/{node_id}` と
/// `.../instances/*` を watch しつつ、周期 heartbeat/safety reconcile も行う。
/// - `--apply` が指定された場合のみプロセス起動/停止を行う(デフォルトは dry-run
#[derive(Parser, Debug)]
@ -21,11 +21,11 @@ struct Cli {
#[arg(long, default_value = "http://127.0.0.1:7000")]
chainfire_endpoint: String,
/// PhotonCloud cluster namespace (default: photoncloud)
#[arg(long, default_value = "photoncloud")]
/// UltraCloud cluster namespace (default: ultracloud)
#[arg(long, default_value = "ultracloud")]
cluster_namespace: String,
/// PhotonCloud Cluster ID
/// UltraCloud Cluster ID
#[arg(long)]
cluster_id: String,
@ -42,7 +42,7 @@ struct Cli {
heartbeat_timeout_secs: u64,
/// PIDファイル出力ディレクトリ
#[arg(long, default_value = "/var/run/photoncloud")]
#[arg(long, default_value = "/var/run/ultracloud")]
pid_dir: String,
/// Desired State を実際に適用する(プロセス起動/停止、ヘルスチェック更新)

View file

@ -1,5 +1,5 @@
[package]
name = "plasmacloud-reconciler"
name = "ultracloud-reconciler"
version.workspace = true
edition.workspace = true
rust-version.workspace = true

View file

@ -73,7 +73,7 @@ async fn ensure_project_admin_binding(
"roles/ProjectAdmin",
scope,
)
.with_created_by("plasmacloud-reconciler");
.with_created_by("ultracloud-reconciler");
client.create_binding(&binding).await?;
Ok(())
}

View file

@ -72,7 +72,7 @@ pub struct HostsCommand {
#[arg(long)]
pub endpoint: String,
#[arg(long, default_value = "photoncloud")]
#[arg(long, default_value = "ultracloud")]
pub cluster_namespace: String,
#[arg(long)]
@ -1204,7 +1204,7 @@ mod tests {
},
nixos_configuration: Some("worker-golden".to_string()),
target_system: Some("/nix/store/worker-golden".to_string()),
flake_ref: Some("/opt/plasmacloud-src".to_string()),
flake_ref: Some("/opt/ultracloud-src".to_string()),
batch_size: Some(1),
max_unavailable: Some(1),
health_check_command: vec!["true".to_string()],
@ -1219,7 +1219,7 @@ mod tests {
fn test_controller() -> HostDeploymentController {
HostDeploymentController::new(HostsCommand {
endpoint: "http://127.0.0.1:7000".to_string(),
cluster_namespace: "photoncloud".to_string(),
cluster_namespace: "ultracloud".to_string(),
cluster_id: "test-cluster".to_string(),
interval_secs: 1,
heartbeat_timeout_secs: 300,

View file

@ -3,29 +3,29 @@ set -euo pipefail
ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
if [[ -z "${PHOTONCLOUD_E2E_IN_NIX:-}" ]]; then
exec nix develop "$ROOT" -c env PHOTONCLOUD_E2E_IN_NIX=1 bash "$0" "$@"
if [[ -z "${ULTRACLOUD_E2E_IN_NIX:-}" ]]; then
exec nix develop "$ROOT" -c env ULTRACLOUD_E2E_IN_NIX=1 bash "$0" "$@"
fi
run_chainfire_server_bin() {
if [[ -n "${PHOTONCLOUD_CHAINFIRE_SERVER_BIN:-}" ]]; then
"$PHOTONCLOUD_CHAINFIRE_SERVER_BIN" "$@"
if [[ -n "${ULTRACLOUD_CHAINFIRE_SERVER_BIN:-}" ]]; then
"$ULTRACLOUD_CHAINFIRE_SERVER_BIN" "$@"
else
cargo run --manifest-path "$ROOT/chainfire/Cargo.toml" -p chainfire-server -- "$@"
fi
}
run_deployer_server_bin() {
if [[ -n "${PHOTONCLOUD_DEPLOYER_SERVER_BIN:-}" ]]; then
"$PHOTONCLOUD_DEPLOYER_SERVER_BIN" "$@"
if [[ -n "${ULTRACLOUD_DEPLOYER_SERVER_BIN:-}" ]]; then
"$ULTRACLOUD_DEPLOYER_SERVER_BIN" "$@"
else
cargo run --quiet --manifest-path "$ROOT/deployer/Cargo.toml" -p deployer-server -- "$@"
fi
}
run_deployer_ctl_bin() {
if [[ -n "${PHOTONCLOUD_DEPLOYER_CTL_BIN:-}" ]]; then
"$PHOTONCLOUD_DEPLOYER_CTL_BIN" "$@"
if [[ -n "${ULTRACLOUD_DEPLOYER_CTL_BIN:-}" ]]; then
"$ULTRACLOUD_DEPLOYER_CTL_BIN" "$@"
else
cargo run --quiet --manifest-path "$ROOT/deployer/Cargo.toml" -p deployer-ctl -- "$@"
fi
@ -164,7 +164,7 @@ wait_for_port "127.0.0.1" "$api_port" 120
cat >"$tmp_dir/deployer.toml" <<EOF
bind_addr = "127.0.0.1:${deployer_port}"
cluster_id = "test-cluster"
cluster_namespace = "photoncloud"
cluster_namespace = "ultracloud"
heartbeat_timeout_secs = 300
local_state_path = "$tmp_dir/deployer-state"
bootstrap_flake_bundle_path = "$tmp_dir/flake-bundle.tar.gz"
@ -269,7 +269,7 @@ run_deployer_ctl() {
run_deployer_ctl_bin \
--chainfire-endpoint "$chainfire_endpoint" \
--cluster-id test-cluster \
--cluster-namespace photoncloud \
--cluster-namespace ultracloud \
--deployer-namespace deployer \
"$@"
}
@ -296,14 +296,14 @@ with urllib.request.urlopen(request, timeout=5) as response:
payload = json.loads(response.read().decode("utf-8"))
assert payload["success"] is True
assert payload["node_id"] == "node-seeded"
assert payload["node_config"]["pool"] == "general"
assert payload["node_config"]["node_class"] == "general-worker"
assert payload["node_config"]["nix_profile"] == "profiles/worker-linux"
assert payload["node_config"]["install_plan"]["nixos_configuration"] == "node01"
assert payload["node_config"]["install_plan"]["disko_config_path"] == "nix/nodes/vm-cluster/node01/disko.nix"
assert payload["node_config"]["install_plan"]["target_disk"] == "/dev/vda"
assert payload["node_config"]["failure_domain"] == "rack-a"
assert payload["node_config"]["assignment"]["node_id"] == "node-seeded"
assert payload["node_config"]["assignment"]["pool"] == "general"
assert payload["node_config"]["assignment"]["node_class"] == "general-worker"
assert payload["node_config"]["assignment"]["failure_domain"] == "rack-a"
assert payload["node_config"]["bootstrap_plan"]["nix_profile"] == "profiles/worker-linux"
assert payload["node_config"]["bootstrap_plan"]["install_plan"]["nixos_configuration"] == "node01"
assert payload["node_config"]["bootstrap_plan"]["install_plan"]["disko_config_path"] == "nix/nodes/vm-cluster/node01/disko.nix"
assert payload["node_config"]["bootstrap_plan"]["install_plan"]["target_disk"] == "/dev/vda"
print("Seeded mapping validated")
PY
@ -326,7 +326,7 @@ for path, expected in (
payload = response.read().decode("utf-8")
assert expected in payload
if path.endswith("user-data"):
assert "/etc/plasmacloud/node-config.json" in payload
assert "/etc/ultracloud/node-config.json" in payload
assert "profiles/worker-linux" in payload
assert "\"nixos_configuration\": \"node01\"" in payload
@ -410,22 +410,22 @@ with urllib.request.urlopen(request, timeout=5) as response:
payload = json.loads(response.read().decode("utf-8"))
assert payload["success"] is True
assert payload["node_id"].startswith("edge-")
assert payload["node_config"]["role"] == "edge"
assert payload["node_config"]["pool"] == "edge"
assert payload["node_config"]["node_class"] == "edge-metal"
assert payload["node_config"]["nix_profile"] == "profiles/edge-metal"
assert payload["node_config"]["install_plan"]["nixos_configuration"] == "edge-metal"
assert payload["node_config"]["install_plan"]["disko_config_path"] == "profiles/edge-metal/disko.nix"
assert payload["node_config"]["install_plan"]["target_disk_by_id"] == "/dev/disk/by-id/edge-default"
assert "prismnet" in payload["node_config"]["services"]
assert payload["node_config"]["labels"]["managed-by"] == "deployer"
print(payload["node_id"])
assert payload["node_config"]["assignment"]["node_id"].startswith("edge-")
assert payload["node_config"]["assignment"]["role"] == "edge"
assert payload["node_config"]["assignment"]["pool"] == "edge"
assert payload["node_config"]["assignment"]["node_class"] == "edge-metal"
assert payload["node_config"]["bootstrap_plan"]["nix_profile"] == "profiles/edge-metal"
assert payload["node_config"]["bootstrap_plan"]["install_plan"]["nixos_configuration"] == "edge-metal"
assert payload["node_config"]["bootstrap_plan"]["install_plan"]["disko_config_path"] == "profiles/edge-metal/disko.nix"
assert payload["node_config"]["bootstrap_plan"]["install_plan"]["target_disk_by_id"] == "/dev/disk/by-id/edge-default"
assert "prismnet" in payload["node_config"]["bootstrap_plan"]["services"]
assert payload["node_config"]["assignment"]["labels"]["managed-by"] == "deployer"
print(payload["node_config"]["assignment"]["node_id"])
PY
)"
echo "Inspecting stored cluster node records"
run_deployer_ctl dump --prefix "photoncloud/clusters/test-cluster/nodes/" >"$tmp_dir/nodes.dump"
run_deployer_ctl dump --prefix "ultracloud/clusters/test-cluster/nodes/" >"$tmp_dir/nodes.dump"
python3 - "$tmp_dir/nodes.dump" "$dynamic_node_id" <<'PY'
import json
import sys
@ -479,7 +479,7 @@ print("Deployer bootstrap records validated")
PY
echo "Inspecting desired-system state"
run_deployer_ctl dump --prefix "photoncloud/clusters/test-cluster/nodes/node-seeded/desired-system" >"$tmp_dir/desired-system.dump"
run_deployer_ctl dump --prefix "ultracloud/clusters/test-cluster/nodes/node-seeded/desired-system" >"$tmp_dir/desired-system.dump"
python3 - "$tmp_dir/desired-system.dump" <<'PY'
import json
import sys

View file

@ -3,37 +3,37 @@ set -euo pipefail
ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
if [[ -z "${PHOTONCLOUD_E2E_IN_NIX:-}" ]]; then
exec nix develop "$ROOT" -c env PHOTONCLOUD_E2E_IN_NIX=1 bash "$0" "$@"
if [[ -z "${ULTRACLOUD_E2E_IN_NIX:-}" ]]; then
exec nix develop "$ROOT" -c env ULTRACLOUD_E2E_IN_NIX=1 bash "$0" "$@"
fi
run_chainfire_server_bin() {
if [[ -n "${PHOTONCLOUD_CHAINFIRE_SERVER_BIN:-}" ]]; then
"$PHOTONCLOUD_CHAINFIRE_SERVER_BIN" "$@"
if [[ -n "${ULTRACLOUD_CHAINFIRE_SERVER_BIN:-}" ]]; then
"$ULTRACLOUD_CHAINFIRE_SERVER_BIN" "$@"
else
cargo run --manifest-path "$ROOT/chainfire/Cargo.toml" -p chainfire-server -- "$@"
fi
}
run_deployer_ctl_bin() {
if [[ -n "${PHOTONCLOUD_DEPLOYER_CTL_BIN:-}" ]]; then
"$PHOTONCLOUD_DEPLOYER_CTL_BIN" "$@"
if [[ -n "${ULTRACLOUD_DEPLOYER_CTL_BIN:-}" ]]; then
"$ULTRACLOUD_DEPLOYER_CTL_BIN" "$@"
else
cargo run --quiet --manifest-path "$ROOT/deployer/Cargo.toml" -p deployer-ctl -- "$@"
fi
}
run_node_agent_bin() {
if [[ -n "${PHOTONCLOUD_NODE_AGENT_BIN:-}" ]]; then
"$PHOTONCLOUD_NODE_AGENT_BIN" "$@"
if [[ -n "${ULTRACLOUD_NODE_AGENT_BIN:-}" ]]; then
"$ULTRACLOUD_NODE_AGENT_BIN" "$@"
else
cargo run --quiet --manifest-path "$ROOT/deployer/Cargo.toml" -p node-agent -- "$@"
fi
}
run_fleet_scheduler_bin() {
if [[ -n "${PHOTONCLOUD_FLEET_SCHEDULER_BIN:-}" ]]; then
"$PHOTONCLOUD_FLEET_SCHEDULER_BIN" "$@"
if [[ -n "${ULTRACLOUD_FLEET_SCHEDULER_BIN:-}" ]]; then
"$ULTRACLOUD_FLEET_SCHEDULER_BIN" "$@"
else
cargo run --quiet --manifest-path "$ROOT/deployer/Cargo.toml" -p fleet-scheduler -- "$@"
fi
@ -63,7 +63,7 @@ cleanup() {
wait "$cf_pid" 2>/dev/null || true
fi
if [[ "${PHOTONCLOUD_KEEP_TMP:-}" == "1" ]]; then
if [[ "${ULTRACLOUD_KEEP_TMP:-}" == "1" ]]; then
echo "Keeping temporary directory: $tmp_dir" >&2
else
rm -rf "$tmp_dir"
@ -492,7 +492,7 @@ echo "Waiting for worker to remain blocked until api becomes healthy"
wait_for_service_state worker blocked 0 - 120
echo "Validating dependency block before api is healthy"
run_deployer_ctl dump --prefix "photoncloud/clusters/test-cluster/instances/worker/" >"$tmp_dir/worker-blocked.dump"
run_deployer_ctl dump --prefix "ultracloud/clusters/test-cluster/instances/worker/" >"$tmp_dir/worker-blocked.dump"
python3 - "$tmp_dir/worker-blocked.dump" <<'PY'
import sys
@ -503,7 +503,7 @@ if lines:
print("worker instances correctly blocked before dependency becomes healthy")
PY
run_deployer_ctl dump --prefix "photoncloud/clusters/test-cluster/service-statuses/worker" >"$tmp_dir/worker-status-blocked.dump"
run_deployer_ctl dump --prefix "ultracloud/clusters/test-cluster/service-statuses/worker" >"$tmp_dir/worker-status-blocked.dump"
python3 - "$tmp_dir/worker-status-blocked.dump" <<'PY'
import json
import sys
@ -596,7 +596,7 @@ print("HTTP endpoints are healthy")
PY
echo "Inspecting instance state in ChainFire"
run_deployer_ctl dump --prefix "photoncloud/clusters/test-cluster/instances/api/" >"$tmp_dir/instances.dump"
run_deployer_ctl dump --prefix "ultracloud/clusters/test-cluster/instances/api/" >"$tmp_dir/instances.dump"
python3 - "$tmp_dir/instances.dump" <<'PY'
import json
import sys
@ -629,7 +629,7 @@ if states != ["healthy", "healthy"]:
print("Observed two healthy scheduled instances across node01 and node02")
PY
run_deployer_ctl dump --prefix "photoncloud/clusters/test-cluster/instances/worker/" >"$tmp_dir/worker-instances.dump"
run_deployer_ctl dump --prefix "ultracloud/clusters/test-cluster/instances/worker/" >"$tmp_dir/worker-instances.dump"
python3 - "$tmp_dir/worker-instances.dump" <<'PY'
import json
import sys
@ -687,7 +687,7 @@ wait_for_service_state api healthy 1 healthy 120
wait_for_service_state worker healthy 1 healthy 120
echo "Inspecting scaled instance state in ChainFire"
run_deployer_ctl dump --prefix "photoncloud/clusters/test-cluster/instances/api/" >"$tmp_dir/instances-scaled.dump"
run_deployer_ctl dump --prefix "ultracloud/clusters/test-cluster/instances/api/" >"$tmp_dir/instances-scaled.dump"
python3 - "$tmp_dir/instances-scaled.dump" <<'PY'
import json
import sys
@ -718,7 +718,7 @@ if instance.get("state") != "healthy":
print("Observed one healthy scheduled instance on node01 after scale-down")
PY
run_deployer_ctl dump --prefix "photoncloud/clusters/test-cluster/instances/worker/" >"$tmp_dir/worker-instances-scaled.dump"
run_deployer_ctl dump --prefix "ultracloud/clusters/test-cluster/instances/worker/" >"$tmp_dir/worker-instances-scaled.dump"
python3 - "$tmp_dir/worker-instances-scaled.dump" <<'PY'
import json
import sys

View file

@ -3,31 +3,31 @@ set -euo pipefail
ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
if [[ -z "${PHOTONCLOUD_E2E_IN_NIX:-}" ]]; then
exec nix develop "$ROOT" -c env PHOTONCLOUD_E2E_IN_NIX=1 bash "$0" "$@"
if [[ -z "${ULTRACLOUD_E2E_IN_NIX:-}" ]]; then
exec nix develop "$ROOT" -c env ULTRACLOUD_E2E_IN_NIX=1 bash "$0" "$@"
fi
run_chainfire_server_bin() {
if [[ -n "${PHOTONCLOUD_CHAINFIRE_SERVER_BIN:-}" ]]; then
"$PHOTONCLOUD_CHAINFIRE_SERVER_BIN" "$@"
if [[ -n "${ULTRACLOUD_CHAINFIRE_SERVER_BIN:-}" ]]; then
"$ULTRACLOUD_CHAINFIRE_SERVER_BIN" "$@"
else
cargo run --manifest-path "$ROOT/chainfire/Cargo.toml" -p chainfire-server -- "$@"
fi
}
run_deployer_ctl_bin() {
if [[ -n "${PHOTONCLOUD_DEPLOYER_CTL_BIN:-}" ]]; then
"$PHOTONCLOUD_DEPLOYER_CTL_BIN" "$@"
if [[ -n "${ULTRACLOUD_DEPLOYER_CTL_BIN:-}" ]]; then
"$ULTRACLOUD_DEPLOYER_CTL_BIN" "$@"
else
cargo run --quiet --manifest-path "$ROOT/deployer/Cargo.toml" -p deployer-ctl -- "$@"
fi
}
run_plasmacloud_reconciler_bin() {
if [[ -n "${PHOTONCLOUD_PLASMACLOUD_RECONCILER_BIN:-}" ]]; then
"$PHOTONCLOUD_PLASMACLOUD_RECONCILER_BIN" "$@"
run_ultracloud_reconciler_bin() {
if [[ -n "${ULTRACLOUD_RECONCILER_BIN:-}" ]]; then
"$ULTRACLOUD_RECONCILER_BIN" "$@"
else
cargo run --quiet --manifest-path "$ROOT/deployer/Cargo.toml" -p plasmacloud-reconciler -- "$@"
cargo run --quiet --manifest-path "$ROOT/deployer/Cargo.toml" -p ultracloud-reconciler -- "$@"
fi
}
@ -50,7 +50,7 @@ cleanup() {
kill "$cf_pid" 2>/dev/null || true
wait "$cf_pid" 2>/dev/null || true
fi
if [[ "${PHOTONCLOUD_KEEP_TMP:-}" == "1" ]]; then
if [[ "${ULTRACLOUD_KEEP_TMP:-}" == "1" ]]; then
echo "Keeping temporary directory: $tmp_dir" >&2
else
rm -rf "$tmp_dir"
@ -253,16 +253,16 @@ run_deployer_ctl() {
run_deployer_ctl_bin \
--chainfire-endpoint "$chainfire_endpoint" \
--cluster-id test-cluster \
--cluster-namespace photoncloud \
--cluster-namespace ultracloud \
--deployer-namespace deployer \
"$@"
}
run_hosts_bg() {
run_plasmacloud_reconciler_bin \
run_ultracloud_reconciler_bin \
hosts \
--endpoint "$chainfire_endpoint" \
--cluster-namespace photoncloud \
--cluster-namespace ultracloud \
--cluster-id test-cluster \
--heartbeat-timeout-secs 300 \
--interval-secs 300 \
@ -346,7 +346,7 @@ assert status["failed_nodes"] == [], payload
print("initial rollout wave validated")
PY
run_deployer_ctl dump --prefix "photoncloud/clusters/test-cluster/nodes/" >"$tmp_dir/nodes-1.dump"
run_deployer_ctl dump --prefix "ultracloud/clusters/test-cluster/nodes/" >"$tmp_dir/nodes-1.dump"
python3 - "$tmp_dir/nodes-1.dump" <<'PY'
import json
import sys
@ -454,7 +454,7 @@ assert any('"ResetType":"PowerCycle"' in line for line in lines), lines
print("reinstall orchestration validated")
PY
run_deployer_ctl dump --prefix "photoncloud/clusters/test-cluster/nodes/node01" >"$tmp_dir/node01-post-reinstall.dump"
run_deployer_ctl dump --prefix "ultracloud/clusters/test-cluster/nodes/node01" >"$tmp_dir/node01-post-reinstall.dump"
python3 - "$tmp_dir/node01-post-reinstall.dump" <<'PY'
import sys
@ -478,7 +478,7 @@ PY
wait_for_deployment_state aborted true - - - 120
run_deployer_ctl dump --prefix "photoncloud/clusters/test-cluster/nodes/" >"$tmp_dir/nodes-2.dump"
run_deployer_ctl dump --prefix "ultracloud/clusters/test-cluster/nodes/" >"$tmp_dir/nodes-2.dump"
python3 - "$tmp_dir/nodes-2.dump" <<'PY'
import json
import sys

View file

@ -1,6 +1,6 @@
# Docs
This directory is the public documentation entrypoint for PhotonCloud.
This directory is the public documentation entrypoint for UltraCloud.
## Read First

View file

@ -1,6 +1,6 @@
# Component Matrix
PhotonCloud is intended to validate meaningful service combinations, not only a single all-on deployment.
UltraCloud is intended to validate meaningful service combinations, not only a single all-on deployment.
This page summarizes the compositions that are exercised by the VM-cluster harness today.
## Validated Control Plane

View file

@ -1,6 +1,6 @@
# Testing
PhotonCloud treats VM-first validation as the canonical local proof path.
UltraCloud treats VM-first validation as the canonical local proof path.
## Canonical Validation
@ -28,7 +28,7 @@ nix build .#checks.x86_64-linux.deployer-vm-smoke
Use these commands as the release-facing local proof set:
- `fresh-smoke`: whole-cluster readiness, core behavior, and fault injection
- `fresh-demo-vm-webapp`: focused VM demo showing a web app inside the guest with SQLite state persisted on the attached PhotonCloud volume across restart and migration
- `fresh-demo-vm-webapp`: focused VM demo showing a web app inside the guest with FlareDB-backed state and LightningStor object snapshots surviving restart and migration
- `fresh-matrix`: composed service scenarios such as `prismnet + flashdns + fiberlb` and PrismNet-backed VM hosting bundles with `plasmavmc + coronafs + lightningstor`
- `fresh-bench-storage`: CoronaFS local-vs-shared-volume throughput, cross-worker volume visibility, and LightningStor large/small-object throughput capture
- `deployer-vm-smoke`: prebuilt NixOS system closure handoff into `nix-agent`, proving host rollout can activate a host-built target without guest-side compilation
@ -40,6 +40,7 @@ nix run ./nix/test-cluster#cluster -- status
nix run ./nix/test-cluster#cluster -- logs node01
nix run ./nix/test-cluster#cluster -- ssh node04
nix run ./nix/test-cluster#cluster -- demo-vm-webapp
nix run ./nix/test-cluster#cluster -- serve-vm-webapp
nix run ./nix/test-cluster#cluster -- matrix
nix run ./nix/test-cluster#cluster -- bench-storage
nix run ./nix/test-cluster#cluster -- fresh-matrix

17
flake.lock generated
View file

@ -38,22 +38,6 @@
"type": "github"
}
},
"nix-nos": {
"inputs": {
"nixpkgs": [
"nixpkgs"
]
},
"locked": {
"path": "./nix-nos",
"type": "path"
},
"original": {
"path": "./nix-nos",
"type": "path"
},
"parent": []
},
"nixpkgs": {
"locked": {
"lastModified": 1765186076,
@ -74,7 +58,6 @@
"inputs": {
"disko": "disko",
"flake-utils": "flake-utils",
"nix-nos": "nix-nos",
"nixpkgs": "nixpkgs",
"rust-overlay": "rust-overlay",
"systems": "systems_2"

280
flake.nix
View file

@ -1,5 +1,5 @@
{
description = "PhotonCloud - Japanese Cloud Platform";
description = "UltraCloud - Japanese Cloud Platform";
# ============================================================================
# INPUTS: External dependencies
@ -23,18 +23,14 @@
inputs.nixpkgs.follows = "nixpkgs";
};
# Nix-NOS generic network operating system modules
nix-nos = {
url = "path:./nix-nos";
inputs.nixpkgs.follows = "nixpkgs";
};
};
# ============================================================================
# OUTPUTS: What this flake provides
# ============================================================================
outputs = { self, nixpkgs, rust-overlay, flake-utils, disko, nix-nos, systems ? null }:
flake-utils.lib.eachDefaultSystem (system:
outputs = { self, nixpkgs, rust-overlay, flake-utils, disko, systems ? null }:
flake-utils.lib.eachDefaultSystem
(system:
let
# Apply rust-overlay to get rust-bin attribute
overlays = [ (import rust-overlay) ];
@ -219,11 +215,13 @@
in
rel == ""
|| builtins.elem rel [ "flake.nix" "flake.lock" ]
|| builtins.any (root:
|| builtins.any
(root:
rel == root
|| pkgs.lib.hasPrefix "${root}/" rel
|| pkgs.lib.hasPrefix "${rel}/" root
) sourceRoots;
)
sourceRoots;
};
flakeBundleSrc = pkgs.lib.cleanSourceWith {
@ -249,7 +247,6 @@
"mtls-agent"
"nightlight"
"nix"
"nix-nos"
"plasmavmc"
"prismnet"
];
@ -283,11 +280,6 @@
inputs.nixpkgs.follows = "nixpkgs";
};
# Nix-NOS generic network operating system modules
nix-nos = {
url = "path:./nix-nos";
inputs.nixpkgs.follows = "nixpkgs";
};
};
'';
@ -312,10 +304,6 @@
inputs.nixpkgs.follows = "nixpkgs";
};
nix-nos = {
url = "path:./nix-nos";
inputs.nixpkgs.follows = "nixpkgs";
};
};
'';
@ -342,17 +330,12 @@
inputs.nixpkgs.follows = "nixpkgs";
};
# Nix-NOS generic network operating system modules
nix-nos = {
url = "path:./nix-nos";
inputs.nixpkgs.follows = "nixpkgs";
};
};
# ============================================================================
# OUTPUTS: What this flake provides
# ============================================================================
outputs = { self, nixpkgs, rust-overlay, flake-utils, disko, nix-nos, systems ? null }:
outputs = { self, nixpkgs, rust-overlay, flake-utils, disko, systems ? null }:
'';
bundledHeaderBlock = ''
@ -379,21 +362,17 @@
inputs.nixpkgs.follows = "nixpkgs";
};
nix-nos = {
url = "path:./nix-nos";
inputs.nixpkgs.follows = "nixpkgs";
};
};
# ============================================================================
# OUTPUTS: What this flake provides
# ============================================================================
outputs = { self, nixpkgs, rust-overlay, flake-utils, disko, nix-nos, systems ? null }:
outputs = { self, nixpkgs, rust-overlay, flake-utils, disko, systems ? null }:
'';
bundledFlakeNix =
pkgs.writeText
"plasmacloud-bundled-flake.nix"
"ultracloud-bundled-flake.nix"
(
builtins.replaceStrings
[ flakeHeaderBlock ]
@ -402,7 +381,7 @@
);
bundledFlakeHeaderFile =
pkgs.writeText "plasmacloud-bundled-flake-header" bundledHeaderBlock;
pkgs.writeText "ultracloud-bundled-flake-header" bundledHeaderBlock;
baseFlakeLock = builtins.fromJSON (builtins.readFile ./flake.lock);
@ -468,11 +447,12 @@
};
bundledFlakeLockFile =
pkgs.writeText "plasmacloud-bundled-flake.lock" (builtins.toJSON bundledFlakeLock);
pkgs.writeText "ultracloud-bundled-flake.lock" (builtins.toJSON bundledFlakeLock);
inBundledEval = builtins.pathExists ./.bundle-eval-marker;
bundledFlakeRootDrv = pkgs.runCommand "plasmacloud-bundled-flake-root" {
bundledFlakeRootDrv = pkgs.runCommand "ultracloud-bundled-flake-root"
{
nativeBuildInputs = [
pkgs.coreutils
pkgs.python3
@ -505,7 +485,7 @@
r" # ============================================================================\n"
r" # OUTPUTS: What this flake provides\n"
r" # ============================================================================\n"
r" outputs = \{ self, nixpkgs, rust-overlay, flake-utils, disko, nix-nos, systems \? null \}:",
r" outputs = \{ self, nixpkgs, rust-overlay, flake-utils, disko, systems \? null \}:",
re.S,
)
rewritten, count = pattern.subn(header.rstrip("\n"), source, count=1)
@ -521,14 +501,15 @@
else
builtins.path {
path = bundledFlakeRootDrv;
name = "plasmacloud-bundled-flake-root-src";
name = "ultracloud-bundled-flake-root-src";
};
bundledFlakeRootNarHashFile =
if inBundledEval then
null
else
pkgs.runCommand "plasmacloud-bundled-flake-root-narhash" {
pkgs.runCommand "ultracloud-bundled-flake-root-narhash"
{
nativeBuildInputs = [ pkgs.nix ];
} ''
${pkgs.nix}/bin/nix \
@ -598,7 +579,7 @@
# Metadata for the package
meta = with pkgs.lib; {
description = description;
homepage = "https://github.com/yourorg/plasmacloud";
homepage = "https://github.com/yourorg/ultracloud";
license = licenses.asl20; # Apache 2.0
maintainers = [ ];
platforms = platforms.linux;
@ -640,7 +621,7 @@
meta = with pkgs.lib; {
description = description;
homepage = "https://github.com/yourorg/plasmacloud";
homepage = "https://github.com/yourorg/ultracloud";
license = licenses.asl20;
maintainers = [ ];
platforms = platforms.linux;
@ -856,7 +837,7 @@
name = "apigateway-server";
workspaceSubdir = "apigateway";
mainCrate = "apigateway-server";
description = "API Gateway for PlasmaCloud services";
description = "API Gateway for UltraCloud services";
};
# --------------------------------------------------------------------
@ -883,31 +864,32 @@
name = "deployer-ctl";
workspaceSubdir = "deployer";
mainCrate = "deployer-ctl";
description = "Declarative control utility for PhotonCloud deployer state";
description = "Declarative control utility for UltraCloud deployer state";
};
node-agent = buildRustWorkspace {
name = "node-agent";
workspaceSubdir = "deployer";
mainCrate = "node-agent";
description = "Node-local runtime agent for PhotonCloud scheduled services";
description = "Node-local runtime agent for UltraCloud scheduled services";
};
nix-agent = buildRustWorkspace {
name = "nix-agent";
workspaceSubdir = "deployer";
mainCrate = "nix-agent";
description = "Node-local NixOS reconciliation agent for PhotonCloud hosts";
description = "Node-local NixOS reconciliation agent for UltraCloud hosts";
};
plasmacloud-reconciler = buildRustWorkspace {
name = "plasmacloud-reconciler";
ultracloud-reconciler = buildRustWorkspace {
name = "ultracloud-reconciler";
workspaceSubdir = "deployer";
mainCrate = "plasmacloud-reconciler";
mainCrate = "ultracloud-reconciler";
description = "Declarative reconciler for host rollouts and published resources";
};
plasmacloudFlakeBundle = pkgs.runCommand "plasmacloud-flake-bundle.tar.gz" {
ultracloudFlakeBundle = pkgs.runCommand "ultracloud-flake-bundle.tar.gz"
{
nativeBuildInputs = [
pkgs.coreutils
pkgs.gnutar
@ -936,7 +918,7 @@
name = "fleet-scheduler";
workspaceSubdir = "deployer";
mainCrate = "fleet-scheduler";
description = "Label-aware service scheduler for PhotonCloud bare-metal fleets";
description = "Label-aware service scheduler for UltraCloud bare-metal fleets";
};
deployer-workspace = buildRustWorkspaceBundle {
@ -947,23 +929,23 @@
"deployer-ctl"
"node-agent"
"nix-agent"
"plasmacloud-reconciler"
"ultracloud-reconciler"
"fleet-scheduler"
];
description = "Combined deployer workspace build for cluster images and checks";
};
vmClusterDeployerState =
self.nixosConfigurations.node01.config.system.build.plasmacloudDeployerClusterState;
self.nixosConfigurations.node01.config.system.build.ultracloudDeployerClusterState;
vmClusterFlakeBundle = self.packages.${system}.plasmacloudFlakeBundle;
vmClusterFlakeBundle = self.packages.${system}.ultracloudFlakeBundle;
vmSmokeBundledTargetToplevel = bundledVmSmokeTargetToplevel;
# --------------------------------------------------------------------
# Default package: Build all servers
# --------------------------------------------------------------------
default = pkgs.symlinkJoin {
name = "photoncloud-all";
name = "ultracloud-all";
paths = [
self.packages.${system}.chainfire-server
self.packages.${system}.flaredb-server
@ -1047,8 +1029,8 @@
drv = self.packages.${system}.deployer-ctl;
};
plasmacloud-reconciler = flake-utils.lib.mkApp {
drv = self.packages.${system}.plasmacloud-reconciler;
ultracloud-reconciler = flake-utils.lib.mkApp {
drv = self.packages.${system}.ultracloud-reconciler;
};
nix-agent = flake-utils.lib.mkApp {
@ -1065,7 +1047,8 @@
};
checks = {
workspace-source-roots-audit = pkgs.runCommand "workspace-source-roots-audit" {
workspace-source-roots-audit = pkgs.runCommand "workspace-source-roots-audit"
{
nativeBuildInputs = [ pkgs.python3 ];
} ''
${pkgs.python3}/bin/python - <<'PY' ${./.}
@ -1189,16 +1172,15 @@
first-boot-topology-vm-smoke = pkgs.testers.runNixOSTest (
import ./nix/tests/first-boot-topology-vm-smoke.nix {
inherit pkgs;
photoncloudPackages = self.packages.${system};
photoncloudModule = self.nixosModules.default;
nixNosModule = nix-nos.nixosModules.default;
ultracloudPackages = self.packages.${system};
ultracloudModule = self.nixosModules.default;
}
);
deployer-vm-smoke = pkgs.testers.runNixOSTest (
import ./nix/tests/deployer-vm-smoke.nix {
inherit pkgs;
photoncloudPackages = self.packages.${system};
ultracloudPackages = self.packages.${system};
smokeTargetToplevel = self.packages.${system}.vmSmokeBundledTargetToplevel;
}
);
@ -1206,7 +1188,7 @@
deployer-vm-rollback = pkgs.testers.runNixOSTest (
import ./nix/tests/deployer-vm-smoke.nix {
inherit pkgs;
photoncloudPackages = self.packages.${system};
ultracloudPackages = self.packages.${system};
smokeTargetToplevel = self.packages.${system}.vmSmokeBundledTargetToplevel;
desiredSystemOverrides = {
health_check_command = [ "false" ];
@ -1221,40 +1203,37 @@
fiberlb-native-bgp-vm-smoke = pkgs.testers.runNixOSTest (
import ./nix/tests/fiberlb-native-bgp-vm-smoke.nix {
inherit pkgs;
photoncloudPackages = self.packages.${system};
photoncloudModule = self.nixosModules.default;
nixNosModule = nix-nos.nixosModules.default;
ultracloudPackages = self.packages.${system};
ultracloudModule = self.nixosModules.default;
}
);
fiberlb-native-bgp-multipath-vm-smoke = pkgs.testers.runNixOSTest (
import ./nix/tests/fiberlb-native-bgp-multipath-vm-smoke.nix {
inherit pkgs;
photoncloudPackages = self.packages.${system};
photoncloudModule = self.nixosModules.default;
nixNosModule = nix-nos.nixosModules.default;
ultracloudPackages = self.packages.${system};
ultracloudModule = self.nixosModules.default;
}
);
fiberlb-native-bgp-interop-vm-smoke = pkgs.testers.runNixOSTest (
import ./nix/tests/fiberlb-native-bgp-interop-vm-smoke.nix {
inherit pkgs;
photoncloudPackages = self.packages.${system};
photoncloudModule = self.nixosModules.default;
nixNosModule = nix-nos.nixosModules.default;
ultracloudPackages = self.packages.${system};
ultracloudModule = self.nixosModules.default;
}
);
fiberlb-native-bgp-ecmp-drain-vm-smoke = pkgs.testers.runNixOSTest (
import ./nix/tests/fiberlb-native-bgp-ecmp-drain-vm-smoke.nix {
inherit pkgs;
photoncloudPackages = self.packages.${system};
photoncloudModule = self.nixosModules.default;
nixNosModule = nix-nos.nixosModules.default;
ultracloudPackages = self.packages.${system};
ultracloudModule = self.nixosModules.default;
}
);
deployer-bootstrap-e2e = pkgs.runCommand "deployer-bootstrap-e2e" {
deployer-bootstrap-e2e = pkgs.runCommand "deployer-bootstrap-e2e"
{
nativeBuildInputs = with pkgs; [
bash
coreutils
@ -1266,12 +1245,12 @@
procps
python3
];
PHOTONCLOUD_E2E_IN_NIX = "1";
PHOTONCLOUD_CHAINFIRE_SERVER_BIN =
ULTRACLOUD_E2E_IN_NIX = "1";
ULTRACLOUD_CHAINFIRE_SERVER_BIN =
"${self.packages.${system}.chainfire-server}/bin/chainfire";
PHOTONCLOUD_DEPLOYER_SERVER_BIN =
ULTRACLOUD_DEPLOYER_SERVER_BIN =
"${self.packages.${system}.deployer-workspace}/bin/deployer-server";
PHOTONCLOUD_DEPLOYER_CTL_BIN =
ULTRACLOUD_DEPLOYER_CTL_BIN =
"${self.packages.${system}.deployer-workspace}/bin/deployer-ctl";
} ''
export HOME="$TMPDIR/home"
@ -1291,7 +1270,8 @@
touch "$out"
'';
host-lifecycle-e2e = pkgs.runCommand "host-lifecycle-e2e" {
host-lifecycle-e2e = pkgs.runCommand "host-lifecycle-e2e"
{
nativeBuildInputs = with pkgs; [
bash
coreutils
@ -1303,13 +1283,13 @@
procps
python3
];
PHOTONCLOUD_E2E_IN_NIX = "1";
PHOTONCLOUD_CHAINFIRE_SERVER_BIN =
ULTRACLOUD_E2E_IN_NIX = "1";
ULTRACLOUD_CHAINFIRE_SERVER_BIN =
"${self.packages.${system}.chainfire-server}/bin/chainfire";
PHOTONCLOUD_DEPLOYER_CTL_BIN =
ULTRACLOUD_DEPLOYER_CTL_BIN =
"${self.packages.${system}.deployer-workspace}/bin/deployer-ctl";
PHOTONCLOUD_PLASMACLOUD_RECONCILER_BIN =
"${self.packages.${system}.deployer-workspace}/bin/plasmacloud-reconciler";
ULTRACLOUD_RECONCILER_BIN =
"${self.packages.${system}.deployer-workspace}/bin/ultracloud-reconciler";
} ''
export HOME="$TMPDIR/home"
mkdir -p "$HOME"
@ -1328,7 +1308,8 @@
touch "$out"
'';
fleet-scheduler-e2e = pkgs.runCommand "fleet-scheduler-e2e" {
fleet-scheduler-e2e = pkgs.runCommand "fleet-scheduler-e2e"
{
nativeBuildInputs = with pkgs; [
bash
coreutils
@ -1340,14 +1321,14 @@
procps
python3
];
PHOTONCLOUD_E2E_IN_NIX = "1";
PHOTONCLOUD_CHAINFIRE_SERVER_BIN =
ULTRACLOUD_E2E_IN_NIX = "1";
ULTRACLOUD_CHAINFIRE_SERVER_BIN =
"${self.packages.${system}.chainfire-server}/bin/chainfire";
PHOTONCLOUD_DEPLOYER_CTL_BIN =
ULTRACLOUD_DEPLOYER_CTL_BIN =
"${self.packages.${system}.deployer-workspace}/bin/deployer-ctl";
PHOTONCLOUD_NODE_AGENT_BIN =
ULTRACLOUD_NODE_AGENT_BIN =
"${self.packages.${system}.deployer-workspace}/bin/node-agent";
PHOTONCLOUD_FLEET_SCHEDULER_BIN =
ULTRACLOUD_FLEET_SCHEDULER_BIN =
"${self.packages.${system}.deployer-workspace}/bin/fleet-scheduler";
} ''
export HOME="$TMPDIR/home"
@ -1374,13 +1355,30 @@
# ========================================================================
nixosModules.default = import ./nix/modules;
nixosModules.photoncloud = import ./nix/modules;
nixosModules.plasmacloud = import ./nix/modules; # backwards compatibility
nixosModules.ultracloud = import ./nix/modules;
# ========================================================================
# NIXOS CONFIGURATIONS: Netboot images for bare-metal provisioning
# ========================================================================
nixosConfigurations = {
nixosConfigurations =
let
vmClusterLib = import ./nix/nodes/vm-cluster/lib.nix { lib = nixpkgs.lib; };
mkVmClusterSystem = nodeName:
nixpkgs.lib.nixosSystem {
system = "x86_64-linux";
modules = [
disko.nixosModules.disko
vmClusterLib.nodeConfigurationPaths.${nodeName}
self.nixosModules.default
(vmClusterLib.mkBootstrapServicesModule {
inherit self nodeName;
enableDeployer = nodeName == vmClusterLib.bootstrapNodeName;
})
{ nixpkgs.overlays = [ self.overlays.default ]; }
];
};
in
{
# Control Plane netboot image (all 8 services)
netboot-control-plane = nixpkgs.lib.nixosSystem {
system = "x86_64-linux";
@ -1411,12 +1409,11 @@
modules = [ ./nix/images/deployer-vm-smoke-target.nix ];
};
# PlasmaCloud ISO (T061.S5 - bootable ISO with cluster-config embedding)
plasmacloud-iso = nixpkgs.lib.nixosSystem {
# UltraCloud ISO (T061.S5 - bootable ISO with cluster-config embedding)
ultracloud-iso = nixpkgs.lib.nixosSystem {
system = "x86_64-linux";
modules = [
./nix/iso/plasmacloud-iso.nix
nix-nos.nixosModules.default
./nix/iso/ultracloud-iso.nix
self.nixosModules.default
{ nixpkgs.overlays = [ self.overlays.default ]; }
];
@ -1433,93 +1430,14 @@
{ nixpkgs.overlays = [ self.overlays.default ]; }
];
};
node01 = nixpkgs.lib.nixosSystem {
system = "x86_64-linux";
modules = [
disko.nixosModules.disko
nix-nos.nixosModules.default
./nix/nodes/vm-cluster/node01/configuration.nix
self.nixosModules.default
({ pkgs, ... }: {
services.deployer = {
enable = true;
bindAddr = "0.0.0.0:8088";
chainfireEndpoints = [ "http://192.168.100.11:2379" ];
clusterId = "plasmacloud-vm-cluster";
requireChainfire = true;
allowUnknownNodes = false;
allowUnauthenticated = false;
bootstrapToken = "vm-cluster-bootstrap-token";
adminToken = "vm-cluster-admin-token";
bootstrapFlakeBundle = pkgs.plasmacloudFlakeBundle;
seedClusterState = true;
};
services.nix-agent = {
enable = true;
chainfireEndpoint = "http://192.168.100.11:2379";
clusterId = "plasmacloud-vm-cluster";
nodeId = "node01";
flakeRoot = self.outPath;
intervalSecs = 30;
apply = true;
};
})
{ nixpkgs.overlays = [ self.overlays.default ]; }
];
};
node02 = nixpkgs.lib.nixosSystem {
system = "x86_64-linux";
modules = [
disko.nixosModules.disko
nix-nos.nixosModules.default
./nix/nodes/vm-cluster/node02/configuration.nix
self.nixosModules.default
{
services.nix-agent = {
enable = true;
chainfireEndpoint = "http://192.168.100.11:2379";
clusterId = "plasmacloud-vm-cluster";
nodeId = "node02";
flakeRoot = self.outPath;
intervalSecs = 30;
apply = true;
};
}
{ nixpkgs.overlays = [ self.overlays.default ]; }
];
};
node03 = nixpkgs.lib.nixosSystem {
system = "x86_64-linux";
modules = [
disko.nixosModules.disko
nix-nos.nixosModules.default
./nix/nodes/vm-cluster/node03/configuration.nix
self.nixosModules.default
{
services.nix-agent = {
enable = true;
chainfireEndpoint = "http://192.168.100.11:2379";
clusterId = "plasmacloud-vm-cluster";
nodeId = "node03";
flakeRoot = self.outPath;
intervalSecs = 30;
apply = true;
};
}
{ nixpkgs.overlays = [ self.overlays.default ]; }
];
};
};
// nixpkgs.lib.genAttrs vmClusterLib.controlPlaneNodeNames mkVmClusterSystem;
# ========================================================================
# OVERLAY: Provides PhotonCloud packages to nixpkgs
# OVERLAY: Provides UltraCloud packages to nixpkgs
# ========================================================================
# Usage in NixOS configuration:
# nixpkgs.overlays = [ inputs.photoncloud.overlays.default ];
# nixpkgs.overlays = [ inputs.ultracloud.overlays.default ];
overlays.default = final: prev: {
chainfire-server = self.packages.${final.system}.chainfire-server;
flaredb-server = self.packages.${final.system}.flaredb-server;
@ -1539,8 +1457,8 @@
deployer-workspace = self.packages.${final.system}.deployer-workspace;
deployer-server = self.packages.${final.system}.deployer-workspace;
deployer-ctl = self.packages.${final.system}.deployer-workspace;
plasmacloud-reconciler = self.packages.${final.system}.deployer-workspace;
plasmacloudFlakeBundle = self.packages.${final.system}.plasmacloudFlakeBundle;
ultracloud-reconciler = self.packages.${final.system}.deployer-workspace;
ultracloudFlakeBundle = self.packages.${final.system}.ultracloudFlakeBundle;
nix-agent = self.packages.${final.system}.deployer-workspace;
node-agent = self.packages.${final.system}.deployer-workspace;
fleet-scheduler = self.packages.${final.system}.deployer-workspace;

View file

@ -1,13 +1,13 @@
//! k8shost Controllers
//!
//! This binary runs the PlasmaCloud integration controllers for k8shost:
//! This binary runs the UltraCloud integration controllers for k8shost:
//! - FiberLB Controller: Manages LoadBalancer services
//! - FlashDNS Controller: Manages Service DNS records
//! - IAM Webhook: Handles TokenReview authentication
//!
//! Each controller follows the watch-reconcile pattern:
//! 1. Watch k8s API for resource changes
//! 2. Reconcile desired state with PlasmaCloud components
//! 2. Reconcile desired state with UltraCloud components
//! 3. Update k8s resource status
use anyhow::Result;

View file

@ -26,8 +26,8 @@ use tonic::{transport::Channel, Code};
use tracing::{debug, info, warn};
const CONTROLLER_PRINCIPAL_ID: &str = "k8shost-controller";
const LB_ID_ANNOTATION: &str = "fiberlb.plasmacloud.io/lb-id";
const POOL_ID_ANNOTATION: &str = "fiberlb.plasmacloud.io/pool-id";
const LB_ID_ANNOTATION: &str = "fiberlb.ultracloud.io/lb-id";
const POOL_ID_ANNOTATION: &str = "fiberlb.ultracloud.io/pool-id";
/// FiberLB controller for managing LoadBalancer service VIPs
pub struct FiberLbController {

View file

@ -25,8 +25,8 @@ use tracing::{debug, info, warn};
const CLUSTER_DOMAIN: &str = "cluster.local";
const DNS_RECORD_TTL: u32 = 60;
const CONTROLLER_PRINCIPAL_ID: &str = "k8shost-controller";
const RECORD_ID_ANNOTATION: &str = "flashdns.plasmacloud.io/record-id";
const ZONE_ID_ANNOTATION: &str = "flashdns.plasmacloud.io/zone-id";
const RECORD_ID_ANNOTATION: &str = "flashdns.ultracloud.io/record-id";
const ZONE_ID_ANNOTATION: &str = "flashdns.ultracloud.io/zone-id";
/// FlashDNS controller for managing cluster.local DNS records
pub struct FlashDnsController {
@ -365,8 +365,8 @@ impl FlashDnsController {
name: CLUSTER_DOMAIN.to_string(),
org_id: tenant.org_id.clone(),
project_id: tenant.project_id.clone(),
primary_ns: "ns1.plasmacloud.io".to_string(),
admin_email: "admin@plasmacloud.io".to_string(),
primary_ns: "ns1.ultracloud.io".to_string(),
admin_email: "admin@ultracloud.io".to_string(),
},
auth_token,
))

View file

@ -35,7 +35,7 @@ use tracing_subscriber::EnvFilter;
/// k8shost API Server
#[derive(Parser, Debug)]
#[command(name = "k8shost-server")]
#[command(about = "Kubernetes API server for PlasmaCloud's k8shost component")]
#[command(about = "Kubernetes API server for UltraCloud's k8shost component")]
struct Args {
/// Configuration file path
#[arg(short, long, default_value = "k8shost.toml")]

View file

@ -25,9 +25,9 @@ const ACTION_DEPLOYMENT_LIST: &str = "k8s:deployments:list";
const ACTION_DEPLOYMENT_UPDATE: &str = "k8s:deployments:update";
const ACTION_DEPLOYMENT_DELETE: &str = "k8s:deployments:delete";
pub(crate) const DEPLOYMENT_NAME_ANNOTATION: &str = "k8shost.photoncloud.io/deployment-name";
pub(crate) const DEPLOYMENT_UID_ANNOTATION: &str = "k8shost.photoncloud.io/deployment-uid";
pub(crate) const TEMPLATE_HASH_ANNOTATION: &str = "k8shost.photoncloud.io/template-hash";
pub(crate) const DEPLOYMENT_NAME_ANNOTATION: &str = "k8shost.ultracloud.io/deployment-name";
pub(crate) const DEPLOYMENT_UID_ANNOTATION: &str = "k8shost.ultracloud.io/deployment-uid";
pub(crate) const TEMPLATE_HASH_ANNOTATION: &str = "k8shost.ultracloud.io/template-hash";
#[derive(Clone)]
pub struct DeploymentServiceImpl {

View file

@ -27,7 +27,7 @@ pub struct ObjectMeta {
#[serde(default, skip_serializing_if = "HashMap::is_empty")]
pub annotations: HashMap<String, String>,
// Multi-tenant fields for PlasmaCloud integration
// Multi-tenant fields for UltraCloud integration
#[serde(skip_serializing_if = "Option::is_none")]
pub org_id: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]

View file

@ -10,7 +10,7 @@ use serde::{Deserialize, Serialize};
use tokio::sync::RwLock;
use tracing::{info, warn};
const PHOTON_PREFIX: &str = "photoncloud";
const PHOTON_PREFIX: &str = "ultracloud";
const CACHE_TTL: Duration = Duration::from_secs(30);
const POLICY_CACHE_TTL: Duration = Duration::from_secs(30);

View file

@ -404,7 +404,7 @@ mod tests {
"-days",
"3650",
"-subj",
"/CN=PhotonCloud Test CA",
"/CN=UltraCloud Test CA",
"-out",
ca_pem.to_string_lossy().as_ref(),
]);
@ -464,7 +464,7 @@ mod tests {
"-key",
client_key.to_string_lossy().as_ref(),
"-subj",
"/CN=photoncloud-test-client",
"/CN=ultracloud-test-client",
"-out",
client_csr.to_string_lossy().as_ref(),
]);

View file

@ -2,7 +2,6 @@
{
imports = [
./topology.nix
./network/interfaces.nix
./network/vlans.nix
./bgp/default.nix
@ -10,7 +9,7 @@
];
options.nix-nos = {
enable = lib.mkEnableOption "Nix-NOS network operating system modules";
enable = lib.mkEnableOption "Nix-NOS network primitive modules";
version = lib.mkOption {
type = lib.types.str;

View file

@ -1,68 +0,0 @@
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.nix-nos;
clusterConfigLib = import ../lib/cluster-config-lib.nix { inherit lib; };
nodeType = clusterConfigLib.mkNodeType types;
# Cluster definition type
clusterType = types.submodule {
options = {
name = mkOption {
type = types.str;
default = "plasmacloud-cluster";
description = "Cluster name";
};
nodes = mkOption {
type = types.attrsOf nodeType;
default = {};
description = "Map of node names to their configurations";
example = literalExpression ''
{
"node01" = {
role = "control-plane";
ip = "10.0.1.10";
services = [ "chainfire" "flaredb" ];
};
}
'';
};
bootstrapNode = mkOption {
type = types.nullOr types.str;
default = null;
description = "Name of the bootstrap node (first control-plane node if null)";
};
};
};
in {
options.nix-nos = {
clusters = mkOption {
type = types.attrsOf clusterType;
default = {};
description = "Map of cluster names to their configurations";
};
# Helper function to generate cluster-config.json for a specific node
generateClusterConfig = mkOption {
type = types.functionTo types.attrs;
default = { hostname, clusterName ? "plasmacloud" }:
let
cluster = cfg.clusters.${clusterName} or (throw "Cluster ${clusterName} not found");
in clusterConfigLib.mkClusterConfig {
inherit cluster hostname;
bootstrapNodeName =
if cluster.bootstrapNode != null
then cluster.bootstrapNode
else null;
};
description = "Function to generate cluster-config.json for a specific hostname";
};
};
config = mkIf cfg.enable { };
}

82
nix/ci/flake.lock generated
View file

@ -3,7 +3,7 @@
"disko": {
"inputs": {
"nixpkgs": [
"photoncloud",
"ultracloud",
"nixpkgs"
]
},
@ -57,32 +57,13 @@
"type": "github"
}
},
"nix-nos": {
"inputs": {
"nixpkgs": [
"photoncloud",
"nixpkgs"
]
},
"locked": {
"path": "./nix-nos",
"type": "path"
},
"original": {
"path": "./nix-nos",
"type": "path"
},
"parent": [
"photoncloud"
]
},
"nixpkgs": {
"locked": {
"lastModified": 1765186076,
"narHash": "sha256-hM20uyap1a0M9d344I692r+ik4gTMyj60cQWO+hAYP8=",
"lastModified": 1775036866,
"narHash": "sha256-ZojAnPuCdy657PbTq5V0Y+AHKhZAIwSIT2cb8UgAz/U=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "addf7cf5f383a3101ecfba091b98d0a1263dc9b8",
"rev": "6201e203d09599479a3b3450ed24fa81537ebc4e",
"type": "github"
},
"original": {
@ -108,46 +89,26 @@
"type": "github"
}
},
"photoncloud": {
"inputs": {
"disko": "disko",
"flake-utils": "flake-utils_2",
"nix-nos": "nix-nos",
"nixpkgs": "nixpkgs_2",
"rust-overlay": "rust-overlay",
"systems": "systems_3"
},
"locked": {
"path": "../..",
"type": "path"
},
"original": {
"path": "../..",
"type": "path"
},
"parent": []
},
"root": {
"inputs": {
"flake-utils": "flake-utils",
"nixpkgs": "nixpkgs",
"photoncloud": "photoncloud",
"rust-overlay": "rust-overlay_2"
"rust-overlay": "rust-overlay",
"ultracloud": "ultracloud"
}
},
"rust-overlay": {
"inputs": {
"nixpkgs": [
"photoncloud",
"nixpkgs"
]
},
"locked": {
"lastModified": 1765465581,
"narHash": "sha256-fCXT0aZXmTalM3NPCTedVs9xb0egBG5BOZkcrYo5PGE=",
"lastModified": 1775272153,
"narHash": "sha256-FwYb64ysv8J2TxaqsYYcDyHAHBUEaQlriPMWPMi1K7M=",
"owner": "oxalica",
"repo": "rust-overlay",
"rev": "99cc5667eece98bb35dcf35f7e511031a8b7a125",
"rev": "740fb0203b2852917b909a72b948d34d0b171ec0",
"type": "github"
},
"original": {
@ -159,15 +120,16 @@
"rust-overlay_2": {
"inputs": {
"nixpkgs": [
"ultracloud",
"nixpkgs"
]
},
"locked": {
"lastModified": 1765507345,
"narHash": "sha256-fq34mBLvAgv93EuZjGp7cVV633pxnph9AVuB/Ql5y5Q=",
"lastModified": 1765465581,
"narHash": "sha256-fCXT0aZXmTalM3NPCTedVs9xb0egBG5BOZkcrYo5PGE=",
"owner": "oxalica",
"repo": "rust-overlay",
"rev": "a9471b23bf656d69ceb2d5ddccdc5082d51fc0e3",
"rev": "99cc5667eece98bb35dcf35f7e511031a8b7a125",
"type": "github"
},
"original": {
@ -219,6 +181,24 @@
"id": "systems",
"type": "indirect"
}
},
"ultracloud": {
"inputs": {
"disko": "disko",
"flake-utils": "flake-utils_2",
"nixpkgs": "nixpkgs_2",
"rust-overlay": "rust-overlay_2",
"systems": "systems_3"
},
"locked": {
"path": "../..",
"type": "path"
},
"original": {
"path": "../..",
"type": "path"
},
"parent": []
}
},
"root": "root",

View file

@ -1,11 +1,11 @@
{
description = "PhotonCloud local CI gates (Nix-first, CI-provider-agnostic)";
description = "UltraCloud local CI gates (Nix-first, CI-provider-agnostic)";
inputs = {
nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable";
flake-utils.url = "github:numtide/flake-utils";
photoncloud.url = "path:../..";
ultracloud.url = "path:../..";
rust-overlay = {
url = "github:oxalica/rust-overlay";
@ -13,7 +13,7 @@
};
};
outputs = { self, nixpkgs, flake-utils, photoncloud, rust-overlay }:
outputs = { self, nixpkgs, flake-utils, ultracloud, rust-overlay }:
flake-utils.lib.eachDefaultSystem (system:
let
overlays = [ (import rust-overlay) ];
@ -32,7 +32,7 @@
supportedWorkspaces = pkgs.lib.concatStringsSep ", " wsList;
gate = pkgs.writeShellApplication {
name = "photoncloud-gate";
name = "ultracloud-gate";
runtimeInputs = with pkgs; [
bash
@ -59,10 +59,10 @@
usage() {
cat <<'USAGE'
PhotonCloud local CI gates (provider-agnostic)
UltraCloud local CI gates (provider-agnostic)
Usage:
photoncloud-gate [--tier 0|1|2] [--workspace <name>] [--shared-crates] [--shared-crate <name>] [--no-logs] [--fix]
ultracloud-gate [--tier 0|1|2] [--workspace <name>] [--shared-crates] [--shared-crate <name>] [--no-logs] [--fix]
Tiers:
0: fmt + clippy + unit tests (lib) (fast, stable default)
@ -374,19 +374,19 @@
packages.gate-ci = gate;
# Checks are minimal and mirror tier0 (provider-agnostic).
checks.gate-tier0 = pkgs.runCommand "photoncloud-gate-tier0" { } ''
checks.gate-tier0 = pkgs.runCommand "ultracloud-gate-tier0" { } ''
mkdir -p $out
${gate}/bin/photoncloud-gate --tier 0 --no-logs
${gate}/bin/ultracloud-gate --tier 0 --no-logs
touch $out/ok
'';
checks.deployer-vm-smoke = photoncloud.checks.${system}.deployer-vm-smoke;
checks.deployer-vm-rollback = photoncloud.checks.${system}.deployer-vm-rollback;
checks.deployer-bootstrap-e2e = photoncloud.checks.${system}.deployer-bootstrap-e2e;
checks.host-lifecycle-e2e = photoncloud.checks.${system}.host-lifecycle-e2e;
checks.fleet-scheduler-e2e = photoncloud.checks.${system}.fleet-scheduler-e2e;
checks.deployer-vm-smoke = ultracloud.checks.${system}.deployer-vm-smoke;
checks.deployer-vm-rollback = ultracloud.checks.${system}.deployer-vm-rollback;
checks.deployer-bootstrap-e2e = ultracloud.checks.${system}.deployer-bootstrap-e2e;
checks.host-lifecycle-e2e = ultracloud.checks.${system}.host-lifecycle-e2e;
checks.fleet-scheduler-e2e = ultracloud.checks.${system}.fleet-scheduler-e2e;
devShells.default = pkgs.mkShell {
name = "photoncloud-ci-dev";
name = "ultracloud-ci-dev";
buildInputs = with pkgs; [
rustToolchain
protobuf

View file

@ -4,7 +4,6 @@
"flake.lock",
"shell.nix",
"nix/**",
"nix-nos/**",
".github/workflows/nix.yml",
"Cargo.toml",
"Cargo.lock",
@ -145,7 +144,7 @@
"deployer-ctl",
"node-agent",
"nix-agent",
"plasmacloud-reconciler",
"ultracloud-reconciler",
"fleet-scheduler"
]
},

View file

@ -3,13 +3,13 @@
{
imports = [
./netboot-base.nix
../modules # Import PlasmaCloud service modules
../modules # Import UltraCloud service modules
];
# ============================================================================
# ALL-IN-ONE PROFILE
# ============================================================================
# This profile includes all 8 PlasmaCloud services for a single-node deployment:
# This profile includes all 8 UltraCloud services for a single-node deployment:
# - Chainfire: Distributed configuration and coordination
# - FlareDB: Time-series metrics and events database
# - IAM: Identity and access management

View file

@ -3,13 +3,13 @@
{
imports = [
./netboot-base.nix
../modules # Import PlasmaCloud service modules
../modules # Import UltraCloud service modules
];
# ============================================================================
# CONTROL PLANE PROFILE
# ============================================================================
# This profile includes all 8 PlasmaCloud services for a control plane node:
# This profile includes all 8 UltraCloud services for a control plane node:
# - Chainfire: Distributed configuration and coordination
# - FlareDB: Time-series metrics and events database
# - IAM: Identity and access management

View file

@ -3,7 +3,7 @@
{
imports = [
./netboot-base.nix
../modules # Import PlasmaCloud service modules
../modules # Import UltraCloud service modules
];
# ============================================================================

View file

@ -1,4 +1,4 @@
# PlasmaCloud Bootstrap ISO
# UltraCloud Bootstrap ISO
# Minimal ISO with DHCP + Phone Home to Deployer + Auto-Install
# For VM cluster deployment: boots, phones home, partitions disk, installs NixOS
@ -10,15 +10,15 @@
];
# ISO metadata
image.fileName = "ultracloud-bootstrap.iso";
isoImage = {
isoName = "plasmacloud-bootstrap.iso";
makeEfiBootable = true;
makeUsbBootable = true;
};
# Embed the repository into the ISO for offline flake install
isoImage.contents = [
{ source = ../../.; target = "/opt/plasmacloud-src"; }
{ source = ../../.; target = "/opt/ultracloud-src"; }
];
# Minimal network: DHCP on all interfaces
@ -30,8 +30,8 @@
};
# Phone Home service — fetches secrets from Deployer
systemd.services.plasmacloud-bootstrap = {
description = "PlasmaCloud Bootstrap via Phone Home";
systemd.services.ultracloud-bootstrap = {
description = "UltraCloud Bootstrap via Phone Home";
wantedBy = [ "multi-user.target" ];
after = [ "network-online.target" ];
wants = [ "network-online.target" ];
@ -58,12 +58,12 @@
return 1
}
mkdir -p /etc/plasmacloud
mkdir -p /etc/ultracloud
# Discover Deployer via environment, kernel cmdline, or fallback.
DEPLOYER_URL="''${DEPLOYER_URL:-}"
if [ -z "$DEPLOYER_URL" ]; then
DEPLOYER_URL="$(cmdline_value plasmacloud.deployer_url || true)"
DEPLOYER_URL="$(cmdline_value ultracloud.deployer_url || true)"
fi
if [ -z "$DEPLOYER_URL" ]; then
DEPLOYER_URL="http://192.168.100.1:8080"
@ -72,26 +72,26 @@
# Get machine identity
MACHINE_ID=$(cat /etc/machine-id)
echo "PlasmaCloud Bootstrap starting..."
echo "UltraCloud Bootstrap starting..."
echo "Machine ID: $MACHINE_ID"
echo "Deployer URL: $DEPLOYER_URL"
# Optional bootstrap token (from file or environment)
TOKEN_FILE="/etc/plasmacloud/bootstrap-token"
TOKEN_FILE="/etc/ultracloud/bootstrap-token"
DEPLOYER_TOKEN=""
if [ -s "$TOKEN_FILE" ]; then
DEPLOYER_TOKEN=$(cat "$TOKEN_FILE")
elif [ -n "''${DEPLOYER_BOOTSTRAP_TOKEN:-}" ]; then
DEPLOYER_TOKEN="''${DEPLOYER_BOOTSTRAP_TOKEN}"
else
DEPLOYER_TOKEN="$(cmdline_value plasmacloud.bootstrap_token || true)"
DEPLOYER_TOKEN="$(cmdline_value ultracloud.bootstrap_token || true)"
fi
DEPLOYER_CA_CERT_PATH="''${DEPLOYER_CA_CERT:-}"
if [ -z "$DEPLOYER_CA_CERT_PATH" ]; then
DEPLOYER_CA_CERT_URL="$(cmdline_value plasmacloud.ca_cert_url || true)"
DEPLOYER_CA_CERT_URL="$(cmdline_value ultracloud.ca_cert_url || true)"
if [ -n "$DEPLOYER_CA_CERT_URL" ]; then
DEPLOYER_CA_CERT_PATH="/etc/plasmacloud/bootstrap-ca.crt"
DEPLOYER_CA_CERT_PATH="/etc/ultracloud/bootstrap-ca.crt"
${pkgs.curl}/bin/curl -sfL --connect-timeout 5 --max-time 30 \
"$DEPLOYER_CA_CERT_URL" \
-o "$DEPLOYER_CA_CERT_PATH"
@ -197,7 +197,7 @@
echo " Phone Home successful"
# Create directories
mkdir -p /etc/ssh /etc/plasmacloud /root/.ssh
mkdir -p /etc/ssh /etc/ultracloud /root/.ssh
# Validate success flag
SUCCESS=$(echo "$RESPONSE" | ${pkgs.jq}/bin/jq -r '.success // false' || echo "false")
@ -208,18 +208,19 @@
continue
fi
# Extract and apply secrets
NODE_CONFIG=$(echo "$RESPONSE" | ${pkgs.jq}/bin/jq -c '.node_config // empty' || true)
if [ -z "$NODE_CONFIG" ] || [ "$NODE_CONFIG" = "null" ]; then
echo " Phone Home response missing node_config"
sleep $((2 ** i))
continue
fi
echo "$NODE_CONFIG" > /etc/plasmacloud/node-config.json
echo "$RESPONSE" | ${pkgs.jq}/bin/jq -r '.node_config.ssh_authorized_keys[]?' > /root/.ssh/authorized_keys
echo "$NODE_CONFIG" > /etc/ultracloud/node-config.json
echo "$RESPONSE" | ${pkgs.jq}/bin/jq -r '
.node_config.bootstrap_secrets.ssh_authorized_keys[]?
' > /root/.ssh/authorized_keys
# Apply SSH host key if provided
SSH_HOST_KEY=$(echo "$RESPONSE" | ${pkgs.jq}/bin/jq -r '.ssh_host_key // empty')
SSH_HOST_KEY=$(echo "$RESPONSE" | ${pkgs.jq}/bin/jq -r '.node_config.bootstrap_secrets.ssh_host_key // empty')
if [ -n "$SSH_HOST_KEY" ]; then
umask 077
echo "$SSH_HOST_KEY" > /etc/ssh/ssh_host_ed25519_key
@ -227,13 +228,13 @@
fi
# Apply TLS material if provided
TLS_CERT=$(echo "$RESPONSE" | ${pkgs.jq}/bin/jq -r '.tls_cert // empty')
TLS_KEY=$(echo "$RESPONSE" | ${pkgs.jq}/bin/jq -r '.tls_key // empty')
TLS_CERT=$(echo "$RESPONSE" | ${pkgs.jq}/bin/jq -r '.node_config.bootstrap_secrets.tls_cert // empty')
TLS_KEY=$(echo "$RESPONSE" | ${pkgs.jq}/bin/jq -r '.node_config.bootstrap_secrets.tls_key // empty')
if [ -n "$TLS_CERT" ] && [ -n "$TLS_KEY" ]; then
umask 077
mkdir -p /etc/plasmacloud/tls
echo "$TLS_CERT" > /etc/plasmacloud/tls/node.crt
echo "$TLS_KEY" > /etc/plasmacloud/tls/node.key
mkdir -p /etc/ultracloud/tls
echo "$TLS_CERT" > /etc/ultracloud/tls/node.crt
echo "$TLS_KEY" > /etc/ultracloud/tls/node.key
fi
# Generate host keys locally if missing
@ -242,16 +243,16 @@
fi
# Set permissions
chmod 644 /etc/plasmacloud/node-config.json 2>/dev/null || true
chmod 644 /etc/ultracloud/node-config.json 2>/dev/null || true
chmod 700 /root/.ssh 2>/dev/null || true
chmod 600 /root/.ssh/authorized_keys 2>/dev/null || true
chmod 600 /etc/ssh/ssh_host_ed25519_key 2>/dev/null || true
chmod 644 /etc/ssh/ssh_host_ed25519_key.pub 2>/dev/null || true
chmod 600 /etc/plasmacloud/tls/node.key 2>/dev/null || true
chmod 644 /etc/plasmacloud/tls/node.crt 2>/dev/null || true
chmod 600 /etc/ultracloud/tls/node.key 2>/dev/null || true
chmod 644 /etc/ultracloud/tls/node.crt 2>/dev/null || true
# Signal success
NODE_ID=$(echo "$RESPONSE" | ${pkgs.jq}/bin/jq -r '.node_id // "unknown"')
NODE_ID=$(echo "$RESPONSE" | ${pkgs.jq}/bin/jq -r '.node_config.assignment.node_id // "unknown"')
echo " Bootstrap complete: $NODE_ID"
exit 0
else
@ -266,11 +267,11 @@
};
# Auto-install service - partitions disk and runs nixos-install
systemd.services.plasmacloud-install = {
description = "PlasmaCloud Auto-Install to Disk";
systemd.services.ultracloud-install = {
description = "UltraCloud Auto-Install to Disk";
wantedBy = [ "multi-user.target" ];
after = [ "plasmacloud-bootstrap.service" ];
requires = [ "plasmacloud-bootstrap.service" ];
after = [ "ultracloud-bootstrap.service" ];
requires = [ "ultracloud-bootstrap.service" ];
serviceConfig = {
Type = "oneshot";
@ -296,25 +297,25 @@
return 1
}
if [ ! -s /etc/plasmacloud/node-config.json ]; then
if [ ! -s /etc/ultracloud/node-config.json ]; then
echo "ERROR: node-config.json missing (bootstrap not complete?)"
exit 1
fi
NODE_ID=$(${pkgs.jq}/bin/jq -r '.hostname // empty' /etc/plasmacloud/node-config.json)
NODE_IP=$(${pkgs.jq}/bin/jq -r '.ip // empty' /etc/plasmacloud/node-config.json)
NIXOS_CONFIGURATION=$(${pkgs.jq}/bin/jq -r '.install_plan.nixos_configuration // .hostname // empty' /etc/plasmacloud/node-config.json)
DISKO_PATH=$(${pkgs.jq}/bin/jq -r '.install_plan.disko_config_path // empty' /etc/plasmacloud/node-config.json)
TARGET_DISK=$(${pkgs.jq}/bin/jq -r '.install_plan.target_disk // empty' /etc/plasmacloud/node-config.json)
TARGET_DISK_BY_ID=$(${pkgs.jq}/bin/jq -r '.install_plan.target_disk_by_id // empty' /etc/plasmacloud/node-config.json)
NODE_ID=$(${pkgs.jq}/bin/jq -r '.assignment.hostname // .assignment.node_id // empty' /etc/ultracloud/node-config.json)
NODE_IP=$(${pkgs.jq}/bin/jq -r '.assignment.ip // empty' /etc/ultracloud/node-config.json)
NIXOS_CONFIGURATION=$(${pkgs.jq}/bin/jq -r '.bootstrap_plan.install_plan.nixos_configuration // .assignment.hostname // empty' /etc/ultracloud/node-config.json)
DISKO_PATH=$(${pkgs.jq}/bin/jq -r '.bootstrap_plan.install_plan.disko_config_path // empty' /etc/ultracloud/node-config.json)
TARGET_DISK=$(${pkgs.jq}/bin/jq -r '.bootstrap_plan.install_plan.target_disk // empty' /etc/ultracloud/node-config.json)
TARGET_DISK_BY_ID=$(${pkgs.jq}/bin/jq -r '.bootstrap_plan.install_plan.target_disk_by_id // empty' /etc/ultracloud/node-config.json)
DEPLOYER_URL="''${DEPLOYER_URL:-}"
if [ -z "$DEPLOYER_URL" ]; then
DEPLOYER_URL="$(cmdline_value plasmacloud.deployer_url || true)"
DEPLOYER_URL="$(cmdline_value ultracloud.deployer_url || true)"
fi
if [ -z "$DEPLOYER_URL" ]; then
DEPLOYER_URL="http://192.168.100.1:8080"
fi
SRC_ROOT="/opt/plasmacloud-src"
SRC_ROOT="/opt/ultracloud-src"
if [ -z "$NODE_ID" ] || [ -z "$NODE_IP" ]; then
echo "ERROR: node-config.json missing hostname/ip"
@ -326,21 +327,21 @@
exit 1
fi
TOKEN_FILE="/etc/plasmacloud/bootstrap-token"
TOKEN_FILE="/etc/ultracloud/bootstrap-token"
DEPLOYER_TOKEN=""
if [ -s "$TOKEN_FILE" ]; then
DEPLOYER_TOKEN=$(cat "$TOKEN_FILE")
elif [ -n "''${DEPLOYER_BOOTSTRAP_TOKEN:-}" ]; then
DEPLOYER_TOKEN="''${DEPLOYER_BOOTSTRAP_TOKEN}"
else
DEPLOYER_TOKEN="$(cmdline_value plasmacloud.bootstrap_token || true)"
DEPLOYER_TOKEN="$(cmdline_value ultracloud.bootstrap_token || true)"
fi
DEPLOYER_CA_CERT_PATH="''${DEPLOYER_CA_CERT:-}"
if [ -z "$DEPLOYER_CA_CERT_PATH" ]; then
DEPLOYER_CA_CERT_URL="$(cmdline_value plasmacloud.ca_cert_url || true)"
DEPLOYER_CA_CERT_URL="$(cmdline_value ultracloud.ca_cert_url || true)"
if [ -n "$DEPLOYER_CA_CERT_URL" ]; then
DEPLOYER_CA_CERT_PATH="/etc/plasmacloud/bootstrap-ca.crt"
DEPLOYER_CA_CERT_PATH="/etc/ultracloud/bootstrap-ca.crt"
${pkgs.curl}/bin/curl -sfL --connect-timeout 5 --max-time 30 \
"$DEPLOYER_CA_CERT_URL" \
-o "$DEPLOYER_CA_CERT_PATH"
@ -355,8 +356,8 @@
CURL_ARGS+=(--cacert "$DEPLOYER_CA_CERT_PATH")
fi
BUNDLE_PATH="/run/plasmacloud/flake-bundle.tar.gz"
mkdir -p /run/plasmacloud
BUNDLE_PATH="/run/ultracloud/flake-bundle.tar.gz"
mkdir -p /run/ultracloud
if ${pkgs.curl}/bin/curl "''${CURL_ARGS[@]}" \
"$DEPLOYER_URL/api/v1/bootstrap/flake-bundle" \
-o "$BUNDLE_PATH"; then
@ -385,7 +386,7 @@
exit 1
fi
echo "PlasmaCloud install starting for $NODE_ID (ip=$NODE_IP, nixos_configuration=$NIXOS_CONFIGURATION, disko_path=$DISKO_PATH)"
echo "UltraCloud install starting for $NODE_ID (ip=$NODE_IP, nixos_configuration=$NIXOS_CONFIGURATION, disko_path=$DISKO_PATH)"
# Resolve installation target disk.
if [ -n "$TARGET_DISK_BY_ID" ]; then
@ -427,7 +428,7 @@
EFFECTIVE_DISKO_PATH="$SRC_ROOT/$DISKO_PATH"
if [ -n "$DISK" ]; then
cat > /run/plasmacloud/disko-wrapper.nix <<EOF
cat > /run/ultracloud/disko-wrapper.nix <<EOF
{ ... }:
{
imports = [
@ -435,10 +436,10 @@
"$SRC_ROOT/$DISKO_PATH"
];
plasmacloud.install.diskDevice = "$DISK";
ultracloud.install.diskDevice = "$DISK";
}
EOF
EFFECTIVE_DISKO_PATH="/run/plasmacloud/disko-wrapper.nix"
EFFECTIVE_DISKO_PATH="/run/ultracloud/disko-wrapper.nix"
fi
echo "Running disko to partition $DISK..."
@ -456,7 +457,18 @@ EOF
# Packages for bootstrap + install
environment.systemPackages = with pkgs; [
curl jq vim htop gawk gnugrep util-linux parted dosfstools e2fsprogs gnutar gzip
curl
jq
vim
htop
gawk
gnugrep
util-linux
parted
dosfstools
e2fsprogs
gnutar
gzip
];
# SSH with key-based auth for non-interactive access

View file

@ -120,7 +120,8 @@ let
mkHostDeploymentType = types:
let
selectorType = mkHostDeploymentSelectorType types;
in types.submodule {
in
types.submodule {
options = {
selector = mkOption {
type = selectorType;
@ -284,7 +285,8 @@ let
let
containerPortType = mkContainerPortType types;
containerVolumeType = mkContainerVolumeType types;
in types.submodule {
in
types.submodule {
options = {
image = mkOption {
type = types.str;
@ -443,7 +445,8 @@ let
processType = mkProcessType types;
containerType = mkContainerType types;
healthCheckType = mkHealthCheckType types;
in types.submodule {
in
types.submodule {
options = {
mode = mkOption {
type = types.enum [ "replicated" "daemon" ];
@ -572,7 +575,8 @@ let
let
dnsPublicationType = mkDnsPublicationType types;
loadBalancerPublicationType = mkLoadBalancerPublicationType types;
in types.submodule {
in
types.submodule {
options = {
orgId = mkOption {
type = types.nullOr types.str;
@ -605,7 +609,8 @@ let
servicePortsType = mkServicePortsType types;
serviceScheduleType = mkServiceScheduleType types;
servicePublicationType = mkServicePublicationType types;
in types.submodule {
in
types.submodule {
options = {
ports = mkOption {
type = types.nullOr servicePortsType;
@ -681,7 +686,8 @@ let
let
installPlanType = mkInstallPlanType types;
desiredSystemType = mkDesiredSystemType types;
in types.submodule {
in
types.submodule {
options = {
role = mkOption {
type = types.enum [ "control-plane" "worker" ];
@ -801,7 +807,8 @@ let
mkNodeClassType = types:
let
installPlanType = mkInstallPlanType types;
in types.submodule {
in
types.submodule {
options = {
description = mkOption {
type = types.nullOr types.str;
@ -872,7 +879,8 @@ let
mkEnrollmentRuleType = types:
let
installPlanType = mkInstallPlanType types;
in types.submodule {
in
types.submodule {
options = {
priority = mkOption {
type = types.int;
@ -957,7 +965,8 @@ let
mkInstallPlan = plan:
let
rendered =
optionalAttrs (plan != null && plan.nixosConfiguration != null) {
optionalAttrs (plan != null && plan.nixosConfiguration != null)
{
nixos_configuration = plan.nixosConfiguration;
}
// optionalAttrs (plan != null && plan.diskoConfigPath != null) {
@ -975,7 +984,8 @@ let
mkDesiredSystem = nodeName: desiredSystem:
let
rendered =
optionalAttrs (desiredSystem != null && desiredSystem.deploymentId != null) {
optionalAttrs (desiredSystem != null && desiredSystem.deploymentId != null)
{
deployment_id = desiredSystem.deploymentId;
}
// optionalAttrs (desiredSystem != null && desiredSystem.nixosConfiguration != null) {
@ -1164,7 +1174,8 @@ let
};
mkServicePorts = ports:
optionalAttrs (ports != null && ports.http != null) {
optionalAttrs (ports != null && ports.http != null)
{
http = ports.http;
}
// optionalAttrs (ports != null && ports.grpc != null) {
@ -1291,7 +1302,8 @@ let
};
mkLoadBalancerPublicationSpec = loadBalancer:
optionalAttrs (loadBalancer.orgId != null) {
optionalAttrs (loadBalancer.orgId != null)
{
org_id = loadBalancer.orgId;
}
// optionalAttrs (loadBalancer.projectId != null) {
@ -1311,7 +1323,8 @@ let
};
mkServicePublicationSpec = publish:
optionalAttrs (publish.orgId != null) {
optionalAttrs (publish.orgId != null)
{
org_id = publish.orgId;
}
// optionalAttrs (publish.projectId != null) {
@ -1378,16 +1391,11 @@ let
else
head controlPlaneNodes;
mkNixNOSTopologyCluster = cluster: {
name = cluster.name;
bootstrapNode = resolveBootstrapNodeName cluster null;
nodes = cluster.nodes;
};
mkClusterConfig = {
cluster,
hostname,
bootstrapNodeName ? null,
mkClusterConfig =
{ cluster
, hostname
, bootstrapNodeName ? null
,
}:
let
node = cluster.nodes.${hostname} or (throw "Node ${hostname} not found in cluster configuration");
@ -1401,18 +1409,23 @@ let
bootstrapNode = cluster.nodes.${resolvedBootstrapNodeName}
or (throw "Bootstrap node ${resolvedBootstrapNodeName} not found in cluster configuration");
initialPeers = map (nodeName: {
initialPeers = map
(nodeName: {
id = nodeName;
addr = "${cluster.nodes.${nodeName}.ip}:${toString cluster.nodes.${nodeName}.raftPort}";
}) controlPlaneNodes;
})
controlPlaneNodes;
flaredbPeers = map (nodeName:
flaredbPeers = map
(nodeName:
"${cluster.nodes.${nodeName}.ip}:${toString (cluster.nodes.${nodeName}.apiPort + 100)}"
) controlPlaneNodes;
)
controlPlaneNodes;
chainfireLeaderUrl = "http://${bootstrapNode.ip}:8081";
flaredbLeaderUrl = "http://${bootstrapNode.ip}:8082";
in {
in
{
node_id = hostname;
node_role = node.role;
bootstrap = hostname == resolvedBootstrapNodeName;
@ -1443,7 +1456,8 @@ let
hostDeployments = deployer.hostDeployments or { };
services = deployer.services or { };
mtlsPolicies = deployer.mtlsPolicies or { };
in {
in
{
cluster = {
cluster_id = clusterId;
} // optionalAttrs (deployer ? environment && deployer.environment != null) {
@ -1484,7 +1498,6 @@ in
mkNodePoolType
mkEnrollmentRuleType
resolveBootstrapNodeName
mkNixNOSTopologyCluster
mkClusterConfig
mkDeployerClusterState;
}

View file

@ -287,7 +287,7 @@ in {
users.groups.apigateway = {};
systemd.services.apigateway = {
description = "PlasmaCloud API Gateway";
description = "UltraCloud API Gateway";
wantedBy = [ "multi-user.target" ];
after = [ "network.target" ];

View file

@ -1,3 +1,3 @@
{ lib }:
import ../../nix-nos/lib/cluster-config-lib.nix { inherit lib; }
import ../lib/cluster-schema.nix { inherit lib; }

View file

@ -1,7 +1,7 @@
{
imports = [
./chainfire.nix
./plasmacloud-cluster.nix
./ultracloud-cluster.nix
./install-target.nix
./service-port-reservations.nix
./creditservice.nix
@ -12,9 +12,9 @@
./prismnet.nix
./flashdns.nix
./fiberlb.nix
./plasmacloud-network.nix
./plasmacloud-resources.nix
./plasmacloud-tenant-networking.nix
./ultracloud-network.nix
./ultracloud-resources.nix
./ultracloud-tenant-networking.nix
./lightningstor.nix
./k8shost.nix
./nightlight.nix

View file

@ -81,7 +81,7 @@ in
clusterNamespace = lib.mkOption {
type = lib.types.str;
default = "photoncloud";
default = "ultracloud";
description = "Cluster namespace prefix";
};
@ -100,7 +100,7 @@ in
bootstrapFlakeBundle = lib.mkOption {
type = lib.types.nullOr lib.types.path;
default = null;
description = "Optional tar.gz bundle served to bootstrap installers as the canonical PhotonCloud flake source";
description = "Optional tar.gz bundle served to bootstrap installers as the canonical UltraCloud flake source";
};
requireChainfire = lib.mkOption {
@ -220,8 +220,8 @@ in
services.deployer.clusterStateFile =
lib.mkDefault (
if config.system.build ? plasmacloudDeployerClusterState then
config.system.build.plasmacloudDeployerClusterState
if config.system.build ? ultracloudDeployerClusterState then
config.system.build.ultracloudDeployerClusterState
else
null
);
@ -241,7 +241,7 @@ in
];
systemd.services.deployer = {
description = "PlasmaCloud Deployer Server";
description = "UltraCloud Deployer Server";
wantedBy = [ "multi-user.target" ];
wants = [ "network-online.target" ] ++ localChainfireDeps;
after = [ "network-online.target" ] ++ localChainfireDeps;
@ -276,7 +276,7 @@ in
};
systemd.services.deployer-seed-cluster-state = lib.mkIf cfg.seedClusterState {
description = "Seed PlasmaCloud cluster state from declarative Nix output";
description = "Seed UltraCloud cluster state from declarative Nix output";
wantedBy = [ "multi-user.target" ];
wants = [ "network-online.target" "deployer.service" ];
after = [ "network-online.target" "deployer.service" ];

View file

@ -3,39 +3,10 @@
let
cfg = config.services.first-boot-automation;
configFilePath = toString cfg.configFile;
configEtcPath =
if lib.hasPrefix "/etc/" configFilePath
then lib.removePrefix "/etc/" configFilePath
else null;
hasPlasmacloudManagedClusterConfig =
(config ? plasmacloud)
&& (config.plasmacloud ? cluster)
&& (config.plasmacloud.cluster.generated.nodeClusterConfig or null) != null;
availableNixNOSClusters = builtins.attrNames (config.nix-nos.clusters or {});
resolvedNixNOSClusterName =
if builtins.elem cfg.nixnosClusterName availableNixNOSClusters then
cfg.nixnosClusterName
else if
(config ? plasmacloud)
&& (config.plasmacloud ? cluster)
&& (config.plasmacloud.cluster.enable or false)
&& builtins.elem config.plasmacloud.cluster.name availableNixNOSClusters
then
config.plasmacloud.cluster.name
else if builtins.length availableNixNOSClusters == 1 then
builtins.head availableNixNOSClusters
else
cfg.nixnosClusterName;
useNixNOS = cfg.useNixNOS && (config.nix-nos.enable or false) &&
(builtins.length availableNixNOSClusters) > 0;
nixNOSClusterConfig =
if useNixNOS then
config.nix-nos.generateClusterConfig {
hostname = config.networking.hostName;
clusterName = resolvedNixNOSClusterName;
}
else
null;
hasUltraCloudManagedClusterConfig =
(config ? ultracloud)
&& (config.ultracloud ? cluster)
&& (config.ultracloud.cluster.generated.nodeClusterConfig or null) != null;
# Helper function to create cluster join service
mkClusterJoinService = {
@ -194,22 +165,10 @@ in
options.services.first-boot-automation = {
enable = lib.mkEnableOption "first-boot cluster join automation";
useNixNOS = lib.mkOption {
type = lib.types.bool;
default = false;
description = "Use nix-nos topology for cluster configuration instead of cluster-config.json";
};
nixnosClusterName = lib.mkOption {
type = lib.types.str;
default = "plasmacloud";
description = "Name of the nix-nos cluster to use (only used when useNixNOS is true)";
};
configFile = lib.mkOption {
type = lib.types.path;
default = "/etc/nixos/secrets/cluster-config.json";
description = "Path to cluster configuration JSON file (used when useNixNOS is false)";
description = "Path to the cluster configuration JSON file consumed at first boot.";
};
enableChainfire = lib.mkOption {
@ -258,32 +217,11 @@ in
config = lib.mkIf cfg.enable {
assertions = [
{
assertion = (!cfg.useNixNOS) || (config.nix-nos.enable or false);
message = "services.first-boot-automation.useNixNOS requires nix-nos.enable = true";
}
{
assertion = (!cfg.useNixNOS) || ((builtins.length availableNixNOSClusters) > 0);
message = "services.first-boot-automation.useNixNOS requires at least one nix-nos.clusters entry";
}
{
assertion = (!cfg.useNixNOS) || (configEtcPath != null);
message = "services.first-boot-automation.useNixNOS requires services.first-boot-automation.configFile to live under /etc";
}
{
assertion = (!cfg.useNixNOS) || builtins.elem resolvedNixNOSClusterName availableNixNOSClusters;
message = "services.first-boot-automation.useNixNOS could not resolve nix-nos cluster '${cfg.nixnosClusterName}' (available: ${lib.concatStringsSep ", " availableNixNOSClusters})";
assertion = (!hasUltraCloudManagedClusterConfig) || (configFilePath == "/etc/nixos/secrets/cluster-config.json");
message = "services.first-boot-automation.configFile must remain /etc/nixos/secrets/cluster-config.json when ultracloud.cluster manages the node cluster config";
}
];
environment.etc = lib.mkIf (useNixNOS && !hasPlasmacloudManagedClusterConfig) (
lib.optionalAttrs (configEtcPath != null) {
"${configEtcPath}" = {
text = builtins.toJSON nixNOSClusterConfig;
mode = "0600";
};
}
);
# Chainfire cluster join service
systemd.services.chainfire-cluster-join = lib.mkIf cfg.enableChainfire (
mkClusterJoinService {

Some files were not shown because too many files have changed in this diff Show more