2426 lines
98 KiB
Nix
2426 lines
98 KiB
Nix
{
|
|
description = "UltraCloud - Japanese Cloud Platform";
|
|
|
|
# ============================================================================
|
|
# INPUTS: External dependencies
|
|
# ============================================================================
|
|
inputs = {
|
|
# Use unstable nixpkgs for latest packages
|
|
nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable";
|
|
|
|
# Rust overlay for managing Rust toolchains
|
|
rust-overlay = {
|
|
url = "github:oxalica/rust-overlay";
|
|
inputs.nixpkgs.follows = "nixpkgs";
|
|
};
|
|
|
|
# Flake utilities for multi-system support
|
|
flake-utils.url = "github:numtide/flake-utils";
|
|
|
|
# Disko for declarative disk partitioning
|
|
disko = {
|
|
url = "github:nix-community/disko";
|
|
inputs.nixpkgs.follows = "nixpkgs";
|
|
};
|
|
|
|
};
|
|
|
|
# ============================================================================
|
|
# OUTPUTS: What this flake provides
|
|
# ============================================================================
|
|
outputs = { self, nixpkgs, rust-overlay, flake-utils, disko, systems ? null }:
|
|
flake-utils.lib.eachDefaultSystem
|
|
(system:
|
|
let
|
|
# Apply rust-overlay to get the pkgs.rust-bin attribute used below.
overlays = [ (import rust-overlay) ];

# Nixpkgs instantiated for the current system with the overlays applied.
pkgs = import nixpkgs {
  inherit system overlays;
};

# Rust toolchain configuration.
# Using the stable channel with rust-src (needed by rust-analyzer) and the
# rust-analyzer component itself.
rustToolchain = pkgs.rust-bin.stable.latest.default.override {
  extensions = [ "rust-src" "rust-analyzer" ];
};

# Common (runtime) build inputs needed by all Rust packages.
commonBuildInputs = with pkgs; [
  rocksdb # RocksDB storage engine
  openssl # TLS/SSL support
];

# Common native build inputs (build-time only tools).
commonNativeBuildInputs = with pkgs; [
  pkg-config # For finding libraries
  protobuf # Protocol Buffers compiler
  rustToolchain
];

# Common environment variables for building; shared by the package
# builders and (ideally) the dev shell so both environments agree.
commonEnvVars = {
  # bindgen/clang-sys locate libclang through this.
  LIBCLANG_PATH = "${pkgs.llvmPackages.libclang.lib}/lib";
  PROTOC = "${pkgs.protobuf}/bin/protoc";
  # Link against the nixpkgs rocksdb instead of building it from source.
  ROCKSDB_LIB_DIR = "${pkgs.rocksdb}/lib";
};
|
|
|
|
# Python with python-snappy; exposed in the dev shell for cluster tooling.
clusterPython = pkgs.python3.withPackages (ps: [ ps.python-snappy ]);
# Single-node trial service surface; serialized to JSON by the
# single-node-trial-manifest package below.
singleNodeSurface = import ./nix/single-node/surface.nix;
|
|
|
|
# Keep Rust package builds stable without invalidating every package on
# unrelated workspace changes.
#
# Each entry lists the repo-relative source roots a workspace build needs.
# The values are consumed only as membership sets by mkWorkspaceSrc
# (builtins.elem / hasPrefix checks), so list order is not significant.
# The shared "photon core" set is factored out so the per-workspace
# entries cannot drift apart again.
workspaceSourceRoots =
  let
    # Roots required by every service that links the shared photon crates
    # plus the apigateway/chainfire/creditservice/flaredb/iam stack.
    photonCore = [
      "apigateway"
      "chainfire"
      "creditservice"
      "crates/photon-auth-client"
      "crates/photon-config"
      "crates/photon-runtime"
      "crates/photon-state"
      "flaredb"
      "iam"
    ];
  in
  {
    # Self-contained workspaces.
    chainfire = [ "chainfire" ];
    flaredb = [ "flaredb" ];
    coronafs = [ "coronafs" ];
    nightlight = [ "nightlight" ];

    # Workspaces built on the shared photon core.
    iam = photonCore;
    creditservice = photonCore;
    apigateway = photonCore;
    prismnet = photonCore ++ [ "prismnet" ];
    flashdns = photonCore ++ [ "flashdns" ];
    fiberlb = photonCore ++ [ "fiberlb" ];
    lightningstor = photonCore ++ [ "lightningstor" ];
    plasmavmc = photonCore ++ [ "lightningstor" "plasmavmc" "prismnet" ];
    k8shost = photonCore ++ [
      "fiberlb"
      "flashdns"
      "k8shost"
      "lightningstor"
      "plasmavmc"
      "prismnet"
    ];
    deployer = photonCore ++ [ "deployer" "fiberlb" "flashdns" "prismnet" ];
  };
|
|
|
|
# Build a filtered copy of the repo containing only what one Rust
# workspace needs, so edits to unrelated workspaces do not invalidate
# its build.
#
# workspaceSubdir: subdirectory holding the workspace's Cargo.toml.
# Workspaces without an entry in workspaceSourceRoots fall back to
# including just their own subdirectory.
mkWorkspaceSrc = workspaceSubdir:
  let
    sourceRoots = workspaceSourceRoots.${workspaceSubdir} or [ workspaceSubdir ];
  in
  pkgs.lib.cleanSourceWith {
    src = ./.;
    filter = path: type:
      let
        # Path relative to the repo root ("" for the root itself).
        rel = pkgs.lib.removePrefix ((toString ./.) + "/") (toString path);
      in
      rel == ""
      # Always keep the flake files so the result is still a flake.
      || builtins.elem rel [ "flake.nix" "flake.lock" ]
      || builtins.any
        (root:
          # Keep the root itself, ...
          rel == root
          # ... everything beneath it, ...
          || pkgs.lib.hasPrefix "${root}/" rel
          # ... and any ancestor directory of it (so the filter is able
          # to descend from the repo root down to a nested root such as
          # "crates/photon-config").
          || pkgs.lib.hasPrefix "${rel}/" root
        )
        sourceRoots;
  };
|
|
|
|
# Source tree for the offline flake bundle: the flake files plus an
# explicit allow-list of first-party top-level directories, with cargo
# `target` build output excluded everywhere.
flakeBundleSrc = pkgs.lib.cleanSourceWith {
  src = ./.;
  filter = path: type:
    let
      rel = pkgs.lib.removePrefix ((toString ./.) + "/") (toString path);
      # First path component of rel, matched against the allow-list.
      topLevel = builtins.head (pkgs.lib.splitString "/" rel);
      includedTopLevels = [
        "apigateway"
        "baremetal"
        "chainfire"
        "coronafs"
        "crates"
        "creditservice"
        "deployer"
        "fiberlb"
        "flashdns"
        "flaredb"
        "iam"
        "k8shost"
        "lightningstor"
        "mtls-agent"
        "nightlight"
        "nix"
        "plasmavmc"
        "prismnet"
      ];
      # True when any path component equals "target" (cargo build output).
      isTargetDir = builtins.match "(.*/)?target(/.*)?" rel != null;
    in
    !isTargetDir
    && (
      rel == ""
      || builtins.elem rel [ "flake.nix" "flake.lock" ]
      || builtins.elem topLevel includedTopLevels
    );
};
|
|
|
|
flakeInputsBlock = ''
|
|
inputs = {
|
|
# Use unstable nixpkgs for latest packages
|
|
nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable";
|
|
|
|
# Rust overlay for managing Rust toolchains
|
|
rust-overlay = {
|
|
url = "github:oxalica/rust-overlay";
|
|
inputs.nixpkgs.follows = "nixpkgs";
|
|
};
|
|
|
|
# Flake utilities for multi-system support
|
|
flake-utils.url = "github:numtide/flake-utils";
|
|
|
|
# Disko for declarative disk partitioning
|
|
disko = {
|
|
url = "github:nix-community/disko";
|
|
inputs.nixpkgs.follows = "nixpkgs";
|
|
};
|
|
|
|
};
|
|
'';
|
|
|
|
bundledInputsBlock = ''
|
|
inputs = {
|
|
nixpkgs.url = "path:./.bundle-inputs/nixpkgs";
|
|
|
|
rust-overlay = {
|
|
url = "path:./.bundle-inputs/rust-overlay";
|
|
inputs.nixpkgs.follows = "nixpkgs";
|
|
};
|
|
|
|
flake-utils = {
|
|
url = "path:./.bundle-inputs/flake-utils";
|
|
inputs.systems.follows = "systems";
|
|
};
|
|
|
|
systems.url = "path:./.bundle-inputs/systems";
|
|
|
|
disko = {
|
|
url = "path:./.bundle-inputs/disko";
|
|
inputs.nixpkgs.follows = "nixpkgs";
|
|
};
|
|
|
|
};
|
|
'';
|
|
|
|
flakeHeaderBlock = ''
|
|
# ============================================================================
|
|
# INPUTS: External dependencies
|
|
# ============================================================================
|
|
inputs = {
|
|
# Use unstable nixpkgs for latest packages
|
|
nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable";
|
|
|
|
# Rust overlay for managing Rust toolchains
|
|
rust-overlay = {
|
|
url = "github:oxalica/rust-overlay";
|
|
inputs.nixpkgs.follows = "nixpkgs";
|
|
};
|
|
|
|
# Flake utilities for multi-system support
|
|
flake-utils.url = "github:numtide/flake-utils";
|
|
|
|
# Disko for declarative disk partitioning
|
|
disko = {
|
|
url = "github:nix-community/disko";
|
|
inputs.nixpkgs.follows = "nixpkgs";
|
|
};
|
|
|
|
};
|
|
|
|
# ============================================================================
|
|
# OUTPUTS: What this flake provides
|
|
# ============================================================================
|
|
outputs = { self, nixpkgs, rust-overlay, flake-utils, disko, systems ? null }:
|
|
'';
|
|
|
|
bundledHeaderBlock = ''
|
|
# ============================================================================
|
|
# INPUTS: External dependencies
|
|
# ============================================================================
|
|
inputs = {
|
|
nixpkgs.url = "path:./.bundle-inputs/nixpkgs";
|
|
|
|
rust-overlay = {
|
|
url = "path:./.bundle-inputs/rust-overlay";
|
|
inputs.nixpkgs.follows = "nixpkgs";
|
|
};
|
|
|
|
flake-utils = {
|
|
url = "path:./.bundle-inputs/flake-utils";
|
|
inputs.systems.follows = "systems";
|
|
};
|
|
|
|
systems.url = "path:./.bundle-inputs/systems";
|
|
|
|
disko = {
|
|
url = "path:./.bundle-inputs/disko";
|
|
inputs.nixpkgs.follows = "nixpkgs";
|
|
};
|
|
|
|
};
|
|
|
|
# ============================================================================
|
|
# OUTPUTS: What this flake provides
|
|
# ============================================================================
|
|
outputs = { self, nixpkgs, rust-overlay, flake-utils, disko, systems ? null }:
|
|
'';
|
|
|
|
# flake.nix rewritten for offline use: the "github:" inputs header is
# replaced with the bundled "path:" header.
# NOTE(review): builtins.replaceStrings silently leaves the file
# unchanged if flakeHeaderBlock no longer matches this file's real header
# byte-for-byte; the Python rewrite in bundledFlakeRootDrv fails loudly
# on the same condition instead. This writeText is not referenced
# anywhere in the visible part of the file — confirm it is still used
# before relying on (or removing) it.
bundledFlakeNix =
  pkgs.writeText
    "ultracloud-bundled-flake.nix"
    (
      builtins.replaceStrings
        [ flakeHeaderBlock ]
        [ bundledHeaderBlock ]
        (builtins.readFile ./flake.nix)
    );
|
|
|
|
# The bundled header text as a store file; handed to the Python rewrite
# script inside bundledFlakeRootDrv.
bundledFlakeHeaderFile =
  pkgs.writeText "ultracloud-bundled-flake-header" bundledHeaderBlock;

# Parsed committed lock file; source of truth for pinned input revisions.
baseFlakeLock = builtins.fromJSON (builtins.readFile ./flake.lock);
|
|
|
|
# Relative path (inside the bundle root) where each vendored flake input
# is unpacked; attribute names match the node names in flake.lock.
bundleInputRelPaths =
  pkgs.lib.genAttrs
    [ "nixpkgs" "rust-overlay" "flake-utils" "disko" "systems" ]
    (inputName: "./.bundle-inputs/${inputName}");
|
|
|
|
# Fetch one input pinned in flake.lock and return its store path.
# builtins.fetchTree normally yields an attrset carrying outPath; fall
# back to the raw result if it ever does not.
fetchLockedInput = nodeName:
  let
    fetched = builtins.fetchTree baseFlakeLock.nodes.${nodeName}.locked;
    hasOutPath = builtins.isAttrs fetched && fetched ? outPath;
  in
  if hasOutPath then fetched.outPath else fetched;
|
|
|
|
# Store paths of every flake input at its locked revision, ready to be
# copied into the offline bundle.
vendoredFlakeInputs =
  pkgs.lib.genAttrs
    [ "nixpkgs" "rust-overlay" "flake-utils" "disko" "systems" ]
    fetchLockedInput;
|
|
|
|
# Rewrite one flake.lock node so that both its locked and original
# references point at the vendored copy under .bundle-inputs instead of
# the upstream source; all other node fields are kept as-is.
makeBundledLockNode = nodeName: relPath:
  let
    pathRef = {
      type = "path";
      path = relPath;
    };
  in
  baseFlakeLock.nodes.${nodeName} // {
    locked = pathRef;
    original = pathRef;
  };
|
|
|
|
# flake.lock for the offline bundle: identical to the committed lock
# except that every input node is redirected to its vendored path copy
# and the root node gains an explicit "systems" input (wired up by the
# bundled flake-utils in bundledInputsBlock).
bundledFlakeLock = baseFlakeLock // {
  nodes =
    baseFlakeLock.nodes
    # One bundled node per vendored input (nixpkgs, rust-overlay,
    # flake-utils, disko, systems).
    // pkgs.lib.mapAttrs makeBundledLockNode bundleInputRelPaths
    // {
      root = baseFlakeLock.nodes.root // {
        inputs = baseFlakeLock.nodes.root.inputs // {
          systems = "systems";
        };
      };
    };
};
|
|
|
|
# Serialized lock file written into the bundle root.
bundledFlakeLockFile =
  pkgs.writeText "ultracloud-bundled-flake.lock" (builtins.toJSON bundledFlakeLock);

# True when this flake is being evaluated *inside* an already-produced
# bundle (the bundle root carries this marker file); used below to stop
# a bundle from recursively re-bundling itself.
inBundledEval = builtins.pathExists ./.bundle-eval-marker;
|
|
|
|
bundledFlakeRootDrv = pkgs.runCommand "ultracloud-bundled-flake-root"
|
|
{
|
|
nativeBuildInputs = [
|
|
pkgs.coreutils
|
|
pkgs.python3
|
|
];
|
|
} ''
|
|
mkdir -p "$out"
|
|
cp -a ${flakeBundleSrc}/. "$out"/
|
|
chmod -R u+w "$out"
|
|
touch "$out/.bundle-eval-marker"
|
|
mkdir -p "$out/.bundle-inputs"
|
|
cp -a ${vendoredFlakeInputs.nixpkgs} "$out/.bundle-inputs/nixpkgs"
|
|
cp -a ${vendoredFlakeInputs."rust-overlay"} "$out/.bundle-inputs/rust-overlay"
|
|
cp -a ${vendoredFlakeInputs."flake-utils"} "$out/.bundle-inputs/flake-utils"
|
|
cp -a ${vendoredFlakeInputs.disko} "$out/.bundle-inputs/disko"
|
|
cp -a ${vendoredFlakeInputs.systems} "$out/.bundle-inputs/systems"
|
|
cp ${bundledFlakeLockFile} "$out/flake.lock"
|
|
python3 - <<'PY' "$out/flake.nix" ${bundledFlakeHeaderFile}
|
|
from pathlib import Path
|
|
import re
|
|
import sys
|
|
|
|
flake_path = Path(sys.argv[1])
|
|
header = Path(sys.argv[2]).read_text()
|
|
source = flake_path.read_text()
|
|
pattern = re.compile(
|
|
r" # ============================================================================\n"
|
|
r" # INPUTS: External dependencies\n"
|
|
r" # ============================================================================\n"
|
|
r" inputs = \{.*?\n"
|
|
r" # ============================================================================\n"
|
|
r" # OUTPUTS: What this flake provides\n"
|
|
r" # ============================================================================\n"
|
|
r" outputs = \{ self, nixpkgs, rust-overlay, flake-utils, disko, systems \? null \}:",
|
|
re.S,
|
|
)
|
|
rewritten, count = pattern.subn(header.rstrip("\n"), source, count=1)
|
|
if count != 1:
|
|
raise SystemExit(f"expected to rewrite 1 flake header, rewrote {count}")
|
|
flake_path.write_text(rewritten)
|
|
PY
|
|
'';
|
|
|
|
# Everything below is only meaningful on the outer evaluation; inside a
# produced bundle (inBundledEval) each binding is null so the bundle
# never tries to re-bundle itself.

# The bundle root imported into the eval store as a plain source path.
bundledFlakeRoot =
  if inBundledEval then
    null
  else
    builtins.path {
      path = bundledFlakeRootDrv;
      name = "ultracloud-bundled-flake-root-src";
    };

# SRI NAR hash of the bundle root, computed at build time so the bundle
# can be referenced as a locked path flake below.
bundledFlakeRootNarHashFile =
  if inBundledEval then
    null
  else
    pkgs.runCommand "ultracloud-bundled-flake-root-narhash"
      {
        nativeBuildInputs = [ pkgs.nix ];
      } ''
      ${pkgs.nix}/bin/nix \
        --extra-experimental-features nix-command \
        hash path --sri ${bundledFlakeRoot} \
        | tr -d '\n' > "$out"
    '';

# The hash read back at eval time (import-from-derivation).
bundledFlakeRootNarHash =
  if inBundledEval then
    null
  else
    builtins.readFile bundledFlakeRootNarHashFile;

# Evaluate the bundled flake itself; the narHash query pin keeps the
# path reference locked, and unsafeDiscardStringContext is needed so
# getFlake accepts the store-path URL.
bundledFlake =
  if inBundledEval then
    null
  else
    builtins.getFlake (
      builtins.unsafeDiscardStringContext
        "path:${toString bundledFlakeRoot}?narHash=${bundledFlakeRootNarHash}"
    );

# Smoke-test NixOS toplevel built from the bundled flake's own
# evaluation, proving the bundle is self-contained.
bundledVmSmokeTargetToplevel =
  if inBundledEval then
    null
  else
    bundledFlake.nixosConfigurations.vm-smoke-target.config.system.build.toplevel;
|
|
|
|
# Helper function to build a Rust workspace package.
# Parameters:
#   name: package name (e.g., "chainfire-server")
#   workspaceSubdir: subdirectory containing Cargo.toml (e.g., "chainfire")
#   mainCrate: optional main crate name if different from workspace
#   description: package description for meta
#   doCheck: whether to run tests during build (default: false)
buildRustWorkspace = { name, workspaceSubdir, mainCrate ? null, description ? "", doCheck ? false }:
  pkgs.rustPlatform.buildRustPackage ({
    pname = name;
    version = "0.1.0";
    # Filtered source: only the roots this workspace depends on.
    src = mkWorkspaceSrc workspaceSubdir;

    cargoLock = {
      lockFile = ./${workspaceSubdir}/Cargo.lock;
    };

    # Build from the workspace subdirectory
    buildAndTestSubdir = workspaceSubdir;

    # Copy Cargo.lock to root for nix validation (expects it at src root)
    postUnpack = ''
      cp $sourceRoot/${workspaceSubdir}/Cargo.lock $sourceRoot/Cargo.lock
    '';

    nativeBuildInputs = commonNativeBuildInputs;
    buildInputs = commonBuildInputs;

    # Set environment variables for build
    inherit (commonEnvVars) LIBCLANG_PATH PROTOC ROCKSDB_LIB_DIR;

    # Whether to run cargo tests during the build; defaults to false and
    # is overridden per-package by callers.
    inherit doCheck;

    # Test flags: run tests for the main crate only
    cargoTestFlags = pkgs.lib.optionals (mainCrate != null) [ "-p" mainCrate ];

    # Metadata for the package
    meta = with pkgs.lib; {
      description = description;
      homepage = "https://github.com/yourorg/ultracloud";
      license = licenses.asl20; # Apache 2.0
      maintainers = [ ];
      platforms = platforms.linux;
    };

    # Build only the named crate if mainCrate is specified.
    # This avoids building test binaries and examples.
  } // pkgs.lib.optionalAttrs (mainCrate != null) {
    cargoBuildFlags = [ "-p" mainCrate ];
  });
|
|
|
|
# Helper function to build multiple binaries from the same workspace in
# one cargo invocation. This is mainly used by the VM cluster builds so
# a single host build can satisfy several services from the same
# workspace.
# Parameters:
#   name: derivation name
#   workspaceSubdir: subdirectory containing Cargo.toml
#   crates: list of crate names, each built via `cargo build -p <crate>`
#   description: package description for meta
#   doCheck: whether to run tests during build (default: false)
buildRustWorkspaceBundle = { name, workspaceSubdir, crates, description ? "", doCheck ? false }:
  pkgs.rustPlatform.buildRustPackage {
    pname = name;
    version = "0.1.0";
    src = mkWorkspaceSrc workspaceSubdir;

    cargoLock = {
      lockFile = ./${workspaceSubdir}/Cargo.lock;
    };

    buildAndTestSubdir = workspaceSubdir;

    # Nix expects Cargo.lock at the source root; copy it up from the
    # workspace subdirectory after unpack.
    postUnpack = ''
      cp $sourceRoot/${workspaceSubdir}/Cargo.lock $sourceRoot/Cargo.lock
    '';

    nativeBuildInputs = commonNativeBuildInputs;
    buildInputs = commonBuildInputs;

    inherit (commonEnvVars) LIBCLANG_PATH PROTOC ROCKSDB_LIB_DIR;
    inherit doCheck;

    # One "-p <crate>" pair per requested crate.
    cargoBuildFlags = pkgs.lib.concatMap (crate: [ "-p" crate ]) crates;

    meta = with pkgs.lib; {
      description = description;
      homepage = "https://github.com/yourorg/ultracloud";
      license = licenses.asl20;
      maintainers = [ ];
      platforms = platforms.linux;
    };
  };
|
|
|
|
in
|
|
{
|
|
# ======================================================================
|
|
# DEVELOPMENT SHELL: Drop-in replacement for shell.nix
|
|
# ======================================================================
|
|
# Development shell: drop-in replacement for shell.nix.
devShells.default = pkgs.mkShell {
  name = "cloud-dev";

  buildInputs = with pkgs; [
    # Rust toolchain (replaces rustup/cargo/rustc from shell.nix)
    rustToolchain

    # Protocol Buffers
    protobuf

    # LLVM/Clang (for bindgen/clang-sys)
    llvmPackages.libclang
    llvmPackages.clang

    # Build essentials
    pkg-config
    openssl

    # Development tools
    git
    curl
    jq
    grpcurl
    openssh
    sshpass
    clusterPython
    qemu
    vde2
    bind

    # For RocksDB (chainfire dependency)
    rocksdb
  ];

  # Environment variables for clang-sys and other build tools. Inherited
  # from commonEnvVars (same as the package builders) instead of being
  # redefined inline, so the shell and `nix build` can never drift apart.
  inherit (commonEnvVars) LIBCLANG_PATH PROTOC ROCKSDB_LIB_DIR;

  shellHook = ''
    echo "Cloud Platform Development Environment"
    echo "======================================="
    echo "Rust: $(rustc --version)"
    echo "Protoc: $(protoc --version)"
    echo "Clang: $(clang --version | head -1)"
    echo ""
    echo "Environment variables set:"
    echo "  LIBCLANG_PATH=$LIBCLANG_PATH"
    echo "  PROTOC=$PROTOC"
    echo "  ROCKSDB_LIB_DIR=$ROCKSDB_LIB_DIR"
    echo ""
    echo "Available workspaces:"
    echo "  - chainfire (distributed cluster coordination store)"
    echo "  - flaredb (distributed SQL/KV database for metadata and tenant data)"
    echo "  - iam (identity & access management)"
    echo "  - plasmavmc (VM control plane)"
    echo "  - prismnet (SDN controller)"
    echo "  - flashdns (DNS server)"
    echo "  - fiberlb (load balancer)"
    echo "  - lightningstor (block storage)"
    echo "  - nightlight (metrics store)"
    echo "  - creditservice (quota & billing)"
    echo "  - k8shost (kubernetes hosting)"
  '';
};
|
|
|
|
# ======================================================================
|
|
# PACKAGES: Buildable artifacts from each workspace
|
|
# ======================================================================
|
|
packages = {
|
|
# --------------------------------------------------------------------
|
|
# Chainfire: Distributed Cluster Coordination Store
|
|
# --------------------------------------------------------------------
|
|
chainfire-server = buildRustWorkspace {
|
|
name = "chainfire-server";
|
|
workspaceSubdir = "chainfire";
|
|
mainCrate = "chainfire-server";
|
|
description = "Distributed cluster coordination store with consensus, watches, and membership";
|
|
};
|
|
|
|
# --------------------------------------------------------------------
|
|
# FlareDB: Distributed SQL/KV Database
|
|
# --------------------------------------------------------------------
|
|
flaredb-server = buildRustWorkspace {
|
|
name = "flaredb-server";
|
|
workspaceSubdir = "flaredb";
|
|
mainCrate = "flaredb-server";
|
|
description = "Distributed Postgres-like SQL/KV database for service metadata, tenant data, and DBaaS";
|
|
};
|
|
|
|
# --------------------------------------------------------------------
|
|
# IAM: Identity and Access Management Service
|
|
# --------------------------------------------------------------------
|
|
iam-server = buildRustWorkspace {
|
|
name = "iam-server";
|
|
workspaceSubdir = "iam";
|
|
mainCrate = "iam-server";
|
|
description = "Identity and access management service with RBAC and multi-tenant support";
|
|
};
|
|
|
|
# --------------------------------------------------------------------
|
|
# CoronaFS: Shared Block Volume Service
|
|
# --------------------------------------------------------------------
|
|
coronafs-server = buildRustWorkspace {
|
|
name = "coronafs-server";
|
|
workspaceSubdir = "coronafs";
|
|
mainCrate = "coronafs-server";
|
|
description = "Shared block volume service exporting raw VM volumes over NBD";
|
|
};
|
|
|
|
# --------------------------------------------------------------------
|
|
# PlasmaVMC: Virtual Machine Control Plane
|
|
# --------------------------------------------------------------------
|
|
plasmavmc-server = buildRustWorkspace {
|
|
name = "plasmavmc-server";
|
|
workspaceSubdir = "plasmavmc";
|
|
mainCrate = "plasmavmc-server";
|
|
description = "Virtual machine control plane for managing compute instances";
|
|
};
|
|
|
|
# --------------------------------------------------------------------
|
|
# PrismNet: Software-Defined Networking Controller
|
|
# --------------------------------------------------------------------
|
|
prismnet-server = buildRustWorkspace {
|
|
name = "prismnet-server";
|
|
workspaceSubdir = "prismnet";
|
|
mainCrate = "prismnet-server";
|
|
description = "Software-defined networking controller with OVN integration";
|
|
};
|
|
|
|
# --------------------------------------------------------------------
|
|
# FlashDNS: High-Performance DNS Server
|
|
# --------------------------------------------------------------------
|
|
flashdns-server = buildRustWorkspace {
|
|
name = "flashdns-server";
|
|
workspaceSubdir = "flashdns";
|
|
mainCrate = "flashdns-server";
|
|
description = "High-performance DNS server with pattern-based reverse DNS";
|
|
};
|
|
|
|
# --------------------------------------------------------------------
|
|
# FiberLB: Layer 4/7 Load Balancer
|
|
# --------------------------------------------------------------------
|
|
fiberlb-server = buildRustWorkspace {
|
|
name = "fiberlb-server";
|
|
workspaceSubdir = "fiberlb";
|
|
mainCrate = "fiberlb-server";
|
|
description = "Layer 4/7 load balancer for distributing traffic across services";
|
|
};
|
|
|
|
# --------------------------------------------------------------------
|
|
# LightningStor: Block Storage Service
|
|
# --------------------------------------------------------------------
|
|
lightningstor-server = buildRustWorkspace {
|
|
name = "lightningstor-server";
|
|
workspaceSubdir = "lightningstor";
|
|
mainCrate = "lightningstor-server";
|
|
description = "Distributed block storage service for persistent volumes";
|
|
};
|
|
|
|
lightningstor-node = buildRustWorkspace {
|
|
name = "lightningstor-node";
|
|
workspaceSubdir = "lightningstor";
|
|
mainCrate = "lightningstor-node";
|
|
description = "LightningStor distributed storage node daemon";
|
|
};
|
|
|
|
lightningstor-workspace = buildRustWorkspaceBundle {
|
|
name = "lightningstor-workspace";
|
|
workspaceSubdir = "lightningstor";
|
|
crates = [
|
|
"lightningstor-server"
|
|
"lightningstor-node"
|
|
];
|
|
description = "Combined LightningStor server and node workspace build";
|
|
};
|
|
|
|
# --------------------------------------------------------------------
|
|
# NightLight: Prometheus-compatible Metrics Store
|
|
# --------------------------------------------------------------------
|
|
nightlight-server = buildRustWorkspace {
|
|
name = "nightlight-server";
|
|
workspaceSubdir = "nightlight";
|
|
mainCrate = "nightlight-server";
|
|
description = "Prometheus-compatible metrics storage (NightLight)";
|
|
};
|
|
|
|
# --------------------------------------------------------------------
|
|
# CreditService: Quota and Billing Controller
|
|
# --------------------------------------------------------------------
|
|
creditservice-server = buildRustWorkspace {
|
|
name = "creditservice-server";
|
|
workspaceSubdir = "creditservice";
|
|
mainCrate = "creditservice-server";
|
|
description = "Credit/quota management service with billing integration";
|
|
};
|
|
|
|
# --------------------------------------------------------------------
|
|
# APIGateway: API Gateway Service
|
|
# --------------------------------------------------------------------
|
|
apigateway-server = buildRustWorkspace {
|
|
name = "apigateway-server";
|
|
workspaceSubdir = "apigateway";
|
|
mainCrate = "apigateway-server";
|
|
description = "API Gateway for UltraCloud services";
|
|
};
|
|
|
|
# --------------------------------------------------------------------
|
|
# k8shost: Kubernetes Hosting Component
|
|
# --------------------------------------------------------------------
|
|
k8shost-server = buildRustWorkspace {
|
|
name = "k8shost-server";
|
|
workspaceSubdir = "k8shost";
|
|
mainCrate = "k8shost-server";
|
|
description = "Lightweight Kubernetes hosting with multi-tenant isolation";
|
|
};
|
|
|
|
# --------------------------------------------------------------------
|
|
# Deployer: Bare-metal bootstrap orchestration service
|
|
# --------------------------------------------------------------------
|
|
deployer-server = buildRustWorkspace {
|
|
name = "deployer-server";
|
|
workspaceSubdir = "deployer";
|
|
mainCrate = "deployer-server";
|
|
description = "Node bootstrap and phone-home orchestration service";
|
|
};
|
|
|
|
deployer-ctl = buildRustWorkspace {
|
|
name = "deployer-ctl";
|
|
workspaceSubdir = "deployer";
|
|
mainCrate = "deployer-ctl";
|
|
description = "Declarative control utility for UltraCloud deployer state";
|
|
};
|
|
|
|
node-agent = buildRustWorkspace {
|
|
name = "node-agent";
|
|
workspaceSubdir = "deployer";
|
|
mainCrate = "node-agent";
|
|
description = "Node-local runtime agent for UltraCloud scheduled services";
|
|
};
|
|
|
|
nix-agent = buildRustWorkspace {
|
|
name = "nix-agent";
|
|
workspaceSubdir = "deployer";
|
|
mainCrate = "nix-agent";
|
|
description = "Node-local NixOS reconciliation agent for UltraCloud hosts";
|
|
};
|
|
|
|
ultracloud-reconciler = buildRustWorkspace {
|
|
name = "ultracloud-reconciler";
|
|
workspaceSubdir = "deployer";
|
|
mainCrate = "ultracloud-reconciler";
|
|
description = "Declarative reconciler for host rollouts and published resources";
|
|
};
|
|
|
|
# Tarball of the offline flake bundle. The tar flags (--sort=name,
# fixed --mtime, numeric uid/gid 0) plus `gzip -n` make the archive
# bit-for-bit reproducible across builds.
ultracloudFlakeBundle = pkgs.runCommand "ultracloud-flake-bundle.tar.gz"
  {
    nativeBuildInputs = [
      pkgs.coreutils
      pkgs.gnutar
      pkgs.gzip
    ];
  } ''
    bundle_root="$(mktemp -d)"
    cp -a ${bundledFlakeRootDrv}/. "$bundle_root"/
    chmod -R u+w "$bundle_root"

    tar \
      --sort=name \
      --mtime='@1' \
      --owner=0 \
      --group=0 \
      --numeric-owner \
      -C "$bundle_root" \
      -cf - . \
      | gzip -n > "$out"
  '';
|
|
|
|
# --------------------------------------------------------------------
|
|
# Fleet Scheduler: Non-Kubernetes service scheduler for bare-metal nodes
|
|
# --------------------------------------------------------------------
|
|
fleet-scheduler = buildRustWorkspace {
|
|
name = "fleet-scheduler";
|
|
workspaceSubdir = "deployer";
|
|
mainCrate = "fleet-scheduler";
|
|
description = "Label-aware service scheduler for UltraCloud bare-metal fleets";
|
|
};
|
|
|
|
deployer-workspace = buildRustWorkspaceBundle {
|
|
name = "deployer-workspace";
|
|
workspaceSubdir = "deployer";
|
|
crates = [
|
|
"deployer-server"
|
|
"deployer-ctl"
|
|
"node-agent"
|
|
"nix-agent"
|
|
"ultracloud-reconciler"
|
|
"fleet-scheduler"
|
|
];
|
|
description = "Combined deployer workspace build for cluster images and checks";
|
|
};
|
|
|
|
# Deployer cluster state artifact exported by the node01 NixOS
# configuration (defined elsewhere in this flake's nixosConfigurations).
vmClusterDeployerState =
  self.nixosConfigurations.node01.config.system.build.ultracloudDeployerClusterState;

# Aliases so the VM cluster tooling can address the offline bundle and
# the bundled smoke-test toplevel as packages.
vmClusterFlakeBundle = self.packages.${system}.ultracloudFlakeBundle;
vmSmokeBundledTargetToplevel = bundledVmSmokeTargetToplevel;
|
|
|
|
# --------------------------------------------------------------------
# Default package: Build all servers
# --------------------------------------------------------------------
# symlinkJoin aggregates every service into one output; lightningstor
# and deployer contribute via their combined workspace builds rather
# than individual crate packages.
default = pkgs.symlinkJoin {
  name = "ultracloud-all";
  paths = [
    self.packages.${system}.chainfire-server
    self.packages.${system}.flaredb-server
    self.packages.${system}.iam-server
    self.packages.${system}.plasmavmc-server
    self.packages.${system}.prismnet-server
    self.packages.${system}.flashdns-server
    self.packages.${system}.fiberlb-server
    self.packages.${system}.lightningstor-workspace
    self.packages.${system}.nightlight-server
    self.packages.${system}.creditservice-server
    self.packages.${system}.apigateway-server
    self.packages.${system}.k8shost-server
    self.packages.${system}.deployer-workspace
    self.packages.${system}.vmClusterDeployerState
  ];
};
|
|
|
|
# QEMU runner script for the single-node quickstart NixOS configuration.
single-node-quickstart-vm =
  self.nixosConfigurations.single-node-quickstart.config.system.build.vm;

# Alias kept for the trial workflow naming.
single-node-trial-vm = self.packages.${system}.single-node-quickstart-vm;

# JSON manifest of the single-node trial's service surface
# (from ./nix/single-node/surface.nix via singleNodeSurface).
single-node-trial-manifest =
  pkgs.writeText "single-node-trial-manifest.json"
    (builtins.toJSON singleNodeSurface);
|
|
|
|
single-node-quickstart = pkgs.writeShellApplication {
|
|
name = "single-node-quickstart";
|
|
runtimeInputs = with pkgs; [
|
|
coreutils
|
|
findutils
|
|
netcat
|
|
nix
|
|
openssh
|
|
procps
|
|
sshpass
|
|
];
|
|
text = ''
|
|
set -euo pipefail
|
|
|
|
REPO_FLAKE="${self}"
|
|
WORK_ROOT="''${ULTRACLOUD_QUICKSTART_WORK_ROOT:-$PWD/work}"
|
|
STATE_DIR="''${ULTRACLOUD_QUICKSTART_STATE_DIR:-$WORK_ROOT/single-node-quickstart}"
|
|
RUN_DIR="$STATE_DIR/run"
|
|
DISK_IMAGE="$STATE_DIR/quickstart.qcow2"
|
|
PID_FILE="$STATE_DIR/qemu.pid"
|
|
SERIAL_LOG="$STATE_DIR/serial.log"
|
|
METADATA_FILE="$STATE_DIR/run.env"
|
|
BUILD_LOG="$STATE_DIR/build-vm.log"
|
|
BUILD_PATH_FILE="$STATE_DIR/vm-path.txt"
|
|
SSH_PORT="''${ULTRACLOUD_QUICKSTART_SSH_PORT:-22220}"
|
|
KEEP_VM="''${ULTRACLOUD_QUICKSTART_KEEP_VM:-0}"
|
|
REUSE_DISK="''${ULTRACLOUD_QUICKSTART_REUSE_DISK:-0}"
|
|
HOST_CPU_COUNT=""
|
|
LOCAL_NIX_MAX_JOBS=""
|
|
LOCAL_NIX_BUILD_CORES=""
|
|
VM_PATH=""
|
|
RUN_VM=""
|
|
|
|
log() {
|
|
printf '[single-node-quickstart] %s\n' "$*"
|
|
}
|
|
|
|
host_cpu_count() {
|
|
local count
|
|
count="$(getconf _NPROCESSORS_ONLN 2>/dev/null || nproc 2>/dev/null || echo 1)"
|
|
if [[ ! "$count" =~ ^[0-9]+$ ]] || (( count < 1 )); then
|
|
count=1
|
|
fi
|
|
printf '%s\n' "$count"
|
|
}
|
|
|
|
default_local_nix_max_jobs() {
|
|
local cpu_count="$1"
|
|
if (( cpu_count <= 2 )); then
|
|
printf '1\n'
|
|
return 0
|
|
fi
|
|
|
|
printf '%s\n' "$(( (cpu_count + 1) / 2 ))"
|
|
}
|
|
|
|
default_local_nix_build_cores() {
|
|
local cpu_count="$1"
|
|
local max_jobs="$2"
|
|
local build_cores=1
|
|
|
|
if (( max_jobs > 0 )); then
|
|
build_cores="$(( cpu_count / max_jobs ))"
|
|
fi
|
|
if (( build_cores < 1 )); then
|
|
build_cores=1
|
|
fi
|
|
|
|
printf '%s\n' "$build_cores"
|
|
}
|
|
|
|
append_nix_config_line() {
|
|
local line="$1"
|
|
if [[ -n "''${NIX_CONFIG:-}" ]]; then
|
|
NIX_CONFIG+=$'\n'
|
|
fi
|
|
NIX_CONFIG+="''${line}"
|
|
}
|
|
|
|
configure_local_nix_execution() {
  # Force fully local builds with the chosen parallelism and make the
  # settings visible to child nix invocations via NIX_CONFIG.
  local setting
  for setting in \
    "builders =" \
    "max-jobs = $LOCAL_NIX_MAX_JOBS" \
    "cores = $LOCAL_NIX_BUILD_CORES" \
    "experimental-features = nix-command flakes" \
    "warn-dirty = false"
  do
    append_nix_config_line "$setting"
  done
  export NIX_CONFIG
}
|
|
|
|
prepare_local_nix_execution() {
  # Derive host-local nix parallelism. Precedence: quickstart-specific
  # env vars, then generic ULTRACLOUD_LOCAL_* vars, then defaults
  # computed from the host CPU count.
  HOST_CPU_COUNT="$(host_cpu_count)"
  LOCAL_NIX_MAX_JOBS="''${ULTRACLOUD_QUICKSTART_NIX_MAX_JOBS:-''${ULTRACLOUD_LOCAL_NIX_MAX_JOBS:-$(default_local_nix_max_jobs "$HOST_CPU_COUNT")}}"
  LOCAL_NIX_BUILD_CORES="''${ULTRACLOUD_QUICKSTART_NIX_BUILD_CORES:-''${ULTRACLOUD_LOCAL_NIX_BUILD_CORES:-$(default_local_nix_build_cores "$HOST_CPU_COUNT" "$LOCAL_NIX_MAX_JOBS")}}"
  # Export the resolved budget so nested tooling sees the same values.
  export ULTRACLOUD_LOCAL_NIX_MAX_JOBS="''${ULTRACLOUD_LOCAL_NIX_MAX_JOBS:-$LOCAL_NIX_MAX_JOBS}"
  export ULTRACLOUD_LOCAL_NIX_BUILD_CORES="''${ULTRACLOUD_LOCAL_NIX_BUILD_CORES:-$LOCAL_NIX_BUILD_CORES}"
  configure_local_nix_execution
}
|
|
|
|
build_vm_locally() {
  # Build the quickstart VM runner from this flake with purely local
  # builders, capturing stdout (out paths) and stderr (build log) to
  # files so failures can be reported with context.
  log "building single-node quickstart VM locally (max-jobs=$LOCAL_NIX_MAX_JOBS build-cores=$LOCAL_NIX_BUILD_CORES)"
  if ! TMPDIR="$RUN_DIR" NIX_BUILD_CORES="$LOCAL_NIX_BUILD_CORES" nix \
    --option builders "" \
    --option warn-dirty false \
    --max-jobs "$LOCAL_NIX_MAX_JOBS" \
    build "$REPO_FLAKE#single-node-quickstart-vm" \
    --no-link \
    --print-out-paths \
    >"$BUILD_PATH_FILE" \
    2>"$BUILD_LOG"; then
    log "local VM build failed; build log tail:"
    tail -n 120 "$BUILD_LOG" >&2 || true
    return 1
  fi

  # The last printed path is the build output we want.
  VM_PATH="$(tail -n 1 "$BUILD_PATH_FILE")"
  if [ -z "$VM_PATH" ]; then
    log "failed to resolve single-node quickstart VM output path"
    return 1
  fi

  # The VM package ships a run-<name>-vm launcher under bin/.
  RUN_VM="$(find "$VM_PATH/bin" -maxdepth 1 -name 'run-*-vm' | head -n1)"
  if [ -z "$RUN_VM" ]; then
    log "failed to locate run-*-vm under $VM_PATH/bin"
    return 1
  fi

  # Record build provenance alongside the run metadata for debugging.
  {
    printf 'vm_path=%s\n' "$VM_PATH"
    printf 'build_log=%s\n' "$BUILD_LOG"
    printf 'build_path_file=%s\n' "$BUILD_PATH_FILE"
    printf 'nix_build_command=%s\n' "nix --option builders \"\" --max-jobs $LOCAL_NIX_MAX_JOBS build $REPO_FLAKE#single-node-quickstart-vm --no-link --print-out-paths"
  } >>"$METADATA_FILE"
}
|
|
|
|
capture_environment() {
  # Snapshot the run configuration, caller identity, and host
  # virtualization capabilities as key=value lines, overwriting
  # $METADATA_FILE (later phases append to it).
  {
    printf 'started_at=%s\n' "$(date -Is)"
    printf 'repo_flake=%s\n' "$REPO_FLAKE"
    printf 'pwd=%s\n' "$PWD"
    printf 'user=%s\n' "$(id -un)"
    printf 'uid=%s\n' "$(id -u)"
    printf 'gid=%s\n' "$(id -g)"
    printf 'work_root=%s\n' "$WORK_ROOT"
    printf 'state_dir=%s\n' "$STATE_DIR"
    printf 'run_dir=%s\n' "$RUN_DIR"
    printf 'disk_image=%s\n' "$DISK_IMAGE"
    printf 'serial_log=%s\n' "$SERIAL_LOG"
    printf 'ssh_port=%s\n' "$SSH_PORT"
    printf 'reuse_disk=%s\n' "$REUSE_DISK"
    printf 'keep_vm=%s\n' "$KEEP_VM"
    printf 'host_cpu_count=%s\n' "$HOST_CPU_COUNT"
    printf 'local_nix_max_jobs=%s\n' "$LOCAL_NIX_MAX_JOBS"
    printf 'local_nix_build_cores=%s\n' "$LOCAL_NIX_BUILD_CORES"
    # First `builders` line of the effective nix configuration.
    printf 'nix_builders=%s\n' "$(nix config show builders 2>/dev/null | awk -F' = ' 'NR==1 { print $2 }')"
    printf 'kvm_present=%s\n' "$([[ -e /dev/kvm ]] && echo yes || echo no)"
    printf 'kvm_access=%s\n' "$([[ -r /dev/kvm && -w /dev/kvm ]] && echo rw || echo no)"
  } >"$METADATA_FILE"
}
|
|
|
|
dump_serial() {
  # Surface the last chunk of guest console output on failure paths.
  [ -f "$SERIAL_LOG" ] || return 0
  log "serial log tail:"
  tail -n 120 "$SERIAL_LOG" >&2 || true
}
|
|
|
|
cleanup() {
  # Stop the background QEMU process recorded in $PID_FILE (if any) and
  # remove the pid file. Gives the VM up to 30 seconds to exit after
  # SIGTERM, then escalates to SIGKILL: previously the pid file was
  # removed even when the process survived the wait, leaking a running
  # VM with no recorded pid.
  local pid
  if [ -f "$PID_FILE" ]; then
    pid="$(cat "$PID_FILE")"
    if kill -0 "$pid" >/dev/null 2>&1; then
      kill "$pid" >/dev/null 2>&1 || true
      for _ in $(seq 1 30); do
        if ! kill -0 "$pid" >/dev/null 2>&1; then
          break
        fi
        sleep 1
      done
      # Escalate: the VM must not outlive its pid file.
      if kill -0 "$pid" >/dev/null 2>&1; then
        kill -9 "$pid" >/dev/null 2>&1 || true
      fi
    fi
    rm -f "$PID_FILE"
  fi
}
|
|
|
|
on_exit() {
  # EXIT trap: record the outcome, dump the guest console on failure,
  # tear the VM down unless KEEP_VM=1, and preserve the original exit
  # status. Must capture $? first, before any other command runs.
  status="$?"
  {
    printf 'finished_at=%s\n' "$(date -Is)"
    printf 'exit_status=%s\n' "$status"
  } >>"$METADATA_FILE"
  if [ "$status" -ne 0 ]; then
    dump_serial
  fi
  if [ "$KEEP_VM" != "1" ]; then
    cleanup
  fi
  exit "$status"
}
|
|
|
|
wait_for_ssh() {
  # Poll the forwarded SSH port until the guest accepts a root login or
  # the 240-second deadline passes. Routes through ssh_cmd (as
  # wait_for_unit_active already does) so the ssh option list lives in
  # one place instead of being duplicated here.
  local deadline=$((SECONDS + 240))
  while true; do
    if ssh_cmd true >/dev/null 2>&1; then
      return 0
    fi
    if [ "$SECONDS" -ge "$deadline" ]; then
      log "timed out waiting for SSH on port $SSH_PORT"
      return 1
    fi
    sleep 1
  done
}
|
|
|
|
wait_for_unit_active() {
  # Block until systemd reports the given unit active inside the guest,
  # or fail (dumping the unit status) after a 240-second deadline.
  local unit="$1"
  local deadline=$((SECONDS + 240))
  until ssh_cmd systemctl is-active "$unit" >/dev/null 2>&1; do
    if [ "$SECONDS" -ge "$deadline" ]; then
      log "timed out waiting for $unit"
      ssh_cmd systemctl status "$unit" --no-pager || true
      return 1
    fi
    sleep 1
  done
  return 0
}
|
|
|
|
ssh_cmd() {
  # Run a command in the guest as root over the forwarded SSH port.
  # Host-key checking is disabled: the guest is a throwaway VM.
  local -a ssh_opts=(
    -F /dev/null
    -o StrictHostKeyChecking=no
    -o UserKnownHostsFile=/dev/null
    -o LogLevel=ERROR
    -o ConnectTimeout=5
    -o ConnectionAttempts=1
    -p "$SSH_PORT"
  )
  sshpass -p ultracloud ssh "''${ssh_opts[@]}" root@127.0.0.1 -- "$@"
}
|
|
|
|
ssh_shell() {
  # Run a shell snippet through a login bash in the guest; %q-quote it
  # so it survives ssh's remote-shell argument join intact.
  local script="$1"
  local quoted
  printf -v quoted '%q' "$script"
  local -a ssh_opts=(
    -F /dev/null
    -o StrictHostKeyChecking=no
    -o UserKnownHostsFile=/dev/null
    -o LogLevel=ERROR
    -o ConnectTimeout=5
    -o ConnectionAttempts=1
    -p "$SSH_PORT"
  )
  sshpass -p ultracloud ssh "''${ssh_opts[@]}" root@127.0.0.1 "bash -lc $quoted"
}
|
|
|
|
trap on_exit EXIT

# Fresh scratch state; the qcow2 disk survives only when REUSE_DISK=1.
mkdir -p "$STATE_DIR"
rm -rf "$RUN_DIR"
mkdir -p "$RUN_DIR"
rm -f "$SERIAL_LOG"
rm -f "$BUILD_LOG" "$BUILD_PATH_FILE"
if [ "$REUSE_DISK" != "1" ]; then
  rm -f "$DISK_IMAGE"
fi

prepare_local_nix_execution
capture_environment
cleanup  # stop any VM left over from a previous run
build_vm_locally

# Launch the VM in the background with SSH forwarded from $SSH_PORT and
# the serial console captured for post-mortem.
log "launching single-node quickstart VM"
nohup env \
  USE_TMPDIR=1 \
  TMPDIR="$RUN_DIR" \
  NIX_DISK_IMAGE="$DISK_IMAGE" \
  QEMU_NET_OPTS="hostfwd=tcp:127.0.0.1:$SSH_PORT-:22" \
  "$RUN_VM" >"$SERIAL_LOG" 2>&1 &
echo "$!" > "$PID_FILE"

log "waiting for guest SSH"
wait_for_ssh

log "waiting for in-guest readiness gate"
wait_for_unit_active ultracloud-single-node-quickstart-ready.service

log "verifying required services"
ssh_cmd systemctl is-active chainfire flaredb iam prismnet plasmavmc >/dev/null

log "verifying service health endpoints and VM runtime prerequisites"
ssh_shell 'curl -fsS http://127.0.0.1:8081/health >/dev/null && curl -fsS http://127.0.0.1:8082/health >/dev/null && curl -fsS http://127.0.0.1:8083/health >/dev/null && curl -fsS http://127.0.0.1:8087/health >/dev/null && curl -fsS http://127.0.0.1:8084/health >/dev/null && test -x /run/current-system/sw/bin/qemu-system-x86_64 && test -x /run/current-system/sw/bin/qemu-img && test -c /dev/net/tun'

log "single-node quickstart smoke passed"
printf 'result=passed\n' >>"$METADATA_FILE"

# On request, detach the trap and leave the VM running for interactive use.
if [ "$KEEP_VM" = "1" ]; then
  trap - EXIT
  log "VM left running"
  log "ssh: sshpass -p ultracloud ssh -p $SSH_PORT root@127.0.0.1"
  exit 0
fi
|
|
'';
|
|
};
|
|
|
|
# Wrapper that pins this flake's source root and the proof model, then
# delegates to the repository's baremetal ISO end-to-end driver script.
baremetal-iso-e2e-runner = pkgs.writeShellApplication {
  name = "baremetal-iso-e2e";
  runtimeInputs = with pkgs; [
    bash coreutils curl findutils gawk gnugrep gnused
    iproute2 jq nix openssh procps python3 qemu
  ];
  text = ''
    set -euo pipefail

    export ULTRACLOUD_BAREMETAL_E2E_SOURCE_FLAKE_ROOT="${self}"
    export ULTRACLOUD_BAREMETAL_PROOF_MODEL="materialized-check-runner"
    exec ${pkgs.bash}/bin/bash ${./nix/test-cluster/run-baremetal-iso-e2e.sh} "$@"
  '';
};
|
|
};
|
|
|
|
# ======================================================================
|
|
# APPS: Runnable applications from packages
|
|
# ======================================================================
|
|
apps =
  let
    # Wrap a package from this flake's outputs as a runnable app.
    mkPackageApp = name: flake-utils.lib.mkApp {
      drv = self.packages.${system}.${name};
    };
  in
  {
    chainfire-server = mkPackageApp "chainfire-server";
    flaredb-server = mkPackageApp "flaredb-server";
    iam-server = mkPackageApp "iam-server";
    plasmavmc-server = mkPackageApp "plasmavmc-server";
    prismnet-server = mkPackageApp "prismnet-server";
    flashdns-server = mkPackageApp "flashdns-server";
    fiberlb-server = mkPackageApp "fiberlb-server";
    lightningstor-server = mkPackageApp "lightningstor-server";
    lightningstor-node = mkPackageApp "lightningstor-node";
    nightlight-server = mkPackageApp "nightlight-server";
    creditservice-server = mkPackageApp "creditservice-server";
    apigateway-server = mkPackageApp "apigateway-server";
    k8shost-server = mkPackageApp "k8shost-server";
    deployer-server = mkPackageApp "deployer-server";
    deployer-ctl = mkPackageApp "deployer-ctl";
    ultracloud-reconciler = mkPackageApp "ultracloud-reconciler";
    nix-agent = mkPackageApp "nix-agent";
    node-agent = mkPackageApp "node-agent";
    fleet-scheduler = mkPackageApp "fleet-scheduler";
    single-node-quickstart = mkPackageApp "single-node-quickstart";
    baremetal-iso-e2e = mkPackageApp "baremetal-iso-e2e-runner";
    # Aliases: both trial names run the same quickstart package.
    single-node-trial = mkPackageApp "single-node-quickstart";
    all-in-one-quickstart = mkPackageApp "single-node-quickstart";
  };
|
|
|
|
checks =
|
|
let
|
|
# Drop the "kvm" entry from a NixOS test derivation's
# requiredSystemFeatures so it can be scheduled on builders that do not
# advertise the kvm system feature.
stripKvmRequiredSystemFeature = drv:
  drv.overrideTestDerivation (old: {
    requiredSystemFeatures =
      builtins.filter (f: f != "kvm") (old.requiredSystemFeatures or [ ]);
  });
|
|
# Short aliases for the NixOS configurations referenced repeatedly below.
singleNodeQuickstartConfig = self.nixosConfigurations.single-node-quickstart;
node01Config = self.nixosConfigurations.node01;
node02Config = self.nixosConfigurations.node02;
node03Config = self.nixosConfigurations.node03;
netbootControlPlaneConfig = self.nixosConfigurations.netboot-control-plane;
netbootAllInOneConfig = self.nixosConfigurations.netboot-all-in-one;
ultracloudIsoConfig = self.nixosConfigurations.ultracloud-iso;
baremetalQemuControlPlaneConfig = self.nixosConfigurations.baremetal-qemu-control-plane;
baremetalQemuWorkerConfig = self.nixosConfigurations.baremetal-qemu-worker;
|
|
|
|
# Describe a nixosConfiguration output for the profile manifest:
# fixed kind, its attribute path, the configured hostname, plus any
# caller-supplied extra fields merged on top.
mkNixosOutput = attr: configuration: extra:
  {
    kind = "nixosConfiguration";
    inherit attr;
    hostName = configuration.config.networking.hostName;
  } // extra;
|
|
|
|
# Machine-readable catalogue of the supported product profiles and the
# flake outputs that realize them; serialized to JSON by the
# canonical-profile-eval-guards check.
canonicalProfileManifest = {
  profiles = [
    {
      id = "single-node-dev";
      label = "single-node dev";
      entrypoints = [
        {
          kind = "app";
          attr = "apps.${system}.single-node-trial";
          command = "nix run .#single-node-trial";
          mapsTo = "apps.${system}.single-node-quickstart";
          flakeOutputType = self.apps.${system}.single-node-trial.type;
        }
        {
          kind = "app";
          attr = "apps.${system}.single-node-quickstart";
          command = "nix run .#single-node-quickstart";
          flakeOutputType = self.apps.${system}.single-node-quickstart.type;
        }
        (mkNixosOutput "nixosConfigurations.single-node-quickstart" singleNodeQuickstartConfig {
          stateVersion = singleNodeQuickstartConfig.config.system.stateVersion;
        })
      ];
      companionOutputs = [
        {
          kind = "package";
          attr = "packages.${system}.single-node-trial-vm";
          command = "nix build .#single-node-trial-vm";
          mapsTo = "packages.${system}.single-node-quickstart-vm";
        }
        (mkNixosOutput "nixosConfigurations.netboot-all-in-one" netbootAllInOneConfig {
          role = "canonical single-node companion install image";
          stateVersion = netbootAllInOneConfig.config.system.stateVersion;
        })
      ];
    }
    {
      id = "three-node-ha-control-plane";
      label = "3-node HA control plane";
      entrypoints = [
        (mkNixosOutput "nixosConfigurations.node01" node01Config {
          stateVersion = node01Config.config.system.stateVersion;
        })
        (mkNixosOutput "nixosConfigurations.node02" node02Config {
          stateVersion = node02Config.config.system.stateVersion;
        })
        (mkNixosOutput "nixosConfigurations.node03" node03Config {
          stateVersion = node03Config.config.system.stateVersion;
        })
      ];
      companionOutputs = [
        (mkNixosOutput "nixosConfigurations.netboot-control-plane" netbootControlPlaneConfig {
          role = "canonical HA control-plane install image";
          stateVersion = netbootControlPlaneConfig.config.system.stateVersion;
        })
      ];
    }
    {
      id = "bare-metal-bootstrap";
      label = "bare-metal bootstrap";
      entrypoints = [
        {
          kind = "command";
          command = "nix run ./nix/test-cluster#cluster -- baremetal-iso";
        }
        (mkNixosOutput "nixosConfigurations.ultracloud-iso" ultracloudIsoConfig {
          imageFileName = ultracloudIsoConfig.config.image.fileName;
        })
        (mkNixosOutput "nixosConfigurations.baremetal-qemu-control-plane" baremetalQemuControlPlaneConfig {
          stateVersion = baremetalQemuControlPlaneConfig.config.system.stateVersion;
        })
        (mkNixosOutput "nixosConfigurations.baremetal-qemu-worker" baremetalQemuWorkerConfig {
          stateVersion = baremetalQemuWorkerConfig.config.system.stateVersion;
        })
        {
          kind = "check";
          attr = "checks.${system}.baremetal-iso-e2e";
          command = "nix build .#checks.${system}.baremetal-iso-e2e && ./result/bin/baremetal-iso-e2e";
          flakeOutputType = self.checks.${system}.baremetal-iso-e2e.type;
        }
      ];
      companionOutputs = [ ];
    }
  ];
  clusterAuthoring = {
    supportedSource = "ultracloud.cluster";
    schemaPath = "nix/lib/cluster-schema.nix";
    legacyCompatibility = [
      "nix-nos: legacy compatibility and low-level network primitives only"
    ];
  };
  standaloneStories = [
    {
      id = "vm-platform";
      entrypoints = [
        "nix build .#single-node-trial-vm"
        "nix run .#single-node-trial"
        "nix run .#single-node-quickstart"
      ];
      excludes = [ "deployer" "nix-agent" "fleet-scheduler" "node-agent" ];
    }
    {
      id = "rollout-stack";
      entrypoints = [
        "nix build .#checks.${system}.deployer-vm-smoke"
        "nix build .#checks.${system}.portable-control-plane-regressions"
        "nix run ./nix/test-cluster#cluster -- baremetal-iso"
      ];
    }
  ];
  helperOutputs = [ ];
  legacyAliases = [
    {
      attr = "apps.${system}.all-in-one-quickstart";
      command = "nix run .#all-in-one-quickstart";
      mapsTo = "apps.${system}.single-node-quickstart";
      flakeOutputType = self.apps.${system}.all-in-one-quickstart.type;
    }
  ];
  internalOnlyOutputs = [
    { attr = "nixosConfigurations.netboot-base"; role = "internal helper image"; }
    { attr = "nixosConfigurations.netboot-worker"; role = "archived/non-product worker netboot helper"; }
    { attr = "nixosConfigurations.pxe-server"; role = "legacy/manual PXE helper"; }
    { attr = "nixosConfigurations.vm-smoke-target"; role = "offline deployer smoke-test target"; }
  ];
};
|
|
|
|
# Build targets link-farmed by canonical-profile-build-guards; the
# supported-surface guard also greps this list by name.
canonicalProfileBuildTargets = [
  { name = "single-node-quickstart-vm"; path = self.packages.${system}.single-node-quickstart-vm; }
  { name = "single-node-trial-vm"; path = self.packages.${system}.single-node-trial-vm; }
  { name = "netboot-all-in-one-toplevel"; path = netbootAllInOneConfig.config.system.build.toplevel; }
  { name = "node01-toplevel"; path = node01Config.config.system.build.toplevel; }
  { name = "node02-toplevel"; path = node02Config.config.system.build.toplevel; }
  { name = "node03-toplevel"; path = node03Config.config.system.build.toplevel; }
  { name = "netboot-control-plane-toplevel"; path = netbootControlPlaneConfig.config.system.build.toplevel; }
  { name = "ultracloud-iso-image"; path = ultracloudIsoConfig.config.system.build.isoImage; }
  { name = "baremetal-qemu-control-plane-toplevel"; path = baremetalQemuControlPlaneConfig.config.system.build.toplevel; }
  { name = "baremetal-qemu-worker-toplevel"; path = baremetalQemuWorkerConfig.config.system.build.toplevel; }
];
|
|
in
|
|
{
|
|
workspace-source-roots-audit = pkgs.runCommand "workspace-source-roots-audit"
|
|
{
|
|
nativeBuildInputs = [ pkgs.python3 ];
|
|
} ''
|
|
${pkgs.python3}/bin/python - <<'PY' ${./.}
|
|
from __future__ import annotations
|
|
|
|
import re
|
|
import sys
|
|
import tomllib
|
|
from pathlib import Path
|
|
from typing import Any
|
|
|
|
|
|
def extract_workspace_source_roots(flake_path: Path) -> dict[str, list[str]]:
    """Parse the workspaceSourceRoots attrset out of flake.nix with regexes.

    Returns a mapping of workspace name to its configured source-root strings.
    Raises ValueError when the attrset cannot be located.
    """
    text = flake_path.read_text()
    block = re.search(r"workspaceSourceRoots\s*=\s*\{(.*?)\n\s*\};", text, re.S)
    if block is None:
        raise ValueError(f"Could not find workspaceSourceRoots in {flake_path}")
    return {
        name: re.findall(r'"([^"]+)"', body)
        for name, body in re.findall(r"\n\s*(\w+)\s*=\s*\[(.*?)\];", block.group(1), re.S)
    }
|
|
|
|
|
|
def collect_path_dependencies(value: Any) -> list[str]:
    """Recursively gather every string `path` value from nested TOML data.

    Dicts contribute their own `path` key (when it is a string) before their
    children; lists contribute their elements in order.
    """
    paths: list[str] = []
    children: list[Any] = []
    if isinstance(value, dict):
        candidate = value.get("path")
        if isinstance(candidate, str):
            paths.append(candidate)
        children = list(value.values())
    elif isinstance(value, list):
        children = value
    for child in children:
        paths.extend(collect_path_dependencies(child))
    return paths
|
|
|
|
|
|
def workspace_manifests(repo_root: Path, workspace_name: str) -> list[Path]:
|
|
workspace_manifest = repo_root / workspace_name / "Cargo.toml"
|
|
manifests = [workspace_manifest]
|
|
workspace_data = tomllib.loads(workspace_manifest.read_text())
|
|
members = workspace_data.get("workspace", {}).get("members", [])
|
|
|
|
for member in members:
|
|
for candidate in workspace_manifest.parent.glob(member):
|
|
manifest = candidate if candidate.name == "Cargo.toml" else candidate / "Cargo.toml"
|
|
if manifest.is_file():
|
|
manifests.append(manifest)
|
|
|
|
unique_manifests: list[Path] = []
|
|
seen: set[Path] = set()
|
|
for manifest in manifests:
|
|
resolved = manifest.resolve()
|
|
if resolved in seen:
|
|
continue
|
|
seen.add(resolved)
|
|
unique_manifests.append(manifest)
|
|
return unique_manifests
|
|
|
|
|
|
def required_root(dep_rel: Path) -> str:
|
|
parts = dep_rel.parts
|
|
if not parts:
|
|
return ""
|
|
if parts[0] == "crates" and len(parts) >= 2:
|
|
return "/".join(parts[:2])
|
|
return parts[0]
|
|
|
|
|
|
def is_covered(dep_rel: str, configured_roots: list[str]) -> bool:
    """True when dep_rel equals a configured root or lives underneath one."""
    for root in configured_roots:
        if dep_rel == root or dep_rel.startswith(root + "/"):
            return True
    return False
|
|
|
|
|
|
def main() -> int:
    """Audit that workspaceSourceRoots in flake.nix covers every Cargo path dep.

    Walks each configured workspace's manifests, resolves their path
    dependencies relative to the repo root, and reports any dependency not
    covered by a configured source root.

    Returns:
        0 when everything is covered; 1 (with a report on stderr) otherwise.
    """
    repo_root = Path(sys.argv[1]).resolve()
    workspace_roots = extract_workspace_source_roots(repo_root / "flake.nix")
    failures: list[str] = []

    for workspace_name, configured_roots in sorted(workspace_roots.items()):
        workspace_manifest = repo_root / workspace_name / "Cargo.toml"
        # Configured workspaces with no manifest on disk are skipped, not failed.
        if not workspace_manifest.is_file():
            continue

        for manifest in workspace_manifests(repo_root, workspace_name):
            manifest_data = tomllib.loads(manifest.read_text())
            for dep_path in collect_path_dependencies(manifest_data):
                dependency_dir = (manifest.parent / dep_path).resolve()
                try:
                    dep_rel = dependency_dir.relative_to(repo_root)
                except ValueError:
                    # Path dependencies outside the repo are not this audit's concern.
                    continue

                dep_rel_str = dep_rel.as_posix()
                if is_covered(dep_rel_str, configured_roots):
                    continue

                failures.append(
                    f"{workspace_name}: missing source root '{required_root(dep_rel)}' "
                    f"for dependency '{dep_rel_str}' referenced by "
                    f"{manifest.relative_to(repo_root).as_posix()}"
                )

    if failures:
        print("workspaceSourceRoots is missing path dependencies:", file=sys.stderr)
        for failure in failures:
            print(f"  - {failure}", file=sys.stderr)
        return 1

    print("workspaceSourceRoots covers all workspace path dependencies.")
    return 0
|
|
|
|
|
|
raise SystemExit(main())
|
|
PY
|
|
touch "$out"
|
|
'';
|
|
|
|
# Greps the repository for wording/stub regressions and for required
# contract markers so the supported product surface stays honest.
supported-surface-guard = pkgs.runCommand "supported-surface-guard"
  {
    nativeBuildInputs = with pkgs; [
      bash
      gawk
      gnugrep
      ripgrep
    ];
  } ''
    repo_root=${./.}
    cd "$repo_root"

    # Public docs/modules that must not carry unfinished-work wording.
    wording_targets=(
      README.md
      docs
      apigateway
      chainfire
      k8shost
      plasmavmc
      creditservice
      fiberlb
      nightlight
      nix/test-cluster
      nix/modules/creditservice.nix
      nix/modules/k8shost.nix
      nix/modules/plasmavmc.nix
      nix/modules/ultracloud-cluster.nix
      nix-nos
    )

    wording_patterns=(
      'minimal reference'
      'not yet implemented'
      'placeholder'
      'TODO\('
    )

    # Server sources whose public APIs may not ship stubbed endpoints.
    public_api_code_targets=(
      chainfire/crates/chainfire-api/src
      chainfire/crates/chainfire-server/src
      flaredb/crates/flaredb-server/src
      lightningstor/crates/lightningstor-server/src
      k8shost/crates/k8shost-server/src
      plasmavmc/crates/plasmavmc-server/src
      fiberlb/crates/fiberlb-server/src
      prismnet/crates/prismnet-server/src
    )

    public_api_code_patterns=(
      'Status::unimplemented'
      'unimplemented!\('
      'todo!\('
      'not yet implemented'
      'placeholder'
    )

    # Supported components must not carry completeness markers either.
    product_completeness_code_targets=(
      k8shost/crates/k8shost-server/src
      plasmavmc/crates/plasmavmc-server/src
      fiberlb/crates/fiberlb-server/src
      prismnet/crates/prismnet-server/src
    )

    product_completeness_code_patterns=(
      'TODO:'
      'FIXME'
      'best-effort'
    )

    # Docs that, together, must still contain every contract marker below.
    contract_targets=(
      README.md
      docs
      apigateway
      nightlight
      creditservice
      nix/test-cluster/README.md
      nix/modules/ultracloud-cluster.nix
      nix/modules/deployer.nix
      nix/modules/fleet-scheduler.nix
      nix/modules/nix-agent.nix
      nix/modules/node-agent.nix
      nix/modules/plasmavmc.nix
      nix/modules/k8shost.nix
      nix-nos
    )

    required_contract_patterns=(
      'ultracloud\.cluster.*cluster-schema\.nix.*only supported cluster authoring source'
      'nix-nos.*legacy compatibility.*low-level network primitives'
      'single-node-trial-vm.*single-node-quickstart.*standalone VM-platform story'
      'durability-proof.*chainfire.*flaredb.*deployer.*backup/restore'
      'ChainFire dynamic membership, replace-node, and scale-out are unsupported on the supported surface'
      'FlareDB online migration and schema evolution must start from the durability-proof backup/restore baseline'
      'IAM bootstrap hardening requires an explicit admin token, an explicit signing key, and a 32-byte IAM_CRED_MASTER_KEY'
      'FlareDB destructive DDL and fully automated online migration remain outside the supported product contract'
      'credential overlap-and-revoke rotation, and mTLS overlap-and-cutover rotation are part of the supported operator contract; multi-node IAM failover remains outside the supported product contract'
      'APIGateway is supported as stateless replicated instances behind an external L4 or VIP layer; live in-process reload is not part of the product contract'
      'NightLight is supported as a single-node WAL/snapshot service; replicated HA metrics storage is not part of the product contract'
      'CreditService export and backend migration are supported as offline export/import or backend-native snapshot workflows, not live mixed-writer migration'
      'provider-vm-reality-proof.*authoritative DNS answers.*backend drain.*re-convergence'
      'PrismNet real OVS/OVN dataplane validation remains outside the supported local KVM surface'
      'FiberLB native BGP.*BFD peer interop.*outside the supported local KVM surface'
      'OCI/Docker artifact is intentionally not the public trial surface'
      'work-root-budget\.sh.*disk budget, GC, and cleanup guidance'
      'work-root-budget\.sh status.*enforce.*prune-proof-logs'
      'FiberLB HTTPS health checks currently do not verify backend TLS certificates'
      'k8shost.*API/control-plane product surface.*archived non-product'
      'deployer.*scope-fixed to one active writer plus optional cold-standby restore.*automatic ChainFire-backed multi-instance failover is outside the supported product contract'
      'fleet-scheduler.*scope-fixed to the two native-runtime worker lab with one planned drain cycle, one fail-stop worker-loss cycle, and 30-second held degraded states in rollout-soak'
    )

    # Desurfaced chainfire-core library API markers that must not reappear.
    chainfire_core_surface_targets=(
      chainfire/crates/chainfire-core/Cargo.toml
      chainfire/crates/chainfire-core/src/lib.rs
    )

    chainfire_core_surface_patterns=(
      'Embeddable distributed cluster library'
      'pub mod builder;'
      'pub mod cluster;'
      'pub mod kvs;'
      'pub use builder::ClusterBuilder;'
      'pub use cluster::\{Cluster, ClusterHandle, ClusterState\};'
      'pub use kvs::\{CasResult, Kv, KvEntry, KvHandle, KvNamespace, KvOptions, ReadConsistency\};'
    )

    # Print the `key = [...]` TOML array block of $1, stopping at the first `]`.
    extract_toml_array_block() {
      local file=$1
      local key=$2
      ${pkgs.gawk}/bin/awk -v key="$key" '
        $0 ~ "^" key "[[:space:]]*=" {
          in_array = 1
        }
        in_array {
          print
        }
        in_array && /\]/ {
          exit
        }
      ' "$file"
    }

    # Print the `key = [ ... ];` Nix list block of $1, stopping at the first `];`.
    extract_nix_array_block() {
      local file=$1
      local key=$2
      ${pkgs.gawk}/bin/awk -v key="$key" '
        $0 ~ key "[[:space:]]*=[[:space:]]*\\[" {
          in_array = 1
        }
        in_array {
          print
        }
        in_array && /\];/ {
          exit
        }
      ' "$file"
    }

    status=0

    # Any hit on an unfinished-work wording pattern fails the guard.
    for pattern in "''${wording_patterns[@]}"; do
      if hits="$(${pkgs.ripgrep}/bin/rg -n "$pattern" "''${wording_targets[@]}" || true)" && [ -n "$hits" ]; then
        printf 'supported-surface-guard: found unfinished public marker %q\n' "$pattern" >&2
        printf '%s\n' "$hits" >&2
        status=1
      fi
    done

    for pattern in "''${public_api_code_patterns[@]}"; do
      if hits="$(${pkgs.ripgrep}/bin/rg -n "$pattern" "''${public_api_code_targets[@]}" || true)" && [ -n "$hits" ]; then
        printf 'supported-surface-guard: found unfinished public API stub %q\n' "$pattern" >&2
        printf '%s\n' "$hits" >&2
        status=1
      fi
    done

    for pattern in "''${product_completeness_code_patterns[@]}"; do
      if hits="$(${pkgs.ripgrep}/bin/rg -n "$pattern" "''${product_completeness_code_targets[@]}" || true)" && [ -n "$hits" ]; then
        printf 'supported-surface-guard: found supported component completeness marker %q\n' "$pattern" >&2
        printf '%s\n' "$hits" >&2
        status=1
      fi
    done

    # Contract markers are required: a MISSING pattern fails the guard.
    for pattern in "''${required_contract_patterns[@]}"; do
      if ! hits="$(${pkgs.ripgrep}/bin/rg -n "$pattern" "''${contract_targets[@]}" || true)" || [ -z "$hits" ]; then
        printf 'supported-surface-guard: missing supported-surface contract marker %q\n' "$pattern" >&2
        status=1
      fi
    done

    for pattern in "''${chainfire_core_surface_patterns[@]}"; do
      if hits="$(${pkgs.ripgrep}/bin/rg -n "$pattern" "''${chainfire_core_surface_targets[@]}" || true)" && [ -n "$hits" ]; then
        printf 'supported-surface-guard: found desurfaced chainfire-core API marker %q\n' "$pattern" >&2
        printf '%s\n' "$hits" >&2
        status=1
      fi
    done

    # Archived scaffolds must stay out of the default cargo workspaces.
    if default_members="$(extract_toml_array_block plasmavmc/Cargo.toml default-members)"; then
      if hits="$(printf '%s\n' "$default_members" | ${pkgs.ripgrep}/bin/rg -n 'plasmavmc-firecracker' || true)" && [ -n "$hits" ]; then
        printf 'supported-surface-guard: archived PlasmaVMC backend scaffold re-entered the default workspace members\n' >&2
        printf '%s\n' "$hits" >&2
        status=1
      fi
    fi

    if default_members="$(extract_toml_array_block k8shost/Cargo.toml default-members)"; then
      for pattern in 'k8shost-cni' 'k8shost-csi' 'k8shost-controllers'; do
        if hits="$(printf '%s\n' "$default_members" | ${pkgs.ripgrep}/bin/rg -n "$pattern" || true)" && [ -n "$hits" ]; then
          printf 'supported-surface-guard: archived K8sHost helper scaffold re-entered the default workspace members: %s\n' "$pattern" >&2
          printf '%s\n' "$hits" >&2
          status=1
        fi
      done
    fi

    # The archived netboot-worker helper must stay out of canonical outputs.
    if helper_outputs="$(extract_nix_array_block flake.nix 'helperOutputs')" \
      && hits="$(printf '%s\n' "$helper_outputs" | ${pkgs.ripgrep}/bin/rg -n 'netboot-worker' || true)" \
      && [ -n "$hits" ]; then
      printf 'supported-surface-guard: archived netboot-worker helper re-entered canonical helper outputs\n' >&2
      printf '%s\n' "$hits" >&2
      status=1
    fi

    if canonical_build_targets="$(extract_nix_array_block flake.nix 'canonicalProfileBuildTargets')" \
      && hits="$(printf '%s\n' "$canonical_build_targets" | ${pkgs.ripgrep}/bin/rg -n 'netboot-worker' || true)" \
      && [ -n "$hits" ]; then
      printf 'supported-surface-guard: archived netboot-worker helper re-entered canonical profile build targets\n' >&2
      printf '%s\n' "$hits" >&2
      status=1
    fi

    if [ "$status" -ne 0 ]; then
      exit "$status"
    fi

    printf 'supported-surface-guard: no unfinished public markers, API stubs, supported component completeness markers, contract-marker regressions, desurfaced chainfire-core API markers, or archived scaffold regressions found\n'
    touch "$out"
  '';
|
|
|
|
# JSON snapshot of the canonical profile manifest; forcing the toJSON
# evaluates every referenced flake output, acting as an eval guard.
canonical-profile-eval-guards = pkgs.writeText "canonical-profile-eval-guards.json"
  (builtins.toJSON canonicalProfileManifest);
|
|
|
|
# Link farm that forces every canonical-profile build target to build.
canonical-profile-build-guards =
  pkgs.linkFarm "canonical-profile-build-guards"
    (map (target: { inherit (target) name path; }) canonicalProfileBuildTargets);
|
|
|
|
# Aggregate regression gate: link-farms the named sibling checks so
# building this one output forces all of them.
portable-control-plane-regressions =
  pkgs.linkFarm "portable-control-plane-regressions" (
    map (name: { inherit name; path = self.checks.${system}.${name}; }) [
      "canonical-profile-eval-guards"
      "supported-surface-guard"
      "deployer-bootstrap-e2e"
      "host-lifecycle-e2e"
      "deployer-vm-smoke"
      "fleet-scheduler-e2e"
    ]
  );
|
|
|
|
# NixOS VM test: first-boot topology smoke against this flake's
# packages and its default NixOS module.
first-boot-topology-vm-smoke = pkgs.testers.runNixOSTest (
  import ./nix/tests/first-boot-topology-vm-smoke.nix {
    inherit pkgs;
    ultracloudPackages = self.packages.${system};
    ultracloudModule = self.nixosModules.default;
  }
);
|
|
|
|
# Deployer smoke test; the kvm system-feature requirement is stripped so
# it can be scheduled on builders without that feature.
deployer-vm-smoke = stripKvmRequiredSystemFeature (pkgs.testers.runNixOSTest (
  import ./nix/tests/deployer-vm-smoke.nix {
    inherit pkgs;
    ultracloudPackages = self.packages.${system};
    smokeTargetToplevel = self.packages.${system}.vmSmokeBundledTargetToplevel;
  }
));
|
|
|
|
# Same deployer smoke scenario, but with the health check forced to fail
# and rollback enabled, asserting the rolled-back end state.
deployer-vm-rollback = stripKvmRequiredSystemFeature (pkgs.testers.runNixOSTest (
  import ./nix/tests/deployer-vm-smoke.nix {
    inherit pkgs;
    ultracloudPackages = self.packages.${system};
    smokeTargetToplevel = self.packages.${system}.vmSmokeBundledTargetToplevel;
    desiredSystemOverrides = {
      health_check_command = [ "false" ];  # guarantee the health gate fails
      rollback_on_failure = true;
    };
    expectedStatus = "rolled-back";
    expectCurrentSystemMatchesTarget = false;
    expectMarkerPresent = false;
  }
));
|
|
|
|
# The bare-metal ISO proof cannot execute inside the Nix build sandbox
# (it would run as nixbld and fall back to TCG instead of host KVM), so this
# check only materializes the runner binary plus a README telling the
# operator how to run it from a writable checkout.
baremetal-iso-e2e = pkgs.runCommand "baremetal-iso-e2e"
  {
    nativeBuildInputs = with pkgs; [ coreutils ];
    # Thin symlink/README bundle — always build locally, never substitute.
    preferLocalBuild = true;
    allowSubstitutes = false;
    # Expose the real proof runner for callers that want it directly.
    passthru.proofRunner = self.packages.${system}.baremetal-iso-e2e-runner;
  } ''
  mkdir -p "$out/bin" "$out/share/ultracloud"
  ln -s ${self.packages.${system}.baremetal-iso-e2e-runner}/bin/baremetal-iso-e2e \
    "$out/bin/baremetal-iso-e2e"
  cat >"$out/share/ultracloud/README.txt" <<'EOF'
  This check materializes the local-KVM baremetal-iso-e2e proof runner.
  Direct build-time execution under the Nix daemon sandbox would run as nixbld and fall back to TCG instead of host KVM.
  Run ./result/bin/baremetal-iso-e2e from a writable checkout to execute the exact proof and keep log/meta under ./work by default.
  EOF
'';
|
|
|
|
# VM smoke test: fiberlb native BGP path.
fiberlb-native-bgp-vm-smoke =
  let
    testDefinition = import ./nix/tests/fiberlb-native-bgp-vm-smoke.nix {
      inherit pkgs;
      ultracloudPackages = self.packages.${system};
      ultracloudModule = self.nixosModules.default;
    };
  in
  pkgs.testers.runNixOSTest testDefinition;
|
|
|
|
# VM smoke test: fiberlb native BGP with multipath.
fiberlb-native-bgp-multipath-vm-smoke =
  let
    testDefinition = import ./nix/tests/fiberlb-native-bgp-multipath-vm-smoke.nix {
      inherit pkgs;
      ultracloudPackages = self.packages.${system};
      ultracloudModule = self.nixosModules.default;
    };
  in
  pkgs.testers.runNixOSTest testDefinition;
|
|
|
|
# VM smoke test: fiberlb native BGP interop scenario.
fiberlb-native-bgp-interop-vm-smoke =
  let
    testDefinition = import ./nix/tests/fiberlb-native-bgp-interop-vm-smoke.nix {
      inherit pkgs;
      ultracloudPackages = self.packages.${system};
      ultracloudModule = self.nixosModules.default;
    };
  in
  pkgs.testers.runNixOSTest testDefinition;
|
|
|
|
# VM smoke test: fiberlb native BGP ECMP drain scenario.
fiberlb-native-bgp-ecmp-drain-vm-smoke =
  let
    testDefinition = import ./nix/tests/fiberlb-native-bgp-ecmp-drain-vm-smoke.nix {
      inherit pkgs;
      ultracloudPackages = self.packages.${system};
      ultracloudModule = self.nixosModules.default;
    };
  in
  pkgs.testers.runNixOSTest testDefinition;
|
|
|
|
# End-to-end bootstrap check: runs the deployer bootstrap verify script
# against freshly built chainfire/deployer binaries.
# Fix: the nine-tool list was previously duplicated verbatim between
# nativeBuildInputs and the makeBinPath PATH reset; a single shared list
# keeps the two from drifting apart (same elements, same order, so the
# resulting PATH string is unchanged).
deployer-bootstrap-e2e =
  let
    e2eTools = with pkgs; [
      bash
      coreutils
      curl
      findutils
      gawk
      gnugrep
      gnused
      procps
      python3
    ];
  in
  pkgs.runCommand "deployer-bootstrap-e2e"
    {
      nativeBuildInputs = e2eTools;
      # Tells the verify script it is running inside the Nix sandbox.
      ULTRACLOUD_E2E_IN_NIX = "1";
      ULTRACLOUD_CHAINFIRE_SERVER_BIN =
        "${self.packages.${system}.chainfire-server}/bin/chainfire";
      ULTRACLOUD_DEPLOYER_SERVER_BIN =
        "${self.packages.${system}.deployer-workspace}/bin/deployer-server";
      ULTRACLOUD_DEPLOYER_CTL_BIN =
        "${self.packages.${system}.deployer-workspace}/bin/deployer-ctl";
    } ''
    export HOME="$TMPDIR/home"
    mkdir -p "$HOME"
    # Pin PATH to exactly the declared tool set so the script cannot pick
    # up anything else from the build environment.
    export PATH="${pkgs.lib.makeBinPath e2eTools}"
    bash ${./deployer/scripts/verify-deployer-bootstrap-e2e.sh}
    touch "$out"
  '';
|
|
|
|
# End-to-end host lifecycle check: runs the host-lifecycle verify script
# against the chainfire server, deployer-ctl, and the reconciler.
# Fix: the tool list was previously duplicated verbatim between
# nativeBuildInputs and the makeBinPath PATH reset; a single shared list
# keeps the two from drifting apart (same elements, same order, so the
# resulting PATH string is unchanged).
host-lifecycle-e2e =
  let
    e2eTools = with pkgs; [
      bash
      coreutils
      curl
      findutils
      gawk
      gnugrep
      gnused
      procps
      python3
    ];
  in
  pkgs.runCommand "host-lifecycle-e2e"
    {
      nativeBuildInputs = e2eTools;
      # Tells the verify script it is running inside the Nix sandbox.
      ULTRACLOUD_E2E_IN_NIX = "1";
      ULTRACLOUD_CHAINFIRE_SERVER_BIN =
        "${self.packages.${system}.chainfire-server}/bin/chainfire";
      ULTRACLOUD_DEPLOYER_CTL_BIN =
        "${self.packages.${system}.deployer-workspace}/bin/deployer-ctl";
      ULTRACLOUD_RECONCILER_BIN =
        "${self.packages.${system}.deployer-workspace}/bin/ultracloud-reconciler";
    } ''
    export HOME="$TMPDIR/home"
    mkdir -p "$HOME"
    # Pin PATH to exactly the declared tool set so the script cannot pick
    # up anything else from the build environment.
    export PATH="${pkgs.lib.makeBinPath e2eTools}"
    bash ${./deployer/scripts/verify-host-lifecycle-e2e.sh}
    touch "$out"
  '';
|
|
|
|
# End-to-end fleet-scheduler check: drives chainfire, deployer-ctl, the
# node agent, and the fleet scheduler through the stable verify script.
# Fix: the tool list was previously duplicated verbatim between
# nativeBuildInputs and the makeBinPath PATH reset; a single shared list
# keeps the two from drifting apart (same elements, same order, so the
# resulting PATH string is unchanged).
fleet-scheduler-e2e =
  let
    e2eTools = with pkgs; [
      bash
      coreutils
      curl
      findutils
      gawk
      gnugrep
      gnused
      procps
      python3
    ];
  in
  pkgs.runCommand "fleet-scheduler-e2e"
    {
      nativeBuildInputs = e2eTools;
      # Tells the verify script it is running inside the Nix sandbox.
      ULTRACLOUD_E2E_IN_NIX = "1";
      ULTRACLOUD_CHAINFIRE_SERVER_BIN =
        "${self.packages.${system}.chainfire-server}/bin/chainfire";
      ULTRACLOUD_DEPLOYER_CTL_BIN =
        "${self.packages.${system}.deployer-workspace}/bin/deployer-ctl";
      ULTRACLOUD_NODE_AGENT_BIN =
        "${self.packages.${system}.deployer-workspace}/bin/node-agent";
      ULTRACLOUD_FLEET_SCHEDULER_BIN =
        "${self.packages.${system}.deployer-workspace}/bin/fleet-scheduler";
      # Source tree of this flake, for fixtures the script reads.
      ULTRACLOUD_FLEET_E2E_REPO_ROOT = "${self}";
    } ''
    export HOME="$TMPDIR/home"
    mkdir -p "$HOME"
    # Pin PATH to exactly the declared tool set so the script cannot pick
    # up anything else from the build environment.
    export PATH="${pkgs.lib.makeBinPath e2eTools}"
    bash ${./nix/tests/verify-fleet-scheduler-e2e-stable.sh}
    touch "$out"
  '';
|
|
};
|
|
}
|
|
) // {
|
|
# ========================================================================
|
|
# NIXOS MODULES: System-level service modules (non-system-specific)
|
|
# ========================================================================
|
|
# Primary NixOS module export (everything under ./nix/modules).
nixosModules.default = import ./nix/modules;

# Named alias of the same module set, so consumers can also reference
# `inputs.ultracloud.nixosModules.ultracloud`.
nixosModules.ultracloud = import ./nix/modules;
|
|
|
|
# ========================================================================
|
|
# NIXOS CONFIGURATIONS: Netboot images for bare-metal provisioning
|
|
# ========================================================================
|
|
nixosConfigurations =
  let
    vmClusterLib = import ./nix/nodes/vm-cluster/lib.nix { lib = nixpkgs.lib; };
    # Single shared module applying the UltraCloud package overlay.
    # Fix: previously mkVmClusterSystem, ultracloud-iso, and pxe-server
    # repeated the inline `{ nixpkgs.overlays = ...; }` attrset instead of
    # using this helper; all configurations now share it so the overlay
    # wiring cannot drift.
    overlayModule = {
      nixpkgs.overlays = [ self.overlays.default ];
    };
    # Builds one VM-cluster control-plane node; the deployer is only
    # enabled on the designated bootstrap node.
    mkVmClusterSystem = nodeName:
      nixpkgs.lib.nixosSystem {
        system = "x86_64-linux";
        modules = [
          disko.nixosModules.disko
          vmClusterLib.nodeConfigurationPaths.${nodeName}
          self.nixosModules.default
          (vmClusterLib.mkBootstrapServicesModule {
            inherit self nodeName;
            enableDeployer = nodeName == vmClusterLib.bootstrapNodeName;
          })
          overlayModule
        ];
      };
  in
  {
    # Control Plane netboot image (all 8 services)
    netboot-control-plane = nixpkgs.lib.nixosSystem {
      system = "x86_64-linux";
      modules = [
        ./nix/images/netboot-control-plane.nix
        overlayModule
      ];
    };

    # Archived worker netboot helper kept only for manual lab debugging.
    netboot-worker = nixpkgs.lib.nixosSystem {
      system = "x86_64-linux";
      modules = [
        ./nix/images/netboot-worker.nix
        overlayModule
      ];
    };

    # All-in-One netboot image (single-node deployment)
    netboot-all-in-one = nixpkgs.lib.nixosSystem {
      system = "x86_64-linux";
      modules = [
        ./nix/images/netboot-all-in-one.nix
        overlayModule
      ];
    };

    # QEMU-first single-node quickstart for one-command local bring-up.
    single-node-quickstart = nixpkgs.lib.nixosSystem {
      system = "x86_64-linux";
      modules = [
        ./nix/single-node/qemu-vm.nix
        ./nix/single-node/base.nix
        self.nixosModules.default
        overlayModule
        {
          ultracloud.quickstart.enable = true;
        }
      ];
    };

    # Canonical bare-metal ISO install targets used by the QEMU proof path.
    baremetal-qemu-control-plane = nixpkgs.lib.nixosSystem {
      system = "x86_64-linux";
      modules = [
        disko.nixosModules.disko
        ./nix/nodes/baremetal-qemu/control-plane/configuration.nix
        ./nix/nodes/baremetal-qemu/control-plane/disko.nix
        self.nixosModules.default
        overlayModule
      ];
    };

    baremetal-qemu-worker = nixpkgs.lib.nixosSystem {
      system = "x86_64-linux";
      modules = [
        disko.nixosModules.disko
        ./nix/nodes/baremetal-qemu/worker/configuration.nix
        ./nix/nodes/baremetal-qemu/worker/disko.nix
        self.nixosModules.default
        overlayModule
      ];
    };

    # Base netboot image (minimal, for VM testing and provisioning)
    netboot-base = nixpkgs.lib.nixosSystem {
      system = "x86_64-linux";
      modules = [
        ./nix/images/netboot-base.nix
        overlayModule
      ];
    };

    # Offline-friendly target used by deployer VM smoke tests.
    vm-smoke-target = nixpkgs.lib.nixosSystem {
      system = "x86_64-linux";
      modules = [ ./nix/images/deployer-vm-smoke-target.nix ];
    };

    # UltraCloud ISO (T061.S5 - bootable ISO with cluster-config embedding)
    ultracloud-iso = nixpkgs.lib.nixosSystem {
      system = "x86_64-linux";
      # Hand the ISO builder the format/mount scripts and system closures
      # of both bare-metal targets so they can be embedded on the image.
      specialArgs = {
        ultracloudBaremetalFormatMountPaths = {
          baremetal-qemu-control-plane =
            self.nixosConfigurations."baremetal-qemu-control-plane".config.system.build.formatMount;
          baremetal-qemu-worker =
            self.nixosConfigurations."baremetal-qemu-worker".config.system.build.formatMount;
        };
        ultracloudBaremetalSystemPaths = {
          baremetal-qemu-control-plane =
            self.nixosConfigurations."baremetal-qemu-control-plane".config.system.build.toplevel;
          baremetal-qemu-worker =
            self.nixosConfigurations."baremetal-qemu-worker".config.system.build.toplevel;
        };
      };
      modules = [
        ./nix/iso/ultracloud-iso.nix
        self.nixosModules.default
        overlayModule
      ];
    };

    # T036 VM Cluster Nodes (for nixos-anywhere deployment)
    pxe-server = nixpkgs.lib.nixosSystem {
      system = "x86_64-linux";
      modules = [
        disko.nixosModules.disko
        ./baremetal/vm-cluster/pxe-server/configuration.nix
        ./baremetal/vm-cluster/pxe-server/disko.nix
        self.nixosModules.default
        overlayModule
      ];
    };
  }
  # Add one nixosConfiguration per VM-cluster control-plane node name.
  // nixpkgs.lib.genAttrs vmClusterLib.controlPlaneNodeNames mkVmClusterSystem;
|
|
|
|
# ========================================================================
|
|
# OVERLAY: Provides UltraCloud packages to nixpkgs
|
|
# ========================================================================
|
|
# Usage in NixOS configuration:
|
|
# nixpkgs.overlays = [ inputs.ultracloud.overlays.default ];
|
|
# Overlay exposing every UltraCloud package for the consumer's system.
# Usage in a NixOS configuration:
#   nixpkgs.overlays = [ inputs.ultracloud.overlays.default ];
overlays.default = final: prev:
  let
    # All packages this flake builds for the overlay consumer's platform.
    ucPkgs = self.packages.${final.system};
  in
  {
    chainfire-server = ucPkgs.chainfire-server;
    flaredb-server = ucPkgs.flaredb-server;
    iam-server = ucPkgs.iam-server;
    coronafs-server = ucPkgs.coronafs-server;
    plasmavmc-server = ucPkgs.plasmavmc-server;
    prismnet-server = ucPkgs.prismnet-server;
    flashdns-server = ucPkgs.flashdns-server;
    fiberlb-server = ucPkgs.fiberlb-server;
    lightningstor-workspace = ucPkgs.lightningstor-workspace;
    # Server and node names both resolve to the single workspace build.
    lightningstor-server = ucPkgs.lightningstor-workspace;
    lightningstor-node = ucPkgs.lightningstor-workspace;
    nightlight-server = ucPkgs.nightlight-server;
    creditservice-server = ucPkgs.creditservice-server;
    apigateway-server = ucPkgs.apigateway-server;
    k8shost-server = ucPkgs.k8shost-server;
    deployer-workspace = ucPkgs.deployer-workspace;
    deployer-server = ucPkgs.deployer-server;
    deployer-ctl = ucPkgs.deployer-ctl;
    ultracloud-reconciler = ucPkgs.ultracloud-reconciler;
    ultracloudFlakeBundle = ucPkgs.ultracloudFlakeBundle;
    nix-agent = ucPkgs.nix-agent;
    node-agent = ucPkgs.node-agent;
    fleet-scheduler = ucPkgs.fleet-scheduler;
  };
|
|
};
|
|
}
|