photoncloud-monorepo/nix/test-cluster/vm-guest-image.nix
2026-04-04 16:33:03 +09:00

1542 lines
55 KiB
Nix

{ modulesPath, lib, pkgs, ... }:
let
  # Python interpreter bundled with the two third-party libraries the demo
  # API needs: boto3 (S3 client for LightningStor) and python-multipart
  # (streaming multipart/form-data parsing for uploads).
  pythonWithBoto3 = pkgs.python3.withPackages (ps: [ ps.boto3 ps.python-multipart ]);
  # IAM gRPC proto copied into the Nix store so grpcurl can load it in-guest.
  photonVmDemoIamProto = pkgs.writeText "photon-vm-demo-iam.proto" (builtins.readFile ../../iam/proto/iam.proto);
  # The demo HTTP application, written out as a standalone Python script.
  photonVmDemoApi = pkgs.writeText "photon-vm-demo-api.py" ''
import html
import json
import mimetypes
import os
import socket
import struct
import subprocess
import threading
import time
import traceback
import urllib.parse
import urllib.request
import uuid
from http import HTTPStatus
from http.server import BaseHTTPRequestHandler, ThreadingHTTPServer
import boto3
from botocore.config import Config
from botocore.exceptions import ClientError
from python_multipart import create_form_parser
# Persistent data-disk mount and the files kept on it.
DATA_MOUNT = "/mnt/photon-vm-data"
CONFIG_PATH = os.path.join(DATA_MOUNT, "demo-config.json")
ROOT_BOOT_COUNT_PATH = "/var/lib/photon-vm-smoke/boot-count"
DATA_BOOT_COUNT_PATH = os.path.join(DATA_MOUNT, "boot-count")
# Serial console used for log markers that a host-side harness can grep.
CONSOLE_PATH = "/dev/ttyS0"
LISTEN_HOST = "0.0.0.0"
LISTEN_PORT = 8080
# Used when no default gateway can be parsed from /proc/net/route.
GATEWAY_IP_FALLBACK = "10.62.10.1"
UPLOAD_TMP_DIR = os.path.join(DATA_MOUNT, ".upload-tmp")
# Multipart uploads larger than this spill to UPLOAD_TMP_DIR instead of RAM.
MAX_IN_MEMORY_UPLOAD = 1024 * 1024
AWS_REGION = "us-east-1"
ORG_ID = "matrix-tenant-org"
PROJECT_ID = "matrix-tenant-project"
FLAREDB_SHARED_NAMESPACE = "validation"
STATE_OBJECT_KEY = "state.json"
ATTACHMENT_PREFIX = "attachments"
# The two paths below are interpolated by Nix at build time.
GRPCURL_BIN = "${pkgs.grpcurl}/bin/grpcurl"
IAM_PROTO_PATH = "${photonVmDemoIamProto}"
IAM_PROTO_DIR = os.path.dirname(IAM_PROTO_PATH)
IAM_PROTO_FILE = os.path.basename(IAM_PROTO_PATH)
UI_HTML = """<!doctype html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1">
<title>Photon Tasks</title>
<style>
:root {
color-scheme: light;
--bg: #f5efe4;
--panel: rgba(255, 252, 245, 0.9);
--panel-strong: #fffaf0;
--text: #1f1e1a;
--muted: #6a6459;
--line: rgba(60, 52, 38, 0.14);
--accent: #166b4a;
--accent-soft: rgba(22, 107, 74, 0.12);
--warn: #8b3a24;
--shadow: 0 24px 60px rgba(50, 40, 24, 0.12);
}
* { box-sizing: border-box; }
body {
margin: 0;
min-height: 100vh;
background:
radial-gradient(circle at top left, rgba(232, 174, 92, 0.22), transparent 30%),
radial-gradient(circle at top right, rgba(71, 140, 114, 0.18), transparent 28%),
linear-gradient(180deg, #fbf7ef 0%, var(--bg) 55%, #eee5d3 100%);
color: var(--text);
font-family: "Iowan Old Style", "Palatino Linotype", "URW Palladio L", serif;
}
.shell {
width: min(980px, calc(100vw - 32px));
margin: 0 auto;
padding: 28px 0 56px;
}
.hero {
padding: 32px;
border-bottom: 1px solid var(--line);
}
.eyebrow {
margin: 0 0 10px;
font-size: 12px;
letter-spacing: 0.22em;
text-transform: uppercase;
color: var(--accent);
}
h1 {
margin: 0;
font-size: clamp(38px, 8vw, 64px);
line-height: 0.96;
font-weight: 700;
}
.lede {
max-width: 52rem;
margin: 18px 0 0;
color: var(--muted);
font-size: 18px;
line-height: 1.55;
}
.grid {
display: grid;
gap: 18px;
grid-template-columns: 1.2fr 0.8fr;
margin-top: 20px;
}
.panel {
background: var(--panel);
border: 1px solid var(--line);
border-radius: 24px;
box-shadow: var(--shadow);
backdrop-filter: blur(10px);
overflow: hidden;
}
.panel-inner {
padding: 24px;
}
.panel h2 {
margin: 0 0 10px;
font-size: 24px;
}
.panel p {
margin: 0;
color: var(--muted);
line-height: 1.5;
}
.composer {
display: grid;
gap: 14px;
}
label {
display: block;
margin-bottom: 6px;
font-size: 13px;
letter-spacing: 0.06em;
text-transform: uppercase;
color: var(--muted);
}
input[type="text"],
textarea,
input[type="file"] {
width: 100%;
border: 1px solid rgba(47, 39, 26, 0.16);
border-radius: 16px;
background: rgba(255, 255, 255, 0.82);
color: var(--text);
font: inherit;
padding: 14px 16px;
}
textarea {
min-height: 120px;
resize: vertical;
}
.actions,
.toolbar {
display: flex;
align-items: center;
justify-content: space-between;
gap: 12px;
}
button {
border: none;
border-radius: 999px;
padding: 12px 18px;
font: inherit;
cursor: pointer;
background: var(--accent);
color: #fbf8f1;
transition: transform 120ms ease, opacity 120ms ease;
}
button.secondary {
background: rgba(31, 30, 26, 0.08);
color: var(--text);
}
button:hover { transform: translateY(-1px); }
button:disabled { opacity: 0.6; cursor: wait; transform: none; }
.status {
min-height: 24px;
margin-top: 10px;
color: var(--muted);
}
.status.error {
color: var(--warn);
}
.summary-grid {
display: grid;
gap: 12px;
grid-template-columns: repeat(2, minmax(0, 1fr));
margin-top: 18px;
}
.stat {
padding: 18px;
border-radius: 18px;
background: var(--panel-strong);
border: 1px solid var(--line);
}
.stat strong {
display: block;
font-size: 30px;
line-height: 1;
}
.stat span {
display: block;
margin-top: 8px;
color: var(--muted);
}
.meta {
margin-top: 18px;
padding-top: 18px;
border-top: 1px solid var(--line);
font-size: 14px;
color: var(--muted);
line-height: 1.6;
}
.todo-list {
list-style: none;
margin: 18px 0 0;
padding: 0;
display: grid;
gap: 14px;
}
.todo-card {
padding: 18px;
border-radius: 20px;
border: 1px solid var(--line);
background: rgba(255, 255, 255, 0.74);
}
.todo-card.done {
background: rgba(22, 107, 74, 0.08);
}
.todo-top {
display: flex;
align-items: flex-start;
justify-content: space-between;
gap: 16px;
}
.todo-title {
margin: 0;
font-size: 22px;
line-height: 1.15;
}
.todo-badge {
display: inline-flex;
align-items: center;
gap: 6px;
margin-top: 8px;
padding: 6px 10px;
border-radius: 999px;
background: var(--accent-soft);
color: var(--accent);
font-size: 13px;
}
.todo-card.done .todo-badge {
background: rgba(31, 30, 26, 0.08);
color: var(--muted);
}
.todo-details {
margin: 14px 0 0;
color: var(--muted);
line-height: 1.6;
white-space: pre-wrap;
}
.attachment {
display: flex;
align-items: center;
justify-content: space-between;
gap: 12px;
margin-top: 16px;
padding: 12px 14px;
border-radius: 14px;
border: 1px solid var(--line);
background: rgba(255, 249, 239, 0.92);
}
.attachment a {
color: var(--accent);
text-decoration: none;
font-weight: 700;
}
.attachment small {
color: var(--muted);
}
.empty {
margin-top: 18px;
padding: 26px;
border: 1px dashed rgba(47, 39, 26, 0.2);
border-radius: 20px;
color: var(--muted);
text-align: center;
background: rgba(255, 255, 255, 0.5);
}
@media (max-width: 860px) {
.grid { grid-template-columns: 1fr; }
.hero { padding: 28px 24px; }
.panel-inner { padding: 20px; }
.summary-grid { grid-template-columns: 1fr 1fr; }
}
@media (max-width: 540px) {
.shell { width: min(100vw - 20px, 980px); }
.summary-grid { grid-template-columns: 1fr; }
.todo-top,
.attachment,
.actions,
.toolbar { flex-direction: column; align-items: stretch; }
}
</style>
</head>
<body>
<main class="shell">
<section class="panel hero">
<p class="eyebrow">UltraCloud demo</p>
<h1>Photon Tasks</h1>
<p class="lede">
TODO items live in FlareDB, attached files live in LightningStor, and the whole app stays alive across restart and migration.
</p>
</section>
<section class="grid">
<section class="panel">
<div class="panel-inner">
<div class="toolbar">
<div>
<h2>New Task</h2>
<p>Add a title, optional notes, and a file attachment.</p>
</div>
<button id="refresh-button" class="secondary" type="button">Refresh</button>
</div>
<form id="todo-form" class="composer">
<div>
<label for="title">Title</label>
<input id="title" name="title" type="text" maxlength="120" placeholder="Example: Ship nested VM demo">
</div>
<div>
<label for="details">Details</label>
<textarea id="details" name="details" maxlength="1200" placeholder="Short note for the task"></textarea>
</div>
<div>
<label for="attachment">Attachment</label>
<input id="attachment" name="attachment" type="file">
</div>
<div class="actions">
<p>Files are uploaded into LightningStor and served back through the guest app.</p>
<button id="submit-button" type="submit">Create Task</button>
</div>
</form>
<div id="status" class="status" aria-live="polite"></div>
</div>
</section>
<section class="panel">
<div class="panel-inner">
<h2>Cluster Snapshot</h2>
<p>Useful while you are bouncing the VM around the cluster.</p>
<div class="summary-grid">
<div class="stat">
<strong id="todo-count">0</strong>
<span>Tasks</span>
</div>
<div class="stat">
<strong id="attachment-count">0</strong>
<span>Attachments</span>
</div>
<div class="stat">
<strong id="root-boots">0</strong>
<span>Root boots</span>
</div>
<div class="stat">
<strong id="data-boots">0</strong>
<span>Data disk boots</span>
</div>
</div>
<div class="meta">
<div><strong>Bucket:</strong> <span id="bucket-name">-</span></div>
<div><strong>FlareDB namespace:</strong> <span id="namespace-name">-</span></div>
<div><strong>Last task:</strong> <span id="latest-task">No tasks yet</span></div>
</div>
</div>
</section>
</section>
<section class="panel" style="margin-top: 20px;">
<div class="panel-inner">
<div class="toolbar">
<div>
<h2>Tasks</h2>
<p>Mark tasks done or open the attached file straight from the VM.</p>
</div>
</div>
<div id="todo-empty" class="empty">No tasks yet. Add one with a file to exercise FlareDB and LightningStor together.</div>
<ul id="todo-list" class="todo-list"></ul>
</div>
</section>
</main>
<script>
function escapeHtml(value) {
return String(value || "")
.replace(/&/g, "&amp;")
.replace(/</g, "&lt;")
.replace(/>/g, "&gt;")
.replace(/"/g, "&quot;")
.replace(/'/g, "&#39;");
}
function formatBytes(size) {
if (!size) {
return "0 B";
}
if (size < 1024) {
return size + " B";
}
if (size < 1024 * 1024) {
return (size / 1024).toFixed(1) + " KiB";
}
return (size / (1024 * 1024)).toFixed(1) + " MiB";
}
async function requestJson(url, options) {
const response = await fetch(url, options || {});
const text = await response.text();
let payload = {};
if (text) {
payload = JSON.parse(text);
}
if (!response.ok) {
throw new Error(payload.detail || payload.error || ("HTTP " + response.status));
}
return payload;
}
function setStatus(message, isError) {
const node = document.getElementById("status");
node.textContent = message || "";
node.className = isError ? "status error" : "status";
}
function renderTodo(todo) {
const details = todo.details ? '<p class="todo-details">' + escapeHtml(todo.details) + '</p>' : "";
const stateLabel = todo.done ? "Done" : "Open";
const toggleLabel = todo.done ? "Reopen" : "Mark Done";
let attachment = "";
if (todo.attachment) {
attachment =
'<div class="attachment">' +
'<div>' +
'<a href="' + escapeHtml(todo.attachment_url) + '">' + escapeHtml(todo.attachment.filename) + '</a>' +
'<small>' + escapeHtml(formatBytes(todo.attachment.size)) + " from LightningStor" + '</small>' +
'</div>' +
'<button class="secondary" type="button" data-download-url="' + escapeHtml(todo.attachment_url) + '">Open File</button>' +
'</div>';
}
return (
'<li class="todo-card' + (todo.done ? " done" : "") + '">' +
'<div class="todo-top">' +
'<div>' +
'<h3 class="todo-title">' + escapeHtml(todo.title) + '</h3>' +
'<div class="todo-badge">Task #' + String(todo.id).padStart(2, "0") + " " + stateLabel + '</div>' +
'</div>' +
'<button class="secondary" type="button" data-toggle-id="' + escapeHtml(String(todo.id)) + '">' + toggleLabel + '</button>' +
'</div>' +
details +
attachment +
'</li>'
);
}
function updateSummary(state) {
document.getElementById("todo-count").textContent = String(state.todo_count || 0);
document.getElementById("attachment-count").textContent = String(state.attachment_count || 0);
document.getElementById("root-boots").textContent = String(state.root_boot_count || 0);
document.getElementById("data-boots").textContent = String(state.data_boot_count || 0);
document.getElementById("bucket-name").textContent = state.bucket || "-";
document.getElementById("namespace-name").textContent = state.state_namespace || "-";
document.getElementById("latest-task").textContent = state.latest_todo_title || "No tasks yet";
}
function updateTodos(state) {
const list = document.getElementById("todo-list");
const empty = document.getElementById("todo-empty");
const todos = Array.isArray(state.todos) ? state.todos : [];
if (todos.length === 0) {
list.innerHTML = "";
empty.style.display = "block";
return;
}
empty.style.display = "none";
list.innerHTML = todos.map(renderTodo).join("");
}
async function loadState() {
const state = await requestJson("/api/todos");
updateSummary(state);
updateTodos(state);
return state;
}
async function refresh() {
setStatus("Refreshing app state...", false);
try {
await loadState();
setStatus("State refreshed.", false);
} catch (error) {
setStatus(error.message, true);
}
}
async function submitTodo(event) {
event.preventDefault();
const form = document.getElementById("todo-form");
const button = document.getElementById("submit-button");
const formData = new FormData(form);
button.disabled = true;
setStatus("Uploading task and attachment...", false);
try {
await requestJson("/api/todos", { method: "POST", body: formData });
form.reset();
await loadState();
setStatus("Task saved.", false);
} catch (error) {
setStatus(error.message, true);
} finally {
button.disabled = false;
}
}
async function onListClick(event) {
const toggleId = event.target.getAttribute("data-toggle-id");
if (toggleId) {
setStatus("Updating task...", false);
try {
await requestJson("/api/todos/" + toggleId + "/toggle", { method: "POST" });
await loadState();
setStatus("Task updated.", false);
} catch (error) {
setStatus(error.message, true);
}
return;
}
const downloadUrl = event.target.getAttribute("data-download-url");
if (downloadUrl) {
window.open(downloadUrl, "_blank");
}
}
document.getElementById("todo-form").addEventListener("submit", submitTodo);
document.getElementById("todo-list").addEventListener("click", onListClick);
document.getElementById("refresh-button").addEventListener("click", refresh);
refresh();
</script>
</body>
</html>
"""
def detect_default_gateway() -> str:
    """Return the IPv4 default-gateway address parsed from /proc/net/route.

    Falls back to GATEWAY_IP_FALLBACK when no usable default route exists
    (e.g. early in boot) or the route table cannot be read.
    """
    try:
        with open("/proc/net/route", "r", encoding="utf-8") as handle:
            next(handle, None)  # skip the header row
            for line in handle:
                fields = line.strip().split()
                if len(fields) < 4:
                    continue
                # Columns: Iface, Destination, Gateway, Flags (hex words).
                # Keep only the default route (destination 0.0.0.0) with a
                # non-zero gateway address.
                if fields[1] != "00000000" or fields[2] == "00000000":
                    continue
                # 0x2 is RTF_GATEWAY: the route actually goes via a gateway.
                if not (int(fields[3], 16) & 0x2):
                    continue
                # Gateway is a little-endian hex word; repack to dotted quad.
                return socket.inet_ntoa(struct.pack("<L", int(fields[2], 16)))
    except (FileNotFoundError, OSError, StopIteration, ValueError):
        pass
    return GATEWAY_IP_FALLBACK
# All control-plane services are reached through the host that provides the
# guest's default gateway; each service sits on a fixed port there.
CONTROL_PLANE_PROXY_HOST = detect_default_gateway()
FLAREDB_BASE_URL = "http://%s:8082" % CONTROL_PLANE_PROXY_HOST
LIGHTNINGSTOR_S3_ENDPOINT = "http://%s:9000" % CONTROL_PLANE_PROXY_HOST
IAM_ENDPOINT = "%s:50080" % CONTROL_PLANE_PROXY_HOST
# BACKEND_LOCK guards the BACKEND dict; MUTATION_LOCK serializes todo writes.
BACKEND_LOCK = threading.RLock()
MUTATION_LOCK = threading.Lock()
# Shared state populated by bootstrap() and read by the request handlers.
BACKEND = {
    "ready": False,
    "config": None,
    "s3_client": None,
    "last_error": "",
}
def log_console(message: str) -> None:
    """Best-effort append of one line to the serial console.

    OSErrors are deliberately swallowed: logging must never take the API
    down (e.g. when /dev/ttyS0 is absent or busy).
    """
    try:
        with open(CONSOLE_PATH, "a", encoding="utf-8") as console:
            console.write(message + "\n")
    except OSError:
        pass
def sanitize(message: str) -> str:
    """Collapse CR/LF to spaces and cap the length at 240 characters so the
    text is safe to embed in one-line console markers and JSON details."""
    flattened = message.translate({ord("\n"): " ", ord("\r"): " "})
    return flattened[:240]
def read_int(path: str) -> int:
    """Read a file holding a decimal counter.

    Returns 0 when the file is missing, unreadable, empty, or not an
    integer -- boot counters simply start at zero in those cases.
    """
    try:
        with open(path, "r", encoding="utf-8") as handle:
            text = handle.read().strip()
        return int(text or "0")
    except (FileNotFoundError, OSError, ValueError):
        return 0
def write_json_atomic(path: str, payload: dict) -> None:
    """Write payload as pretty-printed, key-sorted JSON via a temp file and
    os.replace, so readers never observe a partially written file."""
    os.makedirs(os.path.dirname(path), exist_ok=True)
    staging = path + ".tmp"
    with open(staging, "w", encoding="utf-8") as out:
        json.dump(payload, out, indent=2, sort_keys=True)
        out.write("\n")
    os.replace(staging, path)
def now_iso() -> str:
    """Current UTC time as an ISO-8601 timestamp with a trailing Z."""
    utc_now = time.gmtime()
    return time.strftime("%Y-%m-%dT%H:%M:%SZ", utc_now)
def safe_filename(name: str) -> str:
    """Reduce an untrusted filename to a safe basename.

    Path components are dropped, every character outside alphanumerics and
    "._-" becomes "-", leading/trailing dots and dashes are trimmed, and a
    name that collapses to nothing falls back to "attachment.bin".
    """
    base = os.path.basename(name or "attachment.bin").strip()
    normalized = "".join(
        char if char.isalnum() or char in "._-" else "-" for char in base
    )
    trimmed = normalized.strip(".-")
    return trimmed or "attachment.bin"
def load_or_create_config() -> dict:
    """Load the per-VM demo config from the data disk, creating it on first
    boot.

    The config pins a random principal/bucket/key prefix so each VM gets
    its own IAM service account, S3 bucket, and FlareDB key range, while
    surviving reboots via the persistent data mount.
    """
    if os.path.exists(CONFIG_PATH):
        with open(CONFIG_PATH, "r", encoding="utf-8") as handle:
            config = json.load(handle)
    else:
        suffix = uuid.uuid4().hex[:12]
        prefix = "vm-demo-" + suffix
        config = {
            "principal_id": prefix,
            "bucket": prefix,
            "todo_prefix": prefix + "/todos/",
            "next_id_key": prefix + "/next-id",
            "state_namespace": FLAREDB_SHARED_NAMESPACE,
            "access_key_id": "",
            "secret_key": "",
        }
        write_json_atomic(CONFIG_PATH, config)
    # Migrate configs written by an older version that lacked the todo keys.
    # NOTE(review): the migrated config is only re-persisted when
    # "next_id_key" was missing -- presumably "todo_prefix" is never missing
    # on its own; confirm against historical config formats.
    if "todo_prefix" not in config:
        legacy_prefix = config.get("principal_id") or ("vm-demo-" + uuid.uuid4().hex[:12])
        config["todo_prefix"] = legacy_prefix + "/todos/"
    if "next_id_key" not in config:
        config["next_id_key"] = config["todo_prefix"].rstrip("/") + "/next-id"
        write_json_atomic(CONFIG_PATH, config)
    return config
def grpcurl_json(endpoint: str, proto_dir: str, proto_file: str, service: str, payload: dict, ignore_errors=()) -> dict:
    """Invoke a gRPC method via the grpcurl CLI and return its JSON reply.

    Args:
        endpoint: host:port of the plaintext gRPC server.
        proto_dir / proto_file: import path and file name of the proto.
        service: fully qualified method, e.g. "iam.v1.IamAdmin/CreatePrincipal".
        payload: request message, serialized to JSON for grpcurl's -d flag.
        ignore_errors: case-insensitive substrings that mark a non-zero exit
            as benign (returns {}) -- used for "already exists" errors.

    Raises:
        RuntimeError: when grpcurl exits non-zero with unexpected output.
    """
    command = [
        GRPCURL_BIN,
        "-plaintext",
        "-import-path",
        proto_dir,
        "-proto",
        proto_file,
        "-d",
        json.dumps(payload, sort_keys=True),
        endpoint,
        service,
    ]
    result = subprocess.run(
        command,
        capture_output=True,
        text=True,
        timeout=20,
        check=False,
    )
    # stdout and stderr are combined so the ignore_errors patterns match
    # wherever grpcurl happened to print the message.
    combined = (result.stdout or "") + "\n" + (result.stderr or "")
    if result.returncode != 0:
        lowered = combined.lower()
        for pattern in ignore_errors:
            if pattern.lower() in lowered:
                return {}
        raise RuntimeError("grpcurl %s failed: %s" % (service, sanitize(combined)))
    output = result.stdout.strip()
    return json.loads(output) if output else {}
def http_json(method: str, url: str, payload=None, timeout: int = 10) -> dict:
    """Issue an HTTP request and decode the JSON response body.

    When payload is given it is sent as a deterministic (key-sorted) JSON
    body with a matching Content-Type header.
    """
    headers = {}
    body = None
    if payload is not None:
        headers["Content-Type"] = "application/json"
        body = json.dumps(payload, sort_keys=True).encode("utf-8")
    request = urllib.request.Request(url, data=body, headers=headers, method=method)
    with urllib.request.urlopen(request, timeout=timeout) as response:
        raw = response.read()
    return json.loads(raw.decode("utf-8"))
def ensure_service_account(config: dict) -> None:
    """Idempotently create the demo's IAM service-account principal.

    An "already exists" failure from IAM is treated as success so this can
    safely run on every boot.
    """
    grpcurl_json(
        IAM_ENDPOINT,
        IAM_PROTO_DIR,
        IAM_PROTO_FILE,
        "iam.v1.IamAdmin/CreatePrincipal",
        {
            "id": config["principal_id"],
            "kind": "PRINCIPAL_KIND_SERVICE_ACCOUNT",
            "name": config["principal_id"],
            "orgId": ORG_ID,
            "projectId": PROJECT_ID,
        },
        ignore_errors=("already exists", "alreadyexists"),
    )
def ensure_s3_credentials(config: dict) -> None:
    """Mint S3 credentials for the demo principal once and persist them.

    No-op when the config already holds a key pair, so credentials stay
    stable across reboots; otherwise asks IAM for a new pair and writes it
    back to the config file on the data disk.
    """
    if config.get("access_key_id") and config.get("secret_key"):
        return
    response = grpcurl_json(
        IAM_ENDPOINT,
        IAM_PROTO_DIR,
        IAM_PROTO_FILE,
        "iam.v1.IamCredential/CreateS3Credential",
        {
            "principalId": config["principal_id"],
            "principalKind": "PRINCIPAL_KIND_SERVICE_ACCOUNT",
            "orgId": ORG_ID,
            "projectId": PROJECT_ID,
            "description": "vm-demo",
        },
    )
    config["access_key_id"] = response["accessKeyId"]
    config["secret_key"] = response["secretKey"]
    write_json_atomic(CONFIG_PATH, config)
def s3_client(config: dict):
    """Build a boto3 S3 client aimed at the LightningStor endpoint, using
    path-style addressing and SigV4 with the demo's minted credentials."""
    session = boto3.session.Session()
    client_config = Config(
        retries={"max_attempts": 8, "mode": "standard"},
        s3={"addressing_style": "path"},
        signature_version="s3v4",
    )
    return session.client(
        "s3",
        endpoint_url=LIGHTNINGSTOR_S3_ENDPOINT,
        region_name=AWS_REGION,
        aws_access_key_id=config["access_key_id"],
        aws_secret_access_key=config["secret_key"],
        use_ssl=False,
        verify=False,
        config=client_config,
    )
def ensure_bucket(config: dict, client) -> None:
    """Create the demo bucket if it does not already exist.

    head_bucket failures with not-found-ish codes (including the 400/403
    some S3 implementations return for unknown buckets) fall through to
    create_bucket; any other error is re-raised.
    """
    try:
        client.head_bucket(Bucket=config["bucket"])
        return
    except ClientError as error:
        code = str(error.response.get("Error", {}).get("Code", ""))
        if code not in ("404", "NoSuchBucket", "NotFound", "400", "403"):
            raise
    client.create_bucket(Bucket=config["bucket"])
def flaredb_scan(config: dict, start: str, end: str) -> list:
    """Range-scan FlareDB keys in [start, end] within the demo namespace and
    return the raw item list ([] when the scan yields nothing)."""
    params = {
        "start": start,
        "end": end,
        "namespace": config["state_namespace"],
    }
    url = FLAREDB_BASE_URL + "/api/v1/scan?" + urllib.parse.urlencode(params)
    payload = http_json("GET", url)
    return payload.get("data", {}).get("items", [])
def flaredb_get_value(config: dict, key: str):
    """Return the value stored at key, or None when absent.

    Implemented as a narrow range scan (end = key + "~") since the scan
    endpoint is the read path this demo exercises.
    """
    matches = flaredb_scan(config, key, key + "~")
    return matches[0].get("value") if matches else None
def flaredb_put_value(config: dict, key: str, value: str) -> None:
    """Store a string value at key (URL-encoded) in the demo namespace via
    FlareDB's KV endpoint."""
    url = FLAREDB_BASE_URL + "/api/v1/kv/" + urllib.parse.quote(key, safe="")
    body = {
        "value": value,
        "namespace": config["state_namespace"],
    }
    http_json("PUT", url, body)
def flaredb_put_json(config: dict, key: str, payload: dict) -> None:
    """Serialize payload deterministically (sorted keys) and store it at key."""
    encoded = json.dumps(payload, sort_keys=True)
    flaredb_put_value(config, key, encoded)
def flaredb_get_json(config: dict, key: str):
    """Load and JSON-decode the value at key; None when the key is absent."""
    raw = flaredb_get_value(config, key)
    return None if raw is None else json.loads(raw)
def flaredb_next_todo_id(config: dict) -> int:
    """Read the next todo id from the FlareDB counter key (1 when unseeded)."""
    stored = flaredb_get_value(config, config["next_id_key"])
    return 1 if stored is None else int(stored)
def todo_record_key(config: dict, todo_id: int) -> str:
    """FlareDB key for a todo: prefix + zero-padded id (so keys sort by id)."""
    return "%s%06d" % (config["todo_prefix"], todo_id)
def todo_attachment_key(todo_id: int, filename: str) -> str:
    """S3 object key for a todo's attachment, namespaced by zero-padded id."""
    return "%s/%06d/%s" % (ATTACHMENT_PREFIX, todo_id, safe_filename(filename))
def todo_view(todo: dict) -> dict:
    """Project a stored todo record into the JSON shape served to the UI,
    normalizing field types and filling defaults for anything missing."""
    view = {
        "id": int(todo["id"]),
        "title": todo.get("title", ""),
        "details": todo.get("details", ""),
        "done": bool(todo.get("done", False)),
        "created_at": todo.get("created_at", ""),
        "updated_at": todo.get("updated_at", ""),
        "attachment": None,
        "attachment_url": "",
    }
    raw_attachment = todo.get("attachment")
    if isinstance(raw_attachment, dict):
        view["attachment"] = {
            "filename": raw_attachment.get("filename", ""),
            "content_type": raw_attachment.get("content_type", "application/octet-stream"),
            "object_key": raw_attachment.get("object_key", ""),
            "size": int(raw_attachment.get("size", 0)),
        }
        view["attachment_url"] = "/api/todos/%s/attachment" % view["id"]
    return view
def list_todos(config: dict) -> list:
    """Scan the todo key range in FlareDB and return decoded records sorted
    by id; entries that are not valid todo JSON objects are skipped."""
    prefix = config["todo_prefix"]
    records = []
    for entry in flaredb_scan(config, prefix, prefix + "~"):
        raw = entry.get("value", "{}")
        try:
            decoded = json.loads(raw)
        except json.JSONDecodeError:
            continue
        if isinstance(decoded, dict) and "id" in decoded:
            records.append(decoded)
    records.sort(key=lambda record: int(record.get("id", 0)))
    return records
def snapshot_payload(config: dict, todos: list) -> dict:
    """Build the state.json document that gets uploaded to LightningStor.

    Combines the todo list from FlareDB with local boot counters, endpoint
    info, and latest-item summaries so persistence can be verified from a
    single object.
    """
    latest = todos[-1] if todos else None
    latest_attachment = latest.get("attachment") if isinstance(latest, dict) else None
    return {
        "bucket": config["bucket"],
        "hostname": socket.gethostname(),
        "latest_object_key": STATE_OBJECT_KEY,
        "latest_todo_id": int(latest["id"]) if latest else 0,
        "latest_todo_title": latest.get("title", "") if latest else "",
        "latest_attachment_object_key": latest_attachment.get("object_key", "") if isinstance(latest_attachment, dict) else "",
        "latest_attachment_filename": latest_attachment.get("filename", "") if isinstance(latest_attachment, dict) else "",
        "object_store_backend": "lightningstor",
        "object_store_endpoint": LIGHTNINGSTOR_S3_ENDPOINT,
        "root_boot_count": read_int(ROOT_BOOT_COUNT_PATH),
        "data_boot_count": read_int(DATA_BOOT_COUNT_PATH),
        "state_backend": "flaredb",
        "state_endpoint": FLAREDB_BASE_URL,
        "state_namespace": config["state_namespace"],
        "todo_prefix": config["todo_prefix"],
        "next_id_key": config["next_id_key"],
        "todo_count": len(todos),
        "attachment_count": sum(1 for todo in todos if isinstance(todo.get("attachment"), dict)),
        "todos": [todo_view(todo) for todo in todos],
    }
def write_snapshot(config: dict, client, todos: list) -> dict:
    """Serialize the cluster snapshot and upload it to the demo bucket under
    STATE_OBJECT_KEY; returns the payload that was written."""
    payload = snapshot_payload(config, todos)
    body = json.dumps(payload, sort_keys=True).encode("utf-8")
    client.put_object(
        Bucket=config["bucket"],
        Key=STATE_OBJECT_KEY,
        Body=body,
        ContentType="application/json",
    )
    return payload
def read_snapshot(config: dict, client) -> dict:
    """Fetch and decode the snapshot object previously written to S3."""
    stored = client.get_object(Bucket=config["bucket"], Key=STATE_OBJECT_KEY)
    raw = stored["Body"].read()
    return json.loads(raw.decode("utf-8"))
def response_payload(config: dict, todos: list, snapshot: dict) -> dict:
    """Build the JSON body served by /state and /api/todos.

    The latest_* fields are echoed from the snapshot argument (the copy
    read back from S3), while the counts and todo list are recomputed from
    the FlareDB-derived todos argument.
    """
    return {
        "status": "ok",
        "hostname": socket.gethostname(),
        "listen_port": LISTEN_PORT,
        "root_boot_count": read_int(ROOT_BOOT_COUNT_PATH),
        "data_boot_count": read_int(DATA_BOOT_COUNT_PATH),
        "state_backend": "flaredb",
        "state_endpoint": FLAREDB_BASE_URL,
        "state_namespace": config["state_namespace"],
        "todo_prefix": config["todo_prefix"],
        "next_id_key": config["next_id_key"],
        "object_store_backend": "lightningstor",
        "object_store_endpoint": LIGHTNINGSTOR_S3_ENDPOINT,
        "bucket": config["bucket"],
        "latest_object_key": snapshot["latest_object_key"],
        "latest_todo_id": int(snapshot["latest_todo_id"]),
        "latest_todo_title": snapshot["latest_todo_title"],
        "latest_attachment_object_key": snapshot["latest_attachment_object_key"],
        "latest_attachment_filename": snapshot["latest_attachment_filename"],
        "latest_object_todo_count": int(snapshot["todo_count"]),
        "todo_count": len(todos),
        "attachment_count": sum(1 for todo in todos if isinstance(todo.get("attachment"), dict)),
        "todos": [todo_view(todo) for todo in todos],
    }
def bootstrap() -> None:
    """Bring the backend up: config, IAM principal, S3 credentials, bucket,
    FlareDB id counter, and an initial snapshot.

    Retries every 2s for up to ~120s since the control-plane services may
    still be starting; the last failure is recorded in BACKEND so /health
    can report it. On success flips BACKEND["ready"] so handlers serve.
    """
    deadline = time.time() + 120
    while True:
        try:
            config = load_or_create_config()
            ensure_service_account(config)
            ensure_s3_credentials(config)
            client = s3_client(config)
            ensure_bucket(config, client)
            # Seed the todo id counter on first boot only.
            if flaredb_get_value(config, config["next_id_key"]) is None:
                flaredb_put_value(config, config["next_id_key"], "1")
            todos = list_todos(config)
            write_snapshot(config, client, todos)
            with BACKEND_LOCK:
                BACKEND["ready"] = True
                BACKEND["config"] = config
                BACKEND["s3_client"] = client
                BACKEND["last_error"] = ""
            return
        except Exception as error:
            message = sanitize(str(error))
            with BACKEND_LOCK:
                BACKEND["last_error"] = message
            if time.time() >= deadline:
                raise RuntimeError(message)
            log_console("PHOTON_VM_DEMO_BOOTSTRAP_RETRY detail=%s" % message)
            time.sleep(2)
def get_backend():
    """Return the (config, s3_client) pair once bootstrap() has succeeded.

    Raises RuntimeError (carrying the last bootstrap error when known)
    while the backend is still coming up.
    """
    with BACKEND_LOCK:
        if not BACKEND["ready"]:
            raise RuntimeError(BACKEND["last_error"] or "backend not ready")
        return BACKEND["config"], BACKEND["s3_client"]
def current_state() -> dict:
    """Rebuild the snapshot from FlareDB, round-trip it through S3, and
    return the API payload.

    The snapshot is deliberately read back from LightningStor right after
    writing it, so every state request exercises the object-store path end
    to end.
    """
    config, client = get_backend()
    todos = list_todos(config)
    # Fixed a dead store: the result of write_snapshot() was previously
    # assigned and then immediately overwritten by read_snapshot(); only the
    # read-back copy is used to build the response.
    write_snapshot(config, client, todos)
    snapshot = read_snapshot(config, client)
    return response_payload(config, todos, snapshot)
def create_todo(title: str, details: str, attachment_name: str, attachment_file, attachment_size: int, attachment_content_type: str) -> dict:
    """Create a todo record in FlareDB, optionally uploading its attachment
    to LightningStor, and return the refreshed API payload.

    attachment_file is the python-multipart file object from the request
    parser (or None); it is always closed here, which also removes any
    spilled temp file. The whole mutation runs under MUTATION_LOCK so id
    allocation and the counter bump stay consistent across threads.
    """
    config, client = get_backend()
    try:
        with MUTATION_LOCK:
            todo_id = flaredb_next_todo_id(config)
            created_at = now_iso()
            attachment = None
            safe_name = safe_filename(attachment_name)
            if attachment_file is not None and attachment_size > 0:
                object_key = todo_attachment_key(todo_id, safe_name)
                # Rewind: the parser may have already consumed the stream.
                attachment_file.file_object.seek(0)
                client.put_object(
                    Bucket=config["bucket"],
                    Key=object_key,
                    Body=attachment_file.file_object,
                    ContentLength=attachment_size,
                    ContentType=attachment_content_type or "application/octet-stream",
                )
                attachment = {
                    "filename": safe_name,
                    "content_type": attachment_content_type or "application/octet-stream",
                    "object_key": object_key,
                    "size": attachment_size,
                }
            todo = {
                "id": todo_id,
                "title": title.strip() or "Untitled task",
                "details": details.strip(),
                "done": False,
                "created_at": created_at,
                "updated_at": created_at,
                "attachment": attachment,
            }
            flaredb_put_json(config, todo_record_key(config, todo_id), todo)
            flaredb_put_value(config, config["next_id_key"], str(todo_id + 1))
            todos = list_todos(config)
            # The snapshot is written and then read back from S3 on purpose,
            # so the response reflects what the object store actually holds.
            snapshot = write_snapshot(config, client, todos)
            snapshot = read_snapshot(config, client)
            return response_payload(config, todos, snapshot)
    finally:
        # Always release the upload (and its temp file), even on failure.
        if attachment_file is not None:
            try:
                attachment_file.close()
            except Exception:
                pass
def toggle_todo(todo_id: int) -> dict:
    """Flip the done flag on a todo and return the refreshed API payload.

    Raises KeyError when the id does not exist. Runs under MUTATION_LOCK so
    concurrent toggles cannot interleave with other writes.
    """
    config, client = get_backend()
    with MUTATION_LOCK:
        todo = flaredb_get_json(config, todo_record_key(config, todo_id))
        if not isinstance(todo, dict):
            raise KeyError("todo %s not found" % todo_id)
        todo["done"] = not bool(todo.get("done", False))
        todo["updated_at"] = now_iso()
        flaredb_put_json(config, todo_record_key(config, todo_id), todo)
        todos = list_todos(config)
        # Fixed a dead store: write_snapshot()'s return value was previously
        # assigned and then immediately overwritten; the response is built
        # from the copy read back out of S3 (a deliberate round-trip).
        write_snapshot(config, client, todos)
        snapshot = read_snapshot(config, client)
        return response_payload(config, todos, snapshot)
def attachment_response(todo_id: int):
    """Load a todo's attachment from LightningStor for serving.

    Raises KeyError for an unknown todo and FileNotFoundError when the todo
    exists but carries no attachment; otherwise returns a dict with
    filename, content_type, and the full body bytes.
    """
    config, client = get_backend()
    record = flaredb_get_json(config, todo_record_key(config, todo_id))
    if not isinstance(record, dict):
        raise KeyError("todo %s not found" % todo_id)
    attachment = record.get("attachment")
    if not isinstance(attachment, dict):
        raise FileNotFoundError("todo %s has no attachment" % todo_id)
    stored = client.get_object(Bucket=config["bucket"], Key=attachment["object_key"])
    return {
        "filename": attachment.get("filename", "attachment.bin"),
        "content_type": attachment.get("content_type", "application/octet-stream"),
        "body": stored["Body"].read(),
    }
class Handler(BaseHTTPRequestHandler):
server_version = "PhotonVMDemo/2.0"
    def log_message(self, format: str, *args) -> None:
        # Silence BaseHTTPRequestHandler's default per-request stderr log.
        return
def _send_bytes(self, body: bytes, status: int, content_type: str, extra_headers=None) -> None:
self.send_response(status)
self.send_header("Content-Type", content_type)
self.send_header("Content-Length", str(len(body)))
if extra_headers:
for key, value in extra_headers.items():
self.send_header(key, value)
self.end_headers()
self.wfile.write(body)
def _send_json(self, payload: dict, status: int = HTTPStatus.OK) -> None:
body = json.dumps(payload, sort_keys=True).encode("utf-8")
self._send_bytes(body, status, "application/json")
def _send_html(self, body: str) -> None:
self._send_bytes(body.encode("utf-8"), HTTPStatus.OK, "text/html; charset=utf-8")
def _parsed_path(self):
return urllib.parse.urlparse(self.path).path
def _todo_id_from_path(self, path: str, suffix: str):
prefix = "/api/todos/"
if not path.startswith(prefix) or not path.endswith(suffix):
return None
value = path[len(prefix):len(path) - len(suffix)]
if not value.isdigit():
return None
return int(value)
    def _parse_todo_submission(self):
        """Parse a POST body into a normalized todo submission dict.

        Supports three request encodings:
          * application/json -- title/details only, no attachment;
          * multipart/form-data -- streamed through python-multipart, with
            large files spilled to UPLOAD_TMP_DIR instead of RAM;
          * anything else -- treated as urlencoded form data.

        Returns a dict with title, details, attachment_name,
        attachment_file (an open python-multipart file object or None),
        attachment_size, and attachment_content_type. The caller owns
        closing attachment_file.
        """
        content_type = self.headers.get("Content-Type", "")
        content_length = int(self.headers.get("Content-Length", "0") or "0")
        if content_type.startswith("application/json"):
            raw = self.rfile.read(content_length)
            payload = json.loads(raw.decode("utf-8") or "{}")
            return {
                "title": str(payload.get("title", "")),
                "details": str(payload.get("details", "")),
                "attachment_name": "",
                "attachment_file": None,
                "attachment_size": 0,
                "attachment_content_type": "",
            }
        if content_type.startswith("multipart/form-data"):
            os.makedirs(UPLOAD_TMP_DIR, exist_ok=True)
            fields = {}
            files = {}
            headers = {
                "Content-Type": content_type,
                "Content-Length": str(content_length),
            }
            # python-multipart hands names/values back as bytes; decode
            # defensively with replacement.
            def on_field(field) -> None:
                field_name = (field.field_name or b"").decode("utf-8", errors="replace")
                fields[field_name] = (field.value or b"").decode("utf-8", errors="replace")
            def on_file(file_obj) -> None:
                field_name = (file_obj.field_name or b"").decode("utf-8", errors="replace")
                files[field_name] = file_obj
            try:
                parser = create_form_parser(
                    headers,
                    on_field,
                    on_file,
                    config={
                        "UPLOAD_DIR": UPLOAD_TMP_DIR,
                        "UPLOAD_DELETE_TMP": True,
                        "MAX_MEMORY_FILE_SIZE": MAX_IN_MEMORY_UPLOAD,
                    },
                )
                # Stream the body through the parser in 256 KiB chunks so a
                # large upload never has to fit in memory at once.
                bytes_read = 0
                while bytes_read < content_length:
                    chunk = self.rfile.read(min(262144, content_length - bytes_read))
                    if not chunk:
                        break
                    parser.write(chunk)
                    bytes_read += len(chunk)
                parser.finalize()
            except Exception:
                # On parse failure, release any spilled temp files before
                # re-raising to the request handler.
                for file_obj in files.values():
                    try:
                        file_obj.close()
                    except Exception:
                        pass
                raise
            title = fields.get("title", "")
            details = fields.get("details", "")
            attachment_file = files.get("attachment")
            attachment_name = ""
            attachment_size = 0
            attachment_content_type = ""
            if attachment_file is not None and int(attachment_file.size or 0) > 0:
                attachment_name = (attachment_file.file_name or b"").decode("utf-8", errors="replace")
                attachment_size = int(attachment_file.size or 0)
                # Content type is derived from the filename extension rather
                # than the client-supplied part header.
                attachment_content_type = mimetypes.guess_type(attachment_name)[0] or "application/octet-stream"
            return {
                "title": title,
                "details": details,
                "attachment_name": attachment_name,
                "attachment_file": attachment_file,
                "attachment_size": attachment_size,
                "attachment_content_type": attachment_content_type,
            }
        else:
            raw = self.rfile.read(content_length)
            fields = urllib.parse.parse_qs(raw.decode("utf-8", errors="replace"), keep_blank_values=True)
            title = fields.get("title", [""])[0]
            details = fields.get("details", [""])[0]
            return {
                "title": title,
                "details": details,
                "attachment_name": "",
                "attachment_file": None,
                "attachment_size": 0,
                "attachment_content_type": "",
            }
def do_GET(self) -> None:
    """Route GET requests.

    Routes:
      /                    -> static single-page UI (UI_HTML)
      /health              -> 200 {"status": "ok"} once BACKEND["ready"],
                              else 503 with the last bootstrap error
      /state, /api/todos   -> current todo state as JSON
      /todos/<id>/attachment -> stored attachment bytes (inline)
    Anything else returns 404 {"error": "not_found"}.
    """
    path = self._parsed_path()
    if path == "/":
        self._send_html(UI_HTML)
        return
    if path == "/health":
        # Readiness flag is shared with the bootstrap thread; read under lock.
        with BACKEND_LOCK:
            if BACKEND["ready"]:
                self._send_json({"status": "ok"})
            else:
                self._send_json(
                    {"status": "starting", "detail": BACKEND["last_error"]},
                    HTTPStatus.SERVICE_UNAVAILABLE,
                )
        return
    if path in ("/state", "/api/todos"):
        try:
            self._send_json(current_state())
        except Exception as error:
            # Backend/storage failures surface as 503 with a sanitized detail.
            self._send_json(
                {"error": "backend_error", "detail": sanitize(str(error))},
                HTTPStatus.SERVICE_UNAVAILABLE,
            )
        return
    attachment_todo_id = self._todo_id_from_path(path, "/attachment")
    if attachment_todo_id is not None:
        try:
            payload = attachment_response(attachment_todo_id)
            # FIX: the filename originates from a user upload. Strip CR/LF and
            # double quotes so it cannot terminate the quoted parameter or
            # inject extra header lines into the HTTP response.
            safe_filename = (
                payload["filename"]
                .replace("\r", "")
                .replace("\n", "")
                .replace('"', "'")
            )
            self._send_bytes(
                payload["body"],
                HTTPStatus.OK,
                payload["content_type"],
                {
                    "Content-Disposition": 'inline; filename="%s"' % safe_filename,
                },
            )
        except FileNotFoundError:
            # Attachment file missing on disk.
            self._send_json({"error": "not_found"}, HTTPStatus.NOT_FOUND)
        except KeyError:
            # Todo id unknown or has no attachment.
            self._send_json({"error": "not_found"}, HTTPStatus.NOT_FOUND)
        except Exception as error:
            self._send_json(
                {"error": "backend_error", "detail": sanitize(str(error))},
                HTTPStatus.SERVICE_UNAVAILABLE,
            )
        return
    self._send_json({"error": "not_found"}, HTTPStatus.NOT_FOUND)
def do_POST(self) -> None:
    """Route POST requests.

    Routes:
      /api/todos          -> create a todo (form or multipart submission),
                             reply 201 with the updated state payload
      /todos/<id>/toggle  -> flip a todo's completion state, reply 200
    Unknown routes return 404; backend failures return 503 with a
    sanitized detail string. Each action logs a marker to the console.
    """
    route = self._parsed_path()
    if route == "/api/todos":
        try:
            submitted = self._parse_todo_submission()
            created = create_todo(
                submitted["title"],
                submitted["details"],
                submitted["attachment_name"],
                submitted["attachment_file"],
                submitted["attachment_size"],
                submitted["attachment_content_type"],
            )
            log_console(
                "PHOTON_VM_DEMO_TODO_CREATED count=%s title=%s attachment=%s"
                % (
                    created["todo_count"],
                    sanitize(created["latest_todo_title"]),
                    created["latest_attachment_filename"] or "none",
                )
            )
            self._send_json(created, HTTPStatus.CREATED)
        except Exception as error:
            self._send_json(
                {"error": "backend_error", "detail": sanitize(str(error))},
                HTTPStatus.SERVICE_UNAVAILABLE,
            )
        return
    target_todo_id = self._todo_id_from_path(route, "/toggle")
    if target_todo_id is not None:
        try:
            toggled = toggle_todo(target_todo_id)
            log_console(
                "PHOTON_VM_DEMO_TODO_TOGGLED id=%s latest=%s"
                % (target_todo_id, sanitize(toggled["latest_todo_title"]))
            )
            self._send_json(toggled)
        except KeyError:
            # Unknown todo id.
            self._send_json({"error": "not_found"}, HTTPStatus.NOT_FOUND)
        except Exception as error:
            self._send_json(
                {"error": "backend_error", "detail": sanitize(str(error))},
                HTTPStatus.SERVICE_UNAVAILABLE,
            )
        return
    self._send_json({"error": "not_found"}, HTTPStatus.NOT_FOUND)
def main() -> None:
    """Entry point: prepare the data mount directory, bootstrap the backend,
    bind the HTTP server, announce readiness on the console, then serve
    forever on LISTEN_HOST:LISTEN_PORT."""
    os.makedirs(DATA_MOUNT, exist_ok=True)
    bootstrap()
    # Snapshot the backend config under the lock; the banner reads it after.
    with BACKEND_LOCK:
        demo_config = BACKEND["config"]
    web_server = ThreadingHTTPServer((LISTEN_HOST, LISTEN_PORT), Handler)
    # Readiness marker is emitted only after the listening socket is bound,
    # so the host harness can safely connect as soon as it sees this line.
    ready_fields = (
        read_int(ROOT_BOOT_COUNT_PATH),
        LISTEN_PORT,
        demo_config["bucket"],
        demo_config["state_namespace"],
    )
    log_console(
        "PHOTON_VM_DEMO_WEB_READY count=%s port=%s app=todo bucket=%s namespace=%s"
        % ready_fields
    )
    web_server.serve_forever()
if __name__ == "__main__":
    try:
        main()
    except Exception:
        # Emit a fatal marker plus the sanitized traceback on the console,
        # then re-raise so the service exits non-zero and systemd restarts it.
        log_console("PHOTON_VM_DEMO_FATAL")
        fatal_trace = traceback.format_exc()
        for trace_line in fatal_trace.splitlines():
            log_console(sanitize(trace_line))
        raise
'';
in {
  # NixOS module that builds the "photon-vm-smoke" qcow2 guest image used by
  # the test cluster to validate VM provisioning end to end.
  imports = [
    (modulesPath + "/virtualisation/disk-image.nix")
    (modulesPath + "/profiles/qemu-guest.nix")
  ];
  image = {
    baseName = "photon-vm-smoke";
    format = "qcow2";
    # Legacy BIOS boot; no EFI support in the generated image.
    efiSupport = false;
  };
  # Root disk size (MiB) for the generated image.
  virtualisation.diskSize = 4096;
  # Serial console first: the host harness scrapes readiness markers from ttyS0.
  boot.kernelParams = [ "console=ttyS0" "console=tty0" ];
  networking.hostName = "photon-vm-smoke";
  networking.useDHCP = lib.mkDefault true;
  # Test-only guest: firewall off, root autologin, sshd disabled.
  networking.firewall.enable = false;
  services.getty.autologinUser = "root";
  users.mutableUsers = false;
  # Pre-hashed root password for interactive console debugging (test image only).
  users.users.root.hashedPassword = "$6$ultracloud$aUJCEE5wm/b5O.9KIKGm84qUWdWXwnebsFEiMBF7u9Y7AOWodaMrjbbKGMOf0X59VJyJeMRsgbT7VWeqMHpUe.";
  documentation.enable = false;
  services.openssh.enable = false;
  # Filesystem/block tools used by the smoke service script below.
  environment.systemPackages = [ pkgs.e2fsprogs pkgs.util-linux ];
  # Smoke-test marker service. Emits PHOTON_VM_SMOKE_* lines on ttyS0 for the
  # host test harness: bumps a boot counter on the root disk, locates (or
  # formats and mounts) the secondary data disk, bumps a counter there, then
  # heartbeats forever. NOTE: inside the script, ''${...} is the Nix escape
  # for a literal shell ''${...} expansion.
  systemd.services.photon-vm-smoke = {
    description = "UltraCloud VM smoke marker";
    wantedBy = [ "multi-user.target" ];
    # Let udev settle so the attached data disk is visible before probing.
    wants = [ "systemd-udev-settle.service" ];
    after = [ "local-fs.target" "systemd-udev-settle.service" ];
    # Tools the script needs on PATH (lsblk/awk/grep/sed/mkfs.ext4/...).
    path = with pkgs; [
      bash
      coreutils
      e2fsprogs
      gawk
      gnugrep
      gnused
      util-linux
    ];
    serviceConfig = {
      Type = "simple";
      # The script ends in an infinite heartbeat loop; restart on any exit.
      Restart = "always";
      RestartSec = "1";
    };
    script = ''
      mkdir -p /var/lib/photon-vm-smoke
      count_file=/var/lib/photon-vm-smoke/boot-count
      if [ -f "$count_file" ]; then
        count=$(( $(cat "$count_file") + 1 ))
      else
        count=1
      fi
      echo "$count" > "$count_file"
      echo "PHOTON_VM_SMOKE_READY count=$count" >/dev/ttyS0
      root_source="$(lsblk -nrpo NAME,MOUNTPOINT | awk '$2 == "/" { print $1; exit }' 2>/dev/null || true)"
      root_disk=""
      if [ -n "$root_source" ] && [ -b "$root_source" ]; then
        root_disk="$(lsblk -ndo PKNAME "$root_source" 2>/dev/null || true)"
        if [ -z "$root_disk" ]; then
          root_disk="$(basename "$root_source")"
        else
          root_disk="/dev/$root_disk"
        fi
      fi
      echo "PHOTON_VM_SMOKE_DATA_ROOT count=$count source=''${root_source:-none} root=''${root_disk:-unknown}" >/dev/ttyS0
      data_disk=""
      if [ -b /dev/disk/by-label/photon-vm-data ]; then
        data_disk="$(readlink -f /dev/disk/by-label/photon-vm-data)"
      fi
      pick_data_disk() {
        while IFS= read -r disk; do
          [ -n "$disk" ] || continue
          if [ -n "$root_source" ] && [ "$disk" = "$root_source" ]; then
            continue
          fi
          if [ -n "$root_disk" ] && [ "$disk" = "$root_disk" ]; then
            continue
          fi
          if lsblk -nrpo MOUNTPOINT "$disk" 2>/dev/null | grep -qx '/'; then
            continue
          fi
          printf '%s\n' "$disk"
          return 0
        done < <(lsblk -dnpr -o NAME,TYPE,RO | awk '$2 == "disk" && $3 == "0" { print $1 }')
        return 1
      }
      deadline=$((SECONDS + 60))
      scan_attempt=0
      while [ -z "$data_disk" ] && [ "$SECONDS" -lt "$deadline" ]; do
        scan_attempt=$((scan_attempt + 1))
        data_disk="$(pick_data_disk || true)"
        echo "PHOTON_VM_SMOKE_DATA_SCAN count=$count attempt=$scan_attempt data=''${data_disk:-none}" >/dev/ttyS0
        [ -n "$data_disk" ] && break
        udevadm settle >/dev/null 2>&1 || true
        sleep 1
      done
      if [ -z "$data_disk" ]; then
        echo "PHOTON_VM_SMOKE_DATA_MISSING count=$count" >/dev/ttyS0
        lsblk -dn -o NAME,TYPE,SIZE >/dev/ttyS0 2>&1 || true
        exit 1
      fi
      echo "PHOTON_VM_SMOKE_DATA_PROBE count=$count root=''${root_disk:-unknown} data=$(basename "$data_disk")" >/dev/ttyS0
      mkdir -p /mnt/photon-vm-data
      if ! blkid "$data_disk" >/dev/null 2>&1; then
        mkfs_output="$(mkfs.ext4 -L photon-vm-data -F "$data_disk" 2>&1)" || {
          mkfs_output="$(printf '%s' "$mkfs_output" | tr '\r\n' ' ' | sed 's/ */ /g')"
          echo "PHOTON_VM_SMOKE_DATA_ERROR count=$count step=mkfs device=$(basename "$data_disk") detail=''${mkfs_output}" >/dev/ttyS0
          lsblk -dn -o NAME,TYPE,RO,SIZE >/dev/ttyS0 2>&1 || true
          blockdev --getsize64 "$data_disk" >/dev/ttyS0 2>&1 || true
          exit 1
        }
      fi
      if ! mountpoint -q /mnt/photon-vm-data; then
        if ! mount "$data_disk" /mnt/photon-vm-data; then
          echo "PHOTON_VM_SMOKE_DATA_ERROR count=$count step=mount device=$(basename "$data_disk")" >/dev/ttyS0
          lsblk -f >/dev/ttyS0 2>&1 || true
          exit 1
        fi
      fi
      data_count_file=/mnt/photon-vm-data/boot-count
      if [ -f "$data_count_file" ]; then
        data_count=$(( $(cat "$data_count_file") + 1 ))
      else
        data_count=1
      fi
      echo "$data_count" > "$data_count_file"
      sync
      echo "PHOTON_VM_SMOKE_DATA_READY count=$data_count device=$(basename "$data_disk")" >/dev/ttyS0
      while true; do
        echo "PHOTON_VM_SMOKE_HEARTBEAT count=$count ts=$(date +%s)" >/dev/ttyS0
        sleep 2
      done
    '';
  };
  # Demo web app service: waits for the data disk mount created by
  # photon-vm-smoke, then execs the embedded Python todo app. All output is
  # redirected to the serial console so the host harness can observe it.
  systemd.services.photon-vm-demo-api = {
    description = "UltraCloud VM demo web app";
    wantedBy = [ "multi-user.target" ];
    # Needs the network up and the smoke service running (which mounts
    # /mnt/photon-vm-data used by the app's state files).
    wants = [ "network-online.target" "photon-vm-smoke.service" ];
    after = [ "network-online.target" "photon-vm-smoke.service" ];
    path = with pkgs; [
      bash
      coreutils
      util-linux
    ];
    serviceConfig = {
      Type = "simple";
      Restart = "always";
      RestartSec = "1";
    };
    script = ''
      deadline=$((SECONDS + 60))
      while ! mountpoint -q /mnt/photon-vm-data; do
        if [ "$SECONDS" -ge "$deadline" ]; then
          echo "PHOTON_VM_DEMO_WEB_ERROR step=mount-timeout" >/dev/ttyS0
          exit 1
        fi
        sleep 1
      done
      exec ${pythonWithBoto3}/bin/python3 ${photonVmDemoApi} >>/dev/ttyS0 2>&1
    '';
  };
  system.stateVersion = "24.05";
}