{ modulesPath, lib, pkgs, ... }:

let
  # Guest Python interpreter for the demo API: boto3 speaks S3 to LightningStor,
  # python-multipart streams multipart file uploads.
  pythonWithBoto3 = pkgs.python3.withPackages (ps: [ ps.boto3 ps.python-multipart ]);

  # IAM gRPC schema copied into the store so grpcurl inside the guest can load it.
  photonVmDemoIamProto = pkgs.writeText "photon-vm-demo-iam.proto" (builtins.readFile ../../iam/proto/iam.proto);

  # The demo web application, materialised as a single Python script in the store.
  photonVmDemoApi = pkgs.writeText "photon-vm-demo-api.py" ''
import html
import json
import mimetypes
import os
import socket
import struct
import subprocess
import threading
import time
import traceback
import urllib.parse
import urllib.request
import uuid
from http import HTTPStatus
from http.server import BaseHTTPRequestHandler, ThreadingHTTPServer

import boto3
from botocore.config import Config
from botocore.exceptions import ClientError
from python_multipart import create_form_parser

# Guest-local paths: the secondary data-disk mount and the two boot counters
# maintained by the photon-vm-smoke service.
DATA_MOUNT = "/mnt/photon-vm-data"
CONFIG_PATH = os.path.join(DATA_MOUNT, "demo-config.json")
ROOT_BOOT_COUNT_PATH = "/var/lib/photon-vm-smoke/boot-count"
DATA_BOOT_COUNT_PATH = os.path.join(DATA_MOUNT, "boot-count")
CONSOLE_PATH = "/dev/ttyS0"

# HTTP listener for the demo UI/API.
LISTEN_HOST = "0.0.0.0"
LISTEN_PORT = 8080
GATEWAY_IP_FALLBACK = "10.62.10.1"

# Uploads above this size are spooled to the data disk rather than held in RAM.
UPLOAD_TMP_DIR = os.path.join(DATA_MOUNT, ".upload-tmp")
MAX_IN_MEMORY_UPLOAD = 1024 * 1024

# Tenant/identity constants used when provisioning the service account.
AWS_REGION = "us-east-1"
ORG_ID = "matrix-tenant-org"
PROJECT_ID = "matrix-tenant-project"
FLAREDB_SHARED_NAMESPACE = "validation"
STATE_OBJECT_KEY = "state.json"
ATTACHMENT_PREFIX = "attachments"

# Tooling baked in from the Nix store; these paths are interpolated at build time.
GRPCURL_BIN = "${pkgs.grpcurl}/bin/grpcurl"
IAM_PROTO_PATH = "${photonVmDemoIamProto}"
IAM_PROTO_DIR = os.path.dirname(IAM_PROTO_PATH)
IAM_PROTO_FILE = os.path.basename(IAM_PROTO_PATH)

# NOTE(review): the markup of this template appears to have been stripped by an
# HTML-unaware tool — only text nodes survive below. Restore from version control.
UI_HTML = """ Photon Tasks

UltraCloud demo

Photon Tasks

TODO items live in FlareDB, attached files live in LightningStor, and the whole app stays alive across restart and migration.

New Task

Add a title, optional notes, and a file attachment.

Files are uploaded into LightningStor and served back through the guest app.

Cluster Snapshot

Useful while you are bouncing the VM around the cluster.

0 Tasks
0 Attachments
0 Root boots
0 Data disk boots
Bucket: -
FlareDB namespace: -
Last task: No tasks yet

Tasks

Mark tasks done or open the attached file straight from the VM.

No tasks yet. Add one with a file to exercise FlareDB and LightningStor together.
    """ def detect_default_gateway() -> str: try: with open("/proc/net/route", "r", encoding="utf-8") as handle: next(handle, None) for line in handle: fields = line.strip().split() if len(fields) < 4: continue if fields[1] != "00000000" or fields[2] == "00000000": continue if not (int(fields[3], 16) & 0x2): continue return socket.inet_ntoa(struct.pack(" None: try: with open(CONSOLE_PATH, "a", encoding="utf-8") as console: console.write(message + "\n") except OSError: pass def sanitize(message: str) -> str: return message.replace("\n", " ").replace("\r", " ")[:240] def read_int(path: str) -> int: try: with open(path, "r", encoding="utf-8") as handle: return int(handle.read().strip() or "0") except (FileNotFoundError, OSError, ValueError): return 0 def write_json_atomic(path: str, payload: dict) -> None: os.makedirs(os.path.dirname(path), exist_ok=True) tmp_path = path + ".tmp" with open(tmp_path, "w", encoding="utf-8") as handle: json.dump(payload, handle, indent=2, sort_keys=True) handle.write("\n") os.replace(tmp_path, path) def now_iso() -> str: return time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()) def safe_filename(name: str) -> str: base = os.path.basename(name or "attachment.bin").strip() cleaned = [] for char in base: if char.isalnum() or char in "._-": cleaned.append(char) else: cleaned.append("-") value = "".join(cleaned).strip(".-") return value or "attachment.bin" def load_or_create_config() -> dict: if os.path.exists(CONFIG_PATH): with open(CONFIG_PATH, "r", encoding="utf-8") as handle: config = json.load(handle) else: suffix = uuid.uuid4().hex[:12] prefix = "vm-demo-" + suffix config = { "principal_id": prefix, "bucket": prefix, "todo_prefix": prefix + "/todos/", "next_id_key": prefix + "/next-id", "state_namespace": FLAREDB_SHARED_NAMESPACE, "access_key_id": "", "secret_key": "", } write_json_atomic(CONFIG_PATH, config) if "todo_prefix" not in config: legacy_prefix = config.get("principal_id") or ("vm-demo-" + uuid.uuid4().hex[:12]) 
config["todo_prefix"] = legacy_prefix + "/todos/" if "next_id_key" not in config: config["next_id_key"] = config["todo_prefix"].rstrip("/") + "/next-id" write_json_atomic(CONFIG_PATH, config) return config def grpcurl_json(endpoint: str, proto_dir: str, proto_file: str, service: str, payload: dict, ignore_errors=()) -> dict: command = [ GRPCURL_BIN, "-plaintext", "-import-path", proto_dir, "-proto", proto_file, "-d", json.dumps(payload, sort_keys=True), endpoint, service, ] result = subprocess.run( command, capture_output=True, text=True, timeout=20, check=False, ) combined = (result.stdout or "") + "\n" + (result.stderr or "") if result.returncode != 0: lowered = combined.lower() for pattern in ignore_errors: if pattern.lower() in lowered: return {} raise RuntimeError("grpcurl %s failed: %s" % (service, sanitize(combined))) output = result.stdout.strip() return json.loads(output) if output else {} def http_json(method: str, url: str, payload=None, timeout: int = 10) -> dict: body = None headers = {} if payload is not None: body = json.dumps(payload, sort_keys=True).encode("utf-8") headers["Content-Type"] = "application/json" request = urllib.request.Request(url, data=body, headers=headers, method=method) with urllib.request.urlopen(request, timeout=timeout) as response: return json.loads(response.read().decode("utf-8")) def ensure_service_account(config: dict) -> None: grpcurl_json( IAM_ENDPOINT, IAM_PROTO_DIR, IAM_PROTO_FILE, "iam.v1.IamAdmin/CreatePrincipal", { "id": config["principal_id"], "kind": "PRINCIPAL_KIND_SERVICE_ACCOUNT", "name": config["principal_id"], "orgId": ORG_ID, "projectId": PROJECT_ID, }, ignore_errors=("already exists", "alreadyexists"), ) def ensure_s3_credentials(config: dict) -> None: if config.get("access_key_id") and config.get("secret_key"): return response = grpcurl_json( IAM_ENDPOINT, IAM_PROTO_DIR, IAM_PROTO_FILE, "iam.v1.IamCredential/CreateS3Credential", { "principalId": config["principal_id"], "principalKind": 
"PRINCIPAL_KIND_SERVICE_ACCOUNT", "orgId": ORG_ID, "projectId": PROJECT_ID, "description": "vm-demo", }, ) config["access_key_id"] = response["accessKeyId"] config["secret_key"] = response["secretKey"] write_json_atomic(CONFIG_PATH, config) def s3_client(config: dict): return boto3.session.Session().client( "s3", endpoint_url=LIGHTNINGSTOR_S3_ENDPOINT, region_name=AWS_REGION, aws_access_key_id=config["access_key_id"], aws_secret_access_key=config["secret_key"], use_ssl=False, verify=False, config=Config( retries={"max_attempts": 8, "mode": "standard"}, s3={"addressing_style": "path"}, signature_version="s3v4", ), ) def ensure_bucket(config: dict, client) -> None: try: client.head_bucket(Bucket=config["bucket"]) return except ClientError as error: code = str(error.response.get("Error", {}).get("Code", "")) if code not in ("404", "NoSuchBucket", "NotFound", "400", "403"): raise client.create_bucket(Bucket=config["bucket"]) def flaredb_scan(config: dict, start: str, end: str) -> list: query = urllib.parse.urlencode( { "start": start, "end": end, "namespace": config["state_namespace"], } ) payload = http_json("GET", FLAREDB_BASE_URL + "/api/v1/scan?" 
+ query) return payload.get("data", {}).get("items", []) def flaredb_get_value(config: dict, key: str): items = flaredb_scan(config, key, key + "~") if not items: return None return items[0].get("value") def flaredb_put_value(config: dict, key: str, value: str) -> None: http_json( "PUT", FLAREDB_BASE_URL + "/api/v1/kv/" + urllib.parse.quote(key, safe=""), { "value": value, "namespace": config["state_namespace"], }, ) def flaredb_put_json(config: dict, key: str, payload: dict) -> None: flaredb_put_value(config, key, json.dumps(payload, sort_keys=True)) def flaredb_get_json(config: dict, key: str): value = flaredb_get_value(config, key) if value is None: return None return json.loads(value) def flaredb_next_todo_id(config: dict) -> int: value = flaredb_get_value(config, config["next_id_key"]) if value is None: return 1 return int(value) def todo_record_key(config: dict, todo_id: int) -> str: return config["todo_prefix"] + ("%06d" % todo_id) def todo_attachment_key(todo_id: int, filename: str) -> str: return ATTACHMENT_PREFIX + "/" + ("%06d" % todo_id) + "/" + safe_filename(filename) def todo_view(todo: dict) -> dict: attachment = todo.get("attachment") view = { "id": int(todo["id"]), "title": todo.get("title", ""), "details": todo.get("details", ""), "done": bool(todo.get("done", False)), "created_at": todo.get("created_at", ""), "updated_at": todo.get("updated_at", ""), "attachment": None, "attachment_url": "", } if isinstance(attachment, dict): view["attachment"] = { "filename": attachment.get("filename", ""), "content_type": attachment.get("content_type", "application/octet-stream"), "object_key": attachment.get("object_key", ""), "size": int(attachment.get("size", 0)), } view["attachment_url"] = "/api/todos/%s/attachment" % int(todo["id"]) return view def list_todos(config: dict) -> list: todos = [] for item in flaredb_scan(config, config["todo_prefix"], config["todo_prefix"] + "~"): try: payload = json.loads(item.get("value", "{}")) except json.JSONDecodeError: 
continue if isinstance(payload, dict) and "id" in payload: todos.append(payload) todos.sort(key=lambda todo: int(todo.get("id", 0))) return todos def snapshot_payload(config: dict, todos: list) -> dict: latest = todos[-1] if todos else None latest_attachment = latest.get("attachment") if isinstance(latest, dict) else None return { "bucket": config["bucket"], "hostname": socket.gethostname(), "latest_object_key": STATE_OBJECT_KEY, "latest_todo_id": int(latest["id"]) if latest else 0, "latest_todo_title": latest.get("title", "") if latest else "", "latest_attachment_object_key": latest_attachment.get("object_key", "") if isinstance(latest_attachment, dict) else "", "latest_attachment_filename": latest_attachment.get("filename", "") if isinstance(latest_attachment, dict) else "", "object_store_backend": "lightningstor", "object_store_endpoint": LIGHTNINGSTOR_S3_ENDPOINT, "root_boot_count": read_int(ROOT_BOOT_COUNT_PATH), "data_boot_count": read_int(DATA_BOOT_COUNT_PATH), "state_backend": "flaredb", "state_endpoint": FLAREDB_BASE_URL, "state_namespace": config["state_namespace"], "todo_prefix": config["todo_prefix"], "next_id_key": config["next_id_key"], "todo_count": len(todos), "attachment_count": sum(1 for todo in todos if isinstance(todo.get("attachment"), dict)), "todos": [todo_view(todo) for todo in todos], } def write_snapshot(config: dict, client, todos: list) -> dict: payload = snapshot_payload(config, todos) encoded = json.dumps(payload, sort_keys=True).encode("utf-8") client.put_object( Bucket=config["bucket"], Key=STATE_OBJECT_KEY, Body=encoded, ContentType="application/json", ) return payload def read_snapshot(config: dict, client) -> dict: response = client.get_object(Bucket=config["bucket"], Key=STATE_OBJECT_KEY) return json.loads(response["Body"].read().decode("utf-8")) def response_payload(config: dict, todos: list, snapshot: dict) -> dict: return { "status": "ok", "hostname": socket.gethostname(), "listen_port": LISTEN_PORT, "root_boot_count": 
read_int(ROOT_BOOT_COUNT_PATH), "data_boot_count": read_int(DATA_BOOT_COUNT_PATH), "state_backend": "flaredb", "state_endpoint": FLAREDB_BASE_URL, "state_namespace": config["state_namespace"], "todo_prefix": config["todo_prefix"], "next_id_key": config["next_id_key"], "object_store_backend": "lightningstor", "object_store_endpoint": LIGHTNINGSTOR_S3_ENDPOINT, "bucket": config["bucket"], "latest_object_key": snapshot["latest_object_key"], "latest_todo_id": int(snapshot["latest_todo_id"]), "latest_todo_title": snapshot["latest_todo_title"], "latest_attachment_object_key": snapshot["latest_attachment_object_key"], "latest_attachment_filename": snapshot["latest_attachment_filename"], "latest_object_todo_count": int(snapshot["todo_count"]), "todo_count": len(todos), "attachment_count": sum(1 for todo in todos if isinstance(todo.get("attachment"), dict)), "todos": [todo_view(todo) for todo in todos], } def bootstrap() -> None: deadline = time.time() + 120 while True: try: config = load_or_create_config() ensure_service_account(config) ensure_s3_credentials(config) client = s3_client(config) ensure_bucket(config, client) if flaredb_get_value(config, config["next_id_key"]) is None: flaredb_put_value(config, config["next_id_key"], "1") todos = list_todos(config) write_snapshot(config, client, todos) with BACKEND_LOCK: BACKEND["ready"] = True BACKEND["config"] = config BACKEND["s3_client"] = client BACKEND["last_error"] = "" return except Exception as error: message = sanitize(str(error)) with BACKEND_LOCK: BACKEND["last_error"] = message if time.time() >= deadline: raise RuntimeError(message) log_console("PHOTON_VM_DEMO_BOOTSTRAP_RETRY detail=%s" % message) time.sleep(2) def get_backend(): with BACKEND_LOCK: if not BACKEND["ready"]: raise RuntimeError(BACKEND["last_error"] or "backend not ready") return BACKEND["config"], BACKEND["s3_client"] def current_state() -> dict: config, client = get_backend() todos = list_todos(config) snapshot = write_snapshot(config, client, 
todos) snapshot = read_snapshot(config, client) return response_payload(config, todos, snapshot) def create_todo(title: str, details: str, attachment_name: str, attachment_file, attachment_size: int, attachment_content_type: str) -> dict: config, client = get_backend() try: with MUTATION_LOCK: todo_id = flaredb_next_todo_id(config) created_at = now_iso() attachment = None safe_name = safe_filename(attachment_name) if attachment_file is not None and attachment_size > 0: object_key = todo_attachment_key(todo_id, safe_name) attachment_file.file_object.seek(0) client.put_object( Bucket=config["bucket"], Key=object_key, Body=attachment_file.file_object, ContentLength=attachment_size, ContentType=attachment_content_type or "application/octet-stream", ) attachment = { "filename": safe_name, "content_type": attachment_content_type or "application/octet-stream", "object_key": object_key, "size": attachment_size, } todo = { "id": todo_id, "title": title.strip() or "Untitled task", "details": details.strip(), "done": False, "created_at": created_at, "updated_at": created_at, "attachment": attachment, } flaredb_put_json(config, todo_record_key(config, todo_id), todo) flaredb_put_value(config, config["next_id_key"], str(todo_id + 1)) todos = list_todos(config) snapshot = write_snapshot(config, client, todos) snapshot = read_snapshot(config, client) return response_payload(config, todos, snapshot) finally: if attachment_file is not None: try: attachment_file.close() except Exception: pass def toggle_todo(todo_id: int) -> dict: config, client = get_backend() with MUTATION_LOCK: todo = flaredb_get_json(config, todo_record_key(config, todo_id)) if not isinstance(todo, dict): raise KeyError("todo %s not found" % todo_id) todo["done"] = not bool(todo.get("done", False)) todo["updated_at"] = now_iso() flaredb_put_json(config, todo_record_key(config, todo_id), todo) todos = list_todos(config) snapshot = write_snapshot(config, client, todos) snapshot = read_snapshot(config, client) 
return response_payload(config, todos, snapshot) def attachment_response(todo_id: int): config, client = get_backend() todo = flaredb_get_json(config, todo_record_key(config, todo_id)) if not isinstance(todo, dict): raise KeyError("todo %s not found" % todo_id) attachment = todo.get("attachment") if not isinstance(attachment, dict): raise FileNotFoundError("todo %s has no attachment" % todo_id) response = client.get_object(Bucket=config["bucket"], Key=attachment["object_key"]) return { "filename": attachment.get("filename", "attachment.bin"), "content_type": attachment.get("content_type", "application/octet-stream"), "body": response["Body"].read(), } class Handler(BaseHTTPRequestHandler): server_version = "PhotonVMDemo/2.0" def log_message(self, format: str, *args) -> None: return def _send_bytes(self, body: bytes, status: int, content_type: str, extra_headers=None) -> None: self.send_response(status) self.send_header("Content-Type", content_type) self.send_header("Content-Length", str(len(body))) if extra_headers: for key, value in extra_headers.items(): self.send_header(key, value) self.end_headers() self.wfile.write(body) def _send_json(self, payload: dict, status: int = HTTPStatus.OK) -> None: body = json.dumps(payload, sort_keys=True).encode("utf-8") self._send_bytes(body, status, "application/json") def _send_html(self, body: str) -> None: self._send_bytes(body.encode("utf-8"), HTTPStatus.OK, "text/html; charset=utf-8") def _parsed_path(self): return urllib.parse.urlparse(self.path).path def _todo_id_from_path(self, path: str, suffix: str): prefix = "/api/todos/" if not path.startswith(prefix) or not path.endswith(suffix): return None value = path[len(prefix):len(path) - len(suffix)] if not value.isdigit(): return None return int(value) def _parse_todo_submission(self): content_type = self.headers.get("Content-Type", "") content_length = int(self.headers.get("Content-Length", "0") or "0") if content_type.startswith("application/json"): raw = 
self.rfile.read(content_length) payload = json.loads(raw.decode("utf-8") or "{}") return { "title": str(payload.get("title", "")), "details": str(payload.get("details", "")), "attachment_name": "", "attachment_file": None, "attachment_size": 0, "attachment_content_type": "", } if content_type.startswith("multipart/form-data"): os.makedirs(UPLOAD_TMP_DIR, exist_ok=True) fields = {} files = {} headers = { "Content-Type": content_type, "Content-Length": str(content_length), } def on_field(field) -> None: field_name = (field.field_name or b"").decode("utf-8", errors="replace") fields[field_name] = (field.value or b"").decode("utf-8", errors="replace") def on_file(file_obj) -> None: field_name = (file_obj.field_name or b"").decode("utf-8", errors="replace") files[field_name] = file_obj try: parser = create_form_parser( headers, on_field, on_file, config={ "UPLOAD_DIR": UPLOAD_TMP_DIR, "UPLOAD_DELETE_TMP": True, "MAX_MEMORY_FILE_SIZE": MAX_IN_MEMORY_UPLOAD, }, ) bytes_read = 0 while bytes_read < content_length: chunk = self.rfile.read(min(262144, content_length - bytes_read)) if not chunk: break parser.write(chunk) bytes_read += len(chunk) parser.finalize() except Exception: for file_obj in files.values(): try: file_obj.close() except Exception: pass raise title = fields.get("title", "") details = fields.get("details", "") attachment_file = files.get("attachment") attachment_name = "" attachment_size = 0 attachment_content_type = "" if attachment_file is not None and int(attachment_file.size or 0) > 0: attachment_name = (attachment_file.file_name or b"").decode("utf-8", errors="replace") attachment_size = int(attachment_file.size or 0) attachment_content_type = mimetypes.guess_type(attachment_name)[0] or "application/octet-stream" return { "title": title, "details": details, "attachment_name": attachment_name, "attachment_file": attachment_file, "attachment_size": attachment_size, "attachment_content_type": attachment_content_type, } else: raw = 
self.rfile.read(content_length) fields = urllib.parse.parse_qs(raw.decode("utf-8", errors="replace"), keep_blank_values=True) title = fields.get("title", [""])[0] details = fields.get("details", [""])[0] return { "title": title, "details": details, "attachment_name": "", "attachment_file": None, "attachment_size": 0, "attachment_content_type": "", } def do_GET(self) -> None: path = self._parsed_path() if path == "/": self._send_html(UI_HTML) return if path == "/health": with BACKEND_LOCK: if BACKEND["ready"]: self._send_json({"status": "ok"}) else: self._send_json( {"status": "starting", "detail": BACKEND["last_error"]}, HTTPStatus.SERVICE_UNAVAILABLE, ) return if path in ("/state", "/api/todos"): try: self._send_json(current_state()) except Exception as error: self._send_json( {"error": "backend_error", "detail": sanitize(str(error))}, HTTPStatus.SERVICE_UNAVAILABLE, ) return attachment_todo_id = self._todo_id_from_path(path, "/attachment") if attachment_todo_id is not None: try: payload = attachment_response(attachment_todo_id) self._send_bytes( payload["body"], HTTPStatus.OK, payload["content_type"], { "Content-Disposition": 'inline; filename="%s"' % payload["filename"], }, ) except FileNotFoundError: self._send_json({"error": "not_found"}, HTTPStatus.NOT_FOUND) except KeyError: self._send_json({"error": "not_found"}, HTTPStatus.NOT_FOUND) except Exception as error: self._send_json( {"error": "backend_error", "detail": sanitize(str(error))}, HTTPStatus.SERVICE_UNAVAILABLE, ) return self._send_json({"error": "not_found"}, HTTPStatus.NOT_FOUND) def do_POST(self) -> None: path = self._parsed_path() if path == "/api/todos": try: submission = self._parse_todo_submission() payload = create_todo( submission["title"], submission["details"], submission["attachment_name"], submission["attachment_file"], submission["attachment_size"], submission["attachment_content_type"], ) log_console( "PHOTON_VM_DEMO_TODO_CREATED count=%s title=%s attachment=%s" % ( 
payload["todo_count"], sanitize(payload["latest_todo_title"]), payload["latest_attachment_filename"] or "none", ) ) self._send_json(payload, HTTPStatus.CREATED) except Exception as error: self._send_json( {"error": "backend_error", "detail": sanitize(str(error))}, HTTPStatus.SERVICE_UNAVAILABLE, ) return toggle_todo_id = self._todo_id_from_path(path, "/toggle") if toggle_todo_id is not None: try: payload = toggle_todo(toggle_todo_id) log_console( "PHOTON_VM_DEMO_TODO_TOGGLED id=%s latest=%s" % (toggle_todo_id, sanitize(payload["latest_todo_title"])) ) self._send_json(payload) except KeyError: self._send_json({"error": "not_found"}, HTTPStatus.NOT_FOUND) except Exception as error: self._send_json( {"error": "backend_error", "detail": sanitize(str(error))}, HTTPStatus.SERVICE_UNAVAILABLE, ) return self._send_json({"error": "not_found"}, HTTPStatus.NOT_FOUND) def main() -> None: os.makedirs(DATA_MOUNT, exist_ok=True) bootstrap() with BACKEND_LOCK: config = BACKEND["config"] server = ThreadingHTTPServer((LISTEN_HOST, LISTEN_PORT), Handler) log_console( "PHOTON_VM_DEMO_WEB_READY count=%s port=%s app=todo bucket=%s namespace=%s" % ( read_int(ROOT_BOOT_COUNT_PATH), LISTEN_PORT, config["bucket"], config["state_namespace"], ) ) server.serve_forever() if __name__ == "__main__": try: main() except Exception: log_console("PHOTON_VM_DEMO_FATAL") for line in traceback.format_exc().splitlines(): log_console(sanitize(line)) raise ''; in { imports = [ (modulesPath + "/virtualisation/disk-image.nix") (modulesPath + "/profiles/qemu-guest.nix") ]; image = { baseName = "photon-vm-smoke"; format = "qcow2"; efiSupport = false; }; virtualisation.diskSize = 4096; boot.kernelParams = [ "console=ttyS0" "console=tty0" ]; networking.hostName = "photon-vm-smoke"; networking.useDHCP = lib.mkDefault true; networking.firewall.enable = false; services.getty.autologinUser = "root"; users.mutableUsers = false; users.users.root.hashedPassword = 
"$6$ultracloud$aUJCEE5wm/b5O.9KIKGm84qUWdWXwnebsFEiMBF7u9Y7AOWodaMrjbbKGMOf0X59VJyJeMRsgbT7VWeqMHpUe.";

  documentation.enable = false;
  services.openssh.enable = false;

  # Filesystem tooling used interactively and by the smoke service.
  environment.systemPackages = [ pkgs.e2fsprogs pkgs.util-linux ];

  # Boot marker + data-disk manager: locates the secondary disk, formats it on
  # first use, mounts it, bumps both boot counters, then emits heartbeats on
  # the serial console so the host-side test can observe liveness.
  systemd.services.photon-vm-smoke = {
    description = "UltraCloud VM smoke marker";
    wantedBy = [ "multi-user.target" ];
    wants = [ "systemd-udev-settle.service" ];
    after = [ "local-fs.target" "systemd-udev-settle.service" ];
    path = with pkgs; [ bash coreutils e2fsprogs gawk gnugrep gnused util-linux ];
    serviceConfig = {
      Type = "simple";
      Restart = "always";
      RestartSec = "1";
    };
    script = ''
      # Root-disk boot counter, persisted on the root filesystem.
      mkdir -p /var/lib/photon-vm-smoke
      count_file=/var/lib/photon-vm-smoke/boot-count
      if [ -f "$count_file" ]; then
        count=$(( $(cat "$count_file") + 1 ))
      else
        count=1
      fi
      echo "$count" > "$count_file"
      echo "PHOTON_VM_SMOKE_READY count=$count" >/dev/ttyS0

      # Identify the device backing / so we never pick it as the data disk.
      root_source="$(lsblk -nrpo NAME,MOUNTPOINT | awk '$2 == "/" { print $1; exit }' 2>/dev/null || true)"
      root_disk=""
      if [ -n "$root_source" ] && [ -b "$root_source" ]; then
        root_disk="$(lsblk -ndo PKNAME "$root_source" 2>/dev/null || true)"
        if [ -z "$root_disk" ]; then
          root_disk="$(basename "$root_source")"
        else
          root_disk="/dev/$root_disk"
        fi
      fi
      echo "PHOTON_VM_SMOKE_DATA_ROOT count=$count source=''${root_source:-none} root=''${root_disk:-unknown}" >/dev/ttyS0

      # Prefer the labelled device if a previous boot already formatted it.
      data_disk=""
      if [ -b /dev/disk/by-label/photon-vm-data ]; then
        data_disk="$(readlink -f /dev/disk/by-label/photon-vm-data)"
      fi

      # Fallback: first writable whole disk that is neither the root source,
      # the root's parent disk, nor anything mounted at /.
      pick_data_disk() {
        while IFS= read -r disk; do
          [ -n "$disk" ] || continue
          if [ -n "$root_source" ] && [ "$disk" = "$root_source" ]; then
            continue
          fi
          if [ -n "$root_disk" ] && [ "$disk" = "$root_disk" ]; then
            continue
          fi
          if lsblk -nrpo MOUNTPOINT "$disk" 2>/dev/null | grep -qx '/'; then
            continue
          fi
          printf '%s\n' "$disk"
          return 0
        done < <(lsblk -dnpr -o NAME,TYPE,RO | awk '$2 == "disk" && $3 == "0" { print $1 }')
        return 1
      }

      # The disk may appear late (hotplug/udev); rescan for up to 60 seconds.
      deadline=$((SECONDS + 60))
      scan_attempt=0
      while [ -z "$data_disk" ] && [ "$SECONDS" -lt "$deadline" ]; do
        scan_attempt=$((scan_attempt + 1))
        data_disk="$(pick_data_disk || true)"
        echo "PHOTON_VM_SMOKE_DATA_SCAN count=$count attempt=$scan_attempt data=''${data_disk:-none}" >/dev/ttyS0
        [ -n "$data_disk" ] && break
        udevadm settle >/dev/null 2>&1 || true
        sleep 1
      done
      if [ -z "$data_disk" ]; then
        echo "PHOTON_VM_SMOKE_DATA_MISSING count=$count" >/dev/ttyS0
        lsblk -dn -o NAME,TYPE,SIZE >/dev/ttyS0 2>&1 || true
        exit 1
      fi
      echo "PHOTON_VM_SMOKE_DATA_PROBE count=$count root=''${root_disk:-unknown} data=$(basename "$data_disk")" >/dev/ttyS0

      # Format only if the device carries no recognisable signature yet.
      mkdir -p /mnt/photon-vm-data
      if ! blkid "$data_disk" >/dev/null 2>&1; then
        mkfs_output="$(mkfs.ext4 -L photon-vm-data -F "$data_disk" 2>&1)" || {
          # Collapse runs of whitespace so the diagnostic fits one console line.
          # (fix: ' *' also matches the empty string and would inject a space
          # between every character; '  *' collapses only real space runs)
          mkfs_output="$(printf '%s' "$mkfs_output" | tr '\r\n' ' ' | sed 's/  */ /g')"
          echo "PHOTON_VM_SMOKE_DATA_ERROR count=$count step=mkfs device=$(basename "$data_disk") detail=''${mkfs_output}" >/dev/ttyS0
          lsblk -dn -o NAME,TYPE,RO,SIZE >/dev/ttyS0 2>&1 || true
          blockdev --getsize64 "$data_disk" >/dev/ttyS0 2>&1 || true
          exit 1
        }
      fi
      if ! mountpoint -q /mnt/photon-vm-data; then
        if ! mount "$data_disk" /mnt/photon-vm-data; then
          echo "PHOTON_VM_SMOKE_DATA_ERROR count=$count step=mount device=$(basename "$data_disk")" >/dev/ttyS0
          lsblk -f >/dev/ttyS0 2>&1 || true
          exit 1
        fi
      fi

      # Data-disk boot counter, persisted on the data disk itself so it
      # survives root-disk replacement and VM migration.
      data_count_file=/mnt/photon-vm-data/boot-count
      if [ -f "$data_count_file" ]; then
        data_count=$(( $(cat "$data_count_file") + 1 ))
      else
        data_count=1
      fi
      echo "$data_count" > "$data_count_file"
      sync
      echo "PHOTON_VM_SMOKE_DATA_READY count=$data_count device=$(basename "$data_disk")" >/dev/ttyS0

      # Liveness heartbeat for the host-side watcher.
      while true; do
        echo "PHOTON_VM_SMOKE_HEARTBEAT count=$count ts=$(date +%s)" >/dev/ttyS0
        sleep 2
      done
    '';
  };

  # Waits for the data-disk mount, then runs the Python demo API with its
  # output mirrored to the serial console.
  systemd.services.photon-vm-demo-api = {
    description = "UltraCloud VM demo web app";
    wantedBy = [ "multi-user.target" ];
    wants = [ "network-online.target" "photon-vm-smoke.service" ];
    after = [ "network-online.target" "photon-vm-smoke.service" ];
    path = with pkgs; [ bash coreutils util-linux ];
    serviceConfig = {
      Type = "simple";
      Restart = "always";
      RestartSec = "1";
    };
    script = ''
      deadline=$((SECONDS + 60))
      while ! mountpoint -q /mnt/photon-vm-data; do
        if [ "$SECONDS" -ge "$deadline" ]; then
          echo "PHOTON_VM_DEMO_WEB_ERROR step=mount-timeout" >/dev/ttyS0
          exit 1
        fi
        sleep 1
      done
      exec ${pythonWithBoto3}/bin/python3 ${photonVmDemoApi} >>/dev/ttyS0 2>&1
    '';
  };

  system.stateVersion = "24.05";
}