Initial commit

This commit is contained in:
Soma Nakamura 2026-02-13 17:08:29 +09:00
commit b4c72b4a11
Signed by: centra
GPG key ID: 0C09689D20B25ACA
14 changed files with 5399 additions and 0 deletions

3
.gitignore vendored Normal file
View file

@ -0,0 +1,3 @@
target/
state.json
*.log

2538
Cargo.lock generated Normal file

File diff suppressed because it is too large Load diff

22
Cargo.toml Normal file
View file

@ -0,0 +1,22 @@
[package]
name = "lightscale-server"
version = "0.1.0"
edition = "2021"
[dependencies]
anyhow = "1"
axum = { version = "0.7", features = ["json"] }
base64 = "0.22"
blake3 = "1"
clap = { version = "4", features = ["derive", "env"] }
ipnet = "2"
rand = "0.8"
serde = { version = "1", features = ["derive"] }
serde_json = "1"
sqlx = { version = "0.8", features = ["json", "postgres", "runtime-tokio-rustls"] }
thiserror = "1"
time = { version = "0.3", features = ["serde", "formatting"] }
tokio = { version = "1", features = ["fs", "io-util", "macros", "rt-multi-thread"] }
tracing = "0.1"
tracing-subscriber = { version = "0.3", features = ["fmt", "env-filter"] }
uuid = { version = "1", features = ["serde", "v4"] }

139
README.md Normal file
View file

@ -0,0 +1,139 @@
# lightscale-server
Minimal control-plane server for Lightscale. This version focuses on network, node, and token
management and returns netmap data to clients. It does not implement the data plane (WireGuard,
TURN) yet.
## Run
```sh
cargo run -- --listen 0.0.0.0:8080 --state ./state.json
```
To protect admin endpoints, set an admin token (also supports `LIGHTSCALE_ADMIN_TOKEN`):
```sh
cargo run -- --listen 0.0.0.0:8080 --state ./state.json --admin-token <token>
```
Use a shared Postgres/CockroachDB backend for multi-server control plane:
```sh
cargo run -- --listen 0.0.0.0:8080 --db-url 'postgres://lightscale@127.0.0.1/lightscale?sslmode=disable'
```
Optional relay config (control-plane only for now):
```sh
cargo run -- --listen 0.0.0.0:8080 --state ./state.json \
--stun stun1.example.com:3478,stun2.example.com:3478 \
--turn turn.example.com:3478 \
--stream-relay relay.example.com:443 \
--udp-relay relay.example.com:3478 \
--udp-relay-listen 0.0.0.0:3478 \
--stream-relay-listen 0.0.0.0:443
```
These values are surfaced in the netmap for clients. A minimal UDP relay is available when
`--udp-relay-listen` is set, and a minimal stream relay is available with
`--stream-relay-listen`. TURN is still unimplemented.
IPv6-only control plane is supported by binding to an IPv6 address and using IPv6 control URLs
from clients, for example:
```sh
cargo run -- --listen '[::]:8080' --db-url 'postgres://lightscale@127.0.0.1/lightscale?sslmode=disable'
```
## API quickstart
Create a network:
```sh
curl -X POST http://127.0.0.1:8080/v1/networks \
-H 'authorization: Bearer <admin_token>' \
-H 'content-type: application/json' \
-d '{"name":"lab","requires_approval":true,"bootstrap_token_ttl_seconds":3600,"bootstrap_token_uses":1,"bootstrap_token_tags":["dev"]}'
```
Create an enrollment token later:
```sh
curl -X POST http://127.0.0.1:8080/v1/networks/<network_id>/tokens \
-H 'authorization: Bearer <admin_token>' \
-H 'content-type: application/json' \
-d '{"ttl_seconds":3600,"uses":1,"tags":[]}'
```
Revoke an enrollment token:
```sh
curl -X POST http://127.0.0.1:8080/v1/tokens/<token>/revoke \
-H 'authorization: Bearer <admin_token>'
```
Register a node:
```sh
curl -X POST http://127.0.0.1:8080/v1/register \
-H 'content-type: application/json' \
-d '{"token":"<token>","node_name":"laptop","machine_public_key":"...","wg_public_key":"..."}'
```
Register a node using an auth URL flow:
```sh
curl -X POST http://127.0.0.1:8080/v1/register-url \
-H 'content-type: application/json' \
-d '{"network_id":"<network_id>","node_name":"laptop","machine_public_key":"...","wg_public_key":"..."}'
```
Then open the returned `auth_path` on the server to approve:
```sh
curl http://127.0.0.1:8080/v1/register/approve/<node_id>/<secret>
```
Manual approval endpoint (for admins):
```sh
curl -X POST http://127.0.0.1:8080/v1/admin/nodes/<node_id>/approve \
-H 'authorization: Bearer <admin_token>'
```
List nodes in a network (admin):
```sh
curl http://127.0.0.1:8080/v1/admin/networks/<network_id>/nodes \
-H 'authorization: Bearer <admin_token>'
```
Update a node's name or tags (admin):
```sh
curl -X PUT http://127.0.0.1:8080/v1/admin/nodes/<node_id> \
-H 'authorization: Bearer <admin_token>' \
-H 'content-type: application/json' \
-d '{"name":"laptop","tags":["dev","lab"]}'
```
Heartbeat and update endpoints/routes (optional listen_port lets the server add the
observed public IP as an endpoint):
```sh
curl -X POST http://127.0.0.1:8080/v1/heartbeat \
-H 'content-type: application/json' \
-d '{"node_id":"<node_id>","endpoints":["203.0.113.1:51820"],"listen_port":51820,"routes":[]}'
```
Fetch netmap:
```sh
curl http://127.0.0.1:8080/v1/netmap/<node_id>
```
Long-poll for netmap updates:
```sh
curl "http://127.0.0.1:8080/v1/netmap/<node_id>/longpoll?since=0&timeout_seconds=30"
```

61
flake.lock generated Normal file
View file

@ -0,0 +1,61 @@
{
"nodes": {
"flake-utils": {
"inputs": {
"systems": "systems"
},
"locked": {
"lastModified": 1731533236,
"narHash": "sha256-l0KFg5HjrsfsO/JpG+r7fRrqm12kzFHyUHqHCVpMMbI=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "11707dc2f618dd54ca8739b309ec4fc024de578b",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "flake-utils",
"type": "github"
}
},
"nixpkgs": {
"locked": {
"lastModified": 1768564909,
"narHash": "sha256-Kell/SpJYVkHWMvnhqJz/8DqQg2b6PguxVWOuadbHCc=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "e4bae1bd10c9c57b2cf517953ab70060a828ee6f",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixos-unstable",
"repo": "nixpkgs",
"type": "github"
}
},
"root": {
"inputs": {
"flake-utils": "flake-utils",
"nixpkgs": "nixpkgs"
}
},
"systems": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
}
}
},
"root": "root",
"version": 7
}

23
flake.nix Normal file
View file

@ -0,0 +1,23 @@
# Dev-shell flake: provides the Rust toolchain (rustc, cargo, rustfmt,
# clippy, rust-analyzer) for every default system via flake-utils.
{
  inputs = {
    nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable";
    flake-utils.url = "github:numtide/flake-utils";
  };
  outputs = { self, nixpkgs, flake-utils }:
    flake-utils.lib.eachDefaultSystem (system:
      let
        pkgs = import nixpkgs { inherit system; };
      in
      {
        # `nix develop` drops into a shell with these tools on PATH.
        devShells.default = pkgs.mkShell {
          buildInputs = [
            pkgs.rustc
            pkgs.cargo
            pkgs.rustfmt
            pkgs.clippy
            pkgs.rust-analyzer
          ];
        };
      });
}

1402
src/api.rs Normal file

File diff suppressed because it is too large Load diff

9
src/app.rs Normal file
View file

@ -0,0 +1,9 @@
use crate::model::RelayConfig;
use crate::state::StateStore;
/// Shared application state handed to every HTTP handler via
/// `axum::Extension` (see the router setup in `main.rs`).
#[derive(Clone)]
pub struct AppState {
    /// Persistent store for networks, nodes, tokens, and the audit log
    /// (JSON file or Postgres backend).
    pub store: StateStore,
    /// Relay/STUN/TURN addresses surfaced to clients in the netmap.
    pub relay: RelayConfig,
    /// Bearer token required for admin endpoints; `None` leaves them
    /// unsecured (main logs a warning in that case).
    pub admin_token: Option<String>,
}

143
src/main.rs Normal file
View file

@ -0,0 +1,143 @@
mod api;
mod app;
mod model;
mod netid;
mod stream_relay;
mod udp_relay;
mod state;
use crate::api::{
admin_nodes, approve_node, approve_node_secret, audit_log, create_network, create_token,
get_acl, get_key_policy, heartbeat, netmap, netmap_longpoll, node_keys, register,
register_url, revoke_node, revoke_token, rotate_keys, update_acl, update_key_policy,
update_node,
};
use crate::app::AppState;
use crate::model::RelayConfig;
use axum::routing::{get, post, put};
use axum::Router;
use clap::Parser;
use state::StateStore;
use std::net::SocketAddr;
use std::path::PathBuf;
use tracing_subscriber::EnvFilter;
// Command-line arguments. Plain `//` comments are used deliberately:
// clap-derive turns `///` doc comments into --help text, which would
// change the program's runtime output.
#[derive(Parser, Debug)]
#[command(name = "lightscale-server")]
struct Args {
    // Socket address the HTTP control plane binds to.
    #[arg(long, default_value = "0.0.0.0:8080")]
    listen: String,
    // Path of the JSON state file (used when --db-url is not given).
    #[arg(long, default_value = "state.json")]
    state: PathBuf,
    // Postgres/CockroachDB connection URL; when set it overrides the
    // file backend entirely.
    #[arg(long)]
    db_url: Option<String>,
    // Bearer token protecting admin endpoints.
    #[arg(long, env = "LIGHTSCALE_ADMIN_TOKEN")]
    admin_token: Option<String>,
    // STUN servers advertised to clients (comma separated).
    #[arg(long, value_delimiter = ',')]
    stun: Vec<String>,
    // TURN servers advertised to clients (comma separated, control-plane
    // only — TURN itself is unimplemented per the README).
    #[arg(long, value_delimiter = ',')]
    turn: Vec<String>,
    // Stream relay servers advertised to clients (comma separated).
    #[arg(long, value_delimiter = ',')]
    stream_relay: Vec<String>,
    // UDP relay servers advertised to clients (comma separated).
    #[arg(long, value_delimiter = ',')]
    udp_relay: Vec<String>,
    // When set, run the built-in UDP relay on this address.
    #[arg(long)]
    udp_relay_listen: Option<String>,
    // When set, run the built-in stream (TCP) relay on this address.
    #[arg(long)]
    stream_relay_listen: Option<String>,
}
#[tokio::main]
async fn main() -> anyhow::Result<()> {
tracing_subscriber::fmt()
.with_env_filter(EnvFilter::from_default_env())
.init();
let args = Args::parse();
let store = if let Some(db_url) = args.db_url.as_deref() {
StateStore::load_db(db_url).await?
} else {
StateStore::load(Some(args.state)).await?
};
let relay = RelayConfig {
stun_servers: args.stun,
turn_servers: args.turn,
stream_relay_servers: args.stream_relay,
udp_relay_servers: args.udp_relay,
};
if args.admin_token.is_none() {
tracing::warn!("admin token not set; admin endpoints are unsecured");
}
let app_state = AppState {
store,
relay,
admin_token: args.admin_token,
};
let app = Router::new()
.route("/healthz", get(healthz))
.route("/v1/networks", post(create_network))
.route("/v1/networks/:network_id/tokens", post(create_token))
.route(
"/v1/networks/:network_id/acl",
get(get_acl).put(update_acl),
)
.route(
"/v1/networks/:network_id/key-policy",
get(get_key_policy).put(update_key_policy),
)
.route("/v1/tokens/:token_id/revoke", post(revoke_token))
.route("/v1/register", post(register))
.route("/v1/register-url", post(register_url))
.route(
"/v1/register/approve/:node_id/:secret",
get(approve_node_secret),
)
.route("/v1/admin/nodes/:node_id/approve", post(approve_node))
.route("/v1/nodes/:node_id/rotate-keys", post(rotate_keys))
.route("/v1/nodes/:node_id/revoke", post(revoke_node))
.route("/v1/nodes/:node_id/keys", get(node_keys))
.route(
"/v1/admin/networks/:network_id/nodes",
get(admin_nodes),
)
.route("/v1/admin/nodes/:node_id", put(update_node))
.route("/v1/audit", get(audit_log))
.route("/v1/heartbeat", post(heartbeat))
.route("/v1/netmap/:node_id", get(netmap))
.route("/v1/netmap/:node_id/longpoll", get(netmap_longpoll))
.layer(axum::Extension(app_state));
let addr: SocketAddr = args.listen.parse()?;
tracing::info!("listening on {}", addr);
let listener = tokio::net::TcpListener::bind(addr).await?;
if let Some(listen) = args.udp_relay_listen {
let udp_addr: SocketAddr = listen.parse()?;
tokio::spawn(async move {
if let Err(err) = udp_relay::run(udp_addr).await {
tracing::error!("udp relay error: {}", err);
}
});
tracing::info!("udp relay listening on {}", udp_addr);
}
if let Some(listen) = args.stream_relay_listen {
let stream_addr: SocketAddr = listen.parse()?;
tokio::spawn(async move {
if let Err(err) = stream_relay::run(stream_addr).await {
tracing::error!("stream relay error: {}", err);
}
});
tracing::info!("stream relay listening on {}", stream_addr);
}
axum::serve(listener, app.into_make_service_with_connect_info::<SocketAddr>()).await?;
Ok(())
}
/// Liveness probe handler: unconditionally reports `"ok"`.
async fn healthz() -> &'static str {
    const BODY: &str = "ok";
    BODY
}

453
src/model.rs Normal file
View file

@ -0,0 +1,453 @@
use serde::{Deserialize, Serialize};
/// Persistent, server-side record of one overlay network.
#[derive(Clone, Serialize, Deserialize)]
pub struct NetworkState {
    pub id: String,
    pub name: String,
    // CIDR prefixes node addresses are allocated from (derived
    // deterministically from the network id; see netid.rs).
    pub overlay_v4: String,
    pub overlay_v6: String,
    pub dns_domain: String,
    // When true, freshly registered nodes need admin approval.
    #[serde(default)]
    pub requires_approval: bool,
    #[serde(default)]
    pub acl: AclPolicy,
    #[serde(default)]
    pub key_policy: KeyRotationPolicy,
    pub created_at: i64,
    // NOTE(review): presumably the next host offsets to allocate inside
    // the overlay prefixes — confirm against the allocation code in api.rs.
    pub next_ipv4: u32,
    pub next_ipv6: u128,
}
/// Persistent record of one node (device) in a network.
#[derive(Clone, Serialize, Deserialize)]
pub struct NodeState {
    pub id: String,
    pub network_id: String,
    pub name: String,
    pub machine_public_key: String,
    pub wg_public_key: String,
    // Overlay addresses assigned to this node.
    pub ipv4: String,
    pub ipv6: String,
    pub endpoints: Vec<String>,
    pub tags: Vec<String>,
    pub routes: Vec<Route>,
    #[serde(default)]
    pub created_at: i64,
    pub last_seen: i64,
    #[serde(default)]
    pub probe_requested_at: Option<i64>,
    // Defaults to true so records persisted before the approval flow
    // existed deserialize as already approved (see default_true).
    #[serde(default = "default_true")]
    pub approved: bool,
    #[serde(default)]
    pub approved_at: Option<i64>,
    // Secret for the auth-URL registration flow
    // (/v1/register/approve/:node_id/:secret).
    #[serde(default)]
    pub auth_secret: Option<String>,
    #[serde(default)]
    pub auth_expires_at: Option<i64>,
    #[serde(default)]
    pub node_token: Option<String>,
    // Set when the node is revoked; NodeInfo::revoked mirrors this.
    #[serde(default)]
    pub revoked_at: Option<i64>,
    // Prior public keys; presumably appended by the rotate-keys endpoint —
    // confirm in api.rs.
    #[serde(default)]
    pub key_history: Vec<KeyRecord>,
}
/// Persistent record of an enrollment token.
#[derive(Clone, Serialize, Deserialize)]
pub struct TokenState {
    pub token: String,
    pub network_id: String,
    pub expires_at: i64,
    pub uses_left: u32,
    pub tags: Vec<String>,
    #[serde(default)]
    pub revoked_at: Option<i64>,
}
/// A route attached to a node: an advertised subnet or an exit route.
#[derive(Clone, Serialize, Deserialize)]
pub struct Route {
    pub prefix: String,
    pub kind: RouteKind,
    pub enabled: bool,
    #[serde(default)]
    pub mapped_prefix: Option<String>,
}
/// Route category; serialized as "subnet" / "exit".
#[derive(Clone, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum RouteKind {
    Subnet,
    Exit,
}
/// Client-visible network description included in the netmap.
#[derive(Clone, Serialize, Deserialize)]
pub struct NetworkInfo {
    pub id: String,
    pub name: String,
    pub overlay_v4: String,
    pub overlay_v6: String,
    pub dns_domain: String,
    pub requires_approval: bool,
    // Flattened from the network's KeyRotationPolicy.
    #[serde(default)]
    pub key_rotation_max_age_seconds: Option<u64>,
}
/// The requesting node's own view of itself in the netmap.
#[derive(Clone, Serialize, Deserialize)]
pub struct NodeInfo {
    pub id: String,
    pub name: String,
    // "<name>.<dns_domain>"; built by NodeInfo::from_state.
    pub dns_name: String,
    pub ipv4: String,
    pub ipv6: String,
    pub wg_public_key: String,
    pub machine_public_key: String,
    pub endpoints: Vec<String>,
    pub tags: Vec<String>,
    pub routes: Vec<Route>,
    pub last_seen: i64,
    pub approved: bool,
    #[serde(default)]
    pub key_rotation_required: bool,
    #[serde(default)]
    pub revoked: bool,
}
/// Reduced view of another node, as shared with its peers (no machine
/// key or approval/revocation details).
#[derive(Clone, Serialize, Deserialize)]
pub struct PeerInfo {
    pub id: String,
    pub name: String,
    pub dns_name: String,
    pub ipv4: String,
    pub ipv6: String,
    pub wg_public_key: String,
    pub endpoints: Vec<String>,
    pub tags: Vec<String>,
    pub routes: Vec<Route>,
    pub last_seen: i64,
}
/// Everything a node needs to configure itself: its network, itself,
/// its peers, relay endpoints, and pending probe requests.
#[derive(Clone, Serialize, Deserialize)]
pub struct NetMap {
    pub network: NetworkInfo,
    pub node: NodeInfo,
    pub peers: Vec<PeerInfo>,
    pub relay: Option<RelayConfig>,
    #[serde(default)]
    pub probe_requests: Vec<ProbeRequest>,
    pub generated_at: i64,
    // State revision this map was generated from; presumably compared
    // against the long-poll `since` query parameter — confirm in api.rs.
    #[serde(default)]
    pub revision: u64,
}
/// Asks a node to probe a peer's endpoints/addresses.
#[derive(Clone, Serialize, Deserialize)]
pub struct ProbeRequest {
    pub peer_id: String,
    pub endpoints: Vec<String>,
    pub ipv4: String,
    pub ipv6: String,
    pub requested_at: i64,
}
/// Network-wide ACL: a default action plus a rule list.
#[derive(Clone, Serialize, Deserialize, Default)]
pub struct AclPolicy {
    #[serde(default)]
    pub default_action: AclAction,
    #[serde(default)]
    pub rules: Vec<AclRule>,
}
/// ACL verdict; serialized as "allow" / "deny".
#[derive(Clone, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum AclAction {
    Allow,
    Deny,
}
impl Default for AclAction {
    // Networks are open (allow-all) unless configured otherwise.
    fn default() -> Self {
        Self::Allow
    }
}
/// Selects nodes for an ACL rule by tag, id, or name, or `any` for all.
/// NOTE(review): exact match semantics (and how an empty selector
/// behaves) live in api.rs — confirm there.
#[derive(Clone, Serialize, Deserialize, Default)]
pub struct AclSelector {
    #[serde(default)]
    pub any: bool,
    #[serde(default)]
    pub tags: Vec<String>,
    #[serde(default)]
    pub node_ids: Vec<String>,
    #[serde(default)]
    pub names: Vec<String>,
}
/// One ACL rule: apply `action` to traffic matching `src` -> `dst`.
#[derive(Clone, Serialize, Deserialize)]
pub struct AclRule {
    pub action: AclAction,
    #[serde(default)]
    pub src: AclSelector,
    #[serde(default)]
    pub dst: AclSelector,
}
/// Per-network key-rotation policy; `None` means no forced rotation.
#[derive(Clone, Serialize, Deserialize, Default)]
pub struct KeyRotationPolicy {
    #[serde(default)]
    pub max_age_seconds: Option<u64>,
}
/// Which of a node's two key pairs a KeyRecord refers to.
#[derive(Clone, Copy, Serialize, Deserialize, PartialEq, Eq)]
#[serde(rename_all = "snake_case")]
pub enum KeyType {
    Machine,
    WireGuard,
}
/// Historical public-key entry kept in NodeState::key_history.
#[derive(Clone, Serialize, Deserialize)]
pub struct KeyRecord {
    pub key_type: KeyType,
    pub public_key: String,
    pub created_at: i64,
    #[serde(default)]
    pub revoked_at: Option<i64>,
}
/// Relay/STUN/TURN addresses advertised to clients via the netmap
/// (populated from the --stun/--turn/--*-relay flags in main.rs).
#[derive(Clone, Serialize, Deserialize, Default)]
pub struct RelayConfig {
    pub stun_servers: Vec<String>,
    pub turn_servers: Vec<String>,
    pub stream_relay_servers: Vec<String>,
    #[serde(default)]
    pub udp_relay_servers: Vec<String>,
}
/// Enrollment token as returned by the API (network_id omitted).
#[derive(Clone, Serialize, Deserialize)]
pub struct EnrollmentToken {
    pub token: String,
    pub expires_at: i64,
    pub uses_left: u32,
    pub tags: Vec<String>,
    pub revoked_at: Option<i64>,
}
/// Body for POST /v1/networks.
#[derive(Clone, Serialize, Deserialize)]
pub struct CreateNetworkRequest {
    pub name: String,
    pub dns_domain: Option<String>,
    pub requires_approval: Option<bool>,
    pub key_rotation_max_age_seconds: Option<u64>,
    // When set, a bootstrap enrollment token is minted alongside the
    // network with this TTL / use count / tag set (see README example).
    pub bootstrap_token_ttl_seconds: Option<u64>,
    pub bootstrap_token_uses: Option<u32>,
    pub bootstrap_token_tags: Option<Vec<String>>,
}
/// Response for POST /v1/networks.
#[derive(Clone, Serialize, Deserialize)]
pub struct CreateNetworkResponse {
    pub network: NetworkInfo,
    pub bootstrap_token: Option<EnrollmentToken>,
}
/// Body for POST /v1/networks/:network_id/tokens.
#[derive(Clone, Serialize, Deserialize)]
pub struct CreateTokenRequest {
    pub ttl_seconds: u64,
    pub uses: u32,
    pub tags: Vec<String>,
}
/// Response wrapper for token creation.
#[derive(Clone, Serialize, Deserialize)]
pub struct CreateTokenResponse {
    pub token: EnrollmentToken,
}
/// Response for GET /v1/admin/networks/:network_id/nodes.
#[derive(Clone, Serialize, Deserialize)]
pub struct AdminNodesResponse {
    pub nodes: Vec<NodeInfo>,
}
/// Body for PUT /v1/networks/:network_id/acl.
#[derive(Clone, Serialize, Deserialize)]
pub struct UpdateAclRequest {
    pub policy: AclPolicy,
}
/// Response echoing the stored ACL policy.
#[derive(Clone, Serialize, Deserialize)]
pub struct UpdateAclResponse {
    pub policy: AclPolicy,
}
/// Body for PUT /v1/admin/nodes/:node_id. NOTE(review): fields left
/// `None` are presumably unchanged — confirm in api.rs.
#[derive(Clone, Serialize, Deserialize)]
pub struct UpdateNodeRequest {
    pub name: Option<String>,
    pub tags: Option<Vec<String>>,
}
/// Response for the node update endpoint.
#[derive(Clone, Serialize, Deserialize)]
pub struct UpdateNodeResponse {
    pub node: NodeInfo,
}
/// Response for GET/PUT /v1/networks/:network_id/key-policy.
#[derive(Clone, Serialize, Deserialize)]
pub struct KeyPolicyResponse {
    pub policy: KeyRotationPolicy,
}
/// Body for POST /v1/nodes/:node_id/rotate-keys. NOTE(review): keys
/// left `None` are presumably kept unchanged — confirm in api.rs.
#[derive(Clone, Serialize, Deserialize)]
pub struct KeyRotationRequest {
    pub machine_public_key: Option<String>,
    pub wg_public_key: Option<String>,
}
/// Response carrying the node's keys after rotation.
#[derive(Clone, Serialize, Deserialize)]
pub struct KeyRotationResponse {
    pub node_id: String,
    pub machine_public_key: String,
    pub wg_public_key: String,
}
/// Response for GET /v1/nodes/:node_id/keys.
#[derive(Clone, Serialize, Deserialize)]
pub struct KeyHistoryResponse {
    pub node_id: String,
    pub keys: Vec<KeyRecord>,
}
/// One audit-log event.
#[derive(Clone, Serialize, Deserialize)]
pub struct AuditEntry {
    pub id: String,
    pub timestamp: i64,
    pub network_id: Option<String>,
    pub node_id: Option<String>,
    pub action: String,
    #[serde(default)]
    pub detail: Option<serde_json::Value>,
}
/// Response for GET /v1/audit.
#[derive(Clone, Serialize, Deserialize)]
pub struct AuditLogResponse {
    pub entries: Vec<AuditEntry>,
}
/// Body for POST /v1/register (token-based enrollment).
#[derive(Clone, Serialize, Deserialize)]
pub struct RegisterRequest {
    pub token: String,
    pub node_name: String,
    pub machine_public_key: String,
    pub wg_public_key: String,
}
/// Response for a successful registration.
#[derive(Clone, Serialize, Deserialize)]
pub struct RegisterResponse {
    pub node_token: String,
    pub netmap: NetMap,
}
/// Body for POST /v1/register-url (auth-URL enrollment flow).
#[derive(Clone, Serialize, Deserialize)]
pub struct RegisterUrlRequest {
    pub network_id: String,
    pub node_name: String,
    pub machine_public_key: String,
    pub wg_public_key: String,
    pub ttl_seconds: Option<u64>,
}
/// Response for POST /v1/register-url; `auth_path` is opened on the
/// server to approve the pending node (see README).
#[derive(Clone, Serialize, Deserialize)]
pub struct RegisterUrlResponse {
    pub node_id: String,
    pub network_id: String,
    pub ipv4: String,
    pub ipv6: String,
    pub auth_path: String,
    pub expires_at: i64,
    pub node_token: String,
}
/// Body for POST /v1/heartbeat.
#[derive(Clone, Serialize, Deserialize)]
pub struct HeartbeatRequest {
    pub node_id: String,
    pub endpoints: Vec<String>,
    // Per the README: when set, the server may add the observed public
    // IP with this port as an extra endpoint.
    pub listen_port: Option<u16>,
    pub routes: Vec<Route>,
    #[serde(default)]
    pub probe: Option<bool>,
}
/// Response for POST /v1/heartbeat: the node's fresh netmap.
#[derive(Clone, Serialize, Deserialize)]
pub struct HeartbeatResponse {
    pub netmap: NetMap,
}
impl From<&NetworkState> for NetworkInfo {
fn from(state: &NetworkState) -> Self {
Self {
id: state.id.clone(),
name: state.name.clone(),
overlay_v4: state.overlay_v4.clone(),
overlay_v6: state.overlay_v6.clone(),
dns_domain: state.dns_domain.clone(),
requires_approval: state.requires_approval,
key_rotation_max_age_seconds: state.key_policy.max_age_seconds,
}
}
}
impl NodeInfo {
pub fn from_state(node: &NodeState, dns_domain: &str, approved: bool, key_rotation_required: bool) -> Self {
Self {
id: node.id.clone(),
name: node.name.clone(),
dns_name: format!("{}.{}", node.name, dns_domain),
ipv4: node.ipv4.clone(),
ipv6: node.ipv6.clone(),
wg_public_key: node.wg_public_key.clone(),
machine_public_key: node.machine_public_key.clone(),
endpoints: node.endpoints.clone(),
tags: node.tags.clone(),
routes: node.routes.clone(),
last_seen: node.last_seen,
approved,
key_rotation_required,
revoked: node.revoked_at.is_some(),
}
}
}
impl From<(&NodeState, &str)> for PeerInfo {
fn from((node, dns_domain): (&NodeState, &str)) -> Self {
Self {
id: node.id.clone(),
name: node.name.clone(),
dns_name: format!("{}.{}", node.name, dns_domain),
ipv4: node.ipv4.clone(),
ipv6: node.ipv6.clone(),
wg_public_key: node.wg_public_key.clone(),
endpoints: node.endpoints.clone(),
tags: node.tags.clone(),
routes: node.routes.clone(),
last_seen: node.last_seen,
}
}
}
impl From<TokenState> for EnrollmentToken {
fn from(token: TokenState) -> Self {
Self {
token: token.token,
expires_at: token.expires_at,
uses_left: token.uses_left,
tags: token.tags,
revoked_at: token.revoked_at,
}
}
}
impl From<&TokenState> for EnrollmentToken {
fn from(token: &TokenState) -> Self {
Self {
token: token.token.clone(),
expires_at: token.expires_at,
uses_left: token.uses_left,
tags: token.tags.clone(),
revoked_at: token.revoked_at,
}
}
}
// Serde default for `NodeState::approved`: records persisted before the
// approval flag existed deserialize as already approved.
fn default_true() -> bool {
    true
}

31
src/netid.rs Normal file
View file

@ -0,0 +1,31 @@
use blake3::Hash;
use uuid::Uuid;
/// Derives the deterministic overlay prefixes for a network by hashing
/// its UUID with BLAKE3. Returns `(ipv4_cidr, ipv6_cidr)`.
pub fn derive_overlay_prefixes(network_id: &Uuid) -> (String, String) {
    let digest = blake3::hash(network_id.as_bytes());
    (derive_ipv4_overlay(&digest), derive_ipv6_ula(&digest))
}
/// Formats the first five hash bytes as an fd00::/8 ULA-style /48 prefix,
/// e.g. "fdab:cdef:0123::/48".
fn derive_ipv6_ula(hash: &Hash) -> String {
    let b = hash.as_bytes();
    format!(
        "fd{:02x}:{:02x}{:02x}:{:02x}{:02x}::/48",
        b[0], b[1], b[2], b[3], b[4]
    )
}
/// Maps 14 bits of the hash into the 100.64.0.0/10 (CGNAT) range,
/// yielding a /24: second octet in 64..=127, third octet in 0..=255.
fn derive_ipv4_overlay(hash: &Hash) -> String {
    let b = hash.as_bytes();
    let idx = u16::from_be_bytes([b[5], b[6]]) & 0x3fff;
    let second_octet = 64 + (idx >> 8) as u8;
    let third_octet = (idx & 0xff) as u8;
    format!("100.{}.{}.0/24", second_octet, third_octet)
}

195
src/state.rs Normal file
View file

@ -0,0 +1,195 @@
use crate::model::{AuditEntry, NetworkState, NodeState, TokenState};
use anyhow::Result;
use serde::{Deserialize, Serialize};
use sqlx::postgres::PgPoolOptions;
use sqlx::types::Json;
use sqlx::{PgPool, Row};
use std::collections::HashMap;
use std::path::{Path, PathBuf};
use std::sync::Arc;
use tokio::sync::RwLock;
/// The full control-plane state: one serializable document holding every
/// network, node, token, and the audit log.
#[derive(Clone, Serialize, Deserialize)]
pub struct State {
    // Schema version of the document (currently always 1; see Default).
    pub version: u32,
    // Monotonic counter bumped on every successful write (both backends);
    // presumably consulted by netmap long-polling — confirm in api.rs.
    #[serde(default)]
    pub revision: u64,
    pub networks: HashMap<String, NetworkState>,
    pub nodes: HashMap<String, NodeState>,
    pub tokens: HashMap<String, TokenState>,
    #[serde(default)]
    pub audit_log: Vec<AuditEntry>,
}
impl Default for State {
fn default() -> Self {
Self {
version: 1,
revision: 0,
networks: HashMap::new(),
nodes: HashMap::new(),
tokens: HashMap::new(),
audit_log: Vec::new(),
}
}
}
/// Handle to the control-plane state, backed either by a local JSON file
/// or by a single-row Postgres table.
#[derive(Clone)]
pub struct StateStore {
    backend: StoreBackend,
}
#[derive(Clone)]
enum StoreBackend {
    File(FileStore),
    Db(DbStore),
}
// In-memory state guarded by an RwLock; persisted to `path` (when set)
// after every write.
#[derive(Clone)]
struct FileStore {
    inner: Arc<RwLock<State>>,
    path: Option<PathBuf>,
}
// Connection pool to the shared database backend.
#[derive(Clone)]
struct DbStore {
    pool: PgPool,
}
impl StateStore {
    /// Opens the file-backed store. A missing file yields an empty state
    /// (first run); an unreadable or corrupt file is a hard error.
    ///
    /// Fix: the previous `unwrap_or_default()` silently replaced a
    /// corrupt state file with an empty state, which the next write would
    /// then overwrite — destroying all networks/nodes/tokens. Propagating
    /// the error instead lets the operator intervene.
    pub async fn load(path: Option<PathBuf>) -> Result<Self> {
        let state = match &path {
            Some(path) => load_state(path).await?,
            None => State::default(),
        };
        Ok(Self {
            backend: StoreBackend::File(FileStore {
                inner: Arc::new(RwLock::new(state)),
                path,
            }),
        })
    }
    /// Opens the database-backed store, creating and seeding the
    /// single-row state table if needed.
    pub async fn load_db(db_url: &str) -> Result<Self> {
        let pool = PgPoolOptions::new()
            .max_connections(5)
            .connect(db_url)
            .await?;
        init_db(&pool).await?;
        Ok(Self {
            backend: StoreBackend::Db(DbStore { pool }),
        })
    }
    /// Runs `f` against a read-only view of the state (in-memory guard
    /// for the file backend, a fresh fetch for the DB backend).
    pub async fn read<F, R>(&self, f: F) -> Result<R>
    where
        F: FnOnce(&State) -> Result<R>,
    {
        match &self.backend {
            StoreBackend::File(store) => {
                let guard = store.inner.read().await;
                f(&guard)
            }
            StoreBackend::Db(store) => {
                let state = load_state_db(&store.pool).await?;
                f(&state)
            }
        }
    }
    /// Runs `f` with mutable access, bumps the revision counter, and
    /// persists the result (file snapshot or row update in a transaction).
    pub async fn write<F, R>(&self, f: F) -> Result<R>
    where
        F: FnOnce(&mut State) -> Result<R>,
    {
        match &self.backend {
            StoreBackend::File(store) => {
                let mut guard = store.inner.write().await;
                let result = f(&mut guard)?;
                guard.revision = guard.revision.saturating_add(1);
                // Clone a snapshot so the lock is not held across file I/O.
                let snapshot = guard.clone();
                drop(guard);
                persist_file(store.path.as_deref(), snapshot).await?;
                Ok(result)
            }
            StoreBackend::Db(store) => write_state_db(&store.pool, f).await,
        }
    }
}
/// Reads and parses the state file. A missing file is treated as first
/// run and yields an empty state; any other I/O error (e.g. permission
/// denied) and malformed JSON are propagated rather than silently
/// masked, so real problems are not mistaken for a fresh install.
async fn load_state(path: &Path) -> Result<State> {
    match tokio::fs::read_to_string(path).await {
        Ok(contents) => Ok(serde_json::from_str(&contents)?),
        Err(err) if err.kind() == std::io::ErrorKind::NotFound => Ok(State::default()),
        Err(err) => Err(err.into()),
    }
}
async fn persist_file(path: Option<&Path>, state: State) -> Result<()> {
let Some(path) = path else {
return Ok(());
};
if let Some(parent) = path.parent() {
if !parent.as_os_str().is_empty() {
tokio::fs::create_dir_all(parent).await?;
}
}
let json = serde_json::to_string_pretty(&state)?;
tokio::fs::write(path, json).await?;
Ok(())
}
/// Creates the single-row state table and seeds it with an empty state
/// document on first run.
async fn init_db(pool: &PgPool) -> Result<()> {
    // App-specific advisory-lock key (ASCII "LSCL"); serializes multiple
    // servers initializing against the same database concurrently.
    const INIT_LOCK_KEY: i64 = 0x4c53434c;
    let mut tx = pool.begin().await?;
    sqlx::query("SELECT pg_advisory_xact_lock($1)")
        .bind(INIT_LOCK_KEY)
        .execute(&mut *tx)
        .await?;
    sqlx::query(
        "CREATE TABLE IF NOT EXISTS lightscale_state (id INT PRIMARY KEY, state JSONB NOT NULL)",
    )
    .execute(&mut *tx)
    .await?;
    // Seed the singleton id=1 row only if it does not exist yet.
    let exists = sqlx::query("SELECT 1 FROM lightscale_state WHERE id = 1")
        .fetch_optional(&mut *tx)
        .await?;
    if exists.is_none() {
        let state = State::default();
        sqlx::query("INSERT INTO lightscale_state (id, state) VALUES ($1, $2)")
            .bind(1i32)
            .bind(Json(&state))
            .execute(&mut *tx)
            .await?;
    }
    tx.commit().await?;
    Ok(())
}
/// Fetches the whole state document from the singleton id=1 row.
async fn load_state_db(pool: &PgPool) -> Result<State> {
    let row = sqlx::query("SELECT state FROM lightscale_state WHERE id = 1")
        .fetch_one(pool)
        .await?;
    // JSONB column decoded via sqlx's Json wrapper.
    let Json(state): Json<State> = row.try_get("state")?;
    Ok(state)
}
/// Applies `f` to the state inside a transaction. `SELECT ... FOR UPDATE`
/// row-locks the singleton row so concurrent servers serialize their
/// read-modify-write cycles and cannot lose each other's updates.
async fn write_state_db<F, R>(pool: &PgPool, f: F) -> Result<R>
where
    F: FnOnce(&mut State) -> Result<R>,
{
    let mut tx = pool.begin().await?;
    let row = sqlx::query("SELECT state FROM lightscale_state WHERE id = 1 FOR UPDATE")
        .fetch_one(&mut *tx)
        .await?;
    let Json(mut state): Json<State> = row.try_get("state")?;
    let result = f(&mut state)?;
    // Mirrors the revision bump done by the file backend.
    state.revision = state.revision.saturating_add(1);
    sqlx::query("UPDATE lightscale_state SET state = $1 WHERE id = 1")
        .bind(Json(&state))
        .execute(&mut *tx)
        .await?;
    tx.commit().await?;
    Ok(result)
}

220
src/stream_relay.rs Normal file
View file

@ -0,0 +1,220 @@
use anyhow::{anyhow, Result};
use std::collections::HashMap;
use std::net::SocketAddr;
use std::sync::Arc;
use std::sync::atomic::{AtomicU64, Ordering};
use tokio::io::{AsyncReadExt, AsyncWriteExt};
use tokio::net::{TcpListener, TcpStream};
use tokio::sync::{mpsc, RwLock};
use tracing::warn;
// Wire-format constants for the length-prefixed stream relay protocol.
const MAGIC: &[u8; 4] = b"LSR2";
const TYPE_REGISTER: u8 = 1;
const TYPE_SEND: u8 = 2;
const TYPE_DELIVER: u8 = 3;
// Fixed header: 4-byte magic, type, from-id length, to-id length, reserved.
const HEADER_LEN: usize = 8;
const MAX_ID_LEN: usize = 64;
// Upper bound on a single frame body; larger lengths are rejected.
const MAX_FRAME_LEN: usize = 64 * 1024;
// Monotonic connection id so multiple connections registered under the
// same node id can be told apart when one disconnects.
static NEXT_CONN_ID: AtomicU64 = AtomicU64::new(1);
/// One registered client connection: its id plus the channel feeding its
/// dedicated writer task.
#[derive(Clone)]
struct PeerConn {
    id: u64,
    sender: mpsc::UnboundedSender<Vec<u8>>,
}
/// Parsed inbound relay frames: a client either registers its node id or
/// asks the relay to forward a payload to another node id.
enum RelayPacket {
    Register { node_id: String },
    Send {
        from_id: String,
        to_id: String,
        payload: Vec<u8>,
    },
}
/// Accept loop for the stream relay. Binding failure is fatal, but a
/// failed `accept` (e.g. transient fd exhaustion) is now logged and
/// retried after a short pause instead of tearing the whole relay down
/// via `?` as before.
pub async fn run(listen: SocketAddr) -> Result<()> {
    let listener = TcpListener::bind(listen)
        .await
        .map_err(|err| anyhow!("stream relay bind failed: {}", err))?;
    let peers: Arc<RwLock<HashMap<String, Vec<PeerConn>>>> =
        Arc::new(RwLock::new(HashMap::new()));
    loop {
        let (stream, _) = match listener.accept().await {
            Ok(accepted) => accepted,
            Err(err) => {
                warn!("stream relay accept failed: {}", err);
                // Brief back-off so a persistent error cannot busy-spin.
                tokio::time::sleep(std::time::Duration::from_millis(100)).await;
                continue;
            }
        };
        let peers = peers.clone();
        // One task per client connection.
        tokio::spawn(async move {
            if let Err(err) = handle_connection(stream, peers).await {
                warn!("stream relay connection error: {}", err);
            }
        });
    }
}
/// Serves one relay client. A dedicated writer task drains this
/// connection's outbound channel while the current task reads frames.
/// The first frame must be a Register; subsequent Send frames are fanned
/// out to every connection registered under the destination node id.
async fn handle_connection(
    stream: TcpStream,
    peers: Arc<RwLock<HashMap<String, Vec<PeerConn>>>>,
) -> Result<()> {
    let (mut reader, mut writer) = stream.into_split();
    let (tx, mut rx) = mpsc::unbounded_channel::<Vec<u8>>();
    let conn_id = NEXT_CONN_ID.fetch_add(1, Ordering::Relaxed);
    // Writer task: serializes all outbound frames for this connection so
    // concurrent senders never interleave partial writes.
    let writer_task = tokio::spawn(async move {
        while let Some(frame) = rx.recv().await {
            if let Err(err) = write_frame(&mut writer, &frame).await {
                warn!("stream relay write failed: {}", err);
                break;
            }
        }
    });
    // Handshake: the very first frame must register this node id.
    let register = read_frame(&mut reader).await?;
    let packet = parse_packet(&register).ok_or_else(|| anyhow!("invalid register frame"))?;
    let node_id = match packet {
        RelayPacket::Register { node_id } => node_id,
        _ => return Err(anyhow!("expected register frame")),
    };
    {
        // Several simultaneous connections may share a node id; each gets
        // its own entry keyed by conn_id.
        let mut guard = peers.write().await;
        guard
            .entry(node_id.clone())
            .or_default()
            .push(PeerConn { id: conn_id, sender: tx });
    }
    loop {
        // Any read error (including clean EOF) ends the session.
        let frame = match read_frame(&mut reader).await {
            Ok(frame) => frame,
            Err(_) => break,
        };
        let Some(packet) = parse_packet(&frame) else {
            warn!("stream relay: invalid frame from {}", node_id);
            continue;
        };
        match packet {
            RelayPacket::Register { .. } => {
                // Re-registration mid-session is not supported.
                warn!("stream relay: unexpected register from {}", node_id);
            }
            RelayPacket::Send {
                from_id,
                to_id,
                payload,
            } => {
                // Senders may only use the node id they registered with.
                if from_id != node_id {
                    warn!("stream relay: spoofed from_id {} for {}", from_id, node_id);
                    continue;
                }
                let targets = peers.read().await.get(&to_id).cloned();
                if let Some(targets) = targets {
                    let deliver = build_packet(TYPE_DELIVER, &from_id, "", &payload)?;
                    // Best effort: a closed receiver just drops the frame.
                    for target in targets {
                        let _ = target.sender.send(deliver.clone());
                    }
                }
            }
        }
    }
    {
        // Unregister only this connection; keep siblings sharing the id.
        let mut guard = peers.write().await;
        if let Some(list) = guard.get_mut(&node_id) {
            list.retain(|conn| conn.id != conn_id);
            if list.is_empty() {
                guard.remove(&node_id);
            }
        }
    }
    writer_task.abort();
    Ok(())
}
/// Reads one frame: a big-endian u32 length prefix followed by that many
/// body bytes. Empty and oversized frames are rejected.
async fn read_frame(reader: &mut tokio::net::tcp::OwnedReadHalf) -> Result<Vec<u8>> {
    let mut prefix = [0u8; 4];
    reader.read_exact(&mut prefix).await?;
    let body_len = u32::from_be_bytes(prefix) as usize;
    if !(1..=MAX_FRAME_LEN).contains(&body_len) {
        return Err(anyhow!("invalid frame length {}", body_len));
    }
    let mut body = vec![0u8; body_len];
    reader.read_exact(&mut body).await?;
    Ok(body)
}
/// Writes one frame: a big-endian u32 length prefix followed by the body.
/// Mirrors `read_frame`'s bounds so peers can always re-read our output.
async fn write_frame(
    writer: &mut tokio::net::tcp::OwnedWriteHalf,
    body: &[u8],
) -> Result<()> {
    let body_len = body.len();
    if !(1..=MAX_FRAME_LEN).contains(&body_len) {
        return Err(anyhow!("invalid frame length {}", body_len));
    }
    writer.write_all(&(body_len as u32).to_be_bytes()).await?;
    writer.write_all(body).await?;
    Ok(())
}
/// Decodes one relay frame. Returns `None` for anything that is not a
/// well-formed LSR2 frame: short buffer, bad magic, over-long ids,
/// non-UTF-8 ids, or id fields inconsistent with the message type.
fn parse_packet(buf: &[u8]) -> Option<RelayPacket> {
    if buf.len() < HEADER_LEN {
        return None;
    }
    if &buf[0..4] != MAGIC {
        return None;
    }
    // Header layout: magic(4) | type(1) | from_len(1) | to_len(1) | reserved(1).
    let msg_type = buf[4];
    let from_len = buf[5] as usize;
    let to_len = buf[6] as usize;
    if from_len > MAX_ID_LEN || to_len > MAX_ID_LEN {
        return None;
    }
    let offset = HEADER_LEN;
    if buf.len() < offset + from_len + to_len {
        return None;
    }
    let from_end = offset + from_len;
    let to_end = from_end + to_len;
    let from_id = std::str::from_utf8(&buf[offset..from_end]).ok()?.to_string();
    let to_id = std::str::from_utf8(&buf[from_end..to_end]).ok()?.to_string();
    // Everything after the two ids is opaque payload.
    let payload = buf[to_end..].to_vec();
    match msg_type {
        TYPE_REGISTER => {
            // Register carries only a from_id; a to_id is malformed.
            if from_id.is_empty() || !to_id.is_empty() {
                None
            } else {
                Some(RelayPacket::Register { node_id: from_id })
            }
        }
        TYPE_SEND => {
            // Send needs both endpoints.
            if from_id.is_empty() || to_id.is_empty() {
                None
            } else {
                Some(RelayPacket::Send {
                    from_id,
                    to_id,
                    payload,
                })
            }
        }
        _ => None,
    }
}
/// Encodes a frame body: magic, type byte, the two id lengths, a
/// reserved zero byte, then the ids and the payload. Fails only when an
/// id exceeds MAX_ID_LEN.
fn build_packet(msg_type: u8, from_id: &str, to_id: &str, payload: &[u8]) -> Result<Vec<u8>> {
    if from_id.len() > MAX_ID_LEN || to_id.len() > MAX_ID_LEN {
        return Err(anyhow!("relay id too long"));
    }
    let mut frame = Vec::with_capacity(HEADER_LEN + from_id.len() + to_id.len() + payload.len());
    frame.extend_from_slice(MAGIC);
    frame.extend_from_slice(&[msg_type, from_id.len() as u8, to_id.len() as u8, 0]);
    frame.extend_from_slice(from_id.as_bytes());
    frame.extend_from_slice(to_id.as_bytes());
    frame.extend_from_slice(payload);
    Ok(frame)
}

160
src/udp_relay.rs Normal file
View file

@ -0,0 +1,160 @@
use anyhow::{anyhow, Result};
use std::collections::HashMap;
use std::net::SocketAddr;
use std::sync::Arc;
use tokio::net::UdpSocket;
use tokio::sync::RwLock;
use tokio::time::{sleep, Duration, Instant};
use tracing::warn;
// Wire-format constants for the datagram relay protocol.
const MAGIC: &[u8; 4] = b"LSR1";
const TYPE_REGISTER: u8 = 1;
const TYPE_SEND: u8 = 2;
const TYPE_DELIVER: u8 = 3;
// Fixed header: 4-byte magic, type, from-id length, to-id length, reserved.
const HEADER_LEN: usize = 8;
const MAX_ID_LEN: usize = 64;
// Registrations not refreshed within this window are dropped by
// cleanup_loop.
const PEER_TTL: Duration = Duration::from_secs(300);
/// Last known socket address (and freshness) for a registered node id.
#[derive(Clone)]
struct Peer {
    addr: SocketAddr,
    last_seen: Instant,
}
/// Parsed inbound datagrams: a client either registers its node id or
/// asks the relay to forward a payload to another node id.
enum RelayPacket {
    Register { node_id: String },
    Send {
        from_id: String,
        to_id: String,
        payload: Vec<u8>,
    },
}
/// Datagram loop for the UDP relay. Binding failure is fatal; per-packet
/// receive errors (which some platforms report for e.g. ICMP
/// port-unreachable on a previous send) are now logged and skipped
/// instead of terminating the relay via `?` as before. Likewise a
/// `build_packet` failure no longer kills the loop.
pub async fn run(listen: SocketAddr) -> Result<()> {
    let socket = UdpSocket::bind(listen)
        .await
        .map_err(|err| anyhow!("udp relay bind failed: {}", err))?;
    let peers: Arc<RwLock<HashMap<String, Peer>>> = Arc::new(RwLock::new(HashMap::new()));
    // Background task that expires stale registrations.
    let cleanup_peers = peers.clone();
    tokio::spawn(async move { cleanup_loop(cleanup_peers).await });
    let mut buf = vec![0u8; 2048];
    loop {
        let (len, addr) = match socket.recv_from(&mut buf).await {
            Ok(received) => received,
            Err(err) => {
                warn!("udp relay recv failed: {}", err);
                continue;
            }
        };
        let packet = match parse_packet(&buf[..len]) {
            Some(packet) => packet,
            None => {
                warn!("udp relay: invalid packet from {}", addr);
                continue;
            }
        };
        match packet {
            RelayPacket::Register { node_id } => {
                upsert_peer(&peers, node_id, addr).await;
            }
            RelayPacket::Send {
                from_id,
                to_id,
                payload,
            } => {
                // Any sender counts as alive at its current address.
                upsert_peer(&peers, from_id.clone(), addr).await;
                let target = peers.read().await.get(&to_id).cloned();
                if let Some(peer) = target {
                    // parse_packet already bounds id lengths, so this can
                    // only fail defensively; log rather than abort.
                    match build_packet(TYPE_DELIVER, &from_id, "", &payload) {
                        Ok(deliver) => {
                            if let Err(err) = socket.send_to(&deliver, peer.addr).await {
                                warn!("udp relay send failed: {}", err);
                            }
                        }
                        Err(err) => warn!("udp relay build failed: {}", err),
                    }
                } else {
                    warn!("udp relay: unknown target {}", to_id);
                }
            }
        }
    }
}
/// Records (or refreshes) the socket address a node id was last seen at.
async fn upsert_peer(peers: &Arc<RwLock<HashMap<String, Peer>>>, node_id: String, addr: SocketAddr) {
    let entry = Peer {
        addr,
        last_seen: Instant::now(),
    };
    peers.write().await.insert(node_id, entry);
}
/// Background task: once a minute, drop registrations that have been
/// idle longer than PEER_TTL.
async fn cleanup_loop(peers: Arc<RwLock<HashMap<String, Peer>>>) {
    loop {
        sleep(Duration::from_secs(60)).await;
        let now = Instant::now();
        peers
            .write()
            .await
            .retain(|_, peer| now.duration_since(peer.last_seen) < PEER_TTL);
    }
}
/// Decodes one relay datagram. Returns `None` for anything that is not a
/// well-formed LSR1 frame: short buffer, bad magic, over-long ids,
/// non-UTF-8 ids, or id fields inconsistent with the message type.
fn parse_packet(buf: &[u8]) -> Option<RelayPacket> {
    // Header layout: magic(4) | type(1) | from_len(1) | to_len(1) | reserved(1).
    let header = buf.get(..HEADER_LEN)?;
    if &header[..4] != MAGIC {
        return None;
    }
    let msg_type = header[4];
    let from_len = header[5] as usize;
    let to_len = header[6] as usize;
    if from_len > MAX_ID_LEN || to_len > MAX_ID_LEN {
        return None;
    }
    let from_end = HEADER_LEN + from_len;
    let to_end = from_end + to_len;
    if buf.len() < to_end {
        return None;
    }
    let from_id = std::str::from_utf8(&buf[HEADER_LEN..from_end]).ok()?.to_string();
    let to_id = std::str::from_utf8(&buf[from_end..to_end]).ok()?.to_string();
    // Everything after the two ids is opaque payload.
    let payload = buf[to_end..].to_vec();
    match msg_type {
        // Register carries only a from_id; Send needs both endpoints.
        TYPE_REGISTER if !from_id.is_empty() && to_id.is_empty() => {
            Some(RelayPacket::Register { node_id: from_id })
        }
        TYPE_SEND if !from_id.is_empty() && !to_id.is_empty() => Some(RelayPacket::Send {
            from_id,
            to_id,
            payload,
        }),
        _ => None,
    }
}
/// Encodes a datagram body: magic, type byte, the two id lengths, a
/// reserved zero byte, then the ids and the payload. Fails only when an
/// id exceeds MAX_ID_LEN.
fn build_packet(msg_type: u8, from_id: &str, to_id: &str, payload: &[u8]) -> Result<Vec<u8>> {
    if from_id.len() > MAX_ID_LEN || to_id.len() > MAX_ID_LEN {
        return Err(anyhow!("relay id too long"));
    }
    let mut frame = Vec::with_capacity(HEADER_LEN + from_id.len() + to_id.len() + payload.len());
    frame.extend_from_slice(MAGIC);
    frame.extend_from_slice(&[msg_type, from_id.len() as u8, to_id.len() as u8, 0]);
    frame.extend_from_slice(from_id.as_bytes());
    frame.extend_from_slice(to_id.as_bytes());
    frame.extend_from_slice(payload);
    Ok(frame)
}