Compare commits

...

2 commits

48 changed files with 3759 additions and 676 deletions

2
.cargo/config.toml Normal file
View file

@ -0,0 +1,2 @@
[build]
target-dir = "work/cargo-target"

View file

@ -17,6 +17,7 @@ jobs:
any_changed: ${{ steps.detect.outputs.any_changed }}
build_changed: ${{ steps.detect.outputs.build_changed }}
global_changed: ${{ steps.detect.outputs.global_changed }}
shared_crates: ${{ steps.detect.outputs.shared_crates }}
shared_crates_changed: ${{ steps.detect.outputs.shared_crates_changed }}
steps:
- uses: actions/checkout@v4
@ -81,15 +82,19 @@ jobs:
needs: filter
if: ${{ needs.filter.outputs.shared_crates_changed == 'true' }}
runs-on: ubuntu-latest
name: gate (shared crates)
strategy:
fail-fast: false
matrix:
crate: ${{ fromJSON(needs.filter.outputs.shared_crates) }}
name: gate (shared crate: ${{ matrix.crate }})
steps:
- uses: actions/checkout@v4
- uses: DeterminateSystems/nix-installer-action@v11
- uses: DeterminateSystems/magic-nix-cache-action@v8
- name: Run Shared Crates Gate
- name: Run Shared Crate Gate
run: |
nix run ./nix/ci#gate-ci -- --shared-crates --tier 0 --no-logs
nix run ./nix/ci#gate-ci -- --shared-crate ${{ matrix.crate }} --tier 0 --no-logs
# Build server packages (tier 1+)
build:

View file

@ -3,9 +3,11 @@
.PHONY: all build cluster-up cluster-down cluster-status cluster-validate cluster-smoke cluster-matrix cluster-bench-storage clean
# Build all services (using Nix)
PACKAGE ?= default
# Build a single package by default; set PACKAGE=default to build the full bundle.
build:
nix build .#packages.x86_64-linux.default
nix build .#$(PACKAGE)
# Cluster Management
cluster-up:

View file

@ -496,6 +496,8 @@ pub enum DnsPublishMode {
LoadBalancer,
/// Publish the first healthy instance IP directly.
Direct,
/// Publish all healthy instance IPs directly as a DNS record set.
DirectMulti,
}
/// Desired DNS publication for a service.
@ -894,9 +896,15 @@ pub struct PublishedLoadBalancerState {
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct PublishedDnsRecordState {
pub zone_id: String,
#[serde(default)]
pub record_id: String,
#[serde(default)]
pub record_ids: Vec<String>,
pub fqdn: String,
#[serde(default)]
pub value: String,
#[serde(default)]
pub values: Vec<String>,
}
/// Observed publication state stored separately from ServiceSpec desired state.
@ -1215,8 +1223,10 @@ mod tests {
dns: Some(PublishedDnsRecordState {
zone_id: "zone-1".to_string(),
record_id: "record-1".to_string(),
record_ids: vec!["record-1".to_string()],
fqdn: "api.test.cluster.local".to_string(),
value: "10.0.0.50".to_string(),
values: vec!["10.0.0.50".to_string()],
}),
observed_at: None,
};

View file

@ -30,7 +30,7 @@ pub async fn issue_controller_token(
Some(existing) => existing,
None => {
client
.create_service_account(principal_id, principal_id, project_id)
.create_service_account(principal_id, principal_id, org_id, project_id)
.await?
}
};

View file

@ -1,4 +1,4 @@
use std::collections::{HashMap, HashSet};
use std::collections::{BTreeMap, HashMap, HashSet};
use anyhow::{Context, Result};
use chainfire_client::Client;
@ -354,24 +354,33 @@ impl PublicationController {
return Ok(existing.cloned());
};
let Some(value) = desired_dns_value(spec, healthy_instances, load_balancer) else {
let desired_values = desired_dns_values(spec, healthy_instances, load_balancer);
if desired_values.is_empty() {
if let Some(existing) = existing {
self.cleanup_dns(auth_token, existing).await?;
}
return Ok(None);
};
}
let zone_name = normalize_zone_name(&spec.zone);
let record_name = record_name_for_service(spec, service);
let fqdn = format!("{}.{}", record_name, zone_name);
let primary_value = desired_values.first().cloned().unwrap_or_default();
if self.config.dry_run {
info!(service = %service.name, fqdn = %fqdn, value = %value, "would reconcile native DNS record");
info!(
service = %service.name,
fqdn = %fqdn,
values = ?desired_values,
"would reconcile native DNS record set"
);
return Ok(existing.cloned().or(Some(PublishedDnsRecordState {
zone_id: String::new(),
record_id: String::new(),
record_ids: Vec::new(),
fqdn,
value,
value: primary_value,
values: desired_values,
})));
}
@ -380,22 +389,28 @@ impl PublicationController {
let zone =
ensure_zone(&mut zone_client, auth_token, &zone_name, org_id, project_id).await?;
let record = ensure_record(
let records = ensure_records(
&mut record_client,
auth_token,
existing,
&zone.id,
&record_name,
spec.ttl,
&value,
&desired_values,
)
.await?;
let record_ids = records
.iter()
.map(|record| record.id.clone())
.collect::<Vec<_>>();
Ok(Some(PublishedDnsRecordState {
zone_id: zone.id,
record_id: record.id,
record_id: record_ids.first().cloned().unwrap_or_default(),
record_ids,
fqdn,
value,
value: primary_value,
values: desired_values,
}))
}
@ -408,18 +423,25 @@ impl PublicationController {
return Ok(());
};
let mut record_client = RecordServiceClient::connect(endpoint.clone()).await?;
match record_client
.delete_record(authorized_request(
DeleteRecordRequest {
id: dns_state.record_id.clone(),
},
auth_token,
))
.await
{
Ok(_) => {}
Err(status) if status.code() == Code::NotFound => {}
Err(status) => return Err(status.into()),
let mut record_ids = dns_state.record_ids.clone();
if record_ids.is_empty() && !dns_state.record_id.is_empty() {
record_ids.push(dns_state.record_id.clone());
}
record_ids.sort();
record_ids.dedup();
for record_id in record_ids {
match record_client
.delete_record(authorized_request(
DeleteRecordRequest { id: record_id },
auth_token,
))
.await
{
Ok(_) => {}
Err(status) if status.code() == Code::NotFound => {}
Err(status) => return Err(status.into()),
}
}
Ok(())
}
@ -812,15 +834,15 @@ async fn ensure_zone(
.context("FlashDNS returned empty CreateZone response")?)
}
async fn ensure_record(
async fn ensure_records(
client: &mut RecordServiceClient<tonic::transport::Channel>,
auth_token: &str,
existing: Option<&PublishedDnsRecordState>,
zone_id: &str,
name: &str,
ttl: u32,
value: &str,
) -> Result<RecordInfo> {
desired_values: &[String],
) -> Result<Vec<RecordInfo>> {
let records = client
.list_records(authorized_request(
ListRecordsRequest {
@ -839,73 +861,96 @@ async fn ensure_record(
let mut matching = records
.iter()
.filter(|record| {
existing.map(|state| state.record_id.as_str()) == Some(record.id.as_str())
|| record.name == name
record.name == name
|| existing.map(|state| state.record_id.as_str()) == Some(record.id.as_str())
|| existing
.map(|state| state.record_ids.iter().any(|id| id == &record.id))
.unwrap_or(false)
})
.cloned()
.collect::<Vec<_>>();
matching.sort_by(|lhs, rhs| {
record_a_value(lhs)
.cmp(&record_a_value(rhs))
.then_with(|| lhs.id.cmp(&rhs.id))
});
if let Some(record) = matching.first().cloned() {
let record_value = record
.data
.as_ref()
.and_then(|data| data.data.as_ref())
.and_then(|data| match data {
record_data::Data::A(record) => Some(record.address.clone()),
_ => None,
});
let mut records_by_value: BTreeMap<String, Vec<RecordInfo>> = BTreeMap::new();
for record in matching {
let Some(value) = record_a_value(&record) else {
continue;
};
records_by_value.entry(value).or_default().push(record);
}
if record_value.as_deref() != Some(value) || record.ttl != ttl {
let updated = client
.update_record(authorized_request(
UpdateRecordRequest {
id: record.id.clone(),
ttl: Some(ttl),
let mut ensured = Vec::new();
for desired_value in desired_values {
if let Some(record) = records_by_value.get_mut(desired_value).and_then(|records| {
if records.is_empty() {
None
} else {
Some(records.remove(0))
}
}) {
if record.ttl != ttl || !record.enabled {
let updated = client
.update_record(authorized_request(
UpdateRecordRequest {
id: record.id.clone(),
ttl: Some(ttl),
data: Some(RecordData {
data: Some(record_data::Data::A(ARecord {
address: desired_value.to_string(),
})),
}),
enabled: Some(true),
},
auth_token,
))
.await?
.into_inner()
.record
.context("FlashDNS returned empty UpdateRecord response")?;
ensured.push(updated);
} else {
ensured.push(record);
}
continue;
}
ensured.push(
client
.create_record(authorized_request(
CreateRecordRequest {
zone_id: zone_id.to_string(),
name: name.to_string(),
record_type: "A".to_string(),
ttl,
data: Some(RecordData {
data: Some(record_data::Data::A(ARecord {
address: value.to_string(),
address: desired_value.to_string(),
})),
}),
enabled: Some(true),
},
auth_token,
))
.await?
.into_inner()
.record
.context("FlashDNS returned empty UpdateRecord response")?;
matching.remove(0);
for extra in matching {
delete_record(client, auth_token, &extra.id).await?;
}
return Ok(updated);
}
for extra in matching.into_iter().skip(1) {
delete_record(client, auth_token, &extra.id).await?;
}
return Ok(record);
.context("FlashDNS returned empty CreateRecord response")?,
);
}
Ok(client
.create_record(authorized_request(
CreateRecordRequest {
zone_id: zone_id.to_string(),
name: name.to_string(),
record_type: "A".to_string(),
ttl,
data: Some(RecordData {
data: Some(record_data::Data::A(ARecord {
address: value.to_string(),
})),
}),
},
auth_token,
))
.await?
.into_inner()
.record
.context("FlashDNS returned empty CreateRecord response")?)
for extra in records_by_value.into_values().flatten() {
delete_record(client, auth_token, &extra.id).await?;
}
ensured.sort_by(|lhs, rhs| {
record_a_value(lhs)
.cmp(&record_a_value(rhs))
.then_with(|| lhs.id.cmp(&rhs.id))
});
Ok(ensured)
}
async fn delete_record(
@ -940,23 +985,54 @@ fn resolve_target_port(service: &ServiceSpec, spec: &LoadBalancerPublicationSpec
.or_else(|| service.ports.as_ref().and_then(|ports| ports.grpc))
}
fn desired_dns_value(
/// Extract the IPv4 address carried by an A record, if any.
///
/// Returns `None` when the record has no data payload or when the payload
/// is some record type other than A.
fn record_a_value(record: &RecordInfo) -> Option<String> {
    let payload = record.data.as_ref()?.data.as_ref()?;
    if let record_data::Data::A(a_record) = payload {
        Some(a_record.address.clone())
    } else {
        None
    }
}
/// Canonicalize a DNS value set: trim surrounding whitespace, drop empty
/// entries, then sort and de-duplicate so the result is a stable,
/// order-independent record set suitable for comparison.
fn normalize_dns_values(values: impl IntoIterator<Item = String>) -> Vec<String> {
    let mut normalized: Vec<String> = Vec::new();
    for value in values {
        let trimmed = value.trim();
        // Skip entries that were empty or whitespace-only.
        if !trimmed.is_empty() {
            normalized.push(trimmed.to_string());
        }
    }
    // Sort first so dedup (which only removes consecutive duplicates)
    // collapses every repeated value.
    normalized.sort();
    normalized.dedup();
    normalized
}
fn desired_dns_values(
spec: &DnsPublicationSpec,
healthy_instances: &[ServiceInstanceSpec],
load_balancer: Option<&PublishedLoadBalancerState>,
) -> Option<String> {
) -> Vec<String> {
match spec.mode {
DnsPublishMode::LoadBalancer => load_balancer
.and_then(|state| state.vip_address.clone())
.filter(|value| !value.is_empty() && value != "0.0.0.0")
.or_else(|| {
healthy_instances
.first()
.map(|instance| instance.ip.clone())
}),
DnsPublishMode::Direct => healthy_instances
.first()
.map(|instance| instance.ip.clone()),
DnsPublishMode::LoadBalancer => normalize_dns_values(
load_balancer
.and_then(|state| state.vip_address.clone())
.filter(|value| !value.is_empty() && value != "0.0.0.0")
.or_else(|| {
healthy_instances
.first()
.map(|instance| instance.ip.clone())
})
.into_iter(),
),
DnsPublishMode::Direct => normalize_dns_values(
healthy_instances
.first()
.map(|instance| instance.ip.clone())
.into_iter(),
),
DnsPublishMode::DirectMulti => {
normalize_dns_values(healthy_instances.iter().map(|instance| instance.ip.clone()))
}
}
}
@ -1153,7 +1229,7 @@ mod tests {
}
#[test]
fn test_dns_value_falls_back_to_healthy_instance_when_vip_missing() {
fn test_dns_values_fall_back_to_healthy_instance_when_vip_missing() {
let spec = DnsPublicationSpec {
zone: "native.cluster.test".to_string(),
name: Some("api".to_string()),
@ -1178,8 +1254,73 @@ mod tests {
}];
assert_eq!(
desired_dns_value(&spec, &instances, None).as_deref(),
Some("10.0.0.11")
desired_dns_values(&spec, &instances, None),
vec!["10.0.0.11".to_string()]
);
}
#[test]
fn test_direct_multi_dns_publishes_all_healthy_instance_ips() {
    // DirectMulti mode should publish one A value per healthy instance IP,
    // de-duplicated and sorted — node01 and node03 deliberately share
    // 10.0.0.11 below, and node02 is listed first despite having the
    // larger address, so this exercises both dedup and ordering.
    let spec = DnsPublicationSpec {
        zone: "native.cluster.test".to_string(),
        name: Some("daemon".to_string()),
        ttl: 60,
        mode: DnsPublishMode::DirectMulti,
    };
    let instances = vec![
        ServiceInstanceSpec {
            instance_id: "daemon-node02".to_string(),
            service: "daemon".to_string(),
            node_id: "node02".to_string(),
            ip: "10.0.0.12".to_string(),
            port: 8080,
            mesh_port: None,
            version: None,
            health_check: None,
            process: None,
            container: None,
            managed_by: None,
            state: Some("healthy".to_string()),
            last_heartbeat: None,
            observed_at: None,
        },
        ServiceInstanceSpec {
            instance_id: "daemon-node01".to_string(),
            service: "daemon".to_string(),
            node_id: "node01".to_string(),
            ip: "10.0.0.11".to_string(),
            port: 8080,
            mesh_port: None,
            version: None,
            health_check: None,
            process: None,
            container: None,
            managed_by: None,
            state: Some("healthy".to_string()),
            last_heartbeat: None,
            observed_at: None,
        },
        // Duplicate of node01's IP: must be collapsed in the output.
        ServiceInstanceSpec {
            instance_id: "daemon-node03".to_string(),
            service: "daemon".to_string(),
            node_id: "node03".to_string(),
            ip: "10.0.0.11".to_string(),
            port: 8080,
            mesh_port: None,
            version: None,
            health_check: None,
            process: None,
            container: None,
            managed_by: None,
            state: Some("healthy".to_string()),
            last_heartbeat: None,
            observed_at: None,
        },
    ];
    // No load balancer state is supplied: DirectMulti reads instances only.
    assert_eq!(
        desired_dns_values(&spec, &instances, None),
        vec!["10.0.0.11".to_string(), "10.0.0.12".to_string()]
    );
}

290
flake.nix
View file

@ -71,37 +71,160 @@
clusterPython = pkgs.python3.withPackages (ps: [ ps.python-snappy ]);
# Keep package builds stable even when docs or archived assets change.
repoSrc = pkgs.lib.cleanSourceWith {
src = ./.;
filter = path: type:
let
rel = pkgs.lib.removePrefix ((toString ./. ) + "/") (toString path);
topLevel = builtins.head (pkgs.lib.splitString "/" rel);
includedTopLevels = [
"apigateway"
"chainfire"
"coronafs"
"crates"
"creditservice"
"deployer"
"fiberlb"
"flashdns"
"flaredb"
"iam"
"k8shost"
"lightningstor"
"mtls-agent"
"nightlight"
"plasmavmc"
"prismnet"
];
in
rel == ""
|| builtins.elem rel [ "flake.nix" "flake.lock" ]
|| builtins.elem topLevel includedTopLevels;
# Keep Rust package builds stable without invalidating every package on
# unrelated workspace changes.
workspaceSourceRoots = {
chainfire = [ "chainfire" ];
flaredb = [ "flaredb" ];
iam = [
"apigateway"
"chainfire"
"creditservice"
"crates/photon-auth-client"
"crates/photon-config"
"crates/photon-runtime"
"crates/photon-state"
"flaredb"
"iam"
];
coronafs = [ "coronafs" ];
plasmavmc = [
"apigateway"
"chainfire"
"creditservice"
"crates/photon-auth-client"
"crates/photon-config"
"crates/photon-runtime"
"crates/photon-state"
"flaredb"
"iam"
"lightningstor"
"plasmavmc"
"prismnet"
];
prismnet = [
"apigateway"
"chainfire"
"creditservice"
"crates/photon-auth-client"
"crates/photon-config"
"crates/photon-runtime"
"crates/photon-state"
"flaredb"
"iam"
"prismnet"
];
flashdns = [
"apigateway"
"chainfire"
"creditservice"
"crates/photon-auth-client"
"crates/photon-config"
"crates/photon-runtime"
"crates/photon-state"
"flashdns"
"flaredb"
"iam"
];
fiberlb = [
"apigateway"
"chainfire"
"creditservice"
"crates/photon-auth-client"
"crates/photon-config"
"crates/photon-runtime"
"crates/photon-state"
"fiberlb"
"flaredb"
"iam"
];
lightningstor = [
"apigateway"
"chainfire"
"creditservice"
"crates/photon-auth-client"
"crates/photon-config"
"crates/photon-runtime"
"crates/photon-state"
"flaredb"
"iam"
"lightningstor"
];
nightlight = [ "nightlight" ];
creditservice = [
"apigateway"
"chainfire"
"creditservice"
"crates/photon-auth-client"
"crates/photon-config"
"crates/photon-runtime"
"crates/photon-state"
"flaredb"
"iam"
];
apigateway = [
"apigateway"
"chainfire"
"creditservice"
"crates/photon-auth-client"
"crates/photon-config"
"crates/photon-runtime"
"crates/photon-state"
"flaredb"
"iam"
];
k8shost = [
"apigateway"
"chainfire"
"creditservice"
"crates/photon-auth-client"
"crates/photon-config"
"crates/photon-runtime"
"crates/photon-state"
"fiberlb"
"flaredb"
"flashdns"
"iam"
"k8shost"
"lightningstor"
"plasmavmc"
"prismnet"
];
deployer = [
"apigateway"
"chainfire"
"creditservice"
"crates/photon-auth-client"
"crates/photon-config"
"crates/photon-runtime"
"crates/photon-state"
"deployer"
"fiberlb"
"flaredb"
"flashdns"
"iam"
];
};
mkWorkspaceSrc = workspaceSubdir:
let
sourceRoots = workspaceSourceRoots.${workspaceSubdir} or [ workspaceSubdir ];
in
pkgs.lib.cleanSourceWith {
src = ./.;
filter = path: type:
let
rel = pkgs.lib.removePrefix ((toString ./. ) + "/") (toString path);
in
rel == ""
|| builtins.elem rel [ "flake.nix" "flake.lock" ]
|| builtins.any (root:
rel == root
|| pkgs.lib.hasPrefix "${root}/" rel
|| pkgs.lib.hasPrefix "${rel}/" root
) sourceRoots;
};
flakeBundleSrc = pkgs.lib.cleanSourceWith {
src = ./.;
filter = path: type:
@ -440,12 +563,12 @@
# workspaceSubdir: subdirectory containing Cargo.toml (e.g., "chainfire")
# mainCrate: optional main crate name if different from workspace
# description: package description for meta
# doCheck: whether to run tests during build (default: true)
buildRustWorkspace = { name, workspaceSubdir, mainCrate ? null, description ? "", doCheck ? true }:
# doCheck: whether to run tests during build (default: false)
buildRustWorkspace = { name, workspaceSubdir, mainCrate ? null, description ? "", doCheck ? false }:
pkgs.rustPlatform.buildRustPackage ({
pname = name;
version = "0.1.0";
src = repoSrc;
src = mkWorkspaceSrc workspaceSubdir;
cargoLock = {
lockFile = ./${workspaceSubdir}/Cargo.lock;
@ -486,6 +609,43 @@
cargoBuildFlags = [ "-p" mainCrate ];
});
# Helper function to build multiple binaries from the same workspace in
# one cargo invocation. This is mainly used by the VM cluster builds so
# a single host build can satisfy several services from the same
# workspace.
buildRustWorkspaceBundle = { name, workspaceSubdir, crates, description ? "", doCheck ? false }:
  pkgs.rustPlatform.buildRustPackage {
    pname = name;
    version = "0.1.0";
    # Filtered source tree containing only the roots this workspace needs,
    # so unrelated repo changes do not invalidate the derivation.
    src = mkWorkspaceSrc workspaceSubdir;
    cargoLock = {
      lockFile = ./${workspaceSubdir}/Cargo.lock;
    };
    buildAndTestSubdir = workspaceSubdir;
    # NOTE(review): mirrors the workspace lockfile to the unpacked source
    # root — presumably because buildRustPackage's vendoring looks for it
    # there; confirm against buildRustWorkspace's handling.
    postUnpack = ''
      cp $sourceRoot/${workspaceSubdir}/Cargo.lock $sourceRoot/Cargo.lock
    '';
    nativeBuildInputs = commonNativeBuildInputs;
    buildInputs = commonBuildInputs;
    inherit (commonEnvVars) LIBCLANG_PATH PROTOC ROCKSDB_LIB_DIR;
    # Tests are off by default: bundles exist to produce binaries quickly.
    inherit doCheck;
    # Emits one "-p <crate>" pair per requested crate so cargo builds all
    # of them in a single invocation.
    cargoBuildFlags = pkgs.lib.concatMap (crate: [ "-p" crate ]) crates;
    meta = with pkgs.lib; {
      description = description;
      homepage = "https://github.com/yourorg/plasmacloud";
      license = licenses.asl20;
      maintainers = [ ];
      platforms = platforms.linux;
    };
  };
in
{
# ======================================================================
@ -658,6 +818,16 @@
description = "LightningStor distributed storage node daemon";
};
lightningstor-workspace = buildRustWorkspaceBundle {
name = "lightningstor-workspace";
workspaceSubdir = "lightningstor";
crates = [
"lightningstor-server"
"lightningstor-node"
];
description = "Combined LightningStor server and node workspace build";
};
# --------------------------------------------------------------------
# NightLight: Prometheus-compatible Metrics Store
# --------------------------------------------------------------------
@ -768,6 +938,20 @@
description = "Label-aware service scheduler for PhotonCloud bare-metal fleets";
};
deployer-workspace = buildRustWorkspaceBundle {
name = "deployer-workspace";
workspaceSubdir = "deployer";
crates = [
"deployer-server"
"deployer-ctl"
"node-agent"
"nix-agent"
"plasmacloud-reconciler"
"fleet-scheduler"
];
description = "Combined deployer workspace build for cluster images and checks";
};
vmClusterDeployerState =
self.nixosConfigurations.node01.config.system.build.plasmacloudDeployerClusterState;
@ -787,18 +971,12 @@
self.packages.${system}.prismnet-server
self.packages.${system}.flashdns-server
self.packages.${system}.fiberlb-server
self.packages.${system}.lightningstor-server
self.packages.${system}.lightningstor-node
self.packages.${system}.lightningstor-workspace
self.packages.${system}.nightlight-server
self.packages.${system}.creditservice-server
self.packages.${system}.apigateway-server
self.packages.${system}.k8shost-server
self.packages.${system}.deployer-server
self.packages.${system}.deployer-ctl
self.packages.${system}.plasmacloud-reconciler
self.packages.${system}.nix-agent
self.packages.${system}.node-agent
self.packages.${system}.fleet-scheduler
self.packages.${system}.deployer-workspace
self.packages.${system}.vmClusterDeployerState
];
};
@ -970,9 +1148,9 @@
PHOTONCLOUD_CHAINFIRE_SERVER_BIN =
"${self.packages.${system}.chainfire-server}/bin/chainfire";
PHOTONCLOUD_DEPLOYER_SERVER_BIN =
"${self.packages.${system}.deployer-server}/bin/deployer-server";
"${self.packages.${system}.deployer-workspace}/bin/deployer-server";
PHOTONCLOUD_DEPLOYER_CTL_BIN =
"${self.packages.${system}.deployer-ctl}/bin/deployer-ctl";
"${self.packages.${system}.deployer-workspace}/bin/deployer-ctl";
} ''
export HOME="$TMPDIR/home"
mkdir -p "$HOME"
@ -1007,9 +1185,9 @@
PHOTONCLOUD_CHAINFIRE_SERVER_BIN =
"${self.packages.${system}.chainfire-server}/bin/chainfire";
PHOTONCLOUD_DEPLOYER_CTL_BIN =
"${self.packages.${system}.deployer-ctl}/bin/deployer-ctl";
"${self.packages.${system}.deployer-workspace}/bin/deployer-ctl";
PHOTONCLOUD_PLASMACLOUD_RECONCILER_BIN =
"${self.packages.${system}.plasmacloud-reconciler}/bin/plasmacloud-reconciler";
"${self.packages.${system}.deployer-workspace}/bin/plasmacloud-reconciler";
} ''
export HOME="$TMPDIR/home"
mkdir -p "$HOME"
@ -1044,11 +1222,11 @@
PHOTONCLOUD_CHAINFIRE_SERVER_BIN =
"${self.packages.${system}.chainfire-server}/bin/chainfire";
PHOTONCLOUD_DEPLOYER_CTL_BIN =
"${self.packages.${system}.deployer-ctl}/bin/deployer-ctl";
"${self.packages.${system}.deployer-workspace}/bin/deployer-ctl";
PHOTONCLOUD_NODE_AGENT_BIN =
"${self.packages.${system}.node-agent}/bin/node-agent";
"${self.packages.${system}.deployer-workspace}/bin/node-agent";
PHOTONCLOUD_FLEET_SCHEDULER_BIN =
"${self.packages.${system}.fleet-scheduler}/bin/fleet-scheduler";
"${self.packages.${system}.deployer-workspace}/bin/fleet-scheduler";
} ''
export HOME="$TMPDIR/home"
mkdir -p "$HOME"
@ -1229,19 +1407,21 @@
prismnet-server = self.packages.${final.system}.prismnet-server;
flashdns-server = self.packages.${final.system}.flashdns-server;
fiberlb-server = self.packages.${final.system}.fiberlb-server;
lightningstor-server = self.packages.${final.system}.lightningstor-server;
lightningstor-node = self.packages.${final.system}.lightningstor-node;
lightningstor-workspace = self.packages.${final.system}.lightningstor-workspace;
lightningstor-server = self.packages.${final.system}.lightningstor-workspace;
lightningstor-node = self.packages.${final.system}.lightningstor-workspace;
nightlight-server = self.packages.${final.system}.nightlight-server;
creditservice-server = self.packages.${final.system}.creditservice-server;
apigateway-server = self.packages.${final.system}.apigateway-server;
k8shost-server = self.packages.${final.system}.k8shost-server;
deployer-server = self.packages.${final.system}.deployer-server;
deployer-ctl = self.packages.${final.system}.deployer-ctl;
plasmacloud-reconciler = self.packages.${final.system}.plasmacloud-reconciler;
deployer-workspace = self.packages.${final.system}.deployer-workspace;
deployer-server = self.packages.${final.system}.deployer-workspace;
deployer-ctl = self.packages.${final.system}.deployer-workspace;
plasmacloud-reconciler = self.packages.${final.system}.deployer-workspace;
plasmacloudFlakeBundle = self.packages.${final.system}.plasmacloudFlakeBundle;
nix-agent = self.packages.${final.system}.nix-agent;
node-agent = self.packages.${final.system}.node-agent;
fleet-scheduler = self.packages.${final.system}.fleet-scheduler;
nix-agent = self.packages.${final.system}.deployer-workspace;
node-agent = self.packages.${final.system}.deployer-workspace;
fleet-scheduler = self.packages.${final.system}.deployer-workspace;
};
};
}

View file

@ -152,7 +152,9 @@ impl DnsMetadataStore {
)
.execute(pool)
.await
.map_err(|e| MetadataError::Storage(format!("Failed to initialize Postgres schema: {}", e)))?;
.map_err(|e| {
MetadataError::Storage(format!("Failed to initialize Postgres schema: {}", e))
})?;
Ok(())
}
@ -165,7 +167,9 @@ impl DnsMetadataStore {
)
.execute(pool)
.await
.map_err(|e| MetadataError::Storage(format!("Failed to initialize SQLite schema: {}", e)))?;
.map_err(|e| {
MetadataError::Storage(format!("Failed to initialize SQLite schema: {}", e))
})?;
Ok(())
}
@ -192,9 +196,7 @@ impl DnsMetadataStore {
.bind(value)
.execute(pool.as_ref())
.await
.map_err(|e| {
MetadataError::Storage(format!("Postgres put failed: {}", e))
})?;
.map_err(|e| MetadataError::Storage(format!("Postgres put failed: {}", e)))?;
}
SqlStorageBackend::Sqlite(pool) => {
sqlx::query(
@ -395,10 +397,15 @@ impl DnsMetadataStore {
format!("/flashdns/zone_ids/{}", zone_id)
}
fn record_key(zone_id: &ZoneId, record_name: &str, record_type: RecordType) -> String {
fn record_key(
zone_id: &ZoneId,
record_name: &str,
record_type: RecordType,
record_id: &RecordId,
) -> String {
format!(
"/flashdns/records/{}/{}/{}",
zone_id, record_name, record_type
"/flashdns/records/{}/{}/{}/{}",
zone_id, record_name, record_type, record_id
)
}
@ -406,6 +413,20 @@ impl DnsMetadataStore {
format!("/flashdns/records/{}/", zone_id)
}
/// Storage-key prefix covering every record that shares this zone, name,
/// and record type. The trailing '/' keeps prefix scans from also matching
/// the legacy single-record key, which has no record-id segment.
fn record_type_prefix(zone_id: &ZoneId, record_name: &str, record_type: RecordType) -> String {
    format!(
        "/flashdns/records/{}/{}/{}/",
        zone_id, record_name, record_type
    )
}
/// Pre-record-set key layout: exactly one key per (zone, name, type), with
/// no record-id segment. Retained so records written by older builds stay
/// readable and deletable.
fn legacy_record_key(zone_id: &ZoneId, record_name: &str, record_type: RecordType) -> String {
    format!(
        "/flashdns/records/{}/{}/{}",
        zone_id, record_name, record_type
    )
}
// Secondary-index key: maps a record id to the primary record key so
// records can be located (and their old keys cleaned up) by id alone.
fn record_id_key(record_id: &RecordId) -> String {
    format!("/flashdns/record_ids/{}", record_id)
}
@ -521,7 +542,18 @@ impl DnsMetadataStore {
/// Save record
pub async fn save_record(&self, record: &Record) -> Result<()> {
let key = Self::record_key(&record.zone_id, &record.name, record.record_type);
let key = Self::record_key(
&record.zone_id,
&record.name,
record.record_type,
&record.id,
);
let id_key = Self::record_id_key(&record.id);
if let Some(existing_key) = self.get(&id_key).await? {
if existing_key != key {
self.delete_key(&existing_key).await?;
}
}
let value = serde_json::to_string(record).map_err(|e| {
MetadataError::Serialization(format!("Failed to serialize record: {}", e))
})?;
@ -529,29 +561,40 @@ impl DnsMetadataStore {
self.put(&key, &value).await?;
// Also save record ID mapping
let id_key = Self::record_id_key(&record.id);
self.put(&id_key, &key).await?;
Ok(())
}
/// Load record by name and type
/// Load the first record by name and type, preserving compatibility with
/// older single-record keys.
pub async fn load_record(
&self,
zone_id: &ZoneId,
record_name: &str,
record_type: RecordType,
) -> Result<Option<Record>> {
let key = Self::record_key(zone_id, record_name, record_type);
let prefix = Self::record_type_prefix(zone_id, record_name, record_type);
let mut records = self
.get_prefix(&prefix)
.await?
.into_iter()
.filter_map(|(_, value)| serde_json::from_str::<Record>(&value).ok())
.collect::<Vec<_>>();
if let Some(value) = self.get(&key).await? {
let record: Record = serde_json::from_str(&value).map_err(|e| {
MetadataError::Serialization(format!("Failed to deserialize record: {}", e))
})?;
Ok(Some(record))
} else {
Ok(None)
if records.is_empty() {
let legacy_key = Self::legacy_record_key(zone_id, record_name, record_type);
if let Some(value) = self.get(&legacy_key).await? {
let record: Record = serde_json::from_str(&value).map_err(|e| {
MetadataError::Serialization(format!("Failed to deserialize record: {}", e))
})?;
return Ok(Some(record));
}
return Ok(None);
}
records.sort_by(|lhs, rhs| lhs.id.to_string().cmp(&rhs.id.to_string()));
Ok(records.into_iter().next())
}
/// Load record by ID
@ -574,10 +617,24 @@ impl DnsMetadataStore {
/// Delete record
pub async fn delete_record(&self, record: &Record) -> Result<()> {
let key = Self::record_key(&record.zone_id, &record.name, record.record_type);
let id_key = Self::record_id_key(&record.id);
self.delete_key(&key).await?;
if let Some(key) = self.get(&id_key).await? {
self.delete_key(&key).await?;
} else {
self.delete_key(&Self::record_key(
&record.zone_id,
&record.name,
record.record_type,
&record.id,
))
.await?;
self.delete_key(&Self::legacy_record_key(
&record.zone_id,
&record.name,
record.record_type,
))
.await?;
}
self.delete_key(&id_key).await?;
Ok(())
@ -601,6 +658,7 @@ impl DnsMetadataStore {
a.name
.cmp(&b.name)
.then(a.record_type.type_code().cmp(&b.record_type.type_code()))
.then(a.id.to_string().cmp(&b.id.to_string()))
});
Ok(records)
@ -829,4 +887,38 @@ mod tests {
.unwrap();
assert!(deleted.is_none());
}
#[tokio::test]
async fn test_record_set_supports_multiple_records_with_same_name_and_type() {
    // Record keys now include the record id, so two A records may share a
    // (zone, name, type) tuple; deleting one must leave the other intact.
    let store = DnsMetadataStore::new_in_memory();
    let zone_name = ZoneName::new("example.com").unwrap();
    let zone = Zone::new(zone_name, "test-org", "test-project");
    store.save_zone(&zone).await.unwrap();
    // Two distinct A records under the same name "api".
    let first = Record::new(
        zone.id,
        "api",
        RecordData::a_from_str("192.168.1.10").unwrap(),
    );
    let second = Record::new(
        zone.id,
        "api",
        RecordData::a_from_str("192.168.1.11").unwrap(),
    );
    store.save_record(&first).await.unwrap();
    store.save_record(&second).await.unwrap();
    // Both records must be listed under the shared name.
    let records = store.list_records_by_name(&zone.id, "api").await.unwrap();
    assert_eq!(records.len(), 2);
    assert!(records.iter().any(|record| record.id == first.id));
    assert!(records.iter().any(|record| record.id == second.id));
    // Deleting by id must not clobber the sibling record.
    store.delete_record(&first).await.unwrap();
    let remaining = store.list_records_by_name(&zone.id, "api").await.unwrap();
    assert_eq!(remaining.len(), 1);
    assert_eq!(remaining[0].id, second.id);
}
}

View file

@ -4,14 +4,15 @@
use iam_types::{
Condition as TypesCondition, ConditionExpr as TypesConditionExpr,
Permission as TypesPermission, PolicyBinding as TypesBinding, Principal as TypesPrincipal,
PrincipalKind as TypesPrincipalKind, PrincipalRef as TypesPrincipalRef, Role as TypesRole,
Scope as TypesScope,
Organization as TypesOrganization, Permission as TypesPermission,
PolicyBinding as TypesBinding, Principal as TypesPrincipal,
PrincipalKind as TypesPrincipalKind, PrincipalRef as TypesPrincipalRef,
Project as TypesProject, Role as TypesRole, Scope as TypesScope,
};
use crate::proto::{
self, condition_expr, scope, Condition, ConditionExpr, Permission, PolicyBinding, Principal,
PrincipalKind, PrincipalRef, Role, Scope,
self, condition_expr, scope, Condition, ConditionExpr, Organization, Permission, PolicyBinding,
Principal, PrincipalKind, PrincipalRef, Project, Role, Scope,
};
// ============================================================================
@ -98,6 +99,68 @@ impl From<Principal> for TypesPrincipal {
}
}
// ============================================================================
// Organization / Project conversions
// ============================================================================
impl From<TypesOrganization> for Organization {
fn from(org: TypesOrganization) -> Self {
Organization {
id: org.id,
name: org.name,
description: org.description,
metadata: org.metadata,
created_at: org.created_at,
updated_at: org.updated_at,
enabled: org.enabled,
}
}
}
impl From<Organization> for TypesOrganization {
fn from(org: Organization) -> Self {
TypesOrganization {
id: org.id,
name: org.name,
description: org.description,
metadata: org.metadata,
created_at: org.created_at,
updated_at: org.updated_at,
enabled: org.enabled,
}
}
}
impl From<TypesProject> for Project {
fn from(project: TypesProject) -> Self {
Project {
id: project.id,
org_id: project.org_id,
name: project.name,
description: project.description,
metadata: project.metadata,
created_at: project.created_at,
updated_at: project.updated_at,
enabled: project.enabled,
}
}
}
impl From<Project> for TypesProject {
fn from(project: Project) -> Self {
TypesProject {
id: project.id,
org_id: project.org_id,
name: project.name,
description: project.description,
metadata: project.metadata,
created_at: project.created_at,
updated_at: project.updated_at,
enabled: project.enabled,
}
}
}
// ============================================================================
// Scope conversions
// ============================================================================

View file

@ -2,19 +2,23 @@ use std::sync::Arc;
use std::time::{SystemTime, UNIX_EPOCH};
use aes_gcm::{aead::Aead, Aes256Gcm, Key, KeyInit, Nonce};
use argon2::{password_hash::{PasswordHasher, SaltString}, Argon2};
use argon2::{
password_hash::{PasswordHasher, SaltString},
Argon2,
};
use base64::{engine::general_purpose::STANDARD, Engine};
use rand_core::{OsRng, RngCore};
use tonic::{Request, Response, Status};
use iam_store::CredentialStore;
use iam_types::{Argon2Params, CredentialRecord, PrincipalKind as TypesPrincipalKind};
use iam_store::{CredentialStore, PrincipalStore};
use iam_types::{
Argon2Params, CredentialRecord, PrincipalKind as TypesPrincipalKind, PrincipalRef,
};
use crate::proto::{
iam_credential_server::IamCredential, CreateS3CredentialRequest,
CreateS3CredentialResponse, Credential, GetSecretKeyRequest, GetSecretKeyResponse,
ListCredentialsRequest, ListCredentialsResponse, PrincipalKind, RevokeCredentialRequest,
RevokeCredentialResponse,
iam_credential_server::IamCredential, CreateS3CredentialRequest, CreateS3CredentialResponse,
Credential, GetSecretKeyRequest, GetSecretKeyResponse, ListCredentialsRequest,
ListCredentialsResponse, PrincipalKind, RevokeCredentialRequest, RevokeCredentialResponse,
};
fn now_ts() -> u64 {
@ -26,12 +30,20 @@ fn now_ts() -> u64 {
pub struct IamCredentialService {
store: Arc<CredentialStore>,
principal_store: Arc<PrincipalStore>,
cipher: Aes256Gcm,
key_id: String,
admin_token: Option<String>,
}
impl IamCredentialService {
pub fn new(store: Arc<CredentialStore>, master_key: &[u8], key_id: &str) -> Result<Self, Status> {
pub fn new(
store: Arc<CredentialStore>,
principal_store: Arc<PrincipalStore>,
master_key: &[u8],
key_id: &str,
admin_token: Option<String>,
) -> Result<Self, Status> {
if master_key.len() != 32 {
return Err(Status::failed_precondition(
"IAM_CRED_MASTER_KEY must be 32 bytes",
@ -40,8 +52,10 @@ impl IamCredentialService {
let cipher = Aes256Gcm::new(Key::<Aes256Gcm>::from_slice(master_key));
Ok(Self {
store,
principal_store,
cipher,
key_id: key_id.to_string(),
admin_token,
})
}
@ -93,6 +107,41 @@ impl IamCredentialService {
.decrypt(nonce, ct)
.map_err(|e| Status::internal(format!("decrypt failed: {}", e)))
}
fn admin_token_valid(metadata: &tonic::metadata::MetadataMap, token: &str) -> bool {
if let Some(value) = metadata.get("x-iam-admin-token") {
if let Ok(raw) = value.to_str() {
if raw.trim() == token {
return true;
}
}
}
if let Some(value) = metadata.get("authorization") {
if let Ok(raw) = value.to_str() {
let raw = raw.trim();
if let Some(rest) = raw.strip_prefix("Bearer ") {
return rest.trim() == token;
}
if let Some(rest) = raw.strip_prefix("bearer ") {
return rest.trim() == token;
}
}
}
false
}
fn require_admin_token<T>(&self, request: &Request<T>) -> Result<(), Status> {
if let Some(token) = self.admin_token.as_deref() {
if !Self::admin_token_valid(request.metadata(), token) {
return Err(Status::unauthenticated(
"missing or invalid IAM admin token",
));
}
}
Ok(())
}
}
fn map_principal_kind(kind: i32) -> Result<TypesPrincipalKind, Status> {
@ -110,9 +159,22 @@ impl IamCredential for IamCredentialService {
&self,
request: Request<CreateS3CredentialRequest>,
) -> Result<Response<CreateS3CredentialResponse>, Status> {
self.require_admin_token(&request)?;
let req = request.into_inner();
let now = now_ts();
let principal_kind = map_principal_kind(req.principal_kind)?;
let principal_ref = PrincipalRef::new(principal_kind.clone(), &req.principal_id);
let principal = self
.principal_store
.get(&principal_ref)
.await
.map_err(|e| Status::internal(format!("principal lookup failed: {}", e)))?
.ok_or_else(|| Status::not_found("principal not found"))?;
if principal.org_id != req.org_id || principal.project_id != req.project_id {
return Err(Status::invalid_argument(
"credential tenant does not match principal tenant",
));
}
let (secret_b64, raw_secret) = Self::generate_secret();
let (hash, kdf) = Self::hash_secret(&raw_secret);
let secret_enc = self.encrypt_secret(&raw_secret)?;
@ -156,6 +218,7 @@ impl IamCredential for IamCredentialService {
&self,
request: Request<GetSecretKeyRequest>,
) -> Result<Response<GetSecretKeyResponse>, Status> {
self.require_admin_token(&request)?;
let req = request.into_inner();
let record = match self.store.get(&req.access_key_id).await {
Ok(Some((rec, _))) => rec,
@ -195,6 +258,7 @@ impl IamCredential for IamCredentialService {
&self,
request: Request<ListCredentialsRequest>,
) -> Result<Response<ListCredentialsResponse>, Status> {
self.require_admin_token(&request)?;
let req = request.into_inner();
let items = self
.store
@ -219,13 +283,16 @@ impl IamCredential for IamCredentialService {
},
})
.collect();
Ok(Response::new(ListCredentialsResponse { credentials: creds }))
Ok(Response::new(ListCredentialsResponse {
credentials: creds,
}))
}
async fn revoke_credential(
&self,
request: Request<RevokeCredentialRequest>,
) -> Result<Response<RevokeCredentialResponse>, Status> {
self.require_admin_token(&request)?;
let req = request.into_inner();
let revoked = self
.store
@ -241,17 +308,41 @@ mod tests {
use super::*;
use base64::engine::general_purpose::STANDARD;
use iam_store::Backend;
use iam_types::Principal;
fn test_service() -> IamCredentialService {
fn test_service() -> (IamCredentialService, Arc<PrincipalStore>) {
let backend = Arc::new(Backend::memory());
let store = Arc::new(CredentialStore::new(backend));
let store = Arc::new(CredentialStore::new(backend.clone()));
let principal_store = Arc::new(PrincipalStore::new(backend));
let master_key = [0x42u8; 32];
IamCredentialService::new(store, &master_key, "test-key").unwrap()
(
IamCredentialService::new(
store,
principal_store.clone(),
&master_key,
"test-key",
None,
)
.unwrap(),
principal_store,
)
}
async fn seed_service_account(
principal_store: &PrincipalStore,
principal_id: &str,
org_id: &str,
project_id: &str,
) {
let principal =
Principal::new_service_account(principal_id, principal_id, org_id, project_id);
principal_store.create(&principal).await.unwrap();
}
#[tokio::test]
async fn create_and_get_roundtrip() {
let svc = test_service();
let (svc, principal_store) = test_service();
seed_service_account(&principal_store, "p1", "org-a", "project-a").await;
let create = svc
.create_s3_credential(Request::new(CreateS3CredentialRequest {
principal_id: "p1".into(),
@ -284,7 +375,9 @@ mod tests {
#[tokio::test]
async fn list_filters_by_principal() {
let svc = test_service();
let (svc, principal_store) = test_service();
seed_service_account(&principal_store, "pA", "org-a", "project-a").await;
seed_service_account(&principal_store, "pB", "org-b", "project-b").await;
let a = svc
.create_s3_credential(Request::new(CreateS3CredentialRequest {
principal_id: "pA".into(),
@ -322,7 +415,8 @@ mod tests {
#[tokio::test]
async fn revoke_blocks_get() {
let svc = test_service();
let (svc, principal_store) = test_service();
seed_service_account(&principal_store, "p1", "org-a", "project-a").await;
let created = svc
.create_s3_credential(Request::new(CreateS3CredentialRequest {
principal_id: "p1".into(),
@ -365,7 +459,8 @@ mod tests {
#[tokio::test]
async fn expired_key_is_denied() {
let svc = test_service();
let (svc, principal_store) = test_service();
seed_service_account(&principal_store, "p1", "org-a", "project-a").await;
// Manually insert an expired record
let expired = CredentialRecord {
access_key_id: "expired-ak".into(),
@ -401,8 +496,9 @@ mod tests {
#[test]
fn master_key_length_enforced() {
let backend = Arc::new(Backend::memory());
let store = Arc::new(CredentialStore::new(backend));
let bad = IamCredentialService::new(store.clone(), &[0u8; 16], "k");
let store = Arc::new(CredentialStore::new(backend.clone()));
let principal_store = Arc::new(PrincipalStore::new(backend));
let bad = IamCredentialService::new(store.clone(), principal_store, &[0u8; 16], "k", None);
assert!(bad.is_err());
}
}

View file

@ -5,8 +5,8 @@ use std::time::{SystemTime, UNIX_EPOCH};
use apigateway_api::proto::{AuthorizeRequest, AuthorizeResponse, Subject};
use apigateway_api::GatewayAuthService;
use base64::{engine::general_purpose::URL_SAFE_NO_PAD, Engine};
use iam_authz::{AuthzContext, AuthzDecision, AuthzRequest, PolicyEvaluator};
use iam_authn::InternalTokenService;
use iam_authz::{AuthzContext, AuthzDecision, AuthzRequest, PolicyEvaluator};
use iam_store::{PrincipalStore, TokenStore};
use iam_types::{InternalTokenClaims, Principal, PrincipalRef, Resource};
use sha2::{Digest, Sha256};
@ -87,7 +87,10 @@ impl GatewayAuthService for GatewayAuthServiceImpl {
Err(err) => return Ok(Response::new(deny_response(err.to_string()))),
};
if let Some(reason) = self.check_token_revoked(&claims.principal_id, token).await? {
if let Some(reason) = self
.check_token_revoked(&claims.principal_id, token)
.await?
{
return Ok(Response::new(deny_response(reason)));
}
@ -108,7 +111,10 @@ impl GatewayAuthService for GatewayAuthServiceImpl {
}
let (action, resource, context, org_id, project_id) =
build_authz_request(&req, &claims, &principal);
match build_authz_request(&req, &claims, &principal) {
Ok(values) => values,
Err(reason) => return Ok(Response::new(deny_response(reason))),
};
let authz_request =
AuthzRequest::new(principal.clone(), action, resource).with_context(context);
let decision = self
@ -181,9 +187,9 @@ fn build_authz_request(
req: &AuthorizeRequest,
claims: &InternalTokenClaims,
principal: &Principal,
) -> (String, Resource, AuthzContext, String, String) {
) -> Result<(String, Resource, AuthzContext, String, String), String> {
let action = action_for_request(req);
let (org_id, project_id) = resolve_org_project(req, claims, principal);
let (org_id, project_id) = resolve_org_project(req, claims, principal)?;
let mut resource = Resource::new(
"gateway_route",
resource_id_for_request(req),
@ -212,7 +218,7 @@ fn build_authz_request(
context = context.with_source_ip(ip);
}
(action, resource, context, org_id, project_id)
Ok((action, resource, context, org_id, project_id))
}
fn action_for_request(req: &AuthorizeRequest) -> String {
@ -265,7 +271,7 @@ fn resolve_org_project(
req: &AuthorizeRequest,
claims: &InternalTokenClaims,
principal: &Principal,
) -> (String, String) {
) -> Result<(String, String), String> {
let allow_header_override = allow_header_tenant_override();
let org_id = claims
.org_id
@ -279,7 +285,7 @@ fn resolve_org_project(
None
}
})
.unwrap_or_else(|| "system".to_string());
.ok_or_else(|| "tenant resolution failed: missing org_id".to_string())?;
let project_id = claims
.project_id
@ -293,9 +299,9 @@ fn resolve_org_project(
None
}
})
.unwrap_or_else(|| "system".to_string());
.ok_or_else(|| "tenant resolution failed: missing project_id".to_string())?;
(org_id, project_id)
Ok((org_id, project_id))
}
fn allow_header_tenant_override() -> bool {
@ -383,7 +389,14 @@ mod tests {
token_store.clone(),
evaluator,
);
(service, token_service, role_store, binding_store, token_store, principal)
(
service,
token_service,
role_store,
binding_store,
token_store,
principal,
)
}
#[tokio::test]

File diff suppressed because it is too large Load diff

View file

@ -25,6 +25,7 @@ pub struct IamTokenService {
token_service: Arc<InternalTokenService>,
principal_store: Arc<PrincipalStore>,
token_store: Arc<TokenStore>,
admin_token: Option<String>,
}
impl IamTokenService {
@ -33,11 +34,13 @@ impl IamTokenService {
token_service: Arc<InternalTokenService>,
principal_store: Arc<PrincipalStore>,
token_store: Arc<TokenStore>,
admin_token: Option<String>,
) -> Self {
Self {
token_service,
principal_store,
token_store,
admin_token,
}
}
@ -117,6 +120,110 @@ impl IamTokenService {
}
Ok(token_id)
}
fn admin_token_valid(metadata: &tonic::metadata::MetadataMap, token: &str) -> bool {
if let Some(value) = metadata.get("x-iam-admin-token") {
if let Ok(raw) = value.to_str() {
if raw.trim() == token {
return true;
}
}
}
if let Some(value) = metadata.get("authorization") {
if let Ok(raw) = value.to_str() {
let raw = raw.trim();
if let Some(rest) = raw.strip_prefix("Bearer ") {
return rest.trim() == token;
}
if let Some(rest) = raw.strip_prefix("bearer ") {
return rest.trim() == token;
}
}
}
false
}
fn require_admin_token<T>(&self, request: &Request<T>) -> Result<(), Status> {
if let Some(token) = self.admin_token.as_deref() {
if !Self::admin_token_valid(request.metadata(), token) {
return Err(Status::unauthenticated(
"missing or invalid IAM admin token",
));
}
}
Ok(())
}
    /// Enforce that a requested token scope is compatible with the
    /// principal's own tenant assignment.
    ///
    /// Service accounts are strictly tenant-bound: system scope is always
    /// refused, and the org/project named in the scope must equal the
    /// principal's own org/project. Other principal kinds are only
    /// constrained when they carry an `org_id`; in that case the scope's
    /// org must match, while system scope remains allowed.
    fn validate_scope_for_principal(
        principal: &iam_types::Principal,
        scope: &TypesScope,
    ) -> Result<(), Status> {
        let principal_org = principal.org_id.as_deref();
        let principal_project = principal.project_id.as_deref();
        match principal.kind {
            TypesPrincipalKind::ServiceAccount => match scope {
                // Service accounts must never escalate to system scope.
                TypesScope::System => Err(Status::permission_denied(
                    "service accounts cannot mint system-scoped tokens",
                )),
                TypesScope::Org { id } => {
                    if principal_org == Some(id.as_str()) {
                        Ok(())
                    } else {
                        Err(Status::permission_denied(
                            "service account token scope must match principal tenant",
                        ))
                    }
                }
                TypesScope::Project { id, org_id } => {
                    // Both the org and the project must match the principal.
                    if principal_org == Some(org_id.as_str())
                        && principal_project == Some(id.as_str())
                    {
                        Ok(())
                    } else {
                        Err(Status::permission_denied(
                            "service account token scope must match principal tenant",
                        ))
                    }
                }
                TypesScope::Resource {
                    project_id, org_id, ..
                } => {
                    // Resource scopes reuse the project-level check; the
                    // resource id itself is not tenant-checked here.
                    if principal_org == Some(org_id.as_str())
                        && principal_project == Some(project_id.as_str())
                    {
                        Ok(())
                    } else {
                        Err(Status::permission_denied(
                            "service account token scope must match principal tenant",
                        ))
                    }
                }
            },
            _ => {
                // Non-service-account principals without an org are
                // unconstrained; with an org, the scope's org must match
                // (system scope stays available to them).
                if let Some(org_id) = principal_org {
                    match scope {
                        TypesScope::System => {}
                        TypesScope::Org { id } if id == org_id => {}
                        TypesScope::Project {
                            org_id: scope_org, ..
                        }
                        | TypesScope::Resource {
                            org_id: scope_org, ..
                        } if scope_org == org_id => {}
                        _ => {
                            return Err(Status::permission_denied(
                                "token scope must match principal tenant",
                            ))
                        }
                    }
                }
                Ok(())
            }
        }
    }
#[tonic::async_trait]
@ -125,6 +232,7 @@ impl IamToken for IamTokenService {
&self,
request: Request<IssueTokenRequest>,
) -> Result<Response<IssueTokenResponse>, Status> {
self.require_admin_token(&request)?;
let req = request.into_inner();
// Get principal kind
@ -149,6 +257,7 @@ impl IamToken for IamTokenService {
// Convert scope
let scope = Self::convert_scope(&req.scope);
Self::validate_scope_for_principal(&principal, &scope)?;
// Determine TTL
let ttl = if req.ttl_seconds > 0 {
@ -395,7 +504,7 @@ mod tests {
#[tokio::test]
async fn test_issue_token_principal_not_found() {
let (token_service, principal_store, token_store) = test_setup();
let service = IamTokenService::new(token_service, principal_store, token_store);
let service = IamTokenService::new(token_service, principal_store, token_store, None);
let req = IssueTokenRequest {
principal_id: "nonexistent".into(),
@ -412,8 +521,12 @@ mod tests {
#[tokio::test]
async fn test_revoke_and_validate_blocklist() {
let (token_service, principal_store, token_store) = test_setup();
let service =
IamTokenService::new(token_service, principal_store.clone(), token_store.clone());
let service = IamTokenService::new(
token_service,
principal_store.clone(),
token_store.clone(),
None,
);
// create principal
let principal = Principal::new_user("alice", "Alice");
@ -468,8 +581,12 @@ mod tests {
#[tokio::test]
async fn test_validate_token_principal_disabled() {
let (token_service, principal_store, token_store) = test_setup();
let service =
IamTokenService::new(token_service, principal_store.clone(), token_store.clone());
let service = IamTokenService::new(
token_service,
principal_store.clone(),
token_store.clone(),
None,
);
let principal = Principal::new_user("alice", "Alice");
principal_store.create(&principal).await.unwrap();
@ -505,4 +622,31 @@ mod tests {
assert!(!valid_resp.valid);
assert!(valid_resp.reason.contains("disabled"));
}
    // A tenant-bound service account asking for a system-scoped token must
    // be refused with PermissionDenied (enforced by
    // validate_scope_for_principal during issue_token).
    #[tokio::test]
    async fn test_service_account_system_scope_is_rejected() {
        let (token_service, principal_store, token_store) = test_setup();
        let service = IamTokenService::new(
            token_service,
            principal_store.clone(),
            token_store.clone(),
            None,
        );
        // Service account bound to org-1 / proj-1.
        let principal = Principal::new_service_account("svc-1", "Service 1", "org-1", "proj-1");
        principal_store.create(&principal).await.unwrap();
        let result = service
            .issue_token(Request::new(IssueTokenRequest {
                principal_id: "svc-1".into(),
                principal_kind: PrincipalKind::ServiceAccount as i32,
                roles: vec![],
                scope: Some(IamTokenService::convert_scope_to_proto(&TypesScope::System)),
                ttl_seconds: 3600,
            }))
            .await;
        assert!(result.is_err());
        assert_eq!(result.unwrap_err().code(), tonic::Code::PermissionDenied);
    }
}

View file

@ -41,8 +41,6 @@ pub enum AuthnCredentials {
BearerToken(String),
/// mTLS certificate info
Certificate(CertificateInfo),
/// API key
ApiKey(String),
}
/// Authentication provider trait
@ -153,19 +151,6 @@ impl CombinedAuthProvider {
internal_claims: None,
})
}
/// Authenticate using API key
async fn authenticate_api_key(&self, _api_key: &str) -> Result<AuthnResult> {
// API key authentication would typically:
// 1. Look up the API key in the store
// 2. Verify it's valid and not expired
// 3. Return the associated principal
// For now, this is a stub
Err(Error::Iam(IamError::AuthnFailed(
"API key authentication not yet implemented".into(),
)))
}
}
impl Default for CombinedAuthProvider {
@ -180,7 +165,6 @@ impl AuthnProvider for CombinedAuthProvider {
match credentials {
AuthnCredentials::BearerToken(token) => self.authenticate_bearer(token).await,
AuthnCredentials::Certificate(cert_info) => self.authenticate_certificate(cert_info),
AuthnCredentials::ApiKey(key) => self.authenticate_api_key(key).await,
}
}
}
@ -242,10 +226,6 @@ fn parse_scheme_credentials(value: &str) -> Option<AuthnCredentials> {
if scheme.eq_ignore_ascii_case("bearer") {
return Some(AuthnCredentials::BearerToken(token.to_string()));
}
if scheme.eq_ignore_ascii_case("apikey") {
return Some(AuthnCredentials::ApiKey(token.to_string()));
}
None
}
@ -274,16 +254,6 @@ mod tests {
));
}
#[test]
fn test_extract_api_key() {
let creds = extract_credentials_from_headers(Some("ApiKey secret-key"), None);
assert!(matches!(
creds,
Some(AuthnCredentials::ApiKey(k)) if k == "secret-key"
));
}
#[test]
fn test_extract_bearer_token_case_insensitive() {
let creds = extract_credentials_from_headers(Some("bearer abc123"), None);
@ -314,14 +284,4 @@ mod tests {
Some(AuthnCredentials::BearerToken(t)) if t == "abc123"
));
}
#[test]
fn test_extract_api_key_case_insensitive() {
let creds = extract_credentials_from_headers(Some("apikey secret-key"), None);
assert!(matches!(
creds,
Some(AuthnCredentials::ApiKey(k)) if k == "secret-key"
));
}
}

View file

@ -8,17 +8,21 @@ use std::time::Duration;
use iam_api::proto::{
iam_admin_client::IamAdminClient, iam_authz_client::IamAuthzClient,
iam_token_client::IamTokenClient, AuthorizeRequest, AuthzContext, CreateBindingRequest,
CreatePrincipalRequest, CreateRoleRequest, DeleteBindingRequest, GetPrincipalRequest,
GetRoleRequest, IssueTokenRequest, ListBindingsRequest, ListPrincipalsRequest,
ListRolesRequest, Principal as ProtoPrincipal, PrincipalKind as ProtoPrincipalKind,
PrincipalRef as ProtoPrincipalRef, ResourceRef as ProtoResourceRef, RevokeTokenRequest,
Scope as ProtoScope, ValidateTokenRequest,
CreateOrganizationRequest, CreatePrincipalRequest, CreateProjectRequest, CreateRoleRequest,
DeleteBindingRequest, DeleteOrganizationRequest, DeleteProjectRequest, GetOrganizationRequest,
GetPrincipalRequest, GetProjectRequest, GetRoleRequest, IssueTokenRequest, ListBindingsRequest,
ListOrganizationsRequest, ListPrincipalsRequest, ListProjectsRequest, ListRolesRequest,
Organization as ProtoOrganization, Principal as ProtoPrincipal,
PrincipalKind as ProtoPrincipalKind, PrincipalRef as ProtoPrincipalRef,
Project as ProtoProject, ResourceRef as ProtoResourceRef, RevokeTokenRequest,
Scope as ProtoScope, UpdateOrganizationRequest, UpdateProjectRequest, ValidateTokenRequest,
};
use iam_types::{
AuthMethod, Error, IamError, InternalTokenClaims, PolicyBinding, Principal,
PrincipalKind as TypesPrincipalKind, PrincipalRef, Resource, Result, Role, Scope,
AuthMethod, Error, IamError, InternalTokenClaims, Organization, PolicyBinding, Principal,
PrincipalKind as TypesPrincipalKind, PrincipalRef, Project, Resource, Result, Role, Scope,
};
use tonic::transport::{Channel, ClientTlsConfig, Endpoint};
use tonic::{metadata::MetadataValue, Request};
const TRANSIENT_RPC_RETRY_ATTEMPTS: usize = 3;
const TRANSIENT_RPC_INITIAL_BACKOFF: Duration = Duration::from_millis(200);
@ -33,6 +37,8 @@ pub struct IamClientConfig {
pub timeout_ms: u64,
/// Enable TLS
pub tls: bool,
/// Optional admin token for privileged APIs
pub admin_token: Option<String>,
}
impl IamClientConfig {
@ -42,6 +48,7 @@ impl IamClientConfig {
endpoint: endpoint.into(),
timeout_ms: 5000,
tls: true,
admin_token: load_admin_token_from_env(),
}
}
@ -56,11 +63,23 @@ impl IamClientConfig {
self.tls = false;
self
}
/// Set admin token for privileged APIs.
pub fn with_admin_token(mut self, admin_token: impl Into<String>) -> Self {
let token = admin_token.into();
self.admin_token = if token.trim().is_empty() {
None
} else {
Some(token)
};
self
}
}
/// IAM client
pub struct IamClient {
channel: Channel,
admin_token: Option<String>,
}
impl IamClient {
@ -90,7 +109,10 @@ impl IamClient {
.await
.map_err(|e| Error::Internal(e.to_string()))?;
Ok(Self { channel })
Ok(Self {
channel,
admin_token: config.admin_token,
})
}
fn authz_client(&self) -> IamAuthzClient<Channel> {
@ -105,6 +127,22 @@ impl IamClient {
IamTokenClient::new(self.channel.clone())
}
fn inject_admin_token<T>(&self, request: &mut Request<T>) {
let Some(token) = self.admin_token.as_ref() else {
return;
};
if let Ok(value) = MetadataValue::try_from(token.as_str()) {
request.metadata_mut().insert("x-iam-admin-token", value);
}
}
fn admin_request<T>(&self, message: T) -> Request<T> {
let mut request = Request::new(message);
self.inject_admin_token(&mut request);
request
}
async fn call_with_retry<T, F, Fut>(operation: &'static str, mut op: F) -> Result<T>
where
F: FnMut() -> Fut,
@ -219,7 +257,8 @@ impl IamClient {
let resp = Self::call_with_retry("create_principal", || {
let mut client = self.admin_client();
let req = req.clone();
async move { client.create_principal(req).await }
let request = self.admin_request(req);
async move { client.create_principal(request).await }
})
.await?
.into_inner();
@ -234,7 +273,8 @@ impl IamClient {
let resp = Self::call_with_retry("get_principal", || {
let mut client = self.admin_client();
let req = req.clone();
async move { client.get_principal(req).await }
let request = self.admin_request(req);
async move { client.get_principal(request).await }
})
.await;
match resp {
@ -249,13 +289,14 @@ impl IamClient {
&self,
id: &str,
name: &str,
org_id: &str,
project_id: &str,
) -> Result<Principal> {
let req = CreatePrincipalRequest {
id: id.into(),
kind: ProtoPrincipalKind::ServiceAccount as i32,
name: name.into(),
org_id: None,
org_id: Some(org_id.into()),
project_id: Some(project_id.into()),
email: None,
metadata: Default::default(),
@ -263,7 +304,8 @@ impl IamClient {
let resp = Self::call_with_retry("create_service_account", || {
let mut client = self.admin_client();
let req = req.clone();
async move { client.create_principal(req).await }
let request = self.admin_request(req);
async move { client.create_principal(request).await }
})
.await?
.into_inner();
@ -283,7 +325,8 @@ impl IamClient {
let resp = Self::call_with_retry("list_principals", || {
let mut client = self.admin_client();
let req = req.clone();
async move { client.list_principals(req).await }
let request = self.admin_request(req);
async move { client.list_principals(request).await }
})
.await?
.into_inner();
@ -295,6 +338,219 @@ impl IamClient {
.collect())
}
// ========================================================================
// Tenant Management APIs
// ========================================================================
/// Create an organization.
pub async fn create_organization(
&self,
id: &str,
name: &str,
description: &str,
) -> Result<Organization> {
let req = CreateOrganizationRequest {
id: id.into(),
name: name.into(),
description: description.into(),
metadata: Default::default(),
};
let resp = Self::call_with_retry("create_organization", || {
let mut client = self.admin_client();
let req = req.clone();
let request = self.admin_request(req);
async move { client.create_organization(request).await }
})
.await?
.into_inner();
Ok(ProtoOrganization::into(resp))
}
/// Get an organization by id.
pub async fn get_organization(&self, id: &str) -> Result<Option<Organization>> {
let req = GetOrganizationRequest { id: id.into() };
let resp = Self::call_with_retry("get_organization", || {
let mut client = self.admin_client();
let req = req.clone();
let request = self.admin_request(req);
async move { client.get_organization(request).await }
})
.await;
match resp {
Ok(r) => Ok(Some(r.into_inner().into())),
Err(Error::Internal(message)) if tonic_not_found(&message) => Ok(None),
Err(err) => Err(err),
}
}
/// List organizations.
pub async fn list_organizations(&self) -> Result<Vec<Organization>> {
let req = ListOrganizationsRequest {
include_disabled: false,
page_size: 0,
page_token: String::new(),
};
let resp = Self::call_with_retry("list_organizations", || {
let mut client = self.admin_client();
let req = req.clone();
let request = self.admin_request(req);
async move { client.list_organizations(request).await }
})
.await?
.into_inner();
Ok(resp.organizations.into_iter().map(Into::into).collect())
}
/// Update an organization.
pub async fn update_organization(
&self,
id: &str,
name: Option<&str>,
description: Option<&str>,
enabled: Option<bool>,
) -> Result<Organization> {
let req = UpdateOrganizationRequest {
id: id.into(),
name: name.map(str::to_string),
description: description.map(str::to_string),
metadata: Default::default(),
enabled,
};
let resp = Self::call_with_retry("update_organization", || {
let mut client = self.admin_client();
let req = req.clone();
let request = self.admin_request(req);
async move { client.update_organization(request).await }
})
.await?
.into_inner();
Ok(resp.into())
}
/// Delete an organization.
pub async fn delete_organization(&self, id: &str) -> Result<bool> {
let req = DeleteOrganizationRequest { id: id.into() };
let resp = Self::call_with_retry("delete_organization", || {
let mut client = self.admin_client();
let req = req.clone();
let request = self.admin_request(req);
async move { client.delete_organization(request).await }
})
.await?
.into_inner();
Ok(resp.deleted)
}
/// Create a project.
pub async fn create_project(
&self,
org_id: &str,
id: &str,
name: &str,
description: &str,
) -> Result<Project> {
let req = CreateProjectRequest {
id: id.into(),
org_id: org_id.into(),
name: name.into(),
description: description.into(),
metadata: Default::default(),
};
let resp = Self::call_with_retry("create_project", || {
let mut client = self.admin_client();
let req = req.clone();
let request = self.admin_request(req);
async move { client.create_project(request).await }
})
.await?
.into_inner();
Ok(ProtoProject::into(resp))
}
/// Get a project by org + id.
pub async fn get_project(&self, org_id: &str, id: &str) -> Result<Option<Project>> {
let req = GetProjectRequest {
org_id: org_id.into(),
id: id.into(),
};
let resp = Self::call_with_retry("get_project", || {
let mut client = self.admin_client();
let req = req.clone();
let request = self.admin_request(req);
async move { client.get_project(request).await }
})
.await;
match resp {
Ok(r) => Ok(Some(r.into_inner().into())),
Err(Error::Internal(message)) if tonic_not_found(&message) => Ok(None),
Err(err) => Err(err),
}
}
/// List projects, optionally filtered by organization.
pub async fn list_projects(&self, org_id: Option<&str>) -> Result<Vec<Project>> {
let req = ListProjectsRequest {
org_id: org_id.map(str::to_string),
include_disabled: false,
page_size: 0,
page_token: String::new(),
};
let resp = Self::call_with_retry("list_projects", || {
let mut client = self.admin_client();
let req = req.clone();
let request = self.admin_request(req);
async move { client.list_projects(request).await }
})
.await?
.into_inner();
Ok(resp.projects.into_iter().map(Into::into).collect())
}
/// Update a project.
pub async fn update_project(
&self,
org_id: &str,
id: &str,
name: Option<&str>,
description: Option<&str>,
enabled: Option<bool>,
) -> Result<Project> {
let req = UpdateProjectRequest {
org_id: org_id.into(),
id: id.into(),
name: name.map(str::to_string),
description: description.map(str::to_string),
metadata: Default::default(),
enabled,
};
let resp = Self::call_with_retry("update_project", || {
let mut client = self.admin_client();
let req = req.clone();
let request = self.admin_request(req);
async move { client.update_project(request).await }
})
.await?
.into_inner();
Ok(resp.into())
}
/// Delete a project.
pub async fn delete_project(&self, org_id: &str, id: &str) -> Result<bool> {
let req = DeleteProjectRequest {
org_id: org_id.into(),
id: id.into(),
};
let resp = Self::call_with_retry("delete_project", || {
let mut client = self.admin_client();
let req = req.clone();
let request = self.admin_request(req);
async move { client.delete_project(request).await }
})
.await?
.into_inner();
Ok(resp.deleted)
}
// ========================================================================
// Role Management APIs
// ========================================================================
@ -305,7 +561,8 @@ impl IamClient {
let resp = Self::call_with_retry("get_role", || {
let mut client = self.admin_client();
let req = req.clone();
async move { client.get_role(req).await }
let request = self.admin_request(req);
async move { client.get_role(request).await }
})
.await;
match resp {
@ -326,7 +583,8 @@ impl IamClient {
let resp = Self::call_with_retry("list_roles", || {
let mut client = self.admin_client();
let req = req.clone();
async move { client.list_roles(req).await }
let request = self.admin_request(req);
async move { client.list_roles(request).await }
})
.await?
.into_inner();
@ -351,7 +609,8 @@ impl IamClient {
let resp = Self::call_with_retry("create_role", || {
let mut client = self.admin_client();
let req = req.clone();
async move { client.create_role(req).await }
let request = self.admin_request(req);
async move { client.create_role(request).await }
})
.await?
.into_inner();
@ -375,7 +634,8 @@ impl IamClient {
let resp = Self::call_with_retry("create_binding", || {
let mut client = self.admin_client();
let req = req.clone();
async move { client.create_binding(req).await }
let request = self.admin_request(req);
async move { client.create_binding(request).await }
})
.await?
.into_inner();
@ -390,7 +650,8 @@ impl IamClient {
let resp = Self::call_with_retry("delete_binding", || {
let mut client = self.admin_client();
let req = req.clone();
async move { client.delete_binding(req).await }
let request = self.admin_request(req);
async move { client.delete_binding(request).await }
})
.await?
.into_inner();
@ -414,7 +675,8 @@ impl IamClient {
let resp = Self::call_with_retry("list_bindings_for_principal", || {
let mut client = self.admin_client();
let req = req.clone();
async move { client.list_bindings(req).await }
let request = self.admin_request(req);
async move { client.list_bindings(request).await }
})
.await?
.into_inner();
@ -435,7 +697,8 @@ impl IamClient {
let resp = Self::call_with_retry("list_bindings_for_scope", || {
let mut client = self.admin_client();
let req = req.clone();
async move { client.list_bindings(req).await }
let request = self.admin_request(req);
async move { client.list_bindings(request).await }
})
.await?
.into_inner();
@ -469,7 +732,8 @@ impl IamClient {
let resp = Self::call_with_retry("issue_token", || {
let mut client = self.token_client();
let req = req.clone();
async move { client.issue_token(req).await }
let request = self.admin_request(req);
async move { client.issue_token(request).await }
})
.await?
.into_inner();
@ -553,6 +817,14 @@ impl IamClient {
}
}
/// Read the IAM admin token from the environment.
///
/// `IAM_ADMIN_TOKEN` takes precedence over `PHOTON_IAM_ADMIN_TOKEN`; the
/// value is trimmed and a blank value is treated as unset.
fn load_admin_token_from_env() -> Option<String> {
    ["IAM_ADMIN_TOKEN", "PHOTON_IAM_ADMIN_TOKEN"]
        .into_iter()
        .find_map(|name| std::env::var(name).ok())
        .map(|raw| raw.trim().to_string())
        .filter(|token| !token.is_empty())
}
fn retry_delay(attempt: usize) -> Duration {
TRANSIENT_RPC_INITIAL_BACKOFF
.saturating_mul(1u32 << attempt.min(3))

View file

@ -27,8 +27,10 @@ use iam_api::{
use iam_authn::{InternalTokenConfig, InternalTokenService, SigningKey};
use iam_authz::{PolicyCache, PolicyCacheConfig, PolicyEvaluator};
use iam_store::{
Backend, BackendConfig, BindingStore, CredentialStore, PrincipalStore, RoleStore, TokenStore,
Backend, BackendConfig, BindingStore, CredentialStore, GroupStore, OrgStore, PrincipalStore,
ProjectStore, RoleStore, TokenStore,
};
use iam_types::{Organization, Project, Scope};
use config::{BackendKind, ServerConfig};
@ -62,6 +64,19 @@ fn load_admin_token() -> Option<String> {
.filter(|value| !value.is_empty())
}
/// Whether the operator explicitly opted in to running admin endpoints
/// without a token (dev mode).
///
/// Reads `IAM_ALLOW_UNAUTHENTICATED_ADMIN`, falling back to
/// `PHOTON_IAM_ALLOW_UNAUTHENTICATED_ADMIN`. Accepts the usual truthy
/// spellings ("1", "true", "yes", "y", "on"), case-insensitively; anything
/// else — including both variables being unset — means `false`.
fn allow_unauthenticated_admin() -> bool {
    let value = match std::env::var("IAM_ALLOW_UNAUTHENTICATED_ADMIN")
        .or_else(|_| std::env::var("PHOTON_IAM_ALLOW_UNAUTHENTICATED_ADMIN"))
    {
        Ok(v) => v,
        Err(_) => return false,
    };
    matches!(
        value.trim().to_ascii_lowercase().as_str(),
        "1" | "true" | "yes" | "y" | "on"
    )
}
fn admin_token_valid(metadata: &MetadataMap, token: &str) -> bool {
if let Some(value) = metadata.get("x-iam-admin-token") {
if let Ok(raw) = value.to_str() {
@ -86,6 +101,102 @@ fn admin_token_valid(metadata: &MetadataMap, token: &str) -> bool {
false
}
/// Seed the org/project tenant registry from pre-existing data.
///
/// Walks every stored principal (users, service accounts, groups) and every
/// role binding, and makes sure the organizations and projects they refer to
/// exist in the registry. Creation is idempotent (`create_if_missing`), so
/// re-running on startup is safe.
///
/// Fix over the previous version: timestamping is now done consistently via
/// the file's `now_ts()` helper instead of duplicating the
/// `SystemTime::now()` dance inline, and the repeated org/project
/// construction is factored into two small helpers.
async fn backfill_tenant_registry(
    principal_store: &Arc<PrincipalStore>,
    binding_store: &Arc<BindingStore>,
    org_store: &Arc<OrgStore>,
    project_store: &Arc<ProjectStore>,
) -> Result<(), Box<dyn std::error::Error>> {
    // Collect principals of every kind; each may carry tenant identifiers.
    let mut principals = Vec::new();
    for kind in [
        iam_types::PrincipalKind::User,
        iam_types::PrincipalKind::ServiceAccount,
        iam_types::PrincipalKind::Group,
    ] {
        principals.extend(principal_store.list_by_kind(&kind).await?);
    }
    for principal in principals {
        match (principal.org_id.as_deref(), principal.project_id.as_deref()) {
            (Some(org_id), Some(project_id)) => {
                backfill_org(org_store, org_id).await?;
                backfill_project(project_store, project_id, org_id).await?;
            }
            (Some(org_id), None) => {
                backfill_org(org_store, org_id).await?;
            }
            (None, Some(project_id)) => {
                // A project without an owning org cannot be registered;
                // surface it for the operator rather than guessing.
                warn!(
                    principal_id = %principal.id,
                    project_id = %project_id,
                    "service account missing org_id; tenant registry backfill skipped"
                );
            }
            (None, None) => {}
        }
    }
    // Bindings can reference tenants that no principal mentions; cover those too.
    for binding in binding_store.list_all().await? {
        match binding.scope {
            Scope::System => {}
            Scope::Org { id } => {
                backfill_org(org_store, &id).await?;
            }
            Scope::Project { id, org_id } => {
                backfill_org(org_store, &org_id).await?;
                backfill_project(project_store, &id, &org_id).await?;
            }
            Scope::Resource {
                project_id, org_id, ..
            } => {
                backfill_org(org_store, &org_id).await?;
                backfill_project(project_store, &project_id, &org_id).await?;
            }
        }
    }
    Ok(())
}

/// Register an organization (id doubles as the display name) if absent.
async fn backfill_org(
    org_store: &Arc<OrgStore>,
    org_id: &str,
) -> Result<(), Box<dyn std::error::Error>> {
    let mut org = Organization::new(org_id, org_id);
    let now = now_ts();
    org.created_at = now;
    org.updated_at = now;
    org_store.create_if_missing(&org).await?;
    Ok(())
}

/// Register a project under `org_id` (id doubles as the display name) if absent.
async fn backfill_project(
    project_store: &Arc<ProjectStore>,
    project_id: &str,
    org_id: &str,
) -> Result<(), Box<dyn std::error::Error>> {
    let mut project = Project::new(project_id, org_id, project_id);
    let now = now_ts();
    project.created_at = now;
    project.updated_at = now;
    project_store.create_if_missing(&project).await?;
    Ok(())
}
/// Current wall-clock time as whole seconds since the Unix epoch.
///
/// A clock set before the epoch yields 0 rather than an error.
fn now_ts() -> u64 {
    match SystemTime::now().duration_since(UNIX_EPOCH) {
        Ok(elapsed) => elapsed.as_secs(),
        Err(_) => 0,
    }
}
/// IAM Server
#[derive(Parser, Debug)]
#[command(name = "iam-server")]
@ -195,10 +306,14 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
let binding_store = Arc::new(BindingStore::new(backend.clone()));
let credential_store = Arc::new(CredentialStore::new(backend.clone()));
let token_store = Arc::new(TokenStore::new(backend.clone()));
let group_store = Arc::new(GroupStore::new(backend.clone()));
let org_store = Arc::new(OrgStore::new(backend.clone()));
let project_store = Arc::new(ProjectStore::new(backend.clone()));
// Initialize builtin roles
info!("Initializing builtin roles...");
role_store.init_builtin_roles().await?;
backfill_tenant_registry(&principal_store, &binding_store, &org_store, &project_store).await?;
// Create policy cache
let cache_config = PolicyCacheConfig {
@ -210,9 +325,10 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
let cache = Arc::new(PolicyCache::new(cache_config));
// Create evaluator
let evaluator = Arc::new(PolicyEvaluator::new(
let evaluator = Arc::new(PolicyEvaluator::with_group_store(
binding_store.clone(),
role_store.clone(),
group_store.clone(),
cache,
));
@ -244,15 +360,21 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
let token_config =
InternalTokenConfig::new(signing_key.clone(), &config.authn.internal_token.issuer)
.with_default_ttl(Duration::from_secs(
config.authn.internal_token.default_ttl_seconds,
))
.with_max_ttl(Duration::from_secs(
config.authn.internal_token.max_ttl_seconds,
));
.with_default_ttl(Duration::from_secs(
config.authn.internal_token.default_ttl_seconds,
))
.with_max_ttl(Duration::from_secs(
config.authn.internal_token.max_ttl_seconds,
));
let token_service = Arc::new(InternalTokenService::new(token_config));
let admin_token = load_admin_token();
if admin_token.is_none() && !allow_unauthenticated_admin() {
return Err(
"IAM admin token not configured. Set IAM_ADMIN_TOKEN or explicitly allow dev mode with IAM_ALLOW_UNAUTHENTICATED_ADMIN=true."
.into(),
);
}
let credential_master_key = std::env::var("IAM_CRED_MASTER_KEY")
.ok()
.map(|value| value.into_bytes())
@ -270,6 +392,7 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
token_service.clone(),
principal_store.clone(),
token_store.clone(),
admin_token.clone(),
);
let gateway_auth_service = GatewayAuthServiceImpl::new(
token_service.clone(),
@ -277,17 +400,25 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
token_store.clone(),
evaluator.clone(),
);
let credential_service =
IamCredentialService::new(credential_store, &credential_master_key, "iam-cred-master")
.map_err(|e| format!("Failed to initialize credential service: {}", e))?;
let credential_service = IamCredentialService::new(
credential_store,
principal_store.clone(),
&credential_master_key,
"iam-cred-master",
admin_token.clone(),
)
.map_err(|e| format!("Failed to initialize credential service: {}", e))?;
let admin_service = IamAdminService::new(
principal_store.clone(),
role_store.clone(),
binding_store.clone(),
org_store.clone(),
project_store.clone(),
group_store.clone(),
)
.with_evaluator(evaluator.clone());
let admin_interceptor = AdminTokenInterceptor {
token: admin_token.map(Arc::new),
token: admin_token.clone().map(Arc::new),
};
if admin_interceptor.token.is_some() {
info!("IAM admin token authentication enabled");
@ -388,6 +519,7 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
let rest_state = rest::RestApiState {
server_addr: config.server.addr.to_string(),
tls_enabled: config.server.tls.is_some(),
admin_token: admin_token.clone(),
};
let rest_app = rest::build_router(rest_state);
let http_listener = tokio::net::TcpListener::bind(&http_addr).await?;
@ -465,20 +597,16 @@ async fn create_backend(
pd_endpoint,
namespace,
})
.await
.map_err(|e| e.into())
.await
.map_err(|e| e.into())
}
BackendKind::Postgres | BackendKind::Sqlite => {
let database_url = config
.store
.database_url
.as_deref()
.ok_or_else(|| {
format!(
"database_url is required when store.backend={}",
backend_kind_name(config.store.backend)
)
})?;
let database_url = config.store.database_url.as_deref().ok_or_else(|| {
format!(
"database_url is required when store.backend={}",
backend_kind_name(config.store.backend)
)
})?;
ensure_sql_backend_matches_url(config.store.backend, database_url)?;
info!(
"Using {} backend: {}",
@ -543,31 +671,29 @@ async fn register_chainfire_membership(
let value = format!(r#"{{"addr":"{}","ts":{}}}"#, addr, ts);
let deadline = tokio::time::Instant::now() + Duration::from_secs(120);
let mut attempt = 0usize;
let mut last_error = String::new();
loop {
let last_error = loop {
attempt += 1;
match ChainFireClient::connect(endpoint).await {
let error = match ChainFireClient::connect(endpoint).await {
Ok(mut client) => match client.put_str(&key, &value).await {
Ok(_) => return Ok(()),
Err(error) => last_error = format!("put failed: {}", error),
Err(error) => format!("put failed: {}", error),
},
Err(error) => last_error = format!("connect failed: {}", error),
}
Err(error) => format!("connect failed: {}", error),
};
if tokio::time::Instant::now() >= deadline {
break;
break error;
}
warn!(
attempt,
endpoint,
service,
error = %last_error,
error = %error,
"retrying ChainFire membership registration"
);
tokio::time::sleep(Duration::from_secs(2)).await;
}
};
Err(std::io::Error::other(format!(
"failed to register ChainFire membership for {} via {} after {} attempts: {}",

View file

@ -1,22 +1,13 @@
//! REST HTTP API handlers for IAM
//!
//! Implements REST endpoints as specified in T050.S4:
//! - POST /api/v1/auth/token - Issue token
//! - POST /api/v1/auth/verify - Verify token
//! - GET /api/v1/users - List users
//! - POST /api/v1/users - Create user
//! - GET /api/v1/projects - List projects
//! - POST /api/v1/projects - Create project
//! - GET /health - Health check
//! REST HTTP API handlers for IAM.
use axum::{
extract::State,
http::StatusCode,
extract::{Path, Query, State},
http::{HeaderMap, StatusCode},
routing::{get, post},
Json, Router,
};
use iam_client::client::{IamClient, IamClientConfig};
use iam_types::{Principal, PrincipalKind, PrincipalRef, Scope};
use iam_types::{Organization, Principal, PrincipalKind, PrincipalRef, Project, Scope};
use serde::{Deserialize, Serialize};
/// REST API state
@ -24,6 +15,7 @@ use serde::{Deserialize, Serialize};
pub struct RestApiState {
    /// Address of the backing IAM server the REST layer proxies to.
    pub server_addr: String,
    /// Whether the IAM connection uses TLS; when false the client is built
    /// with TLS disabled.
    pub tls_enabled: bool,
    /// Shared admin token; when `None`, admin-gated endpoints are open
    /// (dev mode — see `require_admin`).
    pub admin_token: Option<String>,
}
/// Standard REST error response
@ -58,6 +50,9 @@ impl ResponseMeta {
fn iam_client_config(state: &RestApiState) -> IamClientConfig {
let mut config = IamClientConfig::new(&state.server_addr);
if let Some(token) = state.admin_token.as_deref() {
config = config.with_admin_token(token);
}
if !state.tls_enabled {
config = config.without_tls();
}
@ -80,7 +75,6 @@ impl<T> SuccessResponse<T> {
}
}
/// Token issuance request
#[derive(Debug, Deserialize)]
pub struct TokenRequest {
pub username: String,
@ -90,23 +84,20 @@ pub struct TokenRequest {
}
/// Default token TTL in seconds (one hour), used when the request omits
/// `ttl_seconds`.
///
/// Fix: the merged diff left both the old and new body lines in place
/// (duplicate `3600` literals), which does not parse; this restores the
/// single-expression body.
fn default_ttl() -> u64 {
    3600
}
/// Token response
/// Response body for `POST /api/v1/auth/token`.
#[derive(Debug, Serialize)]
pub struct TokenResponse {
    /// The issued bearer token.
    pub token: String,
    /// Expiry rendered as a string.
    pub expires_at: String,
}
/// Token verification request
/// Request body for `POST /api/v1/auth/verify`.
#[derive(Debug, Deserialize)]
pub struct VerifyRequest {
    /// Token to validate.
    pub token: String,
}
/// Token verification response
#[derive(Debug, Serialize)]
pub struct VerifyResponse {
pub valid: bool,
@ -115,19 +106,20 @@ pub struct VerifyResponse {
pub roles: Option<Vec<String>>,
}
/// User creation request
/// Request body for `POST /api/v1/users`.
#[derive(Debug, Deserialize)]
pub struct CreateUserRequest {
    /// Unique user identifier.
    pub id: String,
    /// Display name.
    pub name: String,
}
/// User response
/// REST projection of a `Principal`.
#[derive(Debug, Serialize)]
pub struct UserResponse {
    pub id: String,
    pub name: String,
    /// Principal kind rendered via `Debug` (e.g. "User").
    pub kind: String,
    /// Owning organization, when tenant-scoped.
    pub org_id: Option<String>,
    /// Owning project, when tenant-scoped.
    pub project_id: Option<String>,
    pub enabled: bool,
}
impl From<Principal> for UserResponse {
@ -136,48 +128,131 @@ impl From<Principal> for UserResponse {
id: p.id,
name: p.name,
kind: format!("{:?}", p.kind),
org_id: p.org_id,
project_id: p.project_id,
enabled: p.enabled,
}
}
}
/// Users list response
#[derive(Debug, Serialize)]
pub struct UsersResponse {
pub users: Vec<UserResponse>,
}
/// Project creation request (placeholder)
/// Request body for `POST /api/v1/orgs`.
#[derive(Debug, Deserialize)]
pub struct CreateOrganizationRequest {
    /// Unique organization id.
    pub id: String,
    /// Display name.
    pub name: String,
    /// Free-form description; defaults to empty when omitted.
    #[serde(default)]
    pub description: String,
}
/// Partial update for `PATCH /api/v1/orgs/{org_id}`; omitted fields are
/// forwarded to the IAM client as `None`.
#[derive(Debug, Deserialize)]
pub struct UpdateOrganizationRequest {
    pub name: Option<String>,
    pub description: Option<String>,
    pub enabled: Option<bool>,
}
/// REST projection of an `Organization`.
#[derive(Debug, Serialize)]
pub struct OrganizationResponse {
    pub id: String,
    pub name: String,
    pub description: String,
    pub enabled: bool,
}
impl From<Organization> for OrganizationResponse {
fn from(org: Organization) -> Self {
Self {
id: org.id,
name: org.name,
description: org.description,
enabled: org.enabled,
}
}
}
/// Payload for `GET /api/v1/orgs`.
#[derive(Debug, Serialize)]
pub struct OrganizationsResponse {
    pub organizations: Vec<OrganizationResponse>,
}
/// Request body for `POST /api/v1/projects`.
#[derive(Debug, Deserialize)]
pub struct CreateProjectRequest {
    /// Unique project id.
    pub id: String,
    /// Owning organization id.
    pub org_id: String,
    /// Display name.
    pub name: String,
    /// Free-form description; defaults to empty when omitted.
    #[serde(default)]
    pub description: String,
}
/// Partial update for `PATCH /api/v1/orgs/{org_id}/projects/{project_id}`;
/// omitted fields are forwarded to the IAM client as `None`.
#[derive(Debug, Deserialize)]
pub struct UpdateProjectRequest {
    pub name: Option<String>,
    pub description: Option<String>,
    pub enabled: Option<bool>,
}
/// Query string for `GET /api/v1/projects`; optional `org_id` is forwarded
/// to the project listing call.
#[derive(Debug, Deserialize)]
pub struct ProjectsQuery {
    pub org_id: Option<String>,
}
/// Project response (placeholder)
/// REST projection of a `Project`.
#[derive(Debug, Serialize)]
pub struct ProjectResponse {
    pub id: String,
    pub org_id: String,
    pub name: String,
    pub description: String,
    pub enabled: bool,
}
impl From<Project> for ProjectResponse {
fn from(project: Project) -> Self {
Self {
id: project.id,
org_id: project.org_id,
name: project.name,
description: project.description,
enabled: project.enabled,
}
}
}
/// Projects list response (placeholder)
/// Payload for `GET /api/v1/projects`.
#[derive(Debug, Serialize)]
pub struct ProjectsResponse {
    pub projects: Vec<ProjectResponse>,
}
/// Build the REST API router
/// Assemble the REST API router, wiring every endpoint to its handler.
pub fn build_router(state: RestApiState) -> Router {
    let mut router = Router::new();
    // Authentication endpoints.
    router = router
        .route("/api/v1/auth/token", post(issue_token))
        .route("/api/v1/auth/verify", post(verify_token));
    // User management.
    router = router
        .route("/api/v1/users", get(list_users).post(create_user))
        .route("/api/v1/users/{id}", get(get_user));
    // Organization management.
    router = router
        .route(
            "/api/v1/orgs",
            get(list_organizations).post(create_organization),
        )
        .route(
            "/api/v1/orgs/{org_id}",
            get(get_organization)
                .patch(update_organization)
                .delete(delete_organization),
        );
    // Project management.
    router = router
        .route("/api/v1/projects", get(list_projects).post(create_project))
        .route(
            "/api/v1/orgs/{org_id}/projects/{project_id}",
            get(get_project)
                .patch(update_project)
                .delete(delete_project),
        );
    // Liveness probe.
    router.route("/health", get(health_check)).with_state(state)
}
/// Health check endpoint
async fn health_check() -> (StatusCode, Json<SuccessResponse<serde_json::Value>>) {
(
StatusCode::OK,
@ -187,11 +262,63 @@ async fn health_check() -> (StatusCode, Json<SuccessResponse<serde_json::Value>>
)
}
/// POST /api/v1/auth/token - Issue token
/// Gate an endpoint on the configured admin token.
///
/// Accepts the token via the `x-iam-admin-token` header or an
/// `Authorization: Bearer …` header. When no token is configured the gate is
/// open (dev mode — see `allow_unauthenticated_admin` in the server binary).
///
/// Fix: token comparison previously used `==` on attacker-supplied header
/// values; string equality short-circuits on the first mismatching byte,
/// which leaks token prefixes through response timing. Comparison is now
/// constant-time for equal-length inputs.
///
/// # Errors
/// Returns `401` with code `ADMIN_TOKEN_REQUIRED` when the token is missing
/// or does not match.
fn require_admin(
    state: &RestApiState,
    headers: &HeaderMap,
) -> Result<(), (StatusCode, Json<ErrorResponse>)> {
    let Some(token) = state.admin_token.as_deref() else {
        return Ok(());
    };
    // Constant-time equality: XOR-accumulate every byte pair so the
    // comparison cost does not depend on where the first mismatch occurs.
    // (Length is still observable, which does not help an attacker guess
    // token bytes.)
    fn token_matches(candidate: &str, expected: &str) -> bool {
        let a = candidate.as_bytes();
        let b = expected.as_bytes();
        if a.len() != b.len() {
            return false;
        }
        a.iter().zip(b).fold(0u8, |acc, (x, y)| acc | (x ^ y)) == 0
    }
    if let Some(value) = headers.get("x-iam-admin-token") {
        if let Ok(raw) = value.to_str() {
            if token_matches(raw.trim(), token) {
                return Ok(());
            }
        }
    }
    if let Some(value) = headers.get("authorization") {
        if let Ok(raw) = value.to_str() {
            let raw = raw.trim();
            if let Some(rest) = raw
                .strip_prefix("Bearer ")
                .or_else(|| raw.strip_prefix("bearer "))
            {
                if token_matches(rest.trim(), token) {
                    return Ok(());
                }
            }
        }
    }
    Err(error_response(
        StatusCode::UNAUTHORIZED,
        "ADMIN_TOKEN_REQUIRED",
        "missing or invalid IAM admin token",
    ))
}
/// Open an IAM client using the state's connection settings, mapping any
/// connect failure to a `503` REST error.
async fn connect_client(
    state: &RestApiState,
) -> Result<IamClient, (StatusCode, Json<ErrorResponse>)> {
    match IamClient::connect(iam_client_config(state)).await {
        Ok(client) => Ok(client),
        Err(e) => Err(error_response(
            StatusCode::SERVICE_UNAVAILABLE,
            "SERVICE_UNAVAILABLE",
            &format!("Failed to connect: {}", e),
        )),
    }
}
async fn issue_token(
headers: HeaderMap,
State(state): State<RestApiState>,
Json(req): Json<TokenRequest>,
) -> Result<Json<SuccessResponse<TokenResponse>>, (StatusCode, Json<ErrorResponse>)> {
require_admin(&state, &headers)?;
if !allow_insecure_rest_token_issue() {
return Err(error_response(
StatusCode::FORBIDDEN,
@ -206,16 +333,7 @@ async fn issue_token(
ttl_seconds,
} = req;
// Connect to IAM server
let config = iam_client_config(&state);
let client = IamClient::connect(config).await.map_err(|e| {
error_response(
StatusCode::SERVICE_UNAVAILABLE,
"SERVICE_UNAVAILABLE",
&format!("Failed to connect: {}", e),
)
})?;
let client = connect_client(&state).await?;
let principal_ref = PrincipalRef::new(PrincipalKind::User, &username);
let principal = match client.get_principal(&principal_ref).await.map_err(|e| {
error_response(
@ -242,7 +360,6 @@ async fn issue_token(
));
}
// Issue token
let token = client
.issue_token(&principal, vec![], Scope::System, ttl_seconds)
.await
@ -275,22 +392,11 @@ fn allow_insecure_rest_token_issue() -> bool {
.unwrap_or(false)
}
/// POST /api/v1/auth/verify - Verify token
async fn verify_token(
State(state): State<RestApiState>,
Json(req): Json<VerifyRequest>,
) -> Result<Json<SuccessResponse<VerifyResponse>>, (StatusCode, Json<ErrorResponse>)> {
// Connect to IAM server
let config = iam_client_config(&state);
let client = IamClient::connect(config).await.map_err(|e| {
error_response(
StatusCode::SERVICE_UNAVAILABLE,
"SERVICE_UNAVAILABLE",
&format!("Failed to connect: {}", e),
)
})?;
// Validate token
let client = connect_client(&state).await?;
let result = client.validate_token(&req.token).await;
match result {
@ -309,22 +415,13 @@ async fn verify_token(
}
}
/// POST /api/v1/users - Create user
async fn create_user(
headers: HeaderMap,
State(state): State<RestApiState>,
Json(req): Json<CreateUserRequest>,
) -> Result<(StatusCode, Json<SuccessResponse<UserResponse>>), (StatusCode, Json<ErrorResponse>)> {
// Connect to IAM server
let config = iam_client_config(&state);
let client = IamClient::connect(config).await.map_err(|e| {
error_response(
StatusCode::SERVICE_UNAVAILABLE,
"SERVICE_UNAVAILABLE",
&format!("Failed to connect: {}", e),
)
})?;
// Create user
require_admin(&state, &headers)?;
let client = connect_client(&state).await?;
let principal = client.create_user(&req.id, &req.name).await.map_err(|e| {
error_response(
StatusCode::INTERNAL_SERVER_ERROR,
@ -339,21 +436,12 @@ async fn create_user(
))
}
/// GET /api/v1/users - List users
async fn list_users(
headers: HeaderMap,
State(state): State<RestApiState>,
) -> Result<Json<SuccessResponse<UsersResponse>>, (StatusCode, Json<ErrorResponse>)> {
// Connect to IAM server
let config = iam_client_config(&state);
let client = IamClient::connect(config).await.map_err(|e| {
error_response(
StatusCode::SERVICE_UNAVAILABLE,
"SERVICE_UNAVAILABLE",
&format!("Failed to connect: {}", e),
)
})?;
// List users
require_admin(&state, &headers)?;
let client = connect_client(&state).await?;
let principals = client.list_users().await.map_err(|e| {
error_response(
StatusCode::INTERNAL_SERVER_ERROR,
@ -363,45 +451,265 @@ async fn list_users(
})?;
let users: Vec<UserResponse> = principals.into_iter().map(UserResponse::from).collect();
Ok(Json(SuccessResponse::new(UsersResponse { users })))
}
/// GET /api/v1/projects - List projects (placeholder)
async fn list_projects(
State(_state): State<RestApiState>,
) -> Result<Json<SuccessResponse<ProjectsResponse>>, (StatusCode, Json<ErrorResponse>)> {
// Project management not yet implemented in IAM
// Return placeholder response
Ok(Json(SuccessResponse::new(ProjectsResponse {
projects: vec![ProjectResponse {
id: "(placeholder)".to_string(),
name: "Project management via REST not yet implemented - use gRPC IamAdminService for scope/binding management".to_string(),
}],
/// `GET /api/v1/users/{id}` — fetch a single user (admin-gated).
async fn get_user(
    headers: HeaderMap,
    Path(id): Path<String>,
    State(state): State<RestApiState>,
) -> Result<Json<SuccessResponse<UserResponse>>, (StatusCode, Json<ErrorResponse>)> {
    require_admin(&state, &headers)?;
    let client = connect_client(&state).await?;
    match client.get_principal(&PrincipalRef::user(id)).await {
        Ok(Some(principal)) => Ok(Json(SuccessResponse::new(UserResponse::from(principal)))),
        Ok(None) => Err(error_response(
            StatusCode::NOT_FOUND,
            "USER_NOT_FOUND",
            "user not found",
        )),
        Err(e) => Err(error_response(
            StatusCode::INTERNAL_SERVER_ERROR,
            "USER_LOOKUP_FAILED",
            &e.to_string(),
        )),
    }
}
/// `POST /api/v1/orgs` — create an organization (admin-gated).
async fn create_organization(
    headers: HeaderMap,
    State(state): State<RestApiState>,
    Json(req): Json<CreateOrganizationRequest>,
) -> Result<
    (StatusCode, Json<SuccessResponse<OrganizationResponse>>),
    (StatusCode, Json<ErrorResponse>),
> {
    require_admin(&state, &headers)?;
    let client = connect_client(&state).await?;
    let created = client
        .create_organization(&req.id, &req.name, &req.description)
        .await;
    match created {
        Ok(org) => Ok((StatusCode::CREATED, Json(SuccessResponse::new(org.into())))),
        Err(e) => Err(error_response(
            StatusCode::INTERNAL_SERVER_ERROR,
            "ORG_CREATE_FAILED",
            &e.to_string(),
        )),
    }
}
/// `GET /api/v1/orgs` — list all organizations (admin-gated).
async fn list_organizations(
    headers: HeaderMap,
    State(state): State<RestApiState>,
) -> Result<Json<SuccessResponse<OrganizationsResponse>>, (StatusCode, Json<ErrorResponse>)> {
    require_admin(&state, &headers)?;
    let client = connect_client(&state).await?;
    match client.list_organizations().await {
        Ok(organizations) => Ok(Json(SuccessResponse::new(OrganizationsResponse {
            organizations: organizations.into_iter().map(Into::into).collect(),
        }))),
        Err(e) => Err(error_response(
            StatusCode::INTERNAL_SERVER_ERROR,
            "ORG_LIST_FAILED",
            &e.to_string(),
        )),
    }
}
/// POST /api/v1/projects - Create project (placeholder)
/// `GET /api/v1/orgs/{org_id}` — fetch one organization (admin-gated).
async fn get_organization(
    headers: HeaderMap,
    Path(org_id): Path<String>,
    State(state): State<RestApiState>,
) -> Result<Json<SuccessResponse<OrganizationResponse>>, (StatusCode, Json<ErrorResponse>)> {
    require_admin(&state, &headers)?;
    let client = connect_client(&state).await?;
    match client.get_organization(&org_id).await {
        Ok(Some(organization)) => Ok(Json(SuccessResponse::new(organization.into()))),
        Ok(None) => Err(error_response(
            StatusCode::NOT_FOUND,
            "ORG_NOT_FOUND",
            "organization not found",
        )),
        Err(e) => Err(error_response(
            StatusCode::INTERNAL_SERVER_ERROR,
            "ORG_LOOKUP_FAILED",
            &e.to_string(),
        )),
    }
}
/// `PATCH /api/v1/orgs/{org_id}` — partial update of an organization
/// (admin-gated); omitted request fields are forwarded as `None`.
async fn update_organization(
    headers: HeaderMap,
    Path(org_id): Path<String>,
    State(state): State<RestApiState>,
    Json(req): Json<UpdateOrganizationRequest>,
) -> Result<Json<SuccessResponse<OrganizationResponse>>, (StatusCode, Json<ErrorResponse>)> {
    require_admin(&state, &headers)?;
    let client = connect_client(&state).await?;
    let outcome = client
        .update_organization(
            &org_id,
            req.name.as_deref(),
            req.description.as_deref(),
            req.enabled,
        )
        .await;
    match outcome {
        Ok(organization) => Ok(Json(SuccessResponse::new(organization.into()))),
        Err(e) => Err(error_response(
            StatusCode::INTERNAL_SERVER_ERROR,
            "ORG_UPDATE_FAILED",
            &e.to_string(),
        )),
    }
}
/// `DELETE /api/v1/orgs/{org_id}` — delete an organization (admin-gated);
/// responds with whether a record was actually deleted.
async fn delete_organization(
    headers: HeaderMap,
    Path(org_id): Path<String>,
    State(state): State<RestApiState>,
) -> Result<Json<SuccessResponse<serde_json::Value>>, (StatusCode, Json<ErrorResponse>)> {
    require_admin(&state, &headers)?;
    let client = connect_client(&state).await?;
    match client.delete_organization(&org_id).await {
        Ok(deleted) => Ok(Json(SuccessResponse::new(
            serde_json::json!({ "deleted": deleted }),
        ))),
        Err(e) => Err(error_response(
            StatusCode::INTERNAL_SERVER_ERROR,
            "ORG_DELETE_FAILED",
            &e.to_string(),
        )),
    }
}
/// `POST /api/v1/projects` — create a project under an organization
/// (admin-gated).
///
/// Fix: the merged diff interleaved the old placeholder implementation
/// (`State(_state)`, `NOT_IMPLEMENTED`, fabricated response) with the new
/// one, leaving an unparseable function with two status codes in the return
/// tuple. This restores the intended new implementation, which forwards to
/// `IamClient::create_project` and returns `201 Created`.
async fn create_project(
    headers: HeaderMap,
    State(state): State<RestApiState>,
    Json(req): Json<CreateProjectRequest>,
) -> Result<(StatusCode, Json<SuccessResponse<ProjectResponse>>), (StatusCode, Json<ErrorResponse>)>
{
    require_admin(&state, &headers)?;
    let client = connect_client(&state).await?;
    let project = client
        .create_project(&req.org_id, &req.id, &req.name, &req.description)
        .await
        .map_err(|e| {
            error_response(
                StatusCode::INTERNAL_SERVER_ERROR,
                "PROJECT_CREATE_FAILED",
                &e.to_string(),
            )
        })?;
    Ok((
        StatusCode::CREATED,
        Json(SuccessResponse::new(project.into())),
    ))
}
/// Helper to create error response
/// `GET /api/v1/projects` — list projects, optionally filtered by
/// `?org_id=` (admin-gated).
async fn list_projects(
    headers: HeaderMap,
    Query(query): Query<ProjectsQuery>,
    State(state): State<RestApiState>,
) -> Result<Json<SuccessResponse<ProjectsResponse>>, (StatusCode, Json<ErrorResponse>)> {
    require_admin(&state, &headers)?;
    let client = connect_client(&state).await?;
    match client.list_projects(query.org_id.as_deref()).await {
        Ok(projects) => Ok(Json(SuccessResponse::new(ProjectsResponse {
            projects: projects.into_iter().map(Into::into).collect(),
        }))),
        Err(e) => Err(error_response(
            StatusCode::INTERNAL_SERVER_ERROR,
            "PROJECT_LIST_FAILED",
            &e.to_string(),
        )),
    }
}
/// `GET /api/v1/orgs/{org_id}/projects/{project_id}` — fetch one project
/// (admin-gated).
async fn get_project(
    headers: HeaderMap,
    Path((org_id, project_id)): Path<(String, String)>,
    State(state): State<RestApiState>,
) -> Result<Json<SuccessResponse<ProjectResponse>>, (StatusCode, Json<ErrorResponse>)> {
    require_admin(&state, &headers)?;
    let client = connect_client(&state).await?;
    match client.get_project(&org_id, &project_id).await {
        Ok(Some(project)) => Ok(Json(SuccessResponse::new(project.into()))),
        Ok(None) => Err(error_response(
            StatusCode::NOT_FOUND,
            "PROJECT_NOT_FOUND",
            "project not found",
        )),
        Err(e) => Err(error_response(
            StatusCode::INTERNAL_SERVER_ERROR,
            "PROJECT_LOOKUP_FAILED",
            &e.to_string(),
        )),
    }
}
/// `PATCH /api/v1/orgs/{org_id}/projects/{project_id}` — partial update of
/// a project (admin-gated); omitted request fields are forwarded as `None`.
async fn update_project(
    headers: HeaderMap,
    Path((org_id, project_id)): Path<(String, String)>,
    State(state): State<RestApiState>,
    Json(req): Json<UpdateProjectRequest>,
) -> Result<Json<SuccessResponse<ProjectResponse>>, (StatusCode, Json<ErrorResponse>)> {
    require_admin(&state, &headers)?;
    let client = connect_client(&state).await?;
    let outcome = client
        .update_project(
            &org_id,
            &project_id,
            req.name.as_deref(),
            req.description.as_deref(),
            req.enabled,
        )
        .await;
    match outcome {
        Ok(project) => Ok(Json(SuccessResponse::new(project.into()))),
        Err(e) => Err(error_response(
            StatusCode::INTERNAL_SERVER_ERROR,
            "PROJECT_UPDATE_FAILED",
            &e.to_string(),
        )),
    }
}
/// `DELETE /api/v1/orgs/{org_id}/projects/{project_id}` — delete a project
/// (admin-gated); responds with whether a record was actually deleted.
async fn delete_project(
    headers: HeaderMap,
    Path((org_id, project_id)): Path<(String, String)>,
    State(state): State<RestApiState>,
) -> Result<Json<SuccessResponse<serde_json::Value>>, (StatusCode, Json<ErrorResponse>)> {
    require_admin(&state, &headers)?;
    let client = connect_client(&state).await?;
    match client.delete_project(&org_id, &project_id).await {
        Ok(deleted) => Ok(Json(SuccessResponse::new(
            serde_json::json!({ "deleted": deleted }),
        ))),
        Err(e) => Err(error_response(
            StatusCode::INTERNAL_SERVER_ERROR,
            "PROJECT_DELETE_FAILED",
            &e.to_string(),
        )),
    }
}
fn error_response(
status: StatusCode,
code: &str,

View file

@ -94,10 +94,7 @@ impl AuthService {
}
/// Authenticate an HTTP request using headers.
pub async fn authenticate_headers(
&self,
headers: &HeaderMap,
) -> Result<TenantContext, Status> {
pub async fn authenticate_headers(&self, headers: &HeaderMap) -> Result<TenantContext, Status> {
let token = extract_token_from_headers(headers)?;
self.authenticate_token(&token).await
}
@ -110,11 +107,18 @@ impl AuthService {
resource: &Resource,
) -> Result<(), Status> {
let mut principal = match tenant.principal_kind {
PrincipalKind::User => Principal::new_user(&tenant.principal_id, &tenant.principal_name),
PrincipalKind::ServiceAccount => {
Principal::new_service_account(&tenant.principal_id, &tenant.principal_name, &tenant.project_id)
PrincipalKind::User => {
Principal::new_user(&tenant.principal_id, &tenant.principal_name)
}
PrincipalKind::ServiceAccount => Principal::new_service_account(
&tenant.principal_id,
&tenant.principal_name,
&tenant.org_id,
&tenant.project_id,
),
PrincipalKind::Group => {
Principal::new_group(&tenant.principal_id, &tenant.principal_name)
}
PrincipalKind::Group => Principal::new_group(&tenant.principal_id, &tenant.principal_name),
};
principal.org_id = Some(tenant.org_id.clone());
@ -135,14 +139,10 @@ impl AuthService {
return Ok(cached);
}
let claims = self
.iam_client
.validate_token(token)
.await
.map_err(|e| {
warn!("Token validation failed: {}", e);
Status::unauthenticated(format!("Invalid token: {}", e))
})?;
let claims = self.iam_client.validate_token(token).await.map_err(|e| {
warn!("Token validation failed: {}", e);
Status::unauthenticated(format!("Invalid token: {}", e))
})?;
let org_id = claims
.org_id

View file

@ -9,7 +9,9 @@ pub mod backend;
pub mod binding_store;
pub mod credential_store;
pub mod group_store;
pub mod org_store;
pub mod principal_store;
pub mod project_store;
pub mod role_store;
pub mod token_store;
@ -17,6 +19,8 @@ pub use backend::{Backend, BackendConfig, CasResult, KvPair, StorageBackend};
pub use binding_store::BindingStore;
pub use credential_store::CredentialStore;
pub use group_store::GroupStore;
pub use org_store::OrgStore;
pub use principal_store::PrincipalStore;
pub use project_store::ProjectStore;
pub use role_store::RoleStore;
pub use token_store::TokenStore;

View file

@ -0,0 +1,109 @@
//! Organization storage.
use std::sync::Arc;
use iam_types::{Error, IamError, Organization, Result};
use crate::backend::{Backend, CasResult, JsonStore, StorageBackend};
mod keys {
pub const ORGS: &str = "iam/orgs/";
}
/// CAS-backed persistence for `Organization` records, keyed under
/// `iam/orgs/{id}`.
pub struct OrgStore {
    backend: Arc<Backend>,
}
impl JsonStore for OrgStore {
    // Expose the backend so JsonStore's JSON helpers (e.g. get_json,
    // used below) operate on this store's storage.
    fn backend(&self) -> &Backend {
        &self.backend
    }
}
impl OrgStore {
    /// Build a store over the shared storage backend.
    pub fn new(backend: Arc<Backend>) -> Self {
        Self { backend }
    }

    /// Insert a new organization; fails with `OrganizationAlreadyExists`
    /// if the id is taken.
    ///
    /// Uses CAS with expected version 0 so concurrent creators race safely;
    /// returns the new storage version on success.
    pub async fn create(&self, org: &Organization) -> Result<u64> {
        let key = self.primary_key(&org.id);
        let bytes = serde_json::to_vec(org).map_err(|e| Error::Serialization(e.to_string()))?;
        match self.backend.cas(key.as_bytes(), 0, &bytes).await? {
            CasResult::Success(version) => Ok(version),
            CasResult::Conflict { .. } => {
                Err(Error::Iam(IamError::OrganizationAlreadyExists(org.id.clone())))
            }
            // CAS against version 0 should never report NotFound.
            CasResult::NotFound => Err(Error::Internal("Unexpected CAS result".into())),
        }
    }

    /// Insert the organization only if absent; returns whether it was
    /// created (`false` when a record already existed).
    pub async fn create_if_missing(&self, org: &Organization) -> Result<bool> {
        let key = self.primary_key(&org.id);
        let bytes = serde_json::to_vec(org).map_err(|e| Error::Serialization(e.to_string()))?;
        match self.backend.cas(key.as_bytes(), 0, &bytes).await? {
            CasResult::Success(_) => Ok(true),
            CasResult::Conflict { .. } => Ok(false),
            CasResult::NotFound => Err(Error::Internal("Unexpected CAS result".into())),
        }
    }

    /// Fetch an organization by id, or `None` when absent.
    pub async fn get(&self, id: &str) -> Result<Option<Organization>> {
        Ok(self.get_json::<Organization>(self.primary_key(id).as_bytes()).await?.map(|v| v.0))
    }

    /// Fetch an organization together with its storage version, for use as
    /// `expected_version` in a subsequent [`OrgStore::update`].
    pub async fn get_with_version(&self, id: &str) -> Result<Option<(Organization, u64)>> {
        self.get_json::<Organization>(self.primary_key(id).as_bytes()).await
    }

    /// Replace an existing organization, guarded by `expected_version`.
    ///
    /// A version mismatch surfaces as a `CasConflict` storage error; a
    /// missing record maps to `OrganizationNotFound`.
    pub async fn update(&self, org: &Organization, expected_version: u64) -> Result<u64> {
        let key = self.primary_key(&org.id);
        let bytes = serde_json::to_vec(org).map_err(|e| Error::Serialization(e.to_string()))?;
        match self.backend.cas(key.as_bytes(), expected_version, &bytes).await? {
            CasResult::Success(version) => Ok(version),
            CasResult::Conflict { expected, actual } => {
                Err(Error::Storage(iam_types::StorageError::CasConflict { expected, actual }))
            }
            CasResult::NotFound => Err(Error::Iam(IamError::OrganizationNotFound(org.id.clone()))),
        }
    }

    /// Delete by id; returns `true` when a record was removed (see the
    /// crud_roundtrip test below).
    pub async fn delete(&self, id: &str) -> Result<bool> {
        self.backend.delete(self.primary_key(id).as_bytes()).await
    }

    /// List organizations.
    ///
    /// NOTE(review): the scan is capped at 10_000 entries; anything past the
    /// cap is silently dropped — confirm this bound is acceptable.
    pub async fn list(&self) -> Result<Vec<Organization>> {
        let pairs = self.backend.scan_prefix(keys::ORGS.as_bytes(), 10_000).await?;
        let mut orgs = Vec::new();
        for pair in pairs {
            let org: Organization = serde_json::from_slice(&pair.value)
                .map_err(|e| Error::Serialization(e.to_string()))?;
            orgs.push(org);
        }
        Ok(orgs)
    }

    /// Whether an organization with this id exists.
    pub async fn exists(&self, id: &str) -> Result<bool> {
        Ok(self.backend.get(self.primary_key(id).as_bytes()).await?.is_some())
    }

    /// Primary storage key: `iam/orgs/{id}`.
    fn primary_key(&self, id: &str) -> String {
        format!("{}{id}", keys::ORGS)
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    /// create → get → delete round-trip against the in-memory backend;
    /// also pins `delete` returning `true` for an existing record.
    #[tokio::test]
    async fn crud_roundtrip() {
        let backend = Arc::new(Backend::memory());
        let store = OrgStore::new(backend);
        let org = Organization::new("org-1", "Org 1");
        store.create(&org).await.unwrap();
        let fetched = store.get("org-1").await.unwrap().unwrap();
        assert_eq!(fetched.name, "Org 1");
        assert!(store.delete("org-1").await.unwrap());
        assert!(store.get("org-1").await.unwrap().is_none());
    }
}

View file

@ -19,9 +19,13 @@ mod keys {
pub const BY_ORG: &str = "iam/principals/by-org/";
/// Secondary index: by project (for service accounts)
/// Format: iam/principals/by-project/{project_id}/{id}
/// Format: iam/principals/by-project/{project_id}/{org_id}/{id}
pub const BY_PROJECT: &str = "iam/principals/by-project/";
/// Secondary index: by tenant (for service accounts)
/// Format: iam/principals/by-tenant/{org_id}/{project_id}/{id}
pub const BY_TENANT: &str = "iam/principals/by-tenant/";
/// Secondary index: by email
/// Format: iam/principals/by-email/{email}
pub const BY_EMAIL: &str = "iam/principals/by-email/";
@ -213,6 +217,23 @@ impl PrincipalStore {
Ok(principals)
}
/// List service accounts by organization + project.
pub async fn list_by_tenant(&self, org_id: &str, project_id: &str) -> Result<Vec<Principal>> {
    let prefix = format!("{}{org_id}/{project_id}/", keys::BY_TENANT);
    let pairs = self.backend.scan_prefix(prefix.as_bytes(), 10000).await?;
    let mut principals = Vec::with_capacity(pairs.len());
    for pair in pairs {
        // Index values are serialized PrincipalRefs; resolve each one to its
        // full record and skip entries whose primary record is missing.
        let principal_ref: PrincipalRef = serde_json::from_slice(&pair.value)
            .map_err(|e| Error::Serialization(e.to_string()))?;
        if let Some(principal) = self.get(&principal_ref).await? {
            principals.push(principal);
        }
    }
    Ok(principals)
}
/// Check if a principal exists
pub async fn exists(&self, principal_ref: &PrincipalRef) -> Result<bool> {
let key = self.make_primary_key(&principal_ref.kind, &principal_ref.id);
@ -251,9 +272,24 @@ impl PrincipalStore {
}
// Project index (for service accounts)
if let Some(project_id) = &principal.project_id {
let key = format!("{}{}/{}", keys::BY_PROJECT, project_id, principal.id);
if let (Some(org_id), Some(project_id)) = (&principal.org_id, &principal.project_id) {
let key = format!(
"{}{}/{}/{}",
keys::BY_PROJECT,
project_id,
org_id,
principal.id
);
self.backend.put(key.as_bytes(), &ref_bytes).await?;
let tenant_key = format!(
"{}{}/{}/{}",
keys::BY_TENANT,
org_id,
project_id,
principal.id
);
self.backend.put(tenant_key.as_bytes(), &ref_bytes).await?;
}
// Email index
@ -289,9 +325,24 @@ impl PrincipalStore {
}
// Project index
if let Some(project_id) = &principal.project_id {
let key = format!("{}{}/{}", keys::BY_PROJECT, project_id, principal.id);
if let (Some(org_id), Some(project_id)) = (&principal.org_id, &principal.project_id) {
let key = format!(
"{}{}/{}/{}",
keys::BY_PROJECT,
project_id,
org_id,
principal.id
);
self.backend.delete(key.as_bytes()).await?;
let tenant_key = format!(
"{}{}/{}/{}",
keys::BY_TENANT,
org_id,
project_id,
principal.id
);
self.backend.delete(tenant_key.as_bytes()).await?;
}
// Email index
@ -356,7 +407,8 @@ mod tests {
async fn test_service_account() {
let store = PrincipalStore::new(test_backend());
let sa = Principal::new_service_account("compute-agent", "Compute Agent", "proj-1");
let sa =
Principal::new_service_account("compute-agent", "Compute Agent", "org-1", "proj-1");
store.create(&sa).await.unwrap();
@ -364,6 +416,10 @@ mod tests {
let sas = store.list_by_project("proj-1").await.unwrap();
assert_eq!(sas.len(), 1);
assert_eq!(sas[0].id, "compute-agent");
let tenant = store.list_by_tenant("org-1", "proj-1").await.unwrap();
assert_eq!(tenant.len(), 1);
assert_eq!(tenant[0].id, "compute-agent");
}
#[tokio::test]
@ -382,14 +438,15 @@ mod tests {
store.update(&principal, version).await.unwrap();
// Old indexes should be cleared
assert!(store.get_by_email("alice@example.com").await.unwrap().is_none());
assert!(store
.get_by_email("alice@example.com")
.await
.unwrap()
.is_none());
assert!(store.list_by_org("org-1").await.unwrap().is_empty());
// New indexes should exist
let fetched = store
.get_by_email("alice+new@example.com")
.await
.unwrap();
let fetched = store.get_by_email("alice+new@example.com").await.unwrap();
assert!(fetched.is_some());
assert_eq!(fetched.unwrap().id, "alice");
let org2 = store.list_by_org("org-2").await.unwrap();
@ -409,7 +466,9 @@ mod tests {
.await
.unwrap();
store
.create(&Principal::new_service_account("sa1", "SA 1", "proj-1"))
.create(&Principal::new_service_account(
"sa1", "SA 1", "org-1", "proj-1",
))
.await
.unwrap();

View file

@ -0,0 +1,174 @@
//! Project storage.
use std::sync::Arc;
use iam_types::{Error, IamError, Project, Result};
use crate::backend::{Backend, CasResult, JsonStore, StorageBackend};
/// Key prefixes for project records in the shared KV backend.
mod keys {
    /// Primary records: `iam/projects/{org_id}/{project_id}` -> Project JSON.
    pub const PROJECTS: &str = "iam/projects/";
    /// Secondary index: `iam/projects/by-org/{org_id}/{project_id}` -> bare project id.
    /// NOTE: this shares the `PROJECTS` prefix, so full scans must filter it out.
    pub const BY_ORG: &str = "iam/projects/by-org/";
}
/// Versioned CRUD store for `Project` records, maintaining a by-org index.
pub struct ProjectStore {
    // Shared KV backend providing CAS, scans, and point reads/writes.
    backend: Arc<Backend>,
}
// Expose the raw backend so the JsonStore helpers (e.g. `get_json`, used by
// `get`/`get_with_version` below) can operate on this store.
impl JsonStore for ProjectStore {
    fn backend(&self) -> &Backend {
        &self.backend
    }
}
impl ProjectStore {
    /// Create a store on top of the shared storage backend.
    pub fn new(backend: Arc<Backend>) -> Self {
        Self { backend }
    }

    /// Create a project; fails with `ProjectAlreadyExists` when the
    /// `{org_id}/{id}` pair is already taken.
    pub async fn create(&self, project: &Project) -> Result<u64> {
        let key = self.primary_key(&project.org_id, &project.id);
        let bytes =
            serde_json::to_vec(project).map_err(|e| Error::Serialization(e.to_string()))?;
        // CAS against expected version 0 means "insert only if absent".
        match self.backend.cas(key.as_bytes(), 0, &bytes).await? {
            CasResult::Success(version) => {
                self.create_indexes(project).await?;
                Ok(version)
            }
            CasResult::Conflict { .. } => Err(Error::Iam(IamError::ProjectAlreadyExists(
                project.key(),
            ))),
            CasResult::NotFound => Err(Error::Internal("Unexpected CAS result".into())),
        }
    }

    /// Idempotent create: `Ok(true)` when the project was inserted,
    /// `Ok(false)` when it already existed.
    pub async fn create_if_missing(&self, project: &Project) -> Result<bool> {
        let key = self.primary_key(&project.org_id, &project.id);
        let bytes =
            serde_json::to_vec(project).map_err(|e| Error::Serialization(e.to_string()))?;
        match self.backend.cas(key.as_bytes(), 0, &bytes).await? {
            CasResult::Success(_) => {
                self.create_indexes(project).await?;
                Ok(true)
            }
            CasResult::Conflict { .. } => Ok(false),
            CasResult::NotFound => Err(Error::Internal("Unexpected CAS result".into())),
        }
    }

    /// Fetch a project by `(org_id, id)`, discarding the storage version.
    pub async fn get(&self, org_id: &str, id: &str) -> Result<Option<Project>> {
        Ok(self
            .get_json::<Project>(self.primary_key(org_id, id).as_bytes())
            .await?
            .map(|v| v.0))
    }

    /// Fetch a project together with its current storage version.
    pub async fn get_with_version(&self, org_id: &str, id: &str) -> Result<Option<(Project, u64)>> {
        self.get_json::<Project>(self.primary_key(org_id, id).as_bytes())
            .await
    }

    /// Replace a project using optimistic concurrency; `expected_version`
    /// must match the stored version. Returns the new version on success.
    pub async fn update(&self, project: &Project, expected_version: u64) -> Result<u64> {
        let key = self.primary_key(&project.org_id, &project.id);
        let bytes =
            serde_json::to_vec(project).map_err(|e| Error::Serialization(e.to_string()))?;
        match self.backend.cas(key.as_bytes(), expected_version, &bytes).await? {
            CasResult::Success(version) => {
                // The index key derives only from (org_id, id), which form the
                // primary key, so re-putting it is a harmless idempotent write.
                self.create_indexes(project).await?;
                Ok(version)
            }
            CasResult::Conflict { expected, actual } => {
                Err(Error::Storage(iam_types::StorageError::CasConflict { expected, actual }))
            }
            CasResult::NotFound => Err(Error::Iam(IamError::ProjectNotFound(project.key()))),
        }
    }

    /// Delete a project and its index entries; returns whether a primary
    /// record was actually removed.
    pub async fn delete(&self, org_id: &str, id: &str) -> Result<bool> {
        if let Some(project) = self.get(org_id, id).await? {
            let deleted = self.backend.delete(self.primary_key(org_id, id).as_bytes()).await?;
            if deleted {
                self.delete_indexes(&project).await?;
            }
            Ok(deleted)
        } else {
            Ok(false)
        }
    }

    /// List every project across all organizations (bounded prefix scan).
    pub async fn list(&self) -> Result<Vec<Project>> {
        let pairs = self.backend.scan_prefix(keys::PROJECTS.as_bytes(), 10_000).await?;
        let mut projects = Vec::new();
        for pair in pairs {
            // The by-org index shares the PROJECTS key prefix but stores bare
            // project ids, not JSON documents — skip index entries. Compare
            // raw key bytes rather than allocating a lossy String per key.
            if pair.key.starts_with(keys::BY_ORG.as_bytes()) {
                continue;
            }
            let project: Project = serde_json::from_slice(&pair.value)
                .map_err(|e| Error::Serialization(e.to_string()))?;
            projects.push(project);
        }
        Ok(projects)
    }

    /// List the projects of one organization via the by-org index.
    pub async fn list_by_org(&self, org_id: &str) -> Result<Vec<Project>> {
        let prefix = format!("{}{}/", keys::BY_ORG, org_id);
        let pairs = self.backend.scan_prefix(prefix.as_bytes(), 10_000).await?;
        let mut projects = Vec::new();
        for pair in pairs {
            // Index values hold the project id; resolve each to the full
            // record, skipping entries whose primary record is gone.
            let project_id = String::from_utf8_lossy(&pair.value).to_string();
            if let Some(project) = self.get(org_id, &project_id).await? {
                projects.push(project);
            }
        }
        Ok(projects)
    }

    /// Check for the presence of a project without deserializing it.
    pub async fn exists(&self, org_id: &str, id: &str) -> Result<bool> {
        Ok(self
            .backend
            .get(self.primary_key(org_id, id).as_bytes())
            .await?
            .is_some())
    }

    /// Primary record key: `iam/projects/{org_id}/{id}`.
    fn primary_key(&self, org_id: &str, id: &str) -> String {
        format!("{}{}/{}", keys::PROJECTS, org_id, id)
    }

    /// Write the by-org index entry:
    /// key `iam/projects/by-org/{org_id}/{id}`, value = project id.
    async fn create_indexes(&self, project: &Project) -> Result<()> {
        let key = format!("{}{}/{}", keys::BY_ORG, project.org_id, project.id);
        self.backend.put(key.as_bytes(), project.id.as_bytes()).await?;
        Ok(())
    }

    /// Remove the by-org index entry for a project.
    async fn delete_indexes(&self, project: &Project) -> Result<()> {
        let key = format!("{}{}/{}", keys::BY_ORG, project.org_id, project.id);
        self.backend.delete(key.as_bytes()).await?;
        Ok(())
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Projects sharing an id under different orgs must not leak between
    /// `list_by_org` results.
    #[tokio::test]
    async fn list_by_org_is_isolated() {
        let store = ProjectStore::new(Arc::new(Backend::memory()));
        for org in ["org-1", "org-2"] {
            store
                .create(&Project::new("proj-1", org, "Project 1"))
                .await
                .unwrap();
        }

        let first = store.list_by_org("org-1").await.unwrap();
        let second = store.list_by_org("org-2").await.unwrap();
        assert_eq!(first.len(), 1);
        assert_eq!(second.len(), 1);
        assert_eq!(first[0].org_id, "org-1");
        assert_eq!(second[0].org_id, "org-2");
    }
}

View file

@ -48,6 +48,14 @@ pub enum IamError {
#[error("Role not found: {0}")]
RoleNotFound(String),
/// Organization not found
#[error("Organization not found: {0}")]
OrganizationNotFound(String),
/// Project not found
#[error("Project not found: {0}")]
ProjectNotFound(String),
/// Binding not found
#[error("Binding not found: {0}")]
BindingNotFound(String),
@ -84,6 +92,14 @@ pub enum IamError {
#[error("Role already exists: {0}")]
RoleAlreadyExists(String),
/// Organization already exists
#[error("Organization already exists: {0}")]
OrganizationAlreadyExists(String),
/// Project already exists
#[error("Project already exists: {0}")]
ProjectAlreadyExists(String),
/// Cannot modify builtin role
#[error("Cannot modify builtin role: {0}")]
CannotModifyBuiltinRole(String),

View file

@ -17,6 +17,7 @@ pub mod principal;
pub mod resource;
pub mod role;
pub mod scope;
pub mod tenant;
pub mod token;
pub use condition::{Condition, ConditionExpr};
@ -27,6 +28,7 @@ pub use principal::{Principal, PrincipalKind, PrincipalRef};
pub use resource::{Resource, ResourceRef};
pub use role::{builtin as builtin_roles, Permission, Role};
pub use scope::Scope;
pub use tenant::{Organization, Project};
pub use token::{
AuthMethod, InternalTokenClaims, JwtClaims, TokenMetadata, TokenType, TokenValidationError,
};

View file

@ -104,13 +104,14 @@ impl Principal {
pub fn new_service_account(
id: impl Into<String>,
name: impl Into<String>,
org_id: impl Into<String>,
project_id: impl Into<String>,
) -> Self {
Self {
id: id.into(),
kind: PrincipalKind::ServiceAccount,
name: name.into(),
org_id: None,
org_id: Some(org_id.into()),
project_id: Some(project_id.into()),
email: None,
oidc_sub: None,

View file

@ -155,6 +155,63 @@ impl Scope {
}
}
/// Check whether this scope can be applied to another scope.
///
/// Unlike `contains`, this matcher understands `*` wildcard segments.
/// It is intended for validating role applicability at bind time.
pub fn applies_to(&self, other: &Scope) -> bool {
    match self {
        // System scope applies everywhere.
        Scope::System => true,
        // An org scope applies to that org and to anything underneath it.
        Scope::Org { id } => match other {
            Scope::Org { id: other_id } => segment_matches(id, other_id),
            Scope::Project { org_id, .. } | Scope::Resource { org_id, .. } => {
                segment_matches(id, org_id)
            }
            _ => false,
        },
        // A project scope applies to that project and to its resources.
        Scope::Project { id, org_id } => match other {
            Scope::Project {
                id: other_id,
                org_id: other_org_id,
            } => segment_matches(org_id, other_org_id) && segment_matches(id, other_id),
            Scope::Resource {
                project_id: other_project_id,
                org_id: other_org_id,
                ..
            } => {
                segment_matches(org_id, other_org_id) && segment_matches(id, other_project_id)
            }
            _ => false,
        },
        // A resource scope only applies to a matching resource.
        Scope::Resource {
            id,
            project_id,
            org_id,
        } => match other {
            Scope::Resource {
                id: other_id,
                project_id: other_project_id,
                org_id: other_org_id,
            } => {
                segment_matches(org_id, other_org_id)
                    && segment_matches(project_id, other_project_id)
                    && segment_matches(id, other_id)
            }
            _ => false,
        },
        // Any other self scope applies to nothing (mirrors the original
        // catch-all; unreachable if Scope has exactly the four variants above).
        #[allow(unreachable_patterns)]
        _ => false,
    }
}
/// Get the parent scope, if any
///
/// - System has no parent
@ -276,6 +333,10 @@ impl Scope {
}
}
/// A scope segment matches when the pattern is the `*` wildcard or is
/// exactly equal to the value. Wildcards are only honored on the pattern
/// side — a literal `*` value does not match an arbitrary pattern.
fn segment_matches(pattern: &str, value: &str) -> bool {
    match pattern {
        "*" => true,
        concrete => concrete == value,
    }
}
impl fmt::Display for Scope {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
@ -490,4 +551,16 @@ mod tests {
Some("proj1")
);
}
#[test]
fn test_scope_applies_to_with_wildcards() {
    // System applies to everything.
    assert!(Scope::System.applies_to(&Scope::project("proj-1", "org-1")));
    // Wildcard segments match any concrete segment at the same position.
    assert!(Scope::org("*").applies_to(&Scope::project("proj-1", "org-1")));
    // A concrete org scope covers resources nested under that org.
    assert!(Scope::org("org-1").applies_to(&Scope::resource("res-1", "proj-1", "org-1")));
    assert!(Scope::project("*", "*").applies_to(&Scope::project("proj-1", "org-1")));
    // A concrete project scope covers resources inside that project.
    assert!(Scope::project("proj-1", "org-1")
        .applies_to(&Scope::resource("res-1", "proj-1", "org-1")));
    // Mismatched concrete segments never apply.
    assert!(!Scope::project("proj-2", "org-1").applies_to(&Scope::project("proj-1", "org-1")));
    assert!(!Scope::org("org-2").applies_to(&Scope::project("proj-1", "org-1")));
}
}

View file

@ -0,0 +1,93 @@
//! Tenant registry types for IAM.
//!
//! Organizations and projects are the authoritative tenant entities for IAM.
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
/// An organization tracked by IAM.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Organization {
    /// Unique organization identifier.
    pub id: String,
    /// Human-readable display name.
    pub name: String,
    /// Free-form description; empty by default.
    pub description: String,
    /// Arbitrary key/value annotations.
    pub metadata: HashMap<String, String>,
    /// Creation timestamp; `Organization::new` initializes it to 0.
    /// NOTE(review): units (seconds vs millis) are not established here — confirm against callers.
    pub created_at: u64,
    /// Last-update timestamp; initialized to 0 alongside `created_at`.
    pub updated_at: u64,
    /// Whether the organization is active; defaults to `true`.
    pub enabled: bool,
}
impl Organization {
    /// Create an enabled organization with empty description/metadata and
    /// zeroed timestamps.
    pub fn new(id: impl Into<String>, name: impl Into<String>) -> Self {
        Self {
            id: id.into(),
            name: name.into(),
            description: String::new(),
            metadata: HashMap::new(),
            created_at: 0,
            updated_at: 0,
            enabled: true,
        }
    }
    /// Builder-style setter for the description; consumes and returns `self`.
    pub fn with_description(mut self, description: impl Into<String>) -> Self {
        self.description = description.into();
        self
    }
}
/// A project belonging to an organization.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Project {
    /// Project identifier, unique within its organization (see `key()`).
    pub id: String,
    /// Identifier of the owning organization.
    pub org_id: String,
    /// Human-readable display name.
    pub name: String,
    /// Free-form description; empty by default.
    pub description: String,
    /// Arbitrary key/value annotations.
    pub metadata: HashMap<String, String>,
    /// Creation timestamp; `Project::new` initializes it to 0.
    /// NOTE(review): units are not established here — confirm against callers.
    pub created_at: u64,
    /// Last-update timestamp; initialized to 0 alongside `created_at`.
    pub updated_at: u64,
    /// Whether the project is active; defaults to `true`.
    pub enabled: bool,
}
impl Project {
pub fn new(
id: impl Into<String>,
org_id: impl Into<String>,
name: impl Into<String>,
) -> Self {
Self {
id: id.into(),
org_id: org_id.into(),
name: name.into(),
description: String::new(),
metadata: HashMap::new(),
created_at: 0,
updated_at: 0,
enabled: true,
}
}
pub fn tenant_key(org_id: &str, project_id: &str) -> String {
format!("{org_id}/{project_id}")
}
pub fn key(&self) -> String {
Self::tenant_key(&self.org_id, &self.id)
}
pub fn with_description(mut self, description: impl Into<String>) -> Self {
self.description = description.into();
self
}
}
#[cfg(test)]
mod tests {
    use super::*;

    /// The project key embeds both the org id and the project id.
    #[test]
    fn project_key_is_composite() {
        let proj = Project::new("proj-1", "org-1", "Project 1");
        assert_eq!(proj.key(), "org-1/proj-1");
    }
}

View file

@ -255,6 +255,20 @@ service IamAdmin {
rpc DeletePrincipal(DeletePrincipalRequest) returns (DeletePrincipalResponse);
rpc ListPrincipals(ListPrincipalsRequest) returns (ListPrincipalsResponse);
// Organization management
rpc CreateOrganization(CreateOrganizationRequest) returns (Organization);
rpc GetOrganization(GetOrganizationRequest) returns (Organization);
rpc UpdateOrganization(UpdateOrganizationRequest) returns (Organization);
rpc DeleteOrganization(DeleteOrganizationRequest) returns (DeleteOrganizationResponse);
rpc ListOrganizations(ListOrganizationsRequest) returns (ListOrganizationsResponse);
// Project management
rpc CreateProject(CreateProjectRequest) returns (Project);
rpc GetProject(GetProjectRequest) returns (Project);
rpc UpdateProject(UpdateProjectRequest) returns (Project);
rpc DeleteProject(DeleteProjectRequest) returns (DeleteProjectResponse);
rpc ListProjects(ListProjectsRequest) returns (ListProjectsResponse);
// Role management
rpc CreateRole(CreateRoleRequest) returns (Role);
rpc GetRole(GetRoleRequest) returns (Role);
@ -268,6 +282,12 @@ service IamAdmin {
rpc UpdateBinding(UpdateBindingRequest) returns (PolicyBinding);
rpc DeleteBinding(DeleteBindingRequest) returns (DeleteBindingResponse);
rpc ListBindings(ListBindingsRequest) returns (ListBindingsResponse);
// Group membership management
rpc AddGroupMember(AddGroupMemberRequest) returns (AddGroupMemberResponse);
rpc RemoveGroupMember(RemoveGroupMemberRequest) returns (RemoveGroupMemberResponse);
rpc ListGroupMembers(ListGroupMembersRequest) returns (ListGroupMembersResponse);
rpc ListPrincipalGroups(ListPrincipalGroupsRequest) returns (ListPrincipalGroupsResponse);
}
// ----------------------------------------------------------------------------
@ -340,6 +360,95 @@ message ListPrincipalsResponse {
string next_page_token = 2;
}
// ----------------------------------------------------------------------------
// Organization Messages
// ----------------------------------------------------------------------------
message CreateOrganizationRequest {
string id = 1;
string name = 2;
string description = 3;
map<string, string> metadata = 4;
}
message GetOrganizationRequest {
string id = 1;
}
message UpdateOrganizationRequest {
string id = 1;
optional string name = 2;
optional string description = 3;
map<string, string> metadata = 4;
optional bool enabled = 5;
}
message DeleteOrganizationRequest {
string id = 1;
}
message DeleteOrganizationResponse {
bool deleted = 1;
}
message ListOrganizationsRequest {
bool include_disabled = 1;
int32 page_size = 2;
string page_token = 3;
}
message ListOrganizationsResponse {
repeated Organization organizations = 1;
string next_page_token = 2;
}
// ----------------------------------------------------------------------------
// Project Messages
// ----------------------------------------------------------------------------
message CreateProjectRequest {
string id = 1;
string org_id = 2;
string name = 3;
string description = 4;
map<string, string> metadata = 5;
}
message GetProjectRequest {
string org_id = 1;
string id = 2;
}
message UpdateProjectRequest {
string org_id = 1;
string id = 2;
optional string name = 3;
optional string description = 4;
map<string, string> metadata = 5;
optional bool enabled = 6;
}
message DeleteProjectRequest {
string org_id = 1;
string id = 2;
}
message DeleteProjectResponse {
bool deleted = 1;
}
message ListProjectsRequest {
optional string org_id = 1;
bool include_disabled = 2;
int32 page_size = 3;
string page_token = 4;
}
message ListProjectsResponse {
repeated Project projects = 1;
string next_page_token = 2;
}
// ----------------------------------------------------------------------------
// Role Messages
// ----------------------------------------------------------------------------
@ -466,6 +575,50 @@ message ListBindingsResponse {
string next_page_token = 2;
}
// ----------------------------------------------------------------------------
// Group Membership Messages
// ----------------------------------------------------------------------------
message AddGroupMemberRequest {
string group_id = 1;
PrincipalRef principal = 2;
}
message AddGroupMemberResponse {
bool added = 1;
}
message RemoveGroupMemberRequest {
string group_id = 1;
PrincipalRef principal = 2;
}
message RemoveGroupMemberResponse {
bool removed = 1;
}
message ListGroupMembersRequest {
string group_id = 1;
int32 page_size = 2;
string page_token = 3;
}
message ListGroupMembersResponse {
repeated Principal members = 1;
string next_page_token = 2;
}
message ListPrincipalGroupsRequest {
PrincipalRef principal = 1;
int32 page_size = 2;
string page_token = 3;
}
message ListPrincipalGroupsResponse {
repeated Principal groups = 1;
string next_page_token = 2;
}
// ============================================================================
// Common Types
// ============================================================================
@ -497,6 +650,27 @@ message Principal {
bool enabled = 12;
}
// An IAM organization (wire form of the server-side Organization type).
message Organization {
  // Unique organization identifier.
  string id = 1;
  // Human-readable display name.
  string name = 2;
  // Free-form description; may be empty.
  string description = 3;
  // Arbitrary key/value annotations.
  map<string, string> metadata = 4;
  // Creation timestamp; 0 when unset.
  uint64 created_at = 5;
  // Last-update timestamp; 0 when unset.
  uint64 updated_at = 6;
  // Whether the organization is active.
  bool enabled = 7;
}
// An IAM project (wire form of the server-side Project type).
message Project {
  // Project identifier, unique within its organization.
  string id = 1;
  // Identifier of the owning organization.
  string org_id = 2;
  // Human-readable display name.
  string name = 3;
  // Free-form description; may be empty.
  string description = 4;
  // Arbitrary key/value annotations.
  map<string, string> metadata = 5;
  // Creation timestamp; 0 when unset.
  uint64 created_at = 6;
  // Last-update timestamp; 0 when unset.
  uint64 updated_at = 7;
  // Whether the project is active.
  bool enabled = 8;
}
message ResourceRef {
// Resource kind (e.g., "instance", "volume")
string kind = 1;

View file

@ -5,12 +5,14 @@ use std::sync::Arc;
use anyhow::Result;
use iam_client::client::IamClientConfig;
use iam_client::IamClient;
use iam_types::{PolicyBinding, Principal, PrincipalRef, Scope};
pub use iam_service_auth::AuthService;
use iam_types::{PolicyBinding, Principal, PrincipalRef, Scope};
use tonic::metadata::MetadataValue;
use tonic::{Request, Status};
pub use iam_service_auth::{get_tenant_context, resolve_tenant_ids_from_context, resource_for_tenant};
pub use iam_service_auth::{
get_tenant_context, resolve_tenant_ids_from_context, resource_for_tenant,
};
/// gRPC interceptor that authenticates requests and injects tenant context.
pub async fn auth_interceptor(
@ -45,16 +47,23 @@ pub async fn issue_controller_token(
let principal_ref = PrincipalRef::service_account(principal_id);
let principal = match client.get_principal(&principal_ref).await? {
Some(existing) => existing,
None => client
.create_service_account(principal_id, principal_id, project_id)
.await?,
None => {
client
.create_service_account(principal_id, principal_id, org_id, project_id)
.await?
}
};
ensure_project_admin_binding(&client, &principal, org_id, project_id).await?;
let scope = Scope::project(project_id, org_id);
client
.issue_token(&principal, vec!["roles/ProjectAdmin".to_string()], scope, 3600)
.issue_token(
&principal,
vec!["roles/ProjectAdmin".to_string()],
scope,
3600,
)
.await
.map_err(Into::into)
}
@ -70,9 +79,9 @@ async fn ensure_project_admin_binding(
.list_bindings_for_principal(&principal.to_ref())
.await?;
let already_bound = bindings.iter().any(|binding| {
binding.role_ref == "roles/ProjectAdmin" && binding.scope == scope
});
let already_bound = bindings
.iter()
.any(|binding| binding.role_ref == "roles/ProjectAdmin" && binding.scope == scope);
if already_bound {
return Ok(());
}

View file

@ -1,8 +1,9 @@
//! AWS Signature Version 4 authentication for S3 API
//!
//!
//! Implements simplified SigV4 authentication compatible with AWS S3 SDKs and aws-cli.
//! Integrates with IAM for access key validation.
use crate::tenant::TenantContext;
use axum::{
body::{Body, Bytes},
extract::Request,
@ -10,17 +11,16 @@ use axum::{
middleware::Next,
response::{IntoResponse, Response},
};
use crate::tenant::TenantContext;
use hmac::{Hmac, Mac};
use iam_api::proto::{iam_credential_client::IamCredentialClient, GetSecretKeyRequest};
use sha2::{Digest, Sha256};
use std::collections::HashMap;
use std::sync::Arc;
use std::time::{Duration as StdDuration, Instant};
use tokio::sync::{Mutex, RwLock};
use tonic::transport::Channel;
use tonic::{transport::Channel, Request as TonicRequest};
use tracing::{debug, warn};
use url::form_urlencoded;
use std::time::{Duration as StdDuration, Instant};
type HmacSha256 = Hmac<Sha256>;
const DEFAULT_MAX_AUTH_BODY_BYTES: usize = 1024 * 1024 * 1024;
@ -124,13 +124,17 @@ impl IamClient {
if let Ok(creds_str) = std::env::var("S3_CREDENTIALS") {
for pair in creds_str.split(',') {
if let Some((access_key, secret_key)) = pair.split_once(':') {
credentials.insert(access_key.trim().to_string(), secret_key.trim().to_string());
credentials
.insert(access_key.trim().to_string(), secret_key.trim().to_string());
} else {
warn!("Invalid S3_CREDENTIALS format for pair: {}", pair);
}
}
if !credentials.is_empty() {
debug!("Loaded {} S3 credential(s) from S3_CREDENTIALS", credentials.len());
debug!(
"Loaded {} S3 credential(s) from S3_CREDENTIALS",
credentials.len()
);
}
}
@ -264,12 +268,15 @@ impl IamClient {
for attempt in 0..2 {
let grpc_channel = Self::grpc_channel(endpoint, channel).await?;
let mut client = IamCredentialClient::new(grpc_channel);
match client
.get_secret_key(GetSecretKeyRequest {
access_key_id: access_key_id.to_string(),
})
.await
{
let mut request = TonicRequest::new(GetSecretKeyRequest {
access_key_id: access_key_id.to_string(),
});
if let Some(token) = iam_admin_token() {
if let Ok(value) = token.parse() {
request.metadata_mut().insert("x-iam-admin-token", value);
}
}
match client.get_secret_key(request).await {
Ok(response) => return Ok(response),
Err(status)
if attempt == 0
@ -300,6 +307,14 @@ fn normalize_iam_endpoint(endpoint: &str) -> String {
}
}
/// Resolve the IAM admin token from the environment.
///
/// Reads `IAM_ADMIN_TOKEN`, falling back to `PHOTON_IAM_ADMIN_TOKEN` only
/// when the first variable is unset (a set-but-blank first variable does NOT
/// fall through). The value is trimmed, and blank results are treated as absent.
fn iam_admin_token() -> Option<String> {
    let raw = std::env::var("IAM_ADMIN_TOKEN")
        .or_else(|_| std::env::var("PHOTON_IAM_ADMIN_TOKEN"))
        .ok()?;
    let trimmed = raw.trim();
    if trimmed.is_empty() {
        None
    } else {
        Some(trimmed.to_string())
    }
}
impl AuthState {
/// Create new auth state with IAM integration
pub fn new(iam_endpoint: Option<String>) -> Self {
@ -311,8 +326,7 @@ impl AuthState {
.unwrap_or_else(|_| "true".to_string())
.parse()
.unwrap_or(true),
aws_region: std::env::var("AWS_REGION")
.unwrap_or_else(|_| "us-east-1".to_string()), // Default S3 region
aws_region: std::env::var("AWS_REGION").unwrap_or_else(|_| "us-east-1".to_string()), // Default S3 region
aws_service: "s3".to_string(),
}
}
@ -431,7 +445,13 @@ pub async fn sigv4_auth_middleware(
let (parts, body) = request.into_parts();
let body_bytes = match axum::body::to_bytes(body, max_body_bytes).await {
Ok(b) => b,
Err(e) => return error_response(StatusCode::INTERNAL_SERVER_ERROR, "InternalError", &e.to_string()),
Err(e) => {
return error_response(
StatusCode::INTERNAL_SERVER_ERROR,
"InternalError",
&e.to_string(),
)
}
};
request = Request::from_parts(parts, Body::from(body_bytes.clone()));
@ -448,16 +468,13 @@ pub async fn sigv4_auth_middleware(
Bytes::new()
};
let (canonical_request, hashed_payload) = match build_canonical_request(
&method,
&uri,
&headers,
&body_bytes,
&signed_headers_str,
) {
Ok(val) => val,
Err(e) => return error_response(StatusCode::INTERNAL_SERVER_ERROR, "SignatureError", &e),
};
let (canonical_request, hashed_payload) =
match build_canonical_request(&method, &uri, &headers, &body_bytes, &signed_headers_str) {
Ok(val) => val,
Err(e) => {
return error_response(StatusCode::INTERNAL_SERVER_ERROR, "SignatureError", &e)
}
};
debug!(
method = %method,
uri = %uri,
@ -468,11 +485,7 @@ pub async fn sigv4_auth_middleware(
"SigV4 Canonical Request generated"
);
let string_to_sign = build_string_to_sign(
amz_date,
&credential_scope,
&canonical_request,
);
let string_to_sign = build_string_to_sign(amz_date, &credential_scope, &canonical_request);
debug!(
amz_date = %amz_date,
credential_scope = %credential_scope,
@ -559,7 +572,12 @@ fn parse_auth_header(auth_header: &str) -> Result<(String, String, String, Strin
.get("Signature")
.ok_or("Signature not found in Authorization header")?;
Ok((access_key_id.to_string(), full_credential_scope, signed_headers.to_string(), signature.to_string()))
Ok((
access_key_id.to_string(),
full_credential_scope,
signed_headers.to_string(),
signature.to_string(),
))
}
/// Compute the full AWS Signature Version 4.
@ -576,19 +594,10 @@ fn compute_sigv4_signature(
aws_region: &str,
aws_service: &str,
) -> Result<String, String> {
let (canonical_request, _hashed_payload) = build_canonical_request(
method,
uri,
headers,
body_bytes,
signed_headers_str,
)?;
let (canonical_request, _hashed_payload) =
build_canonical_request(method, uri, headers, body_bytes, signed_headers_str)?;
let string_to_sign = build_string_to_sign(
amz_date,
credential_scope,
&canonical_request,
);
let string_to_sign = build_string_to_sign(amz_date, credential_scope, &canonical_request);
let signing_key = get_signing_key(secret_key, amz_date, aws_region, aws_service)?;
@ -612,9 +621,10 @@ fn build_canonical_request(
// Canonical Query String
let canonical_query_string = if uri_parts.len() > 1 {
let mut query_params: Vec<(String, String)> = form_urlencoded::parse(uri_parts[1].as_bytes())
.into_owned()
.collect();
let mut query_params: Vec<(String, String)> =
form_urlencoded::parse(uri_parts[1].as_bytes())
.into_owned()
.collect();
query_params.sort_by(|(k1, _), (k2, _)| k1.cmp(k2));
query_params
.into_iter()
@ -638,10 +648,17 @@ fn build_canonical_request(
let value_str = header_value
.to_str()
.map_err(|_| format!("Invalid header value for {}", header_name))?;
canonical_headers.push_str(&format!("{}:{}
", header_name, value_str.trim()));
canonical_headers.push_str(&format!(
"{}:{}
",
header_name,
value_str.trim()
));
} else {
return Err(format!("Signed header '{}' not found in request", header_name));
return Err(format!(
"Signed header '{}' not found in request",
header_name
));
}
}
@ -756,8 +773,7 @@ fn error_response(status: StatusCode, code: &str, message: &str) -> Response {
<Code>{}</Code>
<Message>{}</Message>
</Error>"###,
code,
message
code, message
);
Response::builder()
@ -780,11 +796,14 @@ mod tests {
};
use std::collections::HashMap;
use std::net::SocketAddr;
use std::sync::{atomic::{AtomicUsize, Ordering}, Mutex};
use std::sync::{
atomic::{AtomicUsize, Ordering},
Mutex,
};
use tokio::net::TcpListener;
use tokio::time::{sleep, Duration};
use tonic::{Request as TonicRequest, Response as TonicResponse, Status};
use tonic::transport::Server;
use tonic::{Request as TonicRequest, Response as TonicResponse, Status};
static ENV_LOCK: Mutex<()> = Mutex::new(());
@ -835,7 +854,9 @@ mod tests {
&self,
_request: TonicRequest<RevokeCredentialRequest>,
) -> Result<TonicResponse<RevokeCredentialResponse>, Status> {
Ok(TonicResponse::new(RevokeCredentialResponse { success: true }))
Ok(TonicResponse::new(RevokeCredentialResponse {
success: true,
}))
}
}
@ -880,8 +901,9 @@ mod tests {
let key = b"key";
let data = "data";
// Verified with: echo -n "data" | openssl dgst -sha256 -mac hmac -macopt key:"key"
let expected = hex::decode("5031fe3d989c6d1537a013fa6e739da23463fdaec3b70137d828e36ace221bd0")
.unwrap();
let expected =
hex::decode("5031fe3d989c6d1537a013fa6e739da23463fdaec3b70137d828e36ace221bd0")
.unwrap();
assert_eq!(hmac_sha256(key, data).unwrap(), expected);
}
@ -911,11 +933,14 @@ mod tests {
assert_eq!(url_encode_path("/foo bar/baz"), "/foo%20bar/baz");
assert_eq!(url_encode_path("/"), "/");
assert_eq!(url_encode_path(""), "/"); // Empty path should be normalized to /
// Test special characters that should be encoded
// Test special characters that should be encoded
assert_eq!(url_encode_path("/my+bucket"), "/my%2Bbucket");
assert_eq!(url_encode_path("/my=bucket"), "/my%3Dbucket");
// Test unreserved characters that should NOT be encoded
assert_eq!(url_encode_path("/my-bucket_test.file~123"), "/my-bucket_test.file~123");
assert_eq!(
url_encode_path("/my-bucket_test.file~123"),
"/my-bucket_test.file~123"
);
}
#[tokio::test]
@ -930,8 +955,7 @@ mod tests {
let signed_headers = "content-type;host;x-amz-date";
let (canonical_request, hashed_payload) =
build_canonical_request(method, uri, &headers, &body, signed_headers)
.unwrap();
build_canonical_request(method, uri, &headers, &body, signed_headers).unwrap();
// Body hash verified with: echo -n "some_body" | sha256sum
let expected_body_hash = "fed42376ceefa4bb65ead687ec9738f6b2329fd78870aaf797bd7194da4228d3";
@ -950,13 +974,15 @@ mod tests {
let mut headers = HeaderMap::new();
headers.insert("host", HeaderValue::from_static("example.com"));
headers.insert("x-amz-date", HeaderValue::from_static("20231201T000000Z"));
headers.insert("x-amz-content-sha256", HeaderValue::from_static("signed-payload-hash"));
headers.insert(
"x-amz-content-sha256",
HeaderValue::from_static("signed-payload-hash"),
);
let body = Bytes::from("different-body");
let signed_headers = "host;x-amz-content-sha256;x-amz-date";
let (canonical_request, hashed_payload) =
build_canonical_request(method, uri, &headers, &body, signed_headers)
.unwrap();
build_canonical_request(method, uri, &headers, &body, signed_headers).unwrap();
assert!(canonical_request.ends_with("\nsigned-payload-hash"));
assert_eq!(hashed_payload, "signed-payload-hash");
@ -980,9 +1006,7 @@ mod tests {
let expected_string_to_sign = format!(
"AWS4-HMAC-SHA256\n{}\n{}\n{}",
amz_date,
credential_scope,
hashed_canonical_request
amz_date, credential_scope, hashed_canonical_request
);
assert_eq!(string_to_sign, expected_string_to_sign);
}
@ -1015,7 +1039,10 @@ mod tests {
let credentials = client.env_credentials().unwrap();
assert_eq!(credentials.len(), 1);
assert_eq!(credentials.get("test_key"), Some(&"test_secret".to_string()));
assert_eq!(
credentials.get("test_key"),
Some(&"test_secret".to_string())
);
std::env::remove_var("S3_ACCESS_KEY_ID");
std::env::remove_var("S3_SECRET_KEY");
@ -1071,7 +1098,10 @@ mod tests {
let signed_headers = "host;x-amz-date";
let mut headers = HeaderMap::new();
headers.insert("host", HeaderValue::from_static("examplebucket.s3.amazonaws.com"));
headers.insert(
"host",
HeaderValue::from_static("examplebucket.s3.amazonaws.com"),
);
headers.insert("x-amz-date", HeaderValue::from_static("20150830T123600Z"));
let body = Bytes::new(); // Empty body for GET
@ -1191,7 +1221,10 @@ mod tests {
.unwrap();
// Signatures MUST be different
assert_ne!(sig1, sig2, "Signatures should differ with different secret keys");
assert_ne!(
sig1, sig2,
"Signatures should differ with different secret keys"
);
}
#[test]
@ -1341,7 +1374,10 @@ mod tests {
.unwrap();
// Signatures MUST be different
assert_ne!(sig1, sig2, "Signatures should differ with different header values");
assert_ne!(
sig1, sig2,
"Signatures should differ with different header values"
);
}
#[test]
@ -1389,7 +1425,10 @@ mod tests {
.unwrap();
// Signatures MUST be different
assert_ne!(sig1, sig2, "Signatures should differ with different query parameters");
assert_ne!(
sig1, sig2,
"Signatures should differ with different query parameters"
);
}
#[test]
@ -1404,7 +1443,10 @@ mod tests {
let credentials = client.env_credentials().unwrap();
// Known key should be found in credentials map
assert_eq!(credentials.get("known_key"), Some(&"known_secret".to_string()));
assert_eq!(
credentials.get("known_key"),
Some(&"known_secret".to_string())
);
// Unknown key should not be found
assert_eq!(credentials.get("unknown_key"), None);

View file

@ -902,6 +902,11 @@ impl QueryableStorage {
#[cfg(test)]
mod tests {
use super::*;
use axum::{
body::{to_bytes, Body},
http::{Method, Request, StatusCode},
};
use tower::ServiceExt;
#[tokio::test]
async fn test_query_service_creation() {
@ -996,6 +1001,43 @@ mod tests {
assert!(values.contains(&"test_job".to_string()));
}
#[tokio::test]
async fn test_label_values_route() {
    // Seed the service with one series so the label-values endpoint has data.
    let service = QueryService::new();
    {
        let mut storage = service.storage.write().await;
        storage.upsert_series(TimeSeries {
            id: SeriesId(1),
            labels: vec![
                Label::new("__name__", "test_metric"),
                Label::new("job", "test_job"),
            ],
            samples: vec![],
        });
    }

    // Drive the HTTP route end-to-end through the router.
    let app: axum::Router = service.router();
    let request = Request::builder()
        .method(Method::GET)
        .uri("/api/v1/label/job/values")
        .body(Body::empty())
        .unwrap();
    let response = app.oneshot(request).await.unwrap();
    assert_eq!(response.status(), StatusCode::OK);

    // The Prometheus-style payload must report success and contain our value.
    let body = to_bytes(response.into_body(), 1024 * 1024).await.unwrap();
    let payload: serde_json::Value = serde_json::from_slice(&body).unwrap();
    assert_eq!(payload["status"], "success");
    let values = payload["data"].as_array().unwrap();
    assert!(values.iter().any(|value| value == "test_job"));
}
#[test]
fn test_persistence_save_load_empty() {
use tempfile::tempdir;

View file

@ -202,6 +202,7 @@ pub struct StorageStats {
#[cfg(test)]
mod tests {
use super::*;
use nightlight_types::Label;
#[test]
fn test_storage_creation() {

View file

@ -521,7 +521,7 @@ let
};
mode = mkOption {
type = types.enum [ "load_balancer" "direct" ];
type = types.enum [ "load_balancer" "direct" "direct_multi" ];
default = "load_balancer";
description = "Whether DNS publishes the load balancer VIP or a direct instance address";
};

View file

@ -62,7 +62,7 @@
PhotonCloud local CI gates (provider-agnostic)
Usage:
photoncloud-gate [--tier 0|1|2] [--workspace <name>] [--shared-crates] [--no-logs] [--fix]
photoncloud-gate [--tier 0|1|2] [--workspace <name>] [--shared-crates] [--shared-crate <name>] [--no-logs] [--fix]
Tiers:
0: fmt + clippy + unit tests (lib) (fast, stable default)
@ -79,6 +79,7 @@
tier="0"
only_ws=""
shared_crates="0"
only_shared_crate=""
no_logs="0"
fix="0"
@ -90,6 +91,8 @@
only_ws="$2"; shift 2;;
--shared-crates)
shared_crates="1"; shift 1;;
--shared-crate)
only_shared_crate="$2"; shift 2;;
--no-logs)
no_logs="1"; shift 1;;
--fix)
@ -159,6 +162,8 @@
export LIBCLANG_PATH="${pkgs.llvmPackages.libclang.lib}/lib";
export PROTOC="${pkgs.protobuf}/bin/protoc"
export ROCKSDB_LIB_DIR="${pkgs.rocksdb}/lib"
export CARGO_TARGET_DIR="$repo_root/work/cargo-target"
mkdir -p "$CARGO_TARGET_DIR"
manifest_has_target_kind() {
local manifest="$1"; shift
@ -211,8 +216,46 @@
fi
}
# Run clippy for one shared crate, restricted to its lib/bin targets so
# integration-only dev-dependencies are not compiled for the fast gate.
# Warns (without failing) when the manifest declares neither target kind.
run_shared_crate_clippy() {
  local crate="$1"; shift
  local manifest="$1"; shift
  local ran_clippy="0"
  local kind flag
  for kind in lib bin; do
    if ! manifest_has_target_kind "$manifest" "$kind"; then
      continue
    fi
    flag="--lib"
    if [[ "$kind" == "bin" ]]; then
      flag="--bins"
    fi
    run_shared_crate_cmd "$crate" "$manifest" "clippy ($kind)" "$CARGO_CLIPPY clippy --manifest-path \"$manifest\" $flag -- -D warnings"
    ran_clippy="1"
  done
  if [[ "$ran_clippy" == "0" ]]; then
    echo "[gate][shared:$crate] WARN: no lib/bin targets for clippy"
  fi
}
# Run clippy across a workspace, restricted to lib/bin targets to keep tier0
# fast (avoids building heavy integration-test-only dev-dependencies).
# Warns (without failing) when the manifest declares neither target kind.
run_workspace_clippy() {
  local ws="$1"; shift
  local manifest="$1"; shift
  local ran_clippy="0"
  local kind flag
  for kind in lib bin; do
    if ! manifest_has_target_kind "$manifest" "$kind"; then
      continue
    fi
    flag="--lib"
    if [[ "$kind" == "bin" ]]; then
      flag="--bins"
    fi
    run_cmd "$ws" "clippy ($kind)" "$CARGO_CLIPPY clippy --workspace $flag -- -D warnings"
    ran_clippy="1"
  done
  if [[ "$ran_clippy" == "0" ]]; then
    echo "[gate][$ws] WARN: no lib/bin targets for clippy"
  fi
}
run_shared_crates() {
local only_crate="$1"
local manifests=()
local selected_count=0
while IFS= read -r manifest; do
manifests+=("$manifest")
done < <(find "$repo_root/crates" -mindepth 2 -maxdepth 2 -name Cargo.toml | sort)
@ -226,8 +269,12 @@
local crate
local ran_unit_tests
crate="$(basename "$(dirname "$manifest")")"
if [[ -n "$only_crate" && "$crate" != "$only_crate" ]]; then
continue
fi
selected_count=$((selected_count + 1))
run_shared_crate_cmd "$crate" "$manifest" "fmt" "$CARGO_FMT fmt --manifest-path \"$manifest\" $fmt_rustfmt_args"
run_shared_crate_cmd "$crate" "$manifest" "clippy" "$CARGO_CLIPPY clippy --manifest-path \"$manifest\" --all-targets -- -D warnings"
run_shared_crate_clippy "$crate" "$manifest"
ran_unit_tests="0"
if manifest_has_target_kind "$manifest" "lib"; then
run_shared_crate_cmd "$crate" "$manifest" "test (tier0 unit lib)" "$CARGO test --manifest-path \"$manifest\" --lib"
@ -249,10 +296,22 @@
run_shared_crate_cmd "$crate" "$manifest" "test (tier2 ignored)" "$CARGO test --manifest-path \"$manifest\" --tests -- --ignored"
fi
done
if [[ -n "$only_crate" && "$selected_count" -eq 0 ]]; then
echo "[gate] ERROR: shared crate not found: $only_crate" >&2
exit 2
fi
}
if [[ "$shared_crates" == "1" ]]; then
run_shared_crates
run_shared_crates "$only_shared_crate"
echo ""
echo "[gate] OK (tier=$tier, shared-crates)"
exit 0
fi
if [[ -n "$only_shared_crate" ]]; then
run_shared_crates "$only_shared_crate"
echo ""
echo "[gate] OK (tier=$tier, shared-crates)"
exit 0
@ -274,8 +333,9 @@
#
# NOTE: Avoid `--all` here; with path-dependencies it may traverse outside the workspace directory.
run_cmd "$ws" "fmt" "$CARGO_FMT fmt $fmt_rustfmt_args"
# Lint gate: call Nix-provided `cargo-clippy` directly (avoid resolving ~/.cargo/bin/cargo-clippy).
run_cmd "$ws" "clippy" "$CARGO_CLIPPY clippy --workspace --all-targets -- -D warnings"
# Tier0 clippy stays on lib/bin targets to avoid dragging in
# heavy integration-only dev-dependencies for every workspace.
run_workspace_clippy "$ws" "$workspace_manifest"
ran_unit_tests="0"
if manifest_has_target_kind "$workspace_manifest" "lib"; then
run_cmd "$ws" "test (tier0 unit lib)" "$CARGO test --workspace --lib"

View file

@ -8,7 +8,6 @@
".github/workflows/nix.yml",
"Cargo.toml",
"Cargo.lock",
"crates/**",
"baremetal/**",
"scripts/**",
"specifications/**",

View file

@ -250,7 +250,7 @@ in
iamPort = lib.mkOption {
type = lib.types.port;
default = 8080;
default = config.services.iam.httpPort;
description = "IAM API port";
};
};
@ -374,7 +374,12 @@ in
# Check if admin user exists
log "INFO" "Checking for existing admin user"
HTTP_CODE=$(${pkgs.curl}/bin/curl -s -o /dev/null -w "%{http_code}" "http://localhost:${toString cfg.iamPort}/api/users/admin" 2>/dev/null || echo "000")
ADMIN_HEADER=()
${lib.optionalString (config.services.iam.adminToken != null) ''
ADMIN_HEADER=(-H "x-iam-admin-token: ${config.services.iam.adminToken}")
''}
HTTP_CODE=$(${pkgs.curl}/bin/curl -s -o /dev/null -w "%{http_code}" "''${ADMIN_HEADER[@]}" "http://localhost:${toString cfg.iamPort}/api/v1/users/admin" 2>/dev/null || echo "000")
if [ "$HTTP_CODE" = "200" ]; then
log "INFO" "Admin user already exists"
@ -383,8 +388,22 @@ in
exit 0
fi
# TODO: Create admin user (requires IAM API implementation)
log "WARN" "Admin user creation not yet implemented (waiting for IAM API)"
log "INFO" "Creating bootstrap admin user"
RESPONSE_FILE=$(mktemp)
HTTP_CODE=$(${pkgs.curl}/bin/curl -s -w "%{http_code}" -o "$RESPONSE_FILE" \
-X POST "http://localhost:${toString cfg.iamPort}/api/v1/users" \
"''${ADMIN_HEADER[@]}" \
-H "Content-Type: application/json" \
-d '{"id":"admin","name":"Bootstrap Admin"}' 2>/dev/null || echo "000")
RESPONSE_BODY=$(cat "$RESPONSE_FILE" 2>/dev/null || echo "")
rm -f "$RESPONSE_FILE"
if [ "$HTTP_CODE" != "201" ] && [ "$HTTP_CODE" != "200" ] && [ "$HTTP_CODE" != "409" ]; then
log "ERROR" "Failed to create admin user: HTTP $HTTP_CODE, response: $RESPONSE_BODY"
exit 1
fi
# Mark as initialized for now
mkdir -p /var/lib/first-boot-automation

View file

@ -81,6 +81,12 @@ in
description = "Data directory for iam";
};
adminToken = lib.mkOption {
type = lib.types.nullOr lib.types.str;
default = null;
description = "Admin token injected as IAM_ADMIN_TOKEN for privileged IAM APIs.";
};
settings = lib.mkOption {
type = lib.types.attrs;
default = {};
@ -127,6 +133,9 @@ in
(lib.mkIf (cfg.storeBackend == "memory") {
IAM_ALLOW_MEMORY_BACKEND = "1";
})
(lib.mkIf (cfg.adminToken != null) {
IAM_ADMIN_TOKEN = cfg.adminToken;
})
];
serviceConfig = {

View file

@ -398,6 +398,14 @@ in
timeoutSecs = 3;
};
};
publish = {
dns = {
zone = "native.cluster.test";
name = "daemon";
ttl = 30;
mode = "direct_multi";
};
};
};
};
};

View file

@ -164,6 +164,7 @@
systemd.services.iam.environment = {
IAM_ALLOW_RANDOM_SIGNING_KEY = "1";
IAM_ALLOW_UNAUTHENTICATED_ADMIN = "true";
};
systemd.services.lightningstor.environment = {

View file

@ -68,5 +68,6 @@
systemd.services.iam.environment = {
IAM_ALLOW_RANDOM_SIGNING_KEY = "1";
IAM_ALLOW_UNAUTHENTICATED_ADMIN = "true";
};
}

View file

@ -68,5 +68,6 @@
systemd.services.iam.environment = {
IAM_ALLOW_RANDOM_SIGNING_KEY = "1";
IAM_ALLOW_UNAUTHENTICATED_ADMIN = "true";
};
}

View file

@ -45,8 +45,15 @@ STORAGE_BENCHMARK_COMMAND="${PHOTON_VM_STORAGE_BENCH_COMMAND:-bench-storage}"
LIGHTNINGSTOR_BENCH_CLIENT_NODE="${PHOTON_VM_LIGHTNINGSTOR_BENCH_CLIENT_NODE:-node06}"
STORAGE_SKIP_PLASMAVMC_IMAGE_BENCH="${PHOTON_VM_SKIP_PLASMAVMC_IMAGE_BENCH:-0}"
STORAGE_SKIP_PLASMAVMC_GUEST_RUNTIME_BENCH="${PHOTON_VM_SKIP_PLASMAVMC_GUEST_RUNTIME_BENCH:-0}"
CLUSTER_NIX_MAX_JOBS="${PHOTON_CLUSTER_NIX_MAX_JOBS:-2}"
CLUSTER_NIX_BUILD_CORES="${PHOTON_CLUSTER_NIX_BUILD_CORES:-4}"
HOST_CPU_COUNT="$(getconf _NPROCESSORS_ONLN 2>/dev/null || nproc 2>/dev/null || echo 4)"
DEFAULT_CLUSTER_NIX_MAX_JOBS=2
DEFAULT_CLUSTER_NIX_BUILD_CORES=4
if [[ "${HOST_CPU_COUNT}" =~ ^[0-9]+$ ]] && (( HOST_CPU_COUNT >= 12 )); then
DEFAULT_CLUSTER_NIX_MAX_JOBS=3
DEFAULT_CLUSTER_NIX_BUILD_CORES=6
fi
CLUSTER_NIX_MAX_JOBS="${PHOTON_CLUSTER_NIX_MAX_JOBS:-${DEFAULT_CLUSTER_NIX_MAX_JOBS}}"
CLUSTER_NIX_BUILD_CORES="${PHOTON_CLUSTER_NIX_BUILD_CORES:-${DEFAULT_CLUSTER_NIX_BUILD_CORES}}"
BUILD_PROFILE="${PHOTON_CLUSTER_BUILD_PROFILE:-default}"
CLUSTER_SKIP_BUILD="${PHOTON_CLUSTER_SKIP_BUILD:-0}"
CLUSTER_LOCK_HELD=0
@ -4894,9 +4901,10 @@ validate_native_runtime_flow() {
}
native_publication_state() {
local service="$1"
native_dump_values "photoncloud/clusters/test-cluster/publications/" \
| sed '/^$/d' \
| jq -sr 'map(select(.service == "native-web")) | first'
| jq -sr --arg service "${service}" 'map(select(.service == $service)) | first'
}
wait_for_native_dns_record() {
@ -4916,6 +4924,33 @@ validate_native_runtime_flow() {
done
}
# Poll DNS on node01 until the A record set for ${fqdn} matches the given
# IPs exactly as a set (order/duplicate insensitive); dies after ${timeout}s.
#   $1    fully-qualified domain name to resolve
#   $2    timeout in seconds
#   $3..  expected IP addresses
wait_for_native_dns_records() {
  local fqdn="$1"
  local timeout="$2"
  shift 2
  local stop_at=$((SECONDS + timeout))
  local want_json got_json
  # Canonicalize expected IPs: drop blanks, de-dup, emit a sorted JSON array.
  want_json="$(printf '%s\n' "$@" | sed '/^$/d' | sort -u | jq -R . | jq -cs 'sort')"
  while :; do
    # Resolve through the cluster resolver on node01 and canonicalize the
    # answer the same way; any ssh/dig failure degrades to an empty set.
    got_json="$(
      ssh_node node01 "dig @127.0.0.1 -p 5353 +short ${fqdn} A" 2>/dev/null \
        | sed '/^$/d' \
        | sort -u \
        | jq -R . \
        | jq -cs 'sort'
    )" || got_json="[]"
    if [[ "${got_json}" == "${want_json}" ]]; then
      return 0
    fi
    if (( SECONDS >= stop_at )); then
      die "timed out waiting for native DNS record set for ${fqdn}: expected ${want_json}, got ${got_json}"
    fi
    sleep 2
  done
}
wait_for_native_lb_backends() {
local pool_id="$1"
local expected_count="$2"
@ -5023,8 +5058,8 @@ validate_native_runtime_flow() {
wait_for_native_dump_count \
"photoncloud/clusters/test-cluster/publications/" \
'map(select(.service == "native-web")) | length' \
"1" \
'map(select(.service == "native-web" or .service == "native-daemon")) | length' \
"2" \
180
iam_tunnel="$(start_ssh_tunnel node01 15080 50080)"
@ -5041,15 +5076,20 @@ validate_native_runtime_flow() {
| jq -e --arg name "${lb_name}" '.loadbalancers | any(.name == $name)' >/dev/null
local publication_value publication_fqdn publication_ip publication_pool_id
publication_value="$(native_publication_state)"
local daemon_publication_value daemon_publication_fqdn
publication_value="$(native_publication_state "native-web")"
publication_fqdn="$(printf '%s' "${publication_value}" | jq -r '.dns.fqdn')"
publication_ip="$(printf '%s' "${publication_value}" | jq -r '.dns.value')"
publication_pool_id="$(printf '%s' "${publication_value}" | jq -r '.load_balancer.pool_id')"
[[ -n "${publication_fqdn}" && "${publication_fqdn}" != "null" ]] || die "native publication missing fqdn"
[[ -n "${publication_ip}" && "${publication_ip}" != "null" ]] || die "native publication missing dns value"
[[ -n "${publication_pool_id}" && "${publication_pool_id}" != "null" ]] || die "native publication missing pool id"
daemon_publication_value="$(native_publication_state "native-daemon")"
daemon_publication_fqdn="$(printf '%s' "${daemon_publication_value}" | jq -r '.dns.fqdn')"
[[ -n "${publication_fqdn}" && "${publication_fqdn}" != "null" ]] || die "native-web publication missing fqdn"
[[ -n "${publication_ip}" && "${publication_ip}" != "null" ]] || die "native-web publication missing dns value"
[[ -n "${publication_pool_id}" && "${publication_pool_id}" != "null" ]] || die "native-web publication missing pool id"
[[ -n "${daemon_publication_fqdn}" && "${daemon_publication_fqdn}" != "null" ]] || die "native-daemon publication missing fqdn"
wait_for_native_dns_record "${publication_fqdn}" "${publication_ip}" 180
wait_for_native_dns_records "${daemon_publication_fqdn}" 180 10.100.0.21 10.100.0.22
wait_for_native_lb_backends "${publication_pool_id}" "2" 180 10.100.0.21 10.100.0.22
log "Draining node04 through deployer lifecycle state"
@ -5101,11 +5141,13 @@ validate_native_runtime_flow() {
wait_for_http node05 "http://10.100.0.22:18192/" 240
wait_for_http node05 "http://10.100.0.22:18193/" 240
wait_for_http node01 "http://127.0.0.1:18191/" 240
publication_value="$(native_publication_state)"
publication_value="$(native_publication_state "native-web")"
publication_pool_id="$(printf '%s' "${publication_value}" | jq -r '.load_balancer.pool_id')"
publication_ip="$(printf '%s' "${publication_value}" | jq -r '.dns.value')"
daemon_publication_value="$(native_publication_state "native-daemon")"
wait_for_native_lb_backends "${publication_pool_id}" "1" 180 10.100.0.22
wait_for_native_dns_record "${publication_fqdn}" "${publication_ip}" 180
wait_for_native_dns_records "${daemon_publication_fqdn}" 180 10.100.0.22
log "Restoring node04 and ensuring capacity returns without moving healthy singleton work"
set_native_node_state "node04" "active"
@ -5147,11 +5189,13 @@ validate_native_runtime_flow() {
restored_container_value="$(wait_for_native_instance_node "native-container" "node05" 240)"
restored_container_node="$(printf '%s' "${restored_container_value}" | jq -r '.node_id')"
[[ "${restored_container_node}" == "node05" ]] || die "native-container unexpectedly moved after node04 returned to service"
publication_value="$(native_publication_state)"
publication_value="$(native_publication_state "native-web")"
publication_pool_id="$(printf '%s' "${publication_value}" | jq -r '.load_balancer.pool_id')"
publication_ip="$(printf '%s' "${publication_value}" | jq -r '.dns.value')"
daemon_publication_value="$(native_publication_state "native-daemon")"
wait_for_native_lb_backends "${publication_pool_id}" "2" 180 10.100.0.21 10.100.0.22
wait_for_native_dns_record "${publication_fqdn}" "${publication_ip}" 180
wait_for_native_dns_records "${daemon_publication_fqdn}" 180 10.100.0.21 10.100.0.22
wait_for_http node01 "http://127.0.0.1:18191/" 240
log "Simulating native worker loss and scheduler failover"
@ -5182,11 +5226,13 @@ validate_native_runtime_flow() {
failover_container_node="$(printf '%s' "${failover_container_value}" | jq -r '.node_id')"
[[ "${failover_container_node}" == "node04" ]] || die "native-container did not fail over to node04 after node05 stopped"
wait_for_native_instance_node "native-daemon" "node04" 240 >/dev/null
publication_value="$(native_publication_state)"
publication_value="$(native_publication_state "native-web")"
publication_pool_id="$(printf '%s' "${publication_value}" | jq -r '.load_balancer.pool_id')"
publication_ip="$(printf '%s' "${publication_value}" | jq -r '.dns.value')"
daemon_publication_value="$(native_publication_state "native-daemon")"
wait_for_native_lb_backends "${publication_pool_id}" "1" 240 10.100.0.21
wait_for_native_dns_record "${publication_fqdn}" "${publication_ip}" 180
wait_for_native_dns_records "${daemon_publication_fqdn}" 180 10.100.0.21
wait_for_http node04 "http://10.100.0.21:18190/" 240
wait_for_http node04 "http://10.100.0.21:18192/" 240
wait_for_http node04 "http://10.100.0.21:18193/" 240
@ -5236,11 +5282,13 @@ validate_native_runtime_flow() {
recovered_container_value="$(wait_for_native_instance_node "native-container" "node04" 240)"
recovered_container_node="$(printf '%s' "${recovered_container_value}" | jq -r '.node_id')"
[[ "${recovered_container_node}" == "node04" ]] || die "native-container unexpectedly churned after node05 recovered"
publication_value="$(native_publication_state)"
publication_value="$(native_publication_state "native-web")"
publication_pool_id="$(printf '%s' "${publication_value}" | jq -r '.load_balancer.pool_id')"
publication_ip="$(printf '%s' "${publication_value}" | jq -r '.dns.value')"
daemon_publication_value="$(native_publication_state "native-daemon")"
wait_for_native_lb_backends "${publication_pool_id}" "2" 180 10.100.0.21 10.100.0.22
wait_for_native_dns_record "${publication_fqdn}" "${publication_ip}" 180
wait_for_native_dns_records "${daemon_publication_fqdn}" 180 10.100.0.21 10.100.0.22
wait_for_http node01 "http://127.0.0.1:18191/" 240
trap - RETURN

View file

@ -126,6 +126,7 @@
systemd.services.iam.environment = {
IAM_ALLOW_RANDOM_SIGNING_KEY = "1";
IAM_ALLOW_UNAUTHENTICATED_ADMIN = "true";
};
systemd.services.lightningstor.environment = {

View file

@ -70,5 +70,6 @@
systemd.services.iam.environment = {
IAM_ALLOW_RANDOM_SIGNING_KEY = "1";
IAM_ALLOW_UNAUTHENTICATED_ADMIN = "true";
};
}

View file

@ -70,5 +70,6 @@
systemd.services.iam.environment = {
IAM_ALLOW_RANDOM_SIGNING_KEY = "1";
IAM_ALLOW_UNAUTHENTICATED_ADMIN = "true";
};
}

View file

@ -783,7 +783,7 @@ impl ArtifactStore {
Some(principal) => principal,
None => self
.iam_client
.create_service_account(&principal_id, &principal_id, project_id)
.create_service_account(&principal_id, &principal_id, org_id, project_id)
.await
.map_err(|e| {
Status::unavailable(format!("failed to create service account: {e}"))

View file

@ -7,13 +7,16 @@ use crate::volume_manager::VolumeManager;
use crate::watcher::StateSink;
use creditservice_client::{Client as CreditServiceClient, ResourceType as CreditResourceType};
use dashmap::DashMap;
use iam_client::IamClient;
use iam_client::client::IamClientConfig;
use iam_client::IamClient;
use iam_service_auth::{
AuthService, get_tenant_context, resolve_tenant_ids_from_context, resource_for_tenant,
get_tenant_context, resolve_tenant_ids_from_context, resource_for_tenant, AuthService,
};
use iam_types::{PolicyBinding, PrincipalRef, Scope};
use plasmavmc_api::proto::{
disk_source::Source as ProtoDiskSourceKind, image_service_server::ImageService,
node_service_server::NodeService, vm_service_client::VmServiceClient,
vm_service_server::VmService, volume_service_server::VolumeService,
Architecture as ProtoArchitecture, AttachDiskRequest, AttachNicRequest, CephRbdBacking,
CordonNodeRequest, CreateImageRequest, CreateVmRequest, CreateVolumeRequest,
DeleteImageRequest, DeleteVmRequest, DeleteVolumeRequest, DetachDiskRequest, DetachNicRequest,
@ -31,9 +34,6 @@ use plasmavmc_api::proto::{
VmState as ProtoVmState, VmStatus as ProtoVmStatus, Volume as ProtoVolume,
VolumeBacking as ProtoVolumeBacking, VolumeDriverKind as ProtoVolumeDriverKind,
VolumeFormat as ProtoVolumeFormat, VolumeStatus as ProtoVolumeStatus, WatchVmRequest,
disk_source::Source as ProtoDiskSourceKind, image_service_server::ImageService,
node_service_server::NodeService, vm_service_client::VmServiceClient,
vm_service_server::VmService, volume_service_server::VolumeService,
};
use plasmavmc_hypervisor::HypervisorRegistry;
use plasmavmc_types::{
@ -374,7 +374,7 @@ impl VmServiceImpl {
Some(principal) => principal,
None => self
.iam_client
.create_service_account(&principal_id, &principal_id, project_id)
.create_service_account(&principal_id, &principal_id, org_id, project_id)
.await
.map_err(|e| {
Status::unavailable(format!("IAM service account create failed: {e}"))
@ -1480,7 +1480,9 @@ impl VmServiceImpl {
return Ok(());
};
let auth_token = self.issue_internal_token(&vm.org_id, &vm.project_id).await?;
let auth_token = self
.issue_internal_token(&vm.org_id, &vm.project_id)
.await?;
let mut client = PrismNETClient::new(endpoint.clone(), auth_token).await?;
for net_spec in &mut vm.spec.network {
@ -1532,7 +1534,9 @@ impl VmServiceImpl {
return Ok(());
};
let auth_token = self.issue_internal_token(&vm.org_id, &vm.project_id).await?;
let auth_token = self
.issue_internal_token(&vm.org_id, &vm.project_id)
.await?;
let mut client = PrismNETClient::new(endpoint.clone(), auth_token).await?;
for net_spec in &vm.spec.network {
@ -1936,7 +1940,10 @@ mod tests {
PRISMNET_VM_DEVICE_TYPE,
prismnet_api::proto::DeviceType::Vm as i32
);
assert_ne!(PRISMNET_VM_DEVICE_TYPE, prismnet_api::proto::DeviceType::None as i32);
assert_ne!(
PRISMNET_VM_DEVICE_TYPE,
prismnet_api::proto::DeviceType::None as i32
);
}
}
@ -2130,7 +2137,10 @@ impl VmService for VmServiceImpl {
{
let mut client = credit_svc.write().await;
if let Err(release_err) = client
.release_reservation(res_id, format!("VM volume preparation failed: {}", error))
.release_reservation(
res_id,
format!("VM volume preparation failed: {}", error),
)
.await
{
tracing::warn!("Failed to release reservation {}: {}", res_id, release_err);
@ -2185,7 +2195,10 @@ impl VmService for VmServiceImpl {
{
let mut client = credit_svc.write().await;
if let Err(release_err) = client
.release_reservation(res_id, format!("VM status failed after creation: {}", error))
.release_reservation(
res_id,
format!("VM status failed after creation: {}", error),
)
.await
{
tracing::warn!("Failed to release reservation {}: {}", res_id, release_err);

View file

@ -4,6 +4,7 @@ import argparse
import fnmatch
import json
from pathlib import Path
import tomllib
from typing import Any
@ -24,26 +25,149 @@ def matches_any(path: str, patterns: list[str]) -> bool:
return any(fnmatch.fnmatchcase(path, pattern) for pattern in patterns)
def detect_changes(config: dict[str, Any], changed_files: list[str]) -> dict[str, Any]:
def workspace_root(workspace: dict[str, Any]) -> str:
    """Return the workspace's top-level directory.

    Takes the first path pattern whose leading component is a literal
    (non-empty, wildcard-free) directory name.

    Raises:
        ValueError: when every pattern starts with a wildcard component.
    """
    for pattern in workspace["paths"]:
        head = pattern.split("/", 1)[0]
        if head and not any(ch in head for ch in "*?"):
            return head
    raise ValueError(f"Could not infer workspace root for {workspace['name']}")
def collect_path_dependencies(obj: Any) -> list[str]:
    """Recursively gather every string stored under a dict key ``path``.

    Walks nested dicts and lists (parsed Cargo.toml data) and returns the
    ``path`` values in document order; non-string ``path`` values are ignored.
    """
    found: list[str] = []
    if isinstance(obj, dict):
        candidate = obj.get("path")
        if isinstance(candidate, str):
            found.append(candidate)
        children = list(obj.values())
    elif isinstance(obj, list):
        children = list(obj)
    else:
        children = []
    for child in children:
        found.extend(collect_path_dependencies(child))
    return found
def build_nodes(config: dict[str, Any], repo_root: Path) -> dict[str, dict[str, Any]]:
    """Index every CI node by name.

    Declared workspaces become ``kind: workspace`` nodes rooted at their
    inferred top-level directory; each ``crates/<name>/Cargo.toml`` becomes a
    ``kind: shared_crate`` node keyed ``crate:<name>``.
    """
    nodes: dict[str, dict[str, Any]] = {
        ws["name"]: {
            "name": ws["name"],
            "kind": "workspace",
            "root": repo_root / workspace_root(ws),
        }
        for ws in config["workspaces"]
    }
    crates_dir = repo_root / "crates"
    if crates_dir.is_dir():
        for manifest in sorted(crates_dir.glob("*/Cargo.toml")):
            crate_name = manifest.parent.name
            nodes[f"crate:{crate_name}"] = {
                "name": crate_name,
                "kind": "shared_crate",
                "root": manifest.parent,
            }
    return nodes
def resolve_node_for_path(path: Path, nodes: dict[str, dict[str, Any]]) -> str | None:
for node_name, node in sorted(
nodes.items(),
key=lambda item: len(str(item[1]["root"])),
reverse=True,
):
try:
path.relative_to(node["root"])
except ValueError:
continue
return node_name
return None
def build_dependency_graph(
    nodes: dict[str, dict[str, Any]],
) -> dict[str, set[str]]:
    """Compute each node's set of dependency nodes.

    Scans every ``Cargo.toml`` under each node's root and maps its ``path``
    dependencies back to known nodes; self-references and paths outside any
    node are ignored.
    """
    graph: dict[str, set[str]] = {name: set() for name in nodes}
    for name, node in nodes.items():
        for manifest in node["root"].rglob("Cargo.toml"):
            parsed = tomllib.loads(manifest.read_text())
            for dep_path in collect_path_dependencies(parsed):
                resolved = (manifest.parent / dep_path).resolve()
                target = resolve_node_for_path(resolved, nodes)
                # Skip unknown locations and edges back to the same node.
                if target is not None and target != name:
                    graph[name].add(target)
    return graph
def reverse_graph(graph: dict[str, set[str]]) -> dict[str, set[str]]:
    """Invert a dependency graph: edges run from dependency to dependents."""
    inverted: dict[str, set[str]] = {name: set() for name in graph}
    for name, dependencies in graph.items():
        for dependency in dependencies:
            inverted[dependency].add(name)
    return inverted
def impacted_nodes(
    changed_nodes: set[str],
    reversed_dependencies: dict[str, set[str]],
) -> set[str]:
    """Expand the changed set to everything transitively depending on it.

    Walks the reversed dependency graph from each changed node and returns
    the changed nodes plus all direct and indirect dependents.
    """
    selected = set(changed_nodes)
    frontier = list(changed_nodes)
    while frontier:
        node = frontier.pop()
        new_dependents = reversed_dependencies[node] - selected
        selected.update(new_dependents)
        frontier.extend(new_dependents)
    return selected
def detect_changes(
config: dict[str, Any],
changed_files: list[str],
repo_root: Path,
) -> dict[str, Any]:
workspaces: list[dict[str, Any]] = config["workspaces"]
all_workspace_names = [workspace["name"] for workspace in workspaces]
nodes = build_nodes(config, repo_root)
dependency_graph = build_dependency_graph(nodes)
reversed_dependencies = reverse_graph(dependency_graph)
global_changed = any(
matches_any(path, config["global_paths"])
for path in changed_files
)
shared_crates_changed = any(
matches_any(path, config["shared_crates_paths"])
directly_changed_nodes = {
node_name
for path in changed_files
for node_name in [resolve_node_for_path((repo_root / path).resolve(), nodes)]
if node_name is not None
}
shared_crates = sorted(
nodes[node_name]["name"]
for node_name in directly_changed_nodes
if nodes[node_name]["kind"] == "shared_crate"
)
shared_crates_changed = bool(shared_crates)
if global_changed:
changed_workspaces = all_workspace_names
else:
selected_nodes = impacted_nodes(directly_changed_nodes, reversed_dependencies)
changed_workspaces = [
workspace["name"]
for workspace in workspaces
if any(matches_any(path, workspace["paths"]) for path in changed_files)
if workspace["name"] in selected_nodes
]
selected_workspaces = set(changed_workspaces)
@ -70,6 +194,7 @@ def detect_changes(config: dict[str, Any], changed_files: list[str]) -> dict[str
"any_changed": global_changed or bool(changed_workspaces),
"build_changed": bool(build_targets),
"global_changed": global_changed,
"shared_crates": shared_crates,
"shared_crates_changed": shared_crates_changed,
}
@ -81,6 +206,7 @@ def write_github_output(path: Path, result: dict[str, Any]) -> None:
"any_changed": str(result["any_changed"]).lower(),
"build_changed": str(result["build_changed"]).lower(),
"global_changed": str(result["global_changed"]).lower(),
"shared_crates": json.dumps(result["shared_crates"], separators=(",", ":")),
"shared_crates_changed": str(result["shared_crates_changed"]).lower(),
}
@ -119,9 +245,11 @@ def parse_args() -> argparse.Namespace:
def main() -> int:
args = parse_args()
config = json.loads(Path(args.config).read_text())
config_path = Path(args.config).resolve()
repo_root = config_path.parents[2]
config = json.loads(config_path.read_text())
changed_files = load_changed_files(args)
result = detect_changes(config, changed_files)
result = detect_changes(config, changed_files, repo_root)
if args.github_output:
write_github_output(Path(args.github_output), result)