From e1a5d394e540d2e24d2b1dd4fd9bcc1d1aae774c Mon Sep 17 00:00:00 2001
From: centra
Date: Sat, 28 Mar 2026 00:09:22 +0900
Subject: [PATCH] ci: unify workspace inventory and harden tier0 gating
---
.github/workflows/nix.yml | 102 +++++------
client-common/src/lib.rs | 26 ++-
coronafs/crates/coronafs-server/src/config.rs | 18 +-
coronafs/crates/coronafs-server/src/main.rs | 49 +++---
mtls-agent/src/client.rs | 24 +--
mtls-agent/src/discovery.rs | 23 +--
mtls-agent/src/main.rs | 48 ++---
mtls-agent/src/policy.rs | 2 +
nix/ci/flake.nix | 61 +++++--
nix/ci/workspaces.json | 166 ++++++++++++++++++
scripts/ci_changed_workspaces.py | 134 ++++++++++++++
11 files changed, 493 insertions(+), 160 deletions(-)
create mode 100644 nix/ci/workspaces.json
create mode 100644 scripts/ci_changed_workspaces.py
diff --git a/.github/workflows/nix.yml b/.github/workflows/nix.yml
index c8354ae..9f87315 100644
--- a/.github/workflows/nix.yml
+++ b/.github/workflows/nix.yml
@@ -2,65 +2,71 @@ name: Nix CI
on:
push:
- branches: [ master ]
+ branches: [ main, master ]
pull_request:
- branches: [ master ]
+ branches: [ main, master ]
+ workflow_dispatch:
jobs:
# Detect which workspaces have changed to save CI minutes
filter:
runs-on: ubuntu-latest
outputs:
- workspaces: ${{ steps.filter.outputs.changes }}
- any_changed: ${{ steps.filter.outputs.workspaces_any_changed }}
- global_changed: ${{ steps.filter.outputs.global }}
- shared_crates_changed: ${{ steps.filter.outputs.shared_crates }}
+ workspaces: ${{ steps.detect.outputs.workspaces }}
+ build_targets: ${{ steps.detect.outputs.build_targets }}
+ any_changed: ${{ steps.detect.outputs.any_changed }}
+ build_changed: ${{ steps.detect.outputs.build_changed }}
+ global_changed: ${{ steps.detect.outputs.global_changed }}
+ shared_crates_changed: ${{ steps.detect.outputs.shared_crates_changed }}
steps:
- uses: actions/checkout@v4
- - uses: dorny/paths-filter@v3
- id: filter
with:
- filters: |
- global:
- - 'flake.nix'
- - 'flake.lock'
- - 'nix/**'
- - 'nix-nos/**'
- - '.github/workflows/nix.yml'
- - 'Cargo.toml'
- - 'Cargo.lock'
- - 'crates/**'
- - 'client-common/**'
- - 'baremetal/**'
- - 'scripts/**'
- - 'specifications/**'
- - 'docs/**'
- shared_crates: 'crates/**'
- chainfire: 'chainfire/**'
- flaredb: 'flaredb/**'
- iam: 'iam/**'
- plasmavmc: 'plasmavmc/**'
- prismnet: 'prismnet/**'
- flashdns: 'flashdns/**'
- fiberlb: 'fiberlb/**'
- lightningstor: 'lightningstor/**'
- nightlight: 'nightlight/**'
- creditservice: 'creditservice/**'
- k8shost: 'k8shost/**'
- apigateway: 'apigateway/**'
- deployer: 'deployer/**'
+ fetch-depth: 0
+
+ - name: Collect changed files
+ env:
+ EVENT_NAME: ${{ github.event_name }}
+ BASE_REF: ${{ github.base_ref }}
+ BEFORE_SHA: ${{ github.event.before }}
+ HEAD_SHA: ${{ github.sha }}
+ run: |
+ set -euo pipefail
+
+ if [[ "$EVENT_NAME" == "workflow_dispatch" ]]; then
+ printf 'flake.nix\n' > changed-files.txt
+ elif [[ "$EVENT_NAME" == "pull_request" ]]; then
+ git fetch --no-tags --depth=1 origin "$BASE_REF"
+ git diff --name-only "origin/$BASE_REF...$HEAD_SHA" > changed-files.txt
+ elif [[ "$BEFORE_SHA" == "0000000000000000000000000000000000000000" ]]; then
+ git diff-tree --no-commit-id --name-only -r "$HEAD_SHA" > changed-files.txt
+ else
+ git diff --name-only "$BEFORE_SHA" "$HEAD_SHA" > changed-files.txt
+ fi
+
+ if [[ ! -f changed-files.txt ]]; then
+ : > changed-files.txt
+ fi
+
+ sed -n '1,200p' changed-files.txt
+
+ - name: Detect changed workspaces
+ id: detect
+ run: |
+ python3 scripts/ci_changed_workspaces.py \
+ --config nix/ci/workspaces.json \
+ --changed-files-file changed-files.txt \
+ --github-output "$GITHUB_OUTPUT"
# Run CI gates for changed workspaces
# Uses the provider-agnostic 'photoncloud-gate' defined in nix/ci/flake.nix
gate:
needs: filter
- if: ${{ needs.filter.outputs.any_changed == 'true' || needs.filter.outputs.global_changed == 'true' }}
+ if: ${{ needs.filter.outputs.any_changed == 'true' }}
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
- # If global files changed, run all. Otherwise run only changed ones.
- workspace: ${{ fromJSON(needs.filter.outputs.global_changed == 'true' && '["chainfire", "flaredb", "iam", "plasmavmc", "prismnet", "flashdns", "fiberlb", "lightningstor", "nightlight", "creditservice", "k8shost", "apigateway", "deployer"]' || needs.filter.outputs.workspaces) }}
+ workspace: ${{ fromJSON(needs.filter.outputs.workspaces) }}
name: gate (${{ matrix.workspace }})
steps:
- uses: actions/checkout@v4
@@ -88,26 +94,20 @@ jobs:
# Build server packages (tier 1+)
build:
needs: [filter, gate]
- if: ${{ github.event_name == 'push' && github.ref == 'refs/heads/master' }}
+ if: ${{ github.event_name == 'push' && (github.ref == 'refs/heads/main' || github.ref == 'refs/heads/master') && needs.filter.outputs.build_changed == 'true' }}
runs-on: ubuntu-latest
strategy:
matrix:
- workspace: ${{ fromJSON(needs.filter.outputs.global_changed == 'true' && '["chainfire", "flaredb", "iam", "plasmavmc", "prismnet", "flashdns", "fiberlb", "lightningstor", "nightlight", "creditservice", "k8shost", "apigateway", "deployer"]' || needs.filter.outputs.workspaces) }}
- name: build (${{ matrix.workspace }})
+ target: ${{ fromJSON(needs.filter.outputs.build_targets) }}
+ name: build (${{ matrix.target.package }})
steps:
- uses: actions/checkout@v4
- uses: DeterminateSystems/nix-installer-action@v11
- uses: DeterminateSystems/magic-nix-cache-action@v8
- - name: Build server
+ - name: Build package
run: |
- # Only build if the workspace has a corresponding package in flake.nix
- # We check if it exists before building to avoid failure on non-package workspaces
- if nix flake show --json | jq -e ".packages.\"x86_64-linux\".\"${{ matrix.workspace }}-server\"" > /dev/null; then
- nix build .#${{ matrix.workspace }}-server --accept-flake-config
- else
- echo "No server package found for ${{ matrix.workspace }}, skipping build."
- fi
+ nix build .#${{ matrix.target.package }} --accept-flake-config
# Summary job for PR status checks
ci-status:
diff --git a/client-common/src/lib.rs b/client-common/src/lib.rs
index 9458b42..8ca690f 100644
--- a/client-common/src/lib.rs
+++ b/client-common/src/lib.rs
@@ -1,10 +1,10 @@
//! Shared client config types (endpoint/auth/retry) for PhotonCloud SDKs.
-//!
+//!
//! Lightweight, type-only helpers to keep SDK crates consistent without
//! forcing a unified SDK dependency tree.
-use std::time::Duration;
use backoff::ExponentialBackoffBuilder;
+use std::time::Duration;
use thiserror::Error;
use tonic::codegen::InterceptedService;
use tonic::service::Interceptor;
@@ -70,7 +70,9 @@ impl EndpointConfig {
key.clone(),
));
}
- ep = ep.tls_config(cfg).map_err(|e| ClientError::TlsConfig(e.to_string()))?;
+ ep = ep
+ .tls_config(cfg)
+ .map_err(|e| ClientError::TlsConfig(e.to_string()))?;
}
Ok(ep)
@@ -96,7 +98,9 @@ pub enum AuthConfig {
impl AuthConfig {
pub fn bearer(token: impl Into<String>) -> Self {
- Self::Bearer { token: token.into() }
+ Self::Bearer {
+ token: token.into(),
+ }
}
}
@@ -157,12 +161,13 @@ impl Interceptor for AuthInterceptor {
match &self.0 {
AuthConfig::None => {}
AuthConfig::Bearer { token } => {
- req.metadata_mut()
- .insert("authorization", format!("Bearer {}", token).parse().unwrap());
+ req.metadata_mut().insert(
+ "authorization",
+ format!("Bearer {}", token).parse().unwrap(),
+ );
}
AuthConfig::AccessKey { id, secret } => {
- req.metadata_mut()
- .insert("x-api-key", id.parse().unwrap());
+ req.metadata_mut().insert("x-api-key", id.parse().unwrap());
req.metadata_mut()
.insert("x-api-secret", secret.parse().unwrap());
}
@@ -180,7 +185,10 @@ pub fn auth_interceptor(auth: &AuthConfig) -> Option<AuthInterceptor> {
}
/// Helper to wrap a tonic client with an interceptor when auth is provided.
-pub fn with_auth(channel: Channel, auth: &AuthConfig) -> InterceptedService<Channel, AuthInterceptor> {
+pub fn with_auth(
+ channel: Channel,
+ auth: &AuthConfig,
+) -> InterceptedService<Channel, AuthInterceptor> {
let interceptor = auth_interceptor(auth).unwrap_or(AuthInterceptor(AuthConfig::None));
InterceptedService::new(channel, interceptor)
}
diff --git a/coronafs/crates/coronafs-server/src/config.rs b/coronafs/crates/coronafs-server/src/config.rs
index fb98da7..7746f83 100644
--- a/coronafs/crates/coronafs-server/src/config.rs
+++ b/coronafs/crates/coronafs-server/src/config.rs
@@ -2,33 +2,23 @@ use serde::{Deserialize, Serialize};
use std::net::SocketAddr;
use std::path::PathBuf;
-#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
+#[derive(Debug, Clone, Copy, Default, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum ServerMode {
+ #[default]
Combined,
Controller,
Node,
}
-impl Default for ServerMode {
- fn default() -> Self {
- Self::Combined
- }
-}
-
-#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
+#[derive(Debug, Clone, Copy, Default, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum MetadataBackend {
+ #[default]
Filesystem,
Chainfire,
}
-impl Default for MetadataBackend {
- fn default() -> Self {
- Self::Filesystem
- }
-}
-
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(default)]
pub struct ServerConfig {
diff --git a/coronafs/crates/coronafs-server/src/main.rs b/coronafs/crates/coronafs-server/src/main.rs
index d819866..6b29928 100644
--- a/coronafs/crates/coronafs-server/src/main.rs
+++ b/coronafs/crates/coronafs-server/src/main.rs
@@ -123,19 +123,14 @@ struct AppState {
reserved_ports: Arc<Mutex<HashSet<u16>>>,
}
-#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq)]
+#[derive(Debug, Clone, Copy, Default, Serialize, Deserialize, PartialEq, Eq)]
#[serde(rename_all = "lowercase")]
enum VolumeFileFormat {
+ #[default]
Raw,
Qcow2,
}
-impl Default for VolumeFileFormat {
- fn default() -> Self {
- Self::Raw
- }
-}
-
impl VolumeFileFormat {
fn as_qemu_arg(self) -> &'static str {
match self {
@@ -871,13 +866,12 @@ async fn materialize_impl(
return load_response_required(state, id).await;
}
- let format = req.format.unwrap_or_else(|| {
- if req.lazy {
- VolumeFileFormat::Qcow2
- } else {
- VolumeFileFormat::Raw
- }
- });
+ let default_format = if req.lazy {
+ VolumeFileFormat::Qcow2
+ } else {
+ VolumeFileFormat::Raw
+ };
+ let format = req.format.unwrap_or(default_format);
let temp_path = temp_create_path(&state.config, id);
if fs::try_exists(&temp_path).await.unwrap_or(false) {
let _ = fs::remove_file(&temp_path).await;
@@ -1103,10 +1097,7 @@ async fn delete_impl(state: &AppState, id: &str) -> Result<()> {
}
async fn load_response(state: &AppState, id: &str) -> Option {
- match load_response_required(state, id).await {
- Ok(response) => Some(response),
- Err(_) => None,
- }
+ load_response_required(state, id).await.ok()
}
async fn list_volume_responses(state: &AppState) -> Result> {
@@ -1517,8 +1508,10 @@ mod tests {
#[test]
fn export_probe_host_prefers_loopback_for_wildcard_bind() {
- let mut config = ServerConfig::default();
- config.export_bind_addr = "0.0.0.0".to_string();
+ let mut config = ServerConfig {
+ export_bind_addr: "0.0.0.0".to_string(),
+ ..ServerConfig::default()
+ };
assert_eq!(export_probe_host(&config), "127.0.0.1");
config.export_bind_addr = "10.100.0.11".to_string();
assert_eq!(export_probe_host(&config), "10.100.0.11");
@@ -1650,6 +1643,13 @@ mod tests {
assert_ne!(next, preferred);
release_export_port(&state, Some(port)).await;
+ release_export_port(&state, Some(next)).await;
+ for _ in 0..10 {
+ if port_is_usable(&state.config, &HashSet::new(), preferred).await {
+ break;
+ }
+ tokio::time::sleep(std::time::Duration::from_millis(10)).await;
+ }
let reused = reserve_export_port(&state, Some(preferred)).await.unwrap();
assert_eq!(reused, preferred);
}
@@ -1760,7 +1760,7 @@ mod tests {
&state,
"vol-a",
CreateVolumeRequest {
- size_bytes: 1 * 1024 * 1024,
+ size_bytes: 1024 * 1024,
format: Some(VolumeFileFormat::Raw),
backing_file: None,
backing_format: None,
@@ -2275,10 +2275,11 @@ mod tests {
#[test]
fn volume_create_api_is_available_in_node_mode() {
- let mut config = ServerConfig::default();
- config.mode = ServerMode::Node;
let state = AppState {
- config: Arc::new(config),
+ config: Arc::new(ServerConfig {
+ mode: ServerMode::Node,
+ ..ServerConfig::default()
+ }),
metadata_store: MetadataStore::Filesystem,
volume_guards: Arc::new(Mutex::new(HashMap::new())),
reserved_ports: Arc::new(Mutex::new(HashSet::new())),
diff --git a/mtls-agent/src/client.rs b/mtls-agent/src/client.rs
index 1d2d4b9..65f99e8 100644
--- a/mtls-agent/src/client.rs
+++ b/mtls-agent/src/client.rs
@@ -1,7 +1,12 @@
+#![allow(dead_code)]
+
use std::sync::Arc;
use anyhow::{Context, Result};
-use rustls::{pki_types::{PrivateKeyDer, ServerName}, ClientConfig, RootCertStore};
+use rustls::{
+ pki_types::{PrivateKeyDer, ServerName},
+ ClientConfig, RootCertStore,
+};
use rustls_pemfile::certs;
use std::fs;
use std::io::BufReader;
@@ -15,7 +20,7 @@ use crate::discovery::ServiceDiscovery;
pub enum MtlsStream {
Plain(TcpStream),
- Tls(tokio_rustls::client::TlsStream<TcpStream>),
+ Tls(Box<tokio_rustls::client::TlsStream<TcpStream>>),
}
impl AsyncRead for MtlsStream {
@@ -26,7 +31,7 @@ impl AsyncRead for MtlsStream {
) -> Poll<io::Result<()>> {
match self.get_mut() {
MtlsStream::Plain(stream) => Pin::new(stream).poll_read(cx, buf),
- MtlsStream::Tls(stream) => Pin::new(stream).poll_read(cx, buf),
+ MtlsStream::Tls(stream) => Pin::new(stream.as_mut()).poll_read(cx, buf),
}
}
}
@@ -39,24 +44,21 @@ impl AsyncWrite for MtlsStream {
) -> Poll<io::Result<usize>> {
match self.get_mut() {
MtlsStream::Plain(stream) => Pin::new(stream).poll_write(cx, data),
- MtlsStream::Tls(stream) => Pin::new(stream).poll_write(cx, data),
+ MtlsStream::Tls(stream) => Pin::new(stream.as_mut()).poll_write(cx, data),
}
}
fn poll_flush(self: Pin<&mut Self>, cx: &mut TaskContext<'_>) -> Poll<io::Result<()>> {
match self.get_mut() {
MtlsStream::Plain(stream) => Pin::new(stream).poll_flush(cx),
- MtlsStream::Tls(stream) => Pin::new(stream).poll_flush(cx),
+ MtlsStream::Tls(stream) => Pin::new(stream.as_mut()).poll_flush(cx),
}
}
- fn poll_shutdown(
- self: Pin<&mut Self>,
- cx: &mut TaskContext<'_>,
- ) -> Poll<io::Result<()>> {
+ fn poll_shutdown(self: Pin<&mut Self>, cx: &mut TaskContext<'_>) -> Poll<io::Result<()>> {
match self.get_mut() {
MtlsStream::Plain(stream) => Pin::new(stream).poll_shutdown(cx),
- MtlsStream::Tls(stream) => Pin::new(stream).poll_shutdown(cx),
+ MtlsStream::Tls(stream) => Pin::new(stream.as_mut()).poll_shutdown(cx),
}
}
}
@@ -109,7 +111,7 @@ impl MtlsClient {
let server_name = ServerName::try_from(service_name.to_string())
.context("invalid server name for TLS")?;
let tls_stream = connector.connect(server_name, stream).await?;
- return Ok(MtlsStream::Tls(tls_stream));
+ return Ok(MtlsStream::Tls(Box::new(tls_stream)));
}
Ok(MtlsStream::Plain(stream))
diff --git a/mtls-agent/src/discovery.rs b/mtls-agent/src/discovery.rs
index 902a51a..fbbd6d6 100644
--- a/mtls-agent/src/discovery.rs
+++ b/mtls-agent/src/discovery.rs
@@ -1,3 +1,5 @@
+#![allow(dead_code)]
+
use std::collections::HashMap;
use std::sync::Arc;
use std::time::{Duration, Instant};
@@ -96,7 +98,10 @@ impl ServiceDiscovery {
Ok(instances)
}
- async fn fetch_instances_from_chainfire(&self, service_name: &str) -> Result> {
+ async fn fetch_instances_from_chainfire(
+ &self,
+ service_name: &str,
+ ) -> Result> {
let mut client = Client::connect(self.chainfire_endpoint.clone()).await?;
let prefix = format!(
"{}instances/{}/",
@@ -136,10 +141,7 @@ impl ServiceDiscovery {
source_service: &str,
target_service: &str,
) -> Result