ci: unify workspace inventory and harden tier0 gating
Some checks failed
Nix CI / filter (push) Successful in 54s
Nix CI / gate (shared crates) (push) Has been skipped
Nix CI / gate () (push) Failing after 6s
Nix CI / build () (push) Has been skipped
Nix CI / ci-status (push) Failing after 1m14s

This commit is contained in:
centra 2026-03-28 00:09:22 +09:00
parent 290c6ba88a
commit e1a5d394e5
Signed by: centra
GPG key ID: 0C09689D20B25ACA
11 changed files with 493 additions and 160 deletions

View file

@ -2,65 +2,71 @@ name: Nix CI
on:
push:
branches: [ master ]
branches: [ main, master ]
pull_request:
branches: [ master ]
branches: [ main, master ]
workflow_dispatch:
jobs:
# Detect which workspaces have changed to save CI minutes
filter:
runs-on: ubuntu-latest
outputs:
workspaces: ${{ steps.filter.outputs.changes }}
any_changed: ${{ steps.filter.outputs.workspaces_any_changed }}
global_changed: ${{ steps.filter.outputs.global }}
shared_crates_changed: ${{ steps.filter.outputs.shared_crates }}
workspaces: ${{ steps.detect.outputs.workspaces }}
build_targets: ${{ steps.detect.outputs.build_targets }}
any_changed: ${{ steps.detect.outputs.any_changed }}
build_changed: ${{ steps.detect.outputs.build_changed }}
global_changed: ${{ steps.detect.outputs.global_changed }}
shared_crates_changed: ${{ steps.detect.outputs.shared_crates_changed }}
steps:
- uses: actions/checkout@v4
- uses: dorny/paths-filter@v3
id: filter
with:
filters: |
global:
- 'flake.nix'
- 'flake.lock'
- 'nix/**'
- 'nix-nos/**'
- '.github/workflows/nix.yml'
- 'Cargo.toml'
- 'Cargo.lock'
- 'crates/**'
- 'client-common/**'
- 'baremetal/**'
- 'scripts/**'
- 'specifications/**'
- 'docs/**'
shared_crates: 'crates/**'
chainfire: 'chainfire/**'
flaredb: 'flaredb/**'
iam: 'iam/**'
plasmavmc: 'plasmavmc/**'
prismnet: 'prismnet/**'
flashdns: 'flashdns/**'
fiberlb: 'fiberlb/**'
lightningstor: 'lightningstor/**'
nightlight: 'nightlight/**'
creditservice: 'creditservice/**'
k8shost: 'k8shost/**'
apigateway: 'apigateway/**'
deployer: 'deployer/**'
fetch-depth: 0
- name: Collect changed files
env:
EVENT_NAME: ${{ github.event_name }}
BASE_REF: ${{ github.base_ref }}
BEFORE_SHA: ${{ github.event.before }}
HEAD_SHA: ${{ github.sha }}
run: |
set -euo pipefail
if [[ "$EVENT_NAME" == "workflow_dispatch" ]]; then
printf 'flake.nix\n' > changed-files.txt
elif [[ "$EVENT_NAME" == "pull_request" ]]; then
git fetch --no-tags --depth=1 origin "$BASE_REF"
git diff --name-only "origin/$BASE_REF...$HEAD_SHA" > changed-files.txt
elif [[ "$BEFORE_SHA" == "0000000000000000000000000000000000000000" ]]; then
git diff-tree --no-commit-id --name-only -r "$HEAD_SHA" > changed-files.txt
else
git diff --name-only "$BEFORE_SHA" "$HEAD_SHA" > changed-files.txt
fi
if [[ ! -f changed-files.txt ]]; then
: > changed-files.txt
fi
sed -n '1,200p' changed-files.txt
- name: Detect changed workspaces
id: detect
run: |
python3 scripts/ci_changed_workspaces.py \
--config nix/ci/workspaces.json \
--changed-files-file changed-files.txt \
--github-output "$GITHUB_OUTPUT"
# Run CI gates for changed workspaces
# Uses the provider-agnostic 'photoncloud-gate' defined in nix/ci/flake.nix
gate:
needs: filter
if: ${{ needs.filter.outputs.any_changed == 'true' || needs.filter.outputs.global_changed == 'true' }}
if: ${{ needs.filter.outputs.any_changed == 'true' }}
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
# If global files changed, run all. Otherwise run only changed ones.
workspace: ${{ fromJSON(needs.filter.outputs.global_changed == 'true' && '["chainfire", "flaredb", "iam", "plasmavmc", "prismnet", "flashdns", "fiberlb", "lightningstor", "nightlight", "creditservice", "k8shost", "apigateway", "deployer"]' || needs.filter.outputs.workspaces) }}
workspace: ${{ fromJSON(needs.filter.outputs.workspaces) }}
name: gate (${{ matrix.workspace }})
steps:
- uses: actions/checkout@v4
@ -88,26 +94,20 @@ jobs:
# Build server packages (tier 1+)
build:
needs: [filter, gate]
if: ${{ github.event_name == 'push' && github.ref == 'refs/heads/master' }}
if: ${{ github.event_name == 'push' && (github.ref == 'refs/heads/main' || github.ref == 'refs/heads/master') && needs.filter.outputs.build_changed == 'true' }}
runs-on: ubuntu-latest
strategy:
matrix:
workspace: ${{ fromJSON(needs.filter.outputs.global_changed == 'true' && '["chainfire", "flaredb", "iam", "plasmavmc", "prismnet", "flashdns", "fiberlb", "lightningstor", "nightlight", "creditservice", "k8shost", "apigateway", "deployer"]' || needs.filter.outputs.workspaces) }}
name: build (${{ matrix.workspace }})
target: ${{ fromJSON(needs.filter.outputs.build_targets) }}
name: build (${{ matrix.target.package }})
steps:
- uses: actions/checkout@v4
- uses: DeterminateSystems/nix-installer-action@v11
- uses: DeterminateSystems/magic-nix-cache-action@v8
- name: Build server
- name: Build package
run: |
# Only build if the workspace has a corresponding package in flake.nix
# We check if it exists before building to avoid failure on non-package workspaces
if nix flake show --json | jq -e ".packages.\"x86_64-linux\".\"${{ matrix.workspace }}-server\"" > /dev/null; then
nix build .#${{ matrix.workspace }}-server --accept-flake-config
else
echo "No server package found for ${{ matrix.workspace }}, skipping build."
fi
nix build .#${{ matrix.target.package }} --accept-flake-config
# Summary job for PR status checks
ci-status:

View file

@ -1,10 +1,10 @@
//! Shared client config types (endpoint/auth/retry) for PhotonCloud SDKs.
//!
//!
//! Lightweight, type-only helpers to keep SDK crates consistent without
//! forcing a unified SDK dependency tree.
use std::time::Duration;
use backoff::ExponentialBackoffBuilder;
use std::time::Duration;
use thiserror::Error;
use tonic::codegen::InterceptedService;
use tonic::service::Interceptor;
@ -70,7 +70,9 @@ impl EndpointConfig {
key.clone(),
));
}
ep = ep.tls_config(cfg).map_err(|e| ClientError::TlsConfig(e.to_string()))?;
ep = ep
.tls_config(cfg)
.map_err(|e| ClientError::TlsConfig(e.to_string()))?;
}
Ok(ep)
@ -96,7 +98,9 @@ pub enum AuthConfig {
impl AuthConfig {
pub fn bearer(token: impl Into<String>) -> Self {
Self::Bearer { token: token.into() }
Self::Bearer {
token: token.into(),
}
}
}
@ -157,12 +161,13 @@ impl Interceptor for AuthInterceptor {
match &self.0 {
AuthConfig::None => {}
AuthConfig::Bearer { token } => {
req.metadata_mut()
.insert("authorization", format!("Bearer {}", token).parse().unwrap());
req.metadata_mut().insert(
"authorization",
format!("Bearer {}", token).parse().unwrap(),
);
}
AuthConfig::AccessKey { id, secret } => {
req.metadata_mut()
.insert("x-api-key", id.parse().unwrap());
req.metadata_mut().insert("x-api-key", id.parse().unwrap());
req.metadata_mut()
.insert("x-api-secret", secret.parse().unwrap());
}
@ -180,7 +185,10 @@ pub fn auth_interceptor(auth: &AuthConfig) -> Option<AuthInterceptor> {
}
/// Helper to wrap a tonic client with an interceptor when auth is provided.
pub fn with_auth(channel: Channel, auth: &AuthConfig) -> InterceptedService<Channel, AuthInterceptor> {
pub fn with_auth(
channel: Channel,
auth: &AuthConfig,
) -> InterceptedService<Channel, AuthInterceptor> {
let interceptor = auth_interceptor(auth).unwrap_or(AuthInterceptor(AuthConfig::None));
InterceptedService::new(channel, interceptor)
}

View file

@ -2,33 +2,23 @@ use serde::{Deserialize, Serialize};
use std::net::SocketAddr;
use std::path::PathBuf;
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum ServerMode {
#[default]
Combined,
Controller,
Node,
}
impl Default for ServerMode {
fn default() -> Self {
Self::Combined
}
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum MetadataBackend {
#[default]
Filesystem,
Chainfire,
}
impl Default for MetadataBackend {
fn default() -> Self {
Self::Filesystem
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(default)]
pub struct ServerConfig {

View file

@ -123,19 +123,14 @@ struct AppState {
reserved_ports: Arc<Mutex<HashSet<u16>>>,
}
#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq)]
#[derive(Debug, Clone, Copy, Default, Serialize, Deserialize, PartialEq, Eq)]
#[serde(rename_all = "lowercase")]
enum VolumeFileFormat {
#[default]
Raw,
Qcow2,
}
impl Default for VolumeFileFormat {
fn default() -> Self {
Self::Raw
}
}
impl VolumeFileFormat {
fn as_qemu_arg(self) -> &'static str {
match self {
@ -871,13 +866,12 @@ async fn materialize_impl(
return load_response_required(state, id).await;
}
let format = req.format.unwrap_or_else(|| {
if req.lazy {
VolumeFileFormat::Qcow2
} else {
VolumeFileFormat::Raw
}
});
let default_format = if req.lazy {
VolumeFileFormat::Qcow2
} else {
VolumeFileFormat::Raw
};
let format = req.format.unwrap_or(default_format);
let temp_path = temp_create_path(&state.config, id);
if fs::try_exists(&temp_path).await.unwrap_or(false) {
let _ = fs::remove_file(&temp_path).await;
@ -1103,10 +1097,7 @@ async fn delete_impl(state: &AppState, id: &str) -> Result<()> {
}
async fn load_response(state: &AppState, id: &str) -> Option<VolumeResponse> {
match load_response_required(state, id).await {
Ok(response) => Some(response),
Err(_) => None,
}
load_response_required(state, id).await.ok()
}
async fn list_volume_responses(state: &AppState) -> Result<Vec<VolumeResponse>> {
@ -1517,8 +1508,10 @@ mod tests {
#[test]
fn export_probe_host_prefers_loopback_for_wildcard_bind() {
let mut config = ServerConfig::default();
config.export_bind_addr = "0.0.0.0".to_string();
let mut config = ServerConfig {
export_bind_addr: "0.0.0.0".to_string(),
..ServerConfig::default()
};
assert_eq!(export_probe_host(&config), "127.0.0.1");
config.export_bind_addr = "10.100.0.11".to_string();
assert_eq!(export_probe_host(&config), "10.100.0.11");
@ -1650,6 +1643,13 @@ mod tests {
assert_ne!(next, preferred);
release_export_port(&state, Some(port)).await;
release_export_port(&state, Some(next)).await;
for _ in 0..10 {
if port_is_usable(&state.config, &HashSet::new(), preferred).await {
break;
}
tokio::time::sleep(std::time::Duration::from_millis(10)).await;
}
let reused = reserve_export_port(&state, Some(preferred)).await.unwrap();
assert_eq!(reused, preferred);
}
@ -1760,7 +1760,7 @@ mod tests {
&state,
"vol-a",
CreateVolumeRequest {
size_bytes: 1 * 1024 * 1024,
size_bytes: 1024 * 1024,
format: Some(VolumeFileFormat::Raw),
backing_file: None,
backing_format: None,
@ -2275,10 +2275,11 @@ mod tests {
#[test]
fn volume_create_api_is_available_in_node_mode() {
let mut config = ServerConfig::default();
config.mode = ServerMode::Node;
let state = AppState {
config: Arc::new(config),
config: Arc::new(ServerConfig {
mode: ServerMode::Node,
..ServerConfig::default()
}),
metadata_store: MetadataStore::Filesystem,
volume_guards: Arc::new(Mutex::new(HashMap::new())),
reserved_ports: Arc::new(Mutex::new(HashSet::new())),

View file

@ -1,7 +1,12 @@
#![allow(dead_code)]
use std::sync::Arc;
use anyhow::{Context, Result};
use rustls::{pki_types::{PrivateKeyDer, ServerName}, ClientConfig, RootCertStore};
use rustls::{
pki_types::{PrivateKeyDer, ServerName},
ClientConfig, RootCertStore,
};
use rustls_pemfile::certs;
use std::fs;
use std::io::BufReader;
@ -15,7 +20,7 @@ use crate::discovery::ServiceDiscovery;
pub enum MtlsStream {
Plain(TcpStream),
Tls(tokio_rustls::client::TlsStream<TcpStream>),
Tls(Box<tokio_rustls::client::TlsStream<TcpStream>>),
}
impl AsyncRead for MtlsStream {
@ -26,7 +31,7 @@ impl AsyncRead for MtlsStream {
) -> Poll<std::io::Result<()>> {
match self.get_mut() {
MtlsStream::Plain(stream) => Pin::new(stream).poll_read(cx, buf),
MtlsStream::Tls(stream) => Pin::new(stream).poll_read(cx, buf),
MtlsStream::Tls(stream) => Pin::new(stream.as_mut()).poll_read(cx, buf),
}
}
}
@ -39,24 +44,21 @@ impl AsyncWrite for MtlsStream {
) -> Poll<std::io::Result<usize>> {
match self.get_mut() {
MtlsStream::Plain(stream) => Pin::new(stream).poll_write(cx, data),
MtlsStream::Tls(stream) => Pin::new(stream).poll_write(cx, data),
MtlsStream::Tls(stream) => Pin::new(stream.as_mut()).poll_write(cx, data),
}
}
fn poll_flush(self: Pin<&mut Self>, cx: &mut TaskContext<'_>) -> Poll<std::io::Result<()>> {
match self.get_mut() {
MtlsStream::Plain(stream) => Pin::new(stream).poll_flush(cx),
MtlsStream::Tls(stream) => Pin::new(stream).poll_flush(cx),
MtlsStream::Tls(stream) => Pin::new(stream.as_mut()).poll_flush(cx),
}
}
fn poll_shutdown(
self: Pin<&mut Self>,
cx: &mut TaskContext<'_>,
) -> Poll<std::io::Result<()>> {
fn poll_shutdown(self: Pin<&mut Self>, cx: &mut TaskContext<'_>) -> Poll<std::io::Result<()>> {
match self.get_mut() {
MtlsStream::Plain(stream) => Pin::new(stream).poll_shutdown(cx),
MtlsStream::Tls(stream) => Pin::new(stream).poll_shutdown(cx),
MtlsStream::Tls(stream) => Pin::new(stream.as_mut()).poll_shutdown(cx),
}
}
}
@ -109,7 +111,7 @@ impl MtlsClient {
let server_name = ServerName::try_from(service_name.to_string())
.context("invalid server name for TLS")?;
let tls_stream = connector.connect(server_name, stream).await?;
return Ok(MtlsStream::Tls(tls_stream));
return Ok(MtlsStream::Tls(Box::new(tls_stream)));
}
Ok(MtlsStream::Plain(stream))

View file

@ -1,3 +1,5 @@
#![allow(dead_code)]
use std::collections::HashMap;
use std::sync::Arc;
use std::time::{Duration, Instant};
@ -96,7 +98,10 @@ impl ServiceDiscovery {
Ok(instances)
}
async fn fetch_instances_from_chainfire(&self, service_name: &str) -> Result<Vec<ServiceInstance>> {
async fn fetch_instances_from_chainfire(
&self,
service_name: &str,
) -> Result<Vec<ServiceInstance>> {
let mut client = Client::connect(self.chainfire_endpoint.clone()).await?;
let prefix = format!(
"{}instances/{}/",
@ -136,10 +141,7 @@ impl ServiceDiscovery {
source_service: &str,
target_service: &str,
) -> Result<Option<MtlsPolicy>> {
let policy_key = format!(
"{}-{}",
source_service, target_service
);
let policy_key = format!("{}-{}", source_service, target_service);
// キャッシュをチェック
{
@ -153,10 +155,7 @@ impl ServiceDiscovery {
// Chainfireから取得
let mut client = Client::connect(self.chainfire_endpoint.clone()).await?;
let prefix = format!(
"{}mtls/policies/",
cluster_prefix(&self.cluster_id)
);
let prefix = format!("{}mtls/policies/", cluster_prefix(&self.cluster_id));
let prefix_bytes = prefix.as_bytes();
let (kvs, _) = client.scan_prefix(prefix_bytes, 0).await?;
@ -164,7 +163,9 @@ impl ServiceDiscovery {
for (_, value, _) in kvs {
match serde_json::from_slice::<MtlsPolicy>(&value) {
Ok(policy) => {
if policy.source_service == source_service && policy.target_service == target_service {
if policy.source_service == source_service
&& policy.target_service == target_service
{
// キャッシュに保存
let mut cache = self.policy_cache.write().await;
cache.insert(
@ -207,7 +208,7 @@ impl ServiceDiscovery {
if inst.state.as_deref().unwrap_or("healthy") == "healthy" {
service_map
.entry(inst.service.clone())
.or_insert_with(Vec::new)
.or_default()
.push(inst);
}
}

View file

@ -120,10 +120,7 @@ async fn main() -> Result<()> {
if let Some(disc) = &discovery {
// デフォルトポリシーを確認(簡易実装)
// 実際には、自身のサービス名とターゲットサービス名でポリシーを検索
if let Ok(Some(policy)) = disc
.get_mtls_policy(&cfg.service.name, "default")
.await
{
if let Ok(Some(policy)) = disc.get_mtls_policy(&cfg.service.name, "default").await {
if policy.mtls_required.unwrap_or(false) {
"mtls"
} else {
@ -168,15 +165,16 @@ async fn main() -> Result<()> {
}
fn load_config(path: &PathBuf) -> Result<Config> {
let contents = fs::read_to_string(path)
.with_context(|| format!("failed to read {}", path.display()))?;
let contents =
fs::read_to_string(path).with_context(|| format!("failed to read {}", path.display()))?;
let cfg: Config =
toml::from_str(&contents).with_context(|| format!("failed to parse {}", path.display()))?;
Ok(cfg)
}
fn load_certs(path: &str) -> Result<Vec<CertificateDer<'static>>> {
let file = fs::File::open(path).with_context(|| format!("failed to open cert file {}", path))?;
let file =
fs::File::open(path).with_context(|| format!("failed to open cert file {}", path))?;
let mut reader = BufReader::new(file);
let certs = certs(&mut reader)
.collect::<std::result::Result<Vec<_>, _>>()
@ -246,7 +244,9 @@ fn build_server_config(cfg: &Config, mode: &str) -> Result<ServerConfig> {
let client_certs = load_certs(ca_path)?;
let mut roots = rustls::RootCertStore::empty();
for c in client_certs {
roots.add(c).map_err(|e| anyhow!("adding CA failed: {:?}", e))?;
roots
.add(c)
.map_err(|e| anyhow!("adding CA failed: {:?}", e))?;
}
let verifier =
rustls::server::WebPkiClientVerifier::builder(std::sync::Arc::new(roots)).build()?;
@ -266,7 +266,10 @@ fn build_server_config(cfg: &Config, mode: &str) -> Result<ServerConfig> {
async fn run_plain_proxy(listen_addr: &str, app_addr: &str) -> Result<()> {
let listener = TcpListener::bind(listen_addr).await?;
info!("listening on {} and forwarding to {}", listen_addr, app_addr);
info!(
"listening on {} and forwarding to {}",
listen_addr, app_addr
);
loop {
let (inbound, peer) = listener.accept().await?;
@ -347,8 +350,8 @@ async fn handle_connection(mut inbound: TcpStream, app_addr: &str) -> Result<()>
#[cfg(test)]
mod tests {
use super::*;
use rustls::{ClientConfig, RootCertStore};
use rustls::pki_types::ServerName;
use rustls::{ClientConfig, RootCertStore};
use std::path::{Path, PathBuf};
use std::process::Command;
use std::sync::OnceLock;
@ -361,14 +364,20 @@ mod tests {
.args(args)
.status()
.unwrap_or_else(|error| panic!("failed to spawn openssl {:?}: {}", args, error));
assert!(status.success(), "openssl {:?} failed with status {}", args, status);
assert!(
status.success(),
"openssl {:?} failed with status {}",
args,
status
);
}
fn ensure_test_certs() -> &'static Path {
static CERT_DIR: OnceLock<PathBuf> = OnceLock::new();
CERT_DIR.get_or_init(|| {
let dir = std::env::temp_dir().join(format!("mtls-agent-test-certs-{}", std::process::id()));
let dir =
std::env::temp_dir().join(format!("mtls-agent-test-certs-{}", std::process::id()));
std::fs::create_dir_all(&dir).unwrap();
let ca_key = dir.join("ca.key");
@ -384,12 +393,7 @@ mod tests {
let client_pem = dir.join("client.pem");
if !ca_pem.exists() {
run_openssl(&[
"genrsa",
"-out",
ca_key.to_string_lossy().as_ref(),
"2048",
]);
run_openssl(&["genrsa", "-out", ca_key.to_string_lossy().as_ref(), "2048"]);
run_openssl(&[
"req",
"-x509",
@ -490,10 +494,7 @@ mod tests {
}
fn test_cert_path(name: &str) -> String {
ensure_test_certs()
.join(name)
.display()
.to_string()
ensure_test_certs().join(name).display().to_string()
}
fn unused_loopback_addr() -> String {
@ -648,7 +649,8 @@ mod tests {
let write_result = tls_stream.write_all(b"blocked").await;
if write_result.is_ok() {
let mut buf = [0u8; 1];
let read_result = tokio::time::timeout(Duration::from_secs(1), tls_stream.read(&mut buf)).await;
let read_result =
tokio::time::timeout(Duration::from_secs(1), tls_stream.read(&mut buf)).await;
match read_result {
Ok(Ok(0)) | Ok(Err(_)) | Err(_) => {}
Ok(Ok(_)) => panic!("mTLS mode accepted traffic without a client certificate"),

View file

@ -1,3 +1,5 @@
#![allow(dead_code)]
use std::sync::Arc;
use std::time::{Duration, Instant};

View file

@ -27,21 +27,9 @@
rustfmtComponent = pkgs.rust-bin.stable.latest.rustfmt;
clippyComponent = pkgs.rust-bin.stable.latest.clippy;
wsList = [
"chainfire"
"flaredb"
"iam"
"plasmavmc"
"prismnet"
"flashdns"
"fiberlb"
"lightningstor"
"nightlight"
"creditservice"
"k8shost"
"apigateway"
"deployer"
];
ciInventory = builtins.fromJSON (builtins.readFile ./workspaces.json);
wsList = map (workspace: workspace.name) ciInventory.workspaces;
supportedWorkspaces = pkgs.lib.concatStringsSep ", " wsList;
gate = pkgs.writeShellApplication {
name = "photoncloud-gate";
@ -53,6 +41,7 @@
gnugrep
gawk
git
jq
rustToolchain
rustfmtComponent
clippyComponent
@ -61,6 +50,7 @@
llvmPackages.clang
pkg-config
openssl
qemu
rocksdb
];
@ -82,6 +72,7 @@
Notes:
- Requires running inside a git checkout (uses `git rev-parse`).
- Logs are written to ./work/ci/<timestamp>/ by default (NOT .cccc/).
- Supported workspaces: ${supportedWorkspaces}
USAGE
}
@ -143,6 +134,7 @@
pkgs.gnugrep
pkgs.gawk
pkgs.git
pkgs.jq
rustToolchain
rustfmtComponent
clippyComponent
@ -150,6 +142,8 @@
pkgs.llvmPackages.libclang
pkgs.llvmPackages.clang
pkgs.pkg-config
pkgs.openssl
pkgs.qemu
]}"
CARGO="${rustToolchain}/bin/cargo"
@ -166,6 +160,14 @@
export PROTOC="${pkgs.protobuf}/bin/protoc"
export ROCKSDB_LIB_DIR="${pkgs.rocksdb}/lib"
manifest_has_target_kind() {
local manifest="$1"; shift
local kind="$1"; shift
"$CARGO" metadata --format-version 1 --no-deps --manifest-path "$manifest" \
| jq -e --arg kind "$kind" 'any(.packages[]?.targets[]?.kind[]?; . == $kind)' > /dev/null
}
run_cmd() {
local ws="$1"; shift
local title="$1"; shift
@ -222,10 +224,22 @@
for manifest in "''${manifests[@]}"; do
local crate
local ran_unit_tests
crate="$(basename "$(dirname "$manifest")")"
run_shared_crate_cmd "$crate" "$manifest" "fmt" "$CARGO_FMT fmt --manifest-path \"$manifest\" $fmt_rustfmt_args"
run_shared_crate_cmd "$crate" "$manifest" "clippy" "$CARGO_CLIPPY clippy --manifest-path \"$manifest\" --all-targets -- -D warnings"
run_shared_crate_cmd "$crate" "$manifest" "test (tier0 unit)" "$CARGO test --manifest-path \"$manifest\" --lib"
ran_unit_tests="0"
if manifest_has_target_kind "$manifest" "lib"; then
run_shared_crate_cmd "$crate" "$manifest" "test (tier0 unit lib)" "$CARGO test --manifest-path \"$manifest\" --lib"
ran_unit_tests="1"
fi
if manifest_has_target_kind "$manifest" "bin"; then
run_shared_crate_cmd "$crate" "$manifest" "test (tier0 unit bin)" "$CARGO test --manifest-path \"$manifest\" --bins"
ran_unit_tests="1"
fi
if [[ "$ran_unit_tests" == "0" ]]; then
echo "[gate][shared:$crate] WARN: no lib/bin unit test targets"
fi
if [[ "$tier" == "1" || "$tier" == "2" ]]; then
run_shared_crate_cmd "$crate" "$manifest" "test (tier1 integration)" "$CARGO test --manifest-path \"$manifest\" --tests"
@ -254,13 +268,26 @@
continue
fi
workspace_manifest="$repo_root/$ws/Cargo.toml"
# Format gate: call Nix-provided `cargo-fmt` directly (avoid resolving ~/.cargo/bin/cargo-fmt).
#
# NOTE: Avoid `--all` here; with path-dependencies it may traverse outside the workspace directory.
run_cmd "$ws" "fmt" "$CARGO_FMT fmt $fmt_rustfmt_args"
# Lint gate: call Nix-provided `cargo-clippy` directly (avoid resolving ~/.cargo/bin/cargo-clippy).
run_cmd "$ws" "clippy" "$CARGO_CLIPPY clippy --workspace --all-targets -- -D warnings"
run_cmd "$ws" "test (tier0 unit)" "$CARGO test --workspace --lib"
ran_unit_tests="0"
if manifest_has_target_kind "$workspace_manifest" "lib"; then
run_cmd "$ws" "test (tier0 unit lib)" "$CARGO test --workspace --lib"
ran_unit_tests="1"
fi
if manifest_has_target_kind "$workspace_manifest" "bin"; then
run_cmd "$ws" "test (tier0 unit bin)" "$CARGO test --workspace --bins"
ran_unit_tests="1"
fi
if [[ "$ran_unit_tests" == "0" ]]; then
echo "[gate][$ws] WARN: no lib/bin unit test targets"
fi
if [[ "$tier" == "1" || "$tier" == "2" ]]; then
run_cmd "$ws" "test (tier1 integration)" "$CARGO test --workspace --tests"

166
nix/ci/workspaces.json Normal file
View file

@ -0,0 +1,166 @@
{
"global_paths": [
"flake.nix",
"flake.lock",
"shell.nix",
"nix/**",
"nix-nos/**",
".github/workflows/nix.yml",
"Cargo.toml",
"Cargo.lock",
"crates/**",
"baremetal/**",
"scripts/**",
"specifications/**",
"docs/**"
],
"shared_crates_paths": [
"crates/**"
],
"workspaces": [
{
"name": "chainfire",
"paths": [
"chainfire/**"
],
"build_packages": [
"chainfire-server"
]
},
{
"name": "flaredb",
"paths": [
"flaredb/**"
],
"build_packages": [
"flaredb-server"
]
},
{
"name": "iam",
"paths": [
"iam/**"
],
"build_packages": [
"iam-server"
]
},
{
"name": "coronafs",
"paths": [
"coronafs/**"
],
"build_packages": [
"coronafs-server"
]
},
{
"name": "plasmavmc",
"paths": [
"plasmavmc/**"
],
"build_packages": [
"plasmavmc-server"
]
},
{
"name": "prismnet",
"paths": [
"prismnet/**"
],
"build_packages": [
"prismnet-server"
]
},
{
"name": "flashdns",
"paths": [
"flashdns/**"
],
"build_packages": [
"flashdns-server"
]
},
{
"name": "fiberlb",
"paths": [
"fiberlb/**"
],
"build_packages": [
"fiberlb-server"
]
},
{
"name": "lightningstor",
"paths": [
"lightningstor/**"
],
"build_packages": [
"lightningstor-server",
"lightningstor-node"
]
},
{
"name": "nightlight",
"paths": [
"nightlight/**"
],
"build_packages": [
"nightlight-server"
]
},
{
"name": "creditservice",
"paths": [
"creditservice/**"
],
"build_packages": [
"creditservice-server"
]
},
{
"name": "k8shost",
"paths": [
"k8shost/**"
],
"build_packages": [
"k8shost-server"
]
},
{
"name": "apigateway",
"paths": [
"apigateway/**"
],
"build_packages": [
"apigateway-server"
]
},
{
"name": "deployer",
"paths": [
"deployer/**"
],
"build_packages": [
"deployer-server",
"deployer-ctl",
"node-agent",
"nix-agent",
"plasmacloud-reconciler",
"fleet-scheduler"
]
},
{
"name": "client-common",
"paths": [
"client-common/**"
]
},
{
"name": "mtls-agent",
"paths": [
"mtls-agent/**"
]
}
]
}

View file

@ -0,0 +1,134 @@
#!/usr/bin/env python3
import argparse
import fnmatch
import json
from pathlib import Path
from typing import Any
def load_changed_files(args: argparse.Namespace) -> list[str]:
    """Collect the changed-path list from file arguments and inline arguments.

    Reads every file named in ``args.changed_files_file`` (newline-separated
    paths), then appends the individual ``args.changed_file`` entries.
    Blank and whitespace-only entries are dropped; order is preserved and
    duplicates are kept.
    """
    collected: list[str] = []
    for listing in args.changed_files_file:
        entries = (raw.strip() for raw in Path(listing).read_text().splitlines())
        collected.extend(entry for entry in entries if entry)
    for raw in args.changed_file:
        entry = raw.strip()
        if entry:
            collected.append(entry)
    return collected
def matches_any(path: str, patterns: list[str]) -> bool:
    """Return True when ``path`` matches at least one fnmatch-style pattern.

    Matching is case-sensitive (fnmatchcase); note that ``*`` in fnmatch
    also crosses ``/`` separators, so ``crates/**`` matches any depth.
    """
    for pattern in patterns:
        if fnmatch.fnmatchcase(path, pattern):
            return True
    return False
def detect_changes(config: dict[str, Any], changed_files: list[str]) -> dict[str, Any]:
    """Map changed file paths onto the CI workspace inventory.

    Returns a dict with the changed workspace names, the build targets those
    workspaces declare, and boolean rollup flags. A hit on any entry in
    ``global_paths`` selects every workspace.
    """
    workspaces: list[dict[str, Any]] = config["workspaces"]

    global_changed = any(
        matches_any(path, config["global_paths"]) for path in changed_files
    )
    shared_crates_changed = any(
        matches_any(path, config["shared_crates_paths"]) for path in changed_files
    )

    if global_changed:
        # Global-path change: every workspace is considered dirty.
        changed_workspaces = [ws["name"] for ws in workspaces]
    else:
        changed_workspaces = [
            ws["name"]
            for ws in workspaces
            if any(matches_any(path, ws["paths"]) for path in changed_files)
        ]

    selected = set(changed_workspaces)
    build_targets: list[dict[str, str]] = []
    seen: set[tuple[str, str]] = set()
    for ws in workspaces:
        name = ws["name"]
        if name not in selected:
            continue
        # Defensive de-dup in case the inventory repeats a package entry.
        for package in ws.get("build_packages", []):
            key = (name, package)
            if key in seen:
                continue
            seen.add(key)
            build_targets.append({"workspace": name, "package": package})

    return {
        "workspaces": changed_workspaces,
        "build_targets": build_targets,
        "any_changed": global_changed or bool(changed_workspaces),
        "build_changed": bool(build_targets),
        "global_changed": global_changed,
        "shared_crates_changed": shared_crates_changed,
    }
def write_github_output(path: Path, result: dict[str, Any]) -> None:
    """Append detection results to a GitHub Actions step-output file.

    List-valued outputs are serialized as compact JSON; booleans are written
    as lowercase "true"/"false" so workflow expressions can compare them as
    strings. The file is opened in append mode, matching GITHUB_OUTPUT usage.
    """
    json_keys = ("workspaces", "build_targets")
    bool_keys = (
        "any_changed",
        "build_changed",
        "global_changed",
        "shared_crates_changed",
    )
    lines = [
        f"{key}={json.dumps(result[key], separators=(',', ':'))}" for key in json_keys
    ]
    lines.extend(f"{key}={str(result[key]).lower()}" for key in bool_keys)
    with path.open("a", encoding="utf-8") as handle:
        for line in lines:
            handle.write(f"{line}\n")
def parse_args() -> argparse.Namespace:
    """Build and run the CLI parser for the changed-workspace detector."""
    parser = argparse.ArgumentParser(
        description="Map changed files to PhotonCloud CI workspaces."
    )
    # Option table: (flag, kwargs) pairs, registered in a single loop.
    option_specs = [
        (
            "--config",
            {"required": True, "help": "Path to the JSON CI workspace inventory."},
        ),
        (
            "--changed-files-file",
            {
                "action": "append",
                "default": [],
                "help": "File containing newline-separated changed paths.",
            },
        ),
        (
            "--changed-file",
            {
                "action": "append",
                "default": [],
                "help": "Single changed path. Can be repeated.",
            },
        ),
        (
            "--github-output",
            {"help": "Optional path to append GitHub Actions step outputs."},
        ),
    ]
    for flag, kwargs in option_specs:
        parser.add_argument(flag, **kwargs)
    return parser.parse_args()
def main() -> int:
    """CLI entry point: detect changed workspaces and emit results.

    Loads the inventory, maps the changed files onto it, optionally appends
    step outputs for GitHub Actions, and prints the full result as JSON.
    Always returns 0; argparse exits earlier on bad arguments.
    """
    args = parse_args()
    inventory = json.loads(Path(args.config).read_text())
    result = detect_changes(inventory, load_changed_files(args))
    if args.github_output:
        write_github_output(Path(args.github_output), result)
    print(json.dumps(result, indent=2, sort_keys=True))
    return 0


if __name__ == "__main__":
    raise SystemExit(main())