ci: unify workspace inventory and harden tier0 gating
This commit is contained in:
parent
290c6ba88a
commit
e1a5d394e5
11 changed files with 493 additions and 160 deletions
102
.github/workflows/nix.yml
vendored
102
.github/workflows/nix.yml
vendored
|
|
@ -2,65 +2,71 @@ name: Nix CI
|
||||||
|
|
||||||
on:
|
on:
|
||||||
push:
|
push:
|
||||||
branches: [ master ]
|
branches: [ main, master ]
|
||||||
pull_request:
|
pull_request:
|
||||||
branches: [ master ]
|
branches: [ main, master ]
|
||||||
|
workflow_dispatch:
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
# Detect which workspaces have changed to save CI minutes
|
# Detect which workspaces have changed to save CI minutes
|
||||||
filter:
|
filter:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
outputs:
|
outputs:
|
||||||
workspaces: ${{ steps.filter.outputs.changes }}
|
workspaces: ${{ steps.detect.outputs.workspaces }}
|
||||||
any_changed: ${{ steps.filter.outputs.workspaces_any_changed }}
|
build_targets: ${{ steps.detect.outputs.build_targets }}
|
||||||
global_changed: ${{ steps.filter.outputs.global }}
|
any_changed: ${{ steps.detect.outputs.any_changed }}
|
||||||
shared_crates_changed: ${{ steps.filter.outputs.shared_crates }}
|
build_changed: ${{ steps.detect.outputs.build_changed }}
|
||||||
|
global_changed: ${{ steps.detect.outputs.global_changed }}
|
||||||
|
shared_crates_changed: ${{ steps.detect.outputs.shared_crates_changed }}
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@v4
|
||||||
- uses: dorny/paths-filter@v3
|
|
||||||
id: filter
|
|
||||||
with:
|
with:
|
||||||
filters: |
|
fetch-depth: 0
|
||||||
global:
|
|
||||||
- 'flake.nix'
|
- name: Collect changed files
|
||||||
- 'flake.lock'
|
env:
|
||||||
- 'nix/**'
|
EVENT_NAME: ${{ github.event_name }}
|
||||||
- 'nix-nos/**'
|
BASE_REF: ${{ github.base_ref }}
|
||||||
- '.github/workflows/nix.yml'
|
BEFORE_SHA: ${{ github.event.before }}
|
||||||
- 'Cargo.toml'
|
HEAD_SHA: ${{ github.sha }}
|
||||||
- 'Cargo.lock'
|
run: |
|
||||||
- 'crates/**'
|
set -euo pipefail
|
||||||
- 'client-common/**'
|
|
||||||
- 'baremetal/**'
|
if [[ "$EVENT_NAME" == "workflow_dispatch" ]]; then
|
||||||
- 'scripts/**'
|
printf 'flake.nix\n' > changed-files.txt
|
||||||
- 'specifications/**'
|
elif [[ "$EVENT_NAME" == "pull_request" ]]; then
|
||||||
- 'docs/**'
|
git fetch --no-tags --depth=1 origin "$BASE_REF"
|
||||||
shared_crates: 'crates/**'
|
git diff --name-only "origin/$BASE_REF...$HEAD_SHA" > changed-files.txt
|
||||||
chainfire: 'chainfire/**'
|
elif [[ "$BEFORE_SHA" == "0000000000000000000000000000000000000000" ]]; then
|
||||||
flaredb: 'flaredb/**'
|
git diff-tree --no-commit-id --name-only -r "$HEAD_SHA" > changed-files.txt
|
||||||
iam: 'iam/**'
|
else
|
||||||
plasmavmc: 'plasmavmc/**'
|
git diff --name-only "$BEFORE_SHA" "$HEAD_SHA" > changed-files.txt
|
||||||
prismnet: 'prismnet/**'
|
fi
|
||||||
flashdns: 'flashdns/**'
|
|
||||||
fiberlb: 'fiberlb/**'
|
if [[ ! -f changed-files.txt ]]; then
|
||||||
lightningstor: 'lightningstor/**'
|
: > changed-files.txt
|
||||||
nightlight: 'nightlight/**'
|
fi
|
||||||
creditservice: 'creditservice/**'
|
|
||||||
k8shost: 'k8shost/**'
|
sed -n '1,200p' changed-files.txt
|
||||||
apigateway: 'apigateway/**'
|
|
||||||
deployer: 'deployer/**'
|
- name: Detect changed workspaces
|
||||||
|
id: detect
|
||||||
|
run: |
|
||||||
|
python3 scripts/ci_changed_workspaces.py \
|
||||||
|
--config nix/ci/workspaces.json \
|
||||||
|
--changed-files-file changed-files.txt \
|
||||||
|
--github-output "$GITHUB_OUTPUT"
|
||||||
|
|
||||||
# Run CI gates for changed workspaces
|
# Run CI gates for changed workspaces
|
||||||
# Uses the provider-agnostic 'photoncloud-gate' defined in nix/ci/flake.nix
|
# Uses the provider-agnostic 'photoncloud-gate' defined in nix/ci/flake.nix
|
||||||
gate:
|
gate:
|
||||||
needs: filter
|
needs: filter
|
||||||
if: ${{ needs.filter.outputs.any_changed == 'true' || needs.filter.outputs.global_changed == 'true' }}
|
if: ${{ needs.filter.outputs.any_changed == 'true' }}
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
strategy:
|
strategy:
|
||||||
fail-fast: false
|
fail-fast: false
|
||||||
matrix:
|
matrix:
|
||||||
# If global files changed, run all. Otherwise run only changed ones.
|
workspace: ${{ fromJSON(needs.filter.outputs.workspaces) }}
|
||||||
workspace: ${{ fromJSON(needs.filter.outputs.global_changed == 'true' && '["chainfire", "flaredb", "iam", "plasmavmc", "prismnet", "flashdns", "fiberlb", "lightningstor", "nightlight", "creditservice", "k8shost", "apigateway", "deployer"]' || needs.filter.outputs.workspaces) }}
|
|
||||||
name: gate (${{ matrix.workspace }})
|
name: gate (${{ matrix.workspace }})
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@v4
|
||||||
|
|
@ -88,26 +94,20 @@ jobs:
|
||||||
# Build server packages (tier 1+)
|
# Build server packages (tier 1+)
|
||||||
build:
|
build:
|
||||||
needs: [filter, gate]
|
needs: [filter, gate]
|
||||||
if: ${{ github.event_name == 'push' && github.ref == 'refs/heads/master' }}
|
if: ${{ github.event_name == 'push' && (github.ref == 'refs/heads/main' || github.ref == 'refs/heads/master') && needs.filter.outputs.build_changed == 'true' }}
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
strategy:
|
strategy:
|
||||||
matrix:
|
matrix:
|
||||||
workspace: ${{ fromJSON(needs.filter.outputs.global_changed == 'true' && '["chainfire", "flaredb", "iam", "plasmavmc", "prismnet", "flashdns", "fiberlb", "lightningstor", "nightlight", "creditservice", "k8shost", "apigateway", "deployer"]' || needs.filter.outputs.workspaces) }}
|
target: ${{ fromJSON(needs.filter.outputs.build_targets) }}
|
||||||
name: build (${{ matrix.workspace }})
|
name: build (${{ matrix.target.package }})
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@v4
|
||||||
- uses: DeterminateSystems/nix-installer-action@v11
|
- uses: DeterminateSystems/nix-installer-action@v11
|
||||||
- uses: DeterminateSystems/magic-nix-cache-action@v8
|
- uses: DeterminateSystems/magic-nix-cache-action@v8
|
||||||
|
|
||||||
- name: Build server
|
- name: Build package
|
||||||
run: |
|
run: |
|
||||||
# Only build if the workspace has a corresponding package in flake.nix
|
nix build .#${{ matrix.target.package }} --accept-flake-config
|
||||||
# We check if it exists before building to avoid failure on non-package workspaces
|
|
||||||
if nix flake show --json | jq -e ".packages.\"x86_64-linux\".\"${{ matrix.workspace }}-server\"" > /dev/null; then
|
|
||||||
nix build .#${{ matrix.workspace }}-server --accept-flake-config
|
|
||||||
else
|
|
||||||
echo "No server package found for ${{ matrix.workspace }}, skipping build."
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Summary job for PR status checks
|
# Summary job for PR status checks
|
||||||
ci-status:
|
ci-status:
|
||||||
|
|
|
||||||
|
|
@ -1,10 +1,10 @@
|
||||||
//! Shared client config types (endpoint/auth/retry) for PhotonCloud SDKs.
|
//! Shared client config types (endpoint/auth/retry) for PhotonCloud SDKs.
|
||||||
//!
|
//!
|
||||||
//! Lightweight, type-only helpers to keep SDK crates consistent without
|
//! Lightweight, type-only helpers to keep SDK crates consistent without
|
||||||
//! forcing a unified SDK dependency tree.
|
//! forcing a unified SDK dependency tree.
|
||||||
|
|
||||||
use std::time::Duration;
|
|
||||||
use backoff::ExponentialBackoffBuilder;
|
use backoff::ExponentialBackoffBuilder;
|
||||||
|
use std::time::Duration;
|
||||||
use thiserror::Error;
|
use thiserror::Error;
|
||||||
use tonic::codegen::InterceptedService;
|
use tonic::codegen::InterceptedService;
|
||||||
use tonic::service::Interceptor;
|
use tonic::service::Interceptor;
|
||||||
|
|
@ -70,7 +70,9 @@ impl EndpointConfig {
|
||||||
key.clone(),
|
key.clone(),
|
||||||
));
|
));
|
||||||
}
|
}
|
||||||
ep = ep.tls_config(cfg).map_err(|e| ClientError::TlsConfig(e.to_string()))?;
|
ep = ep
|
||||||
|
.tls_config(cfg)
|
||||||
|
.map_err(|e| ClientError::TlsConfig(e.to_string()))?;
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(ep)
|
Ok(ep)
|
||||||
|
|
@ -96,7 +98,9 @@ pub enum AuthConfig {
|
||||||
|
|
||||||
impl AuthConfig {
|
impl AuthConfig {
|
||||||
pub fn bearer(token: impl Into<String>) -> Self {
|
pub fn bearer(token: impl Into<String>) -> Self {
|
||||||
Self::Bearer { token: token.into() }
|
Self::Bearer {
|
||||||
|
token: token.into(),
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -157,12 +161,13 @@ impl Interceptor for AuthInterceptor {
|
||||||
match &self.0 {
|
match &self.0 {
|
||||||
AuthConfig::None => {}
|
AuthConfig::None => {}
|
||||||
AuthConfig::Bearer { token } => {
|
AuthConfig::Bearer { token } => {
|
||||||
req.metadata_mut()
|
req.metadata_mut().insert(
|
||||||
.insert("authorization", format!("Bearer {}", token).parse().unwrap());
|
"authorization",
|
||||||
|
format!("Bearer {}", token).parse().unwrap(),
|
||||||
|
);
|
||||||
}
|
}
|
||||||
AuthConfig::AccessKey { id, secret } => {
|
AuthConfig::AccessKey { id, secret } => {
|
||||||
req.metadata_mut()
|
req.metadata_mut().insert("x-api-key", id.parse().unwrap());
|
||||||
.insert("x-api-key", id.parse().unwrap());
|
|
||||||
req.metadata_mut()
|
req.metadata_mut()
|
||||||
.insert("x-api-secret", secret.parse().unwrap());
|
.insert("x-api-secret", secret.parse().unwrap());
|
||||||
}
|
}
|
||||||
|
|
@ -180,7 +185,10 @@ pub fn auth_interceptor(auth: &AuthConfig) -> Option<AuthInterceptor> {
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Helper to wrap a tonic client with an interceptor when auth is provided.
|
/// Helper to wrap a tonic client with an interceptor when auth is provided.
|
||||||
pub fn with_auth(channel: Channel, auth: &AuthConfig) -> InterceptedService<Channel, AuthInterceptor> {
|
pub fn with_auth(
|
||||||
|
channel: Channel,
|
||||||
|
auth: &AuthConfig,
|
||||||
|
) -> InterceptedService<Channel, AuthInterceptor> {
|
||||||
let interceptor = auth_interceptor(auth).unwrap_or(AuthInterceptor(AuthConfig::None));
|
let interceptor = auth_interceptor(auth).unwrap_or(AuthInterceptor(AuthConfig::None));
|
||||||
InterceptedService::new(channel, interceptor)
|
InterceptedService::new(channel, interceptor)
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -2,33 +2,23 @@ use serde::{Deserialize, Serialize};
|
||||||
use std::net::SocketAddr;
|
use std::net::SocketAddr;
|
||||||
use std::path::PathBuf;
|
use std::path::PathBuf;
|
||||||
|
|
||||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
|
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq, Serialize, Deserialize)]
|
||||||
#[serde(rename_all = "snake_case")]
|
#[serde(rename_all = "snake_case")]
|
||||||
pub enum ServerMode {
|
pub enum ServerMode {
|
||||||
|
#[default]
|
||||||
Combined,
|
Combined,
|
||||||
Controller,
|
Controller,
|
||||||
Node,
|
Node,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Default for ServerMode {
|
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq, Serialize, Deserialize)]
|
||||||
fn default() -> Self {
|
|
||||||
Self::Combined
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
|
|
||||||
#[serde(rename_all = "snake_case")]
|
#[serde(rename_all = "snake_case")]
|
||||||
pub enum MetadataBackend {
|
pub enum MetadataBackend {
|
||||||
|
#[default]
|
||||||
Filesystem,
|
Filesystem,
|
||||||
Chainfire,
|
Chainfire,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Default for MetadataBackend {
|
|
||||||
fn default() -> Self {
|
|
||||||
Self::Filesystem
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||||
#[serde(default)]
|
#[serde(default)]
|
||||||
pub struct ServerConfig {
|
pub struct ServerConfig {
|
||||||
|
|
|
||||||
|
|
@ -123,19 +123,14 @@ struct AppState {
|
||||||
reserved_ports: Arc<Mutex<HashSet<u16>>>,
|
reserved_ports: Arc<Mutex<HashSet<u16>>>,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq)]
|
#[derive(Debug, Clone, Copy, Default, Serialize, Deserialize, PartialEq, Eq)]
|
||||||
#[serde(rename_all = "lowercase")]
|
#[serde(rename_all = "lowercase")]
|
||||||
enum VolumeFileFormat {
|
enum VolumeFileFormat {
|
||||||
|
#[default]
|
||||||
Raw,
|
Raw,
|
||||||
Qcow2,
|
Qcow2,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Default for VolumeFileFormat {
|
|
||||||
fn default() -> Self {
|
|
||||||
Self::Raw
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl VolumeFileFormat {
|
impl VolumeFileFormat {
|
||||||
fn as_qemu_arg(self) -> &'static str {
|
fn as_qemu_arg(self) -> &'static str {
|
||||||
match self {
|
match self {
|
||||||
|
|
@ -871,13 +866,12 @@ async fn materialize_impl(
|
||||||
return load_response_required(state, id).await;
|
return load_response_required(state, id).await;
|
||||||
}
|
}
|
||||||
|
|
||||||
let format = req.format.unwrap_or_else(|| {
|
let default_format = if req.lazy {
|
||||||
if req.lazy {
|
VolumeFileFormat::Qcow2
|
||||||
VolumeFileFormat::Qcow2
|
} else {
|
||||||
} else {
|
VolumeFileFormat::Raw
|
||||||
VolumeFileFormat::Raw
|
};
|
||||||
}
|
let format = req.format.unwrap_or(default_format);
|
||||||
});
|
|
||||||
let temp_path = temp_create_path(&state.config, id);
|
let temp_path = temp_create_path(&state.config, id);
|
||||||
if fs::try_exists(&temp_path).await.unwrap_or(false) {
|
if fs::try_exists(&temp_path).await.unwrap_or(false) {
|
||||||
let _ = fs::remove_file(&temp_path).await;
|
let _ = fs::remove_file(&temp_path).await;
|
||||||
|
|
@ -1103,10 +1097,7 @@ async fn delete_impl(state: &AppState, id: &str) -> Result<()> {
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn load_response(state: &AppState, id: &str) -> Option<VolumeResponse> {
|
async fn load_response(state: &AppState, id: &str) -> Option<VolumeResponse> {
|
||||||
match load_response_required(state, id).await {
|
load_response_required(state, id).await.ok()
|
||||||
Ok(response) => Some(response),
|
|
||||||
Err(_) => None,
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn list_volume_responses(state: &AppState) -> Result<Vec<VolumeResponse>> {
|
async fn list_volume_responses(state: &AppState) -> Result<Vec<VolumeResponse>> {
|
||||||
|
|
@ -1517,8 +1508,10 @@ mod tests {
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn export_probe_host_prefers_loopback_for_wildcard_bind() {
|
fn export_probe_host_prefers_loopback_for_wildcard_bind() {
|
||||||
let mut config = ServerConfig::default();
|
let mut config = ServerConfig {
|
||||||
config.export_bind_addr = "0.0.0.0".to_string();
|
export_bind_addr: "0.0.0.0".to_string(),
|
||||||
|
..ServerConfig::default()
|
||||||
|
};
|
||||||
assert_eq!(export_probe_host(&config), "127.0.0.1");
|
assert_eq!(export_probe_host(&config), "127.0.0.1");
|
||||||
config.export_bind_addr = "10.100.0.11".to_string();
|
config.export_bind_addr = "10.100.0.11".to_string();
|
||||||
assert_eq!(export_probe_host(&config), "10.100.0.11");
|
assert_eq!(export_probe_host(&config), "10.100.0.11");
|
||||||
|
|
@ -1650,6 +1643,13 @@ mod tests {
|
||||||
assert_ne!(next, preferred);
|
assert_ne!(next, preferred);
|
||||||
|
|
||||||
release_export_port(&state, Some(port)).await;
|
release_export_port(&state, Some(port)).await;
|
||||||
|
release_export_port(&state, Some(next)).await;
|
||||||
|
for _ in 0..10 {
|
||||||
|
if port_is_usable(&state.config, &HashSet::new(), preferred).await {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
tokio::time::sleep(std::time::Duration::from_millis(10)).await;
|
||||||
|
}
|
||||||
let reused = reserve_export_port(&state, Some(preferred)).await.unwrap();
|
let reused = reserve_export_port(&state, Some(preferred)).await.unwrap();
|
||||||
assert_eq!(reused, preferred);
|
assert_eq!(reused, preferred);
|
||||||
}
|
}
|
||||||
|
|
@ -1760,7 +1760,7 @@ mod tests {
|
||||||
&state,
|
&state,
|
||||||
"vol-a",
|
"vol-a",
|
||||||
CreateVolumeRequest {
|
CreateVolumeRequest {
|
||||||
size_bytes: 1 * 1024 * 1024,
|
size_bytes: 1024 * 1024,
|
||||||
format: Some(VolumeFileFormat::Raw),
|
format: Some(VolumeFileFormat::Raw),
|
||||||
backing_file: None,
|
backing_file: None,
|
||||||
backing_format: None,
|
backing_format: None,
|
||||||
|
|
@ -2275,10 +2275,11 @@ mod tests {
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn volume_create_api_is_available_in_node_mode() {
|
fn volume_create_api_is_available_in_node_mode() {
|
||||||
let mut config = ServerConfig::default();
|
|
||||||
config.mode = ServerMode::Node;
|
|
||||||
let state = AppState {
|
let state = AppState {
|
||||||
config: Arc::new(config),
|
config: Arc::new(ServerConfig {
|
||||||
|
mode: ServerMode::Node,
|
||||||
|
..ServerConfig::default()
|
||||||
|
}),
|
||||||
metadata_store: MetadataStore::Filesystem,
|
metadata_store: MetadataStore::Filesystem,
|
||||||
volume_guards: Arc::new(Mutex::new(HashMap::new())),
|
volume_guards: Arc::new(Mutex::new(HashMap::new())),
|
||||||
reserved_ports: Arc::new(Mutex::new(HashSet::new())),
|
reserved_ports: Arc::new(Mutex::new(HashSet::new())),
|
||||||
|
|
|
||||||
|
|
@ -1,7 +1,12 @@
|
||||||
|
#![allow(dead_code)]
|
||||||
|
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
|
|
||||||
use anyhow::{Context, Result};
|
use anyhow::{Context, Result};
|
||||||
use rustls::{pki_types::{PrivateKeyDer, ServerName}, ClientConfig, RootCertStore};
|
use rustls::{
|
||||||
|
pki_types::{PrivateKeyDer, ServerName},
|
||||||
|
ClientConfig, RootCertStore,
|
||||||
|
};
|
||||||
use rustls_pemfile::certs;
|
use rustls_pemfile::certs;
|
||||||
use std::fs;
|
use std::fs;
|
||||||
use std::io::BufReader;
|
use std::io::BufReader;
|
||||||
|
|
@ -15,7 +20,7 @@ use crate::discovery::ServiceDiscovery;
|
||||||
|
|
||||||
pub enum MtlsStream {
|
pub enum MtlsStream {
|
||||||
Plain(TcpStream),
|
Plain(TcpStream),
|
||||||
Tls(tokio_rustls::client::TlsStream<TcpStream>),
|
Tls(Box<tokio_rustls::client::TlsStream<TcpStream>>),
|
||||||
}
|
}
|
||||||
|
|
||||||
impl AsyncRead for MtlsStream {
|
impl AsyncRead for MtlsStream {
|
||||||
|
|
@ -26,7 +31,7 @@ impl AsyncRead for MtlsStream {
|
||||||
) -> Poll<std::io::Result<()>> {
|
) -> Poll<std::io::Result<()>> {
|
||||||
match self.get_mut() {
|
match self.get_mut() {
|
||||||
MtlsStream::Plain(stream) => Pin::new(stream).poll_read(cx, buf),
|
MtlsStream::Plain(stream) => Pin::new(stream).poll_read(cx, buf),
|
||||||
MtlsStream::Tls(stream) => Pin::new(stream).poll_read(cx, buf),
|
MtlsStream::Tls(stream) => Pin::new(stream.as_mut()).poll_read(cx, buf),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
@ -39,24 +44,21 @@ impl AsyncWrite for MtlsStream {
|
||||||
) -> Poll<std::io::Result<usize>> {
|
) -> Poll<std::io::Result<usize>> {
|
||||||
match self.get_mut() {
|
match self.get_mut() {
|
||||||
MtlsStream::Plain(stream) => Pin::new(stream).poll_write(cx, data),
|
MtlsStream::Plain(stream) => Pin::new(stream).poll_write(cx, data),
|
||||||
MtlsStream::Tls(stream) => Pin::new(stream).poll_write(cx, data),
|
MtlsStream::Tls(stream) => Pin::new(stream.as_mut()).poll_write(cx, data),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn poll_flush(self: Pin<&mut Self>, cx: &mut TaskContext<'_>) -> Poll<std::io::Result<()>> {
|
fn poll_flush(self: Pin<&mut Self>, cx: &mut TaskContext<'_>) -> Poll<std::io::Result<()>> {
|
||||||
match self.get_mut() {
|
match self.get_mut() {
|
||||||
MtlsStream::Plain(stream) => Pin::new(stream).poll_flush(cx),
|
MtlsStream::Plain(stream) => Pin::new(stream).poll_flush(cx),
|
||||||
MtlsStream::Tls(stream) => Pin::new(stream).poll_flush(cx),
|
MtlsStream::Tls(stream) => Pin::new(stream.as_mut()).poll_flush(cx),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn poll_shutdown(
|
fn poll_shutdown(self: Pin<&mut Self>, cx: &mut TaskContext<'_>) -> Poll<std::io::Result<()>> {
|
||||||
self: Pin<&mut Self>,
|
|
||||||
cx: &mut TaskContext<'_>,
|
|
||||||
) -> Poll<std::io::Result<()>> {
|
|
||||||
match self.get_mut() {
|
match self.get_mut() {
|
||||||
MtlsStream::Plain(stream) => Pin::new(stream).poll_shutdown(cx),
|
MtlsStream::Plain(stream) => Pin::new(stream).poll_shutdown(cx),
|
||||||
MtlsStream::Tls(stream) => Pin::new(stream).poll_shutdown(cx),
|
MtlsStream::Tls(stream) => Pin::new(stream.as_mut()).poll_shutdown(cx),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
@ -109,7 +111,7 @@ impl MtlsClient {
|
||||||
let server_name = ServerName::try_from(service_name.to_string())
|
let server_name = ServerName::try_from(service_name.to_string())
|
||||||
.context("invalid server name for TLS")?;
|
.context("invalid server name for TLS")?;
|
||||||
let tls_stream = connector.connect(server_name, stream).await?;
|
let tls_stream = connector.connect(server_name, stream).await?;
|
||||||
return Ok(MtlsStream::Tls(tls_stream));
|
return Ok(MtlsStream::Tls(Box::new(tls_stream)));
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(MtlsStream::Plain(stream))
|
Ok(MtlsStream::Plain(stream))
|
||||||
|
|
|
||||||
|
|
@ -1,3 +1,5 @@
|
||||||
|
#![allow(dead_code)]
|
||||||
|
|
||||||
use std::collections::HashMap;
|
use std::collections::HashMap;
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
use std::time::{Duration, Instant};
|
use std::time::{Duration, Instant};
|
||||||
|
|
@ -96,7 +98,10 @@ impl ServiceDiscovery {
|
||||||
Ok(instances)
|
Ok(instances)
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn fetch_instances_from_chainfire(&self, service_name: &str) -> Result<Vec<ServiceInstance>> {
|
async fn fetch_instances_from_chainfire(
|
||||||
|
&self,
|
||||||
|
service_name: &str,
|
||||||
|
) -> Result<Vec<ServiceInstance>> {
|
||||||
let mut client = Client::connect(self.chainfire_endpoint.clone()).await?;
|
let mut client = Client::connect(self.chainfire_endpoint.clone()).await?;
|
||||||
let prefix = format!(
|
let prefix = format!(
|
||||||
"{}instances/{}/",
|
"{}instances/{}/",
|
||||||
|
|
@ -136,10 +141,7 @@ impl ServiceDiscovery {
|
||||||
source_service: &str,
|
source_service: &str,
|
||||||
target_service: &str,
|
target_service: &str,
|
||||||
) -> Result<Option<MtlsPolicy>> {
|
) -> Result<Option<MtlsPolicy>> {
|
||||||
let policy_key = format!(
|
let policy_key = format!("{}-{}", source_service, target_service);
|
||||||
"{}-{}",
|
|
||||||
source_service, target_service
|
|
||||||
);
|
|
||||||
|
|
||||||
// キャッシュをチェック
|
// キャッシュをチェック
|
||||||
{
|
{
|
||||||
|
|
@ -153,10 +155,7 @@ impl ServiceDiscovery {
|
||||||
|
|
||||||
// Chainfireから取得
|
// Chainfireから取得
|
||||||
let mut client = Client::connect(self.chainfire_endpoint.clone()).await?;
|
let mut client = Client::connect(self.chainfire_endpoint.clone()).await?;
|
||||||
let prefix = format!(
|
let prefix = format!("{}mtls/policies/", cluster_prefix(&self.cluster_id));
|
||||||
"{}mtls/policies/",
|
|
||||||
cluster_prefix(&self.cluster_id)
|
|
||||||
);
|
|
||||||
let prefix_bytes = prefix.as_bytes();
|
let prefix_bytes = prefix.as_bytes();
|
||||||
|
|
||||||
let (kvs, _) = client.scan_prefix(prefix_bytes, 0).await?;
|
let (kvs, _) = client.scan_prefix(prefix_bytes, 0).await?;
|
||||||
|
|
@ -164,7 +163,9 @@ impl ServiceDiscovery {
|
||||||
for (_, value, _) in kvs {
|
for (_, value, _) in kvs {
|
||||||
match serde_json::from_slice::<MtlsPolicy>(&value) {
|
match serde_json::from_slice::<MtlsPolicy>(&value) {
|
||||||
Ok(policy) => {
|
Ok(policy) => {
|
||||||
if policy.source_service == source_service && policy.target_service == target_service {
|
if policy.source_service == source_service
|
||||||
|
&& policy.target_service == target_service
|
||||||
|
{
|
||||||
// キャッシュに保存
|
// キャッシュに保存
|
||||||
let mut cache = self.policy_cache.write().await;
|
let mut cache = self.policy_cache.write().await;
|
||||||
cache.insert(
|
cache.insert(
|
||||||
|
|
@ -207,7 +208,7 @@ impl ServiceDiscovery {
|
||||||
if inst.state.as_deref().unwrap_or("healthy") == "healthy" {
|
if inst.state.as_deref().unwrap_or("healthy") == "healthy" {
|
||||||
service_map
|
service_map
|
||||||
.entry(inst.service.clone())
|
.entry(inst.service.clone())
|
||||||
.or_insert_with(Vec::new)
|
.or_default()
|
||||||
.push(inst);
|
.push(inst);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -120,10 +120,7 @@ async fn main() -> Result<()> {
|
||||||
if let Some(disc) = &discovery {
|
if let Some(disc) = &discovery {
|
||||||
// デフォルトポリシーを確認(簡易実装)
|
// デフォルトポリシーを確認(簡易実装)
|
||||||
// 実際には、自身のサービス名とターゲットサービス名でポリシーを検索
|
// 実際には、自身のサービス名とターゲットサービス名でポリシーを検索
|
||||||
if let Ok(Some(policy)) = disc
|
if let Ok(Some(policy)) = disc.get_mtls_policy(&cfg.service.name, "default").await {
|
||||||
.get_mtls_policy(&cfg.service.name, "default")
|
|
||||||
.await
|
|
||||||
{
|
|
||||||
if policy.mtls_required.unwrap_or(false) {
|
if policy.mtls_required.unwrap_or(false) {
|
||||||
"mtls"
|
"mtls"
|
||||||
} else {
|
} else {
|
||||||
|
|
@ -168,15 +165,16 @@ async fn main() -> Result<()> {
|
||||||
}
|
}
|
||||||
|
|
||||||
fn load_config(path: &PathBuf) -> Result<Config> {
|
fn load_config(path: &PathBuf) -> Result<Config> {
|
||||||
let contents = fs::read_to_string(path)
|
let contents =
|
||||||
.with_context(|| format!("failed to read {}", path.display()))?;
|
fs::read_to_string(path).with_context(|| format!("failed to read {}", path.display()))?;
|
||||||
let cfg: Config =
|
let cfg: Config =
|
||||||
toml::from_str(&contents).with_context(|| format!("failed to parse {}", path.display()))?;
|
toml::from_str(&contents).with_context(|| format!("failed to parse {}", path.display()))?;
|
||||||
Ok(cfg)
|
Ok(cfg)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn load_certs(path: &str) -> Result<Vec<CertificateDer<'static>>> {
|
fn load_certs(path: &str) -> Result<Vec<CertificateDer<'static>>> {
|
||||||
let file = fs::File::open(path).with_context(|| format!("failed to open cert file {}", path))?;
|
let file =
|
||||||
|
fs::File::open(path).with_context(|| format!("failed to open cert file {}", path))?;
|
||||||
let mut reader = BufReader::new(file);
|
let mut reader = BufReader::new(file);
|
||||||
let certs = certs(&mut reader)
|
let certs = certs(&mut reader)
|
||||||
.collect::<std::result::Result<Vec<_>, _>>()
|
.collect::<std::result::Result<Vec<_>, _>>()
|
||||||
|
|
@ -246,7 +244,9 @@ fn build_server_config(cfg: &Config, mode: &str) -> Result<ServerConfig> {
|
||||||
let client_certs = load_certs(ca_path)?;
|
let client_certs = load_certs(ca_path)?;
|
||||||
let mut roots = rustls::RootCertStore::empty();
|
let mut roots = rustls::RootCertStore::empty();
|
||||||
for c in client_certs {
|
for c in client_certs {
|
||||||
roots.add(c).map_err(|e| anyhow!("adding CA failed: {:?}", e))?;
|
roots
|
||||||
|
.add(c)
|
||||||
|
.map_err(|e| anyhow!("adding CA failed: {:?}", e))?;
|
||||||
}
|
}
|
||||||
let verifier =
|
let verifier =
|
||||||
rustls::server::WebPkiClientVerifier::builder(std::sync::Arc::new(roots)).build()?;
|
rustls::server::WebPkiClientVerifier::builder(std::sync::Arc::new(roots)).build()?;
|
||||||
|
|
@ -266,7 +266,10 @@ fn build_server_config(cfg: &Config, mode: &str) -> Result<ServerConfig> {
|
||||||
|
|
||||||
async fn run_plain_proxy(listen_addr: &str, app_addr: &str) -> Result<()> {
|
async fn run_plain_proxy(listen_addr: &str, app_addr: &str) -> Result<()> {
|
||||||
let listener = TcpListener::bind(listen_addr).await?;
|
let listener = TcpListener::bind(listen_addr).await?;
|
||||||
info!("listening on {} and forwarding to {}", listen_addr, app_addr);
|
info!(
|
||||||
|
"listening on {} and forwarding to {}",
|
||||||
|
listen_addr, app_addr
|
||||||
|
);
|
||||||
|
|
||||||
loop {
|
loop {
|
||||||
let (inbound, peer) = listener.accept().await?;
|
let (inbound, peer) = listener.accept().await?;
|
||||||
|
|
@ -347,8 +350,8 @@ async fn handle_connection(mut inbound: TcpStream, app_addr: &str) -> Result<()>
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
mod tests {
|
mod tests {
|
||||||
use super::*;
|
use super::*;
|
||||||
use rustls::{ClientConfig, RootCertStore};
|
|
||||||
use rustls::pki_types::ServerName;
|
use rustls::pki_types::ServerName;
|
||||||
|
use rustls::{ClientConfig, RootCertStore};
|
||||||
use std::path::{Path, PathBuf};
|
use std::path::{Path, PathBuf};
|
||||||
use std::process::Command;
|
use std::process::Command;
|
||||||
use std::sync::OnceLock;
|
use std::sync::OnceLock;
|
||||||
|
|
@ -361,14 +364,20 @@ mod tests {
|
||||||
.args(args)
|
.args(args)
|
||||||
.status()
|
.status()
|
||||||
.unwrap_or_else(|error| panic!("failed to spawn openssl {:?}: {}", args, error));
|
.unwrap_or_else(|error| panic!("failed to spawn openssl {:?}: {}", args, error));
|
||||||
assert!(status.success(), "openssl {:?} failed with status {}", args, status);
|
assert!(
|
||||||
|
status.success(),
|
||||||
|
"openssl {:?} failed with status {}",
|
||||||
|
args,
|
||||||
|
status
|
||||||
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
fn ensure_test_certs() -> &'static Path {
|
fn ensure_test_certs() -> &'static Path {
|
||||||
static CERT_DIR: OnceLock<PathBuf> = OnceLock::new();
|
static CERT_DIR: OnceLock<PathBuf> = OnceLock::new();
|
||||||
|
|
||||||
CERT_DIR.get_or_init(|| {
|
CERT_DIR.get_or_init(|| {
|
||||||
let dir = std::env::temp_dir().join(format!("mtls-agent-test-certs-{}", std::process::id()));
|
let dir =
|
||||||
|
std::env::temp_dir().join(format!("mtls-agent-test-certs-{}", std::process::id()));
|
||||||
std::fs::create_dir_all(&dir).unwrap();
|
std::fs::create_dir_all(&dir).unwrap();
|
||||||
|
|
||||||
let ca_key = dir.join("ca.key");
|
let ca_key = dir.join("ca.key");
|
||||||
|
|
@ -384,12 +393,7 @@ mod tests {
|
||||||
let client_pem = dir.join("client.pem");
|
let client_pem = dir.join("client.pem");
|
||||||
|
|
||||||
if !ca_pem.exists() {
|
if !ca_pem.exists() {
|
||||||
run_openssl(&[
|
run_openssl(&["genrsa", "-out", ca_key.to_string_lossy().as_ref(), "2048"]);
|
||||||
"genrsa",
|
|
||||||
"-out",
|
|
||||||
ca_key.to_string_lossy().as_ref(),
|
|
||||||
"2048",
|
|
||||||
]);
|
|
||||||
run_openssl(&[
|
run_openssl(&[
|
||||||
"req",
|
"req",
|
||||||
"-x509",
|
"-x509",
|
||||||
|
|
@ -490,10 +494,7 @@ mod tests {
|
||||||
}
|
}
|
||||||
|
|
||||||
fn test_cert_path(name: &str) -> String {
|
fn test_cert_path(name: &str) -> String {
|
||||||
ensure_test_certs()
|
ensure_test_certs().join(name).display().to_string()
|
||||||
.join(name)
|
|
||||||
.display()
|
|
||||||
.to_string()
|
|
||||||
}
|
}
|
||||||
|
|
||||||
fn unused_loopback_addr() -> String {
|
fn unused_loopback_addr() -> String {
|
||||||
|
|
@ -648,7 +649,8 @@ mod tests {
|
||||||
let write_result = tls_stream.write_all(b"blocked").await;
|
let write_result = tls_stream.write_all(b"blocked").await;
|
||||||
if write_result.is_ok() {
|
if write_result.is_ok() {
|
||||||
let mut buf = [0u8; 1];
|
let mut buf = [0u8; 1];
|
||||||
let read_result = tokio::time::timeout(Duration::from_secs(1), tls_stream.read(&mut buf)).await;
|
let read_result =
|
||||||
|
tokio::time::timeout(Duration::from_secs(1), tls_stream.read(&mut buf)).await;
|
||||||
match read_result {
|
match read_result {
|
||||||
Ok(Ok(0)) | Ok(Err(_)) | Err(_) => {}
|
Ok(Ok(0)) | Ok(Err(_)) | Err(_) => {}
|
||||||
Ok(Ok(_)) => panic!("mTLS mode accepted traffic without a client certificate"),
|
Ok(Ok(_)) => panic!("mTLS mode accepted traffic without a client certificate"),
|
||||||
|
|
|
||||||
|
|
@ -1,3 +1,5 @@
|
||||||
|
#![allow(dead_code)]
|
||||||
|
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
use std::time::{Duration, Instant};
|
use std::time::{Duration, Instant};
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -27,21 +27,9 @@
|
||||||
rustfmtComponent = pkgs.rust-bin.stable.latest.rustfmt;
|
rustfmtComponent = pkgs.rust-bin.stable.latest.rustfmt;
|
||||||
clippyComponent = pkgs.rust-bin.stable.latest.clippy;
|
clippyComponent = pkgs.rust-bin.stable.latest.clippy;
|
||||||
|
|
||||||
wsList = [
|
ciInventory = builtins.fromJSON (builtins.readFile ./workspaces.json);
|
||||||
"chainfire"
|
wsList = map (workspace: workspace.name) ciInventory.workspaces;
|
||||||
"flaredb"
|
supportedWorkspaces = pkgs.lib.concatStringsSep ", " wsList;
|
||||||
"iam"
|
|
||||||
"plasmavmc"
|
|
||||||
"prismnet"
|
|
||||||
"flashdns"
|
|
||||||
"fiberlb"
|
|
||||||
"lightningstor"
|
|
||||||
"nightlight"
|
|
||||||
"creditservice"
|
|
||||||
"k8shost"
|
|
||||||
"apigateway"
|
|
||||||
"deployer"
|
|
||||||
];
|
|
||||||
|
|
||||||
gate = pkgs.writeShellApplication {
|
gate = pkgs.writeShellApplication {
|
||||||
name = "photoncloud-gate";
|
name = "photoncloud-gate";
|
||||||
|
|
@ -53,6 +41,7 @@
|
||||||
gnugrep
|
gnugrep
|
||||||
gawk
|
gawk
|
||||||
git
|
git
|
||||||
|
jq
|
||||||
rustToolchain
|
rustToolchain
|
||||||
rustfmtComponent
|
rustfmtComponent
|
||||||
clippyComponent
|
clippyComponent
|
||||||
|
|
@ -61,6 +50,7 @@
|
||||||
llvmPackages.clang
|
llvmPackages.clang
|
||||||
pkg-config
|
pkg-config
|
||||||
openssl
|
openssl
|
||||||
|
qemu
|
||||||
rocksdb
|
rocksdb
|
||||||
];
|
];
|
||||||
|
|
||||||
|
|
@ -82,6 +72,7 @@
|
||||||
Notes:
|
Notes:
|
||||||
- Requires running inside a git checkout (uses `git rev-parse`).
|
- Requires running inside a git checkout (uses `git rev-parse`).
|
||||||
- Logs are written to ./work/ci/<timestamp>/ by default (NOT .cccc/).
|
- Logs are written to ./work/ci/<timestamp>/ by default (NOT .cccc/).
|
||||||
|
- Supported workspaces: ${supportedWorkspaces}
|
||||||
USAGE
|
USAGE
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -143,6 +134,7 @@
|
||||||
pkgs.gnugrep
|
pkgs.gnugrep
|
||||||
pkgs.gawk
|
pkgs.gawk
|
||||||
pkgs.git
|
pkgs.git
|
||||||
|
pkgs.jq
|
||||||
rustToolchain
|
rustToolchain
|
||||||
rustfmtComponent
|
rustfmtComponent
|
||||||
clippyComponent
|
clippyComponent
|
||||||
|
|
@ -150,6 +142,8 @@
|
||||||
pkgs.llvmPackages.libclang
|
pkgs.llvmPackages.libclang
|
||||||
pkgs.llvmPackages.clang
|
pkgs.llvmPackages.clang
|
||||||
pkgs.pkg-config
|
pkgs.pkg-config
|
||||||
|
pkgs.openssl
|
||||||
|
pkgs.qemu
|
||||||
]}"
|
]}"
|
||||||
|
|
||||||
CARGO="${rustToolchain}/bin/cargo"
|
CARGO="${rustToolchain}/bin/cargo"
|
||||||
|
|
@ -166,6 +160,14 @@
|
||||||
export PROTOC="${pkgs.protobuf}/bin/protoc"
|
export PROTOC="${pkgs.protobuf}/bin/protoc"
|
||||||
export ROCKSDB_LIB_DIR="${pkgs.rocksdb}/lib"
|
export ROCKSDB_LIB_DIR="${pkgs.rocksdb}/lib"
|
||||||
|
|
||||||
|
manifest_has_target_kind() {
|
||||||
|
local manifest="$1"; shift
|
||||||
|
local kind="$1"; shift
|
||||||
|
|
||||||
|
"$CARGO" metadata --format-version 1 --no-deps --manifest-path "$manifest" \
|
||||||
|
| jq -e --arg kind "$kind" 'any(.packages[]?.targets[]?.kind[]?; . == $kind)' > /dev/null
|
||||||
|
}
|
||||||
|
|
||||||
run_cmd() {
|
run_cmd() {
|
||||||
local ws="$1"; shift
|
local ws="$1"; shift
|
||||||
local title="$1"; shift
|
local title="$1"; shift
|
||||||
|
|
@ -222,10 +224,22 @@
|
||||||
|
|
||||||
for manifest in "''${manifests[@]}"; do
|
for manifest in "''${manifests[@]}"; do
|
||||||
local crate
|
local crate
|
||||||
|
local ran_unit_tests
|
||||||
crate="$(basename "$(dirname "$manifest")")"
|
crate="$(basename "$(dirname "$manifest")")"
|
||||||
run_shared_crate_cmd "$crate" "$manifest" "fmt" "$CARGO_FMT fmt --manifest-path \"$manifest\" $fmt_rustfmt_args"
|
run_shared_crate_cmd "$crate" "$manifest" "fmt" "$CARGO_FMT fmt --manifest-path \"$manifest\" $fmt_rustfmt_args"
|
||||||
run_shared_crate_cmd "$crate" "$manifest" "clippy" "$CARGO_CLIPPY clippy --manifest-path \"$manifest\" --all-targets -- -D warnings"
|
run_shared_crate_cmd "$crate" "$manifest" "clippy" "$CARGO_CLIPPY clippy --manifest-path \"$manifest\" --all-targets -- -D warnings"
|
||||||
run_shared_crate_cmd "$crate" "$manifest" "test (tier0 unit)" "$CARGO test --manifest-path \"$manifest\" --lib"
|
ran_unit_tests="0"
|
||||||
|
if manifest_has_target_kind "$manifest" "lib"; then
|
||||||
|
run_shared_crate_cmd "$crate" "$manifest" "test (tier0 unit lib)" "$CARGO test --manifest-path \"$manifest\" --lib"
|
||||||
|
ran_unit_tests="1"
|
||||||
|
fi
|
||||||
|
if manifest_has_target_kind "$manifest" "bin"; then
|
||||||
|
run_shared_crate_cmd "$crate" "$manifest" "test (tier0 unit bin)" "$CARGO test --manifest-path \"$manifest\" --bins"
|
||||||
|
ran_unit_tests="1"
|
||||||
|
fi
|
||||||
|
if [[ "$ran_unit_tests" == "0" ]]; then
|
||||||
|
echo "[gate][shared:$crate] WARN: no lib/bin unit test targets"
|
||||||
|
fi
|
||||||
|
|
||||||
if [[ "$tier" == "1" || "$tier" == "2" ]]; then
|
if [[ "$tier" == "1" || "$tier" == "2" ]]; then
|
||||||
run_shared_crate_cmd "$crate" "$manifest" "test (tier1 integration)" "$CARGO test --manifest-path \"$manifest\" --tests"
|
run_shared_crate_cmd "$crate" "$manifest" "test (tier1 integration)" "$CARGO test --manifest-path \"$manifest\" --tests"
|
||||||
|
|
@ -254,13 +268,26 @@
|
||||||
continue
|
continue
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
workspace_manifest="$repo_root/$ws/Cargo.toml"
|
||||||
|
|
||||||
# Format gate: call Nix-provided `cargo-fmt` directly (avoid resolving ~/.cargo/bin/cargo-fmt).
|
# Format gate: call Nix-provided `cargo-fmt` directly (avoid resolving ~/.cargo/bin/cargo-fmt).
|
||||||
#
|
#
|
||||||
# NOTE: Avoid `--all` here; with path-dependencies it may traverse outside the workspace directory.
|
# NOTE: Avoid `--all` here; with path-dependencies it may traverse outside the workspace directory.
|
||||||
run_cmd "$ws" "fmt" "$CARGO_FMT fmt $fmt_rustfmt_args"
|
run_cmd "$ws" "fmt" "$CARGO_FMT fmt $fmt_rustfmt_args"
|
||||||
# Lint gate: call Nix-provided `cargo-clippy` directly (avoid resolving ~/.cargo/bin/cargo-clippy).
|
# Lint gate: call Nix-provided `cargo-clippy` directly (avoid resolving ~/.cargo/bin/cargo-clippy).
|
||||||
run_cmd "$ws" "clippy" "$CARGO_CLIPPY clippy --workspace --all-targets -- -D warnings"
|
run_cmd "$ws" "clippy" "$CARGO_CLIPPY clippy --workspace --all-targets -- -D warnings"
|
||||||
run_cmd "$ws" "test (tier0 unit)" "$CARGO test --workspace --lib"
|
ran_unit_tests="0"
|
||||||
|
if manifest_has_target_kind "$workspace_manifest" "lib"; then
|
||||||
|
run_cmd "$ws" "test (tier0 unit lib)" "$CARGO test --workspace --lib"
|
||||||
|
ran_unit_tests="1"
|
||||||
|
fi
|
||||||
|
if manifest_has_target_kind "$workspace_manifest" "bin"; then
|
||||||
|
run_cmd "$ws" "test (tier0 unit bin)" "$CARGO test --workspace --bins"
|
||||||
|
ran_unit_tests="1"
|
||||||
|
fi
|
||||||
|
if [[ "$ran_unit_tests" == "0" ]]; then
|
||||||
|
echo "[gate][$ws] WARN: no lib/bin unit test targets"
|
||||||
|
fi
|
||||||
|
|
||||||
if [[ "$tier" == "1" || "$tier" == "2" ]]; then
|
if [[ "$tier" == "1" || "$tier" == "2" ]]; then
|
||||||
run_cmd "$ws" "test (tier1 integration)" "$CARGO test --workspace --tests"
|
run_cmd "$ws" "test (tier1 integration)" "$CARGO test --workspace --tests"
|
||||||
|
|
|
||||||
166
nix/ci/workspaces.json
Normal file
166
nix/ci/workspaces.json
Normal file
|
|
@ -0,0 +1,166 @@
|
||||||
|
{
|
||||||
|
"global_paths": [
|
||||||
|
"flake.nix",
|
||||||
|
"flake.lock",
|
||||||
|
"shell.nix",
|
||||||
|
"nix/**",
|
||||||
|
"nix-nos/**",
|
||||||
|
".github/workflows/nix.yml",
|
||||||
|
"Cargo.toml",
|
||||||
|
"Cargo.lock",
|
||||||
|
"crates/**",
|
||||||
|
"baremetal/**",
|
||||||
|
"scripts/**",
|
||||||
|
"specifications/**",
|
||||||
|
"docs/**"
|
||||||
|
],
|
||||||
|
"shared_crates_paths": [
|
||||||
|
"crates/**"
|
||||||
|
],
|
||||||
|
"workspaces": [
|
||||||
|
{
|
||||||
|
"name": "chainfire",
|
||||||
|
"paths": [
|
||||||
|
"chainfire/**"
|
||||||
|
],
|
||||||
|
"build_packages": [
|
||||||
|
"chainfire-server"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "flaredb",
|
||||||
|
"paths": [
|
||||||
|
"flaredb/**"
|
||||||
|
],
|
||||||
|
"build_packages": [
|
||||||
|
"flaredb-server"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "iam",
|
||||||
|
"paths": [
|
||||||
|
"iam/**"
|
||||||
|
],
|
||||||
|
"build_packages": [
|
||||||
|
"iam-server"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "coronafs",
|
||||||
|
"paths": [
|
||||||
|
"coronafs/**"
|
||||||
|
],
|
||||||
|
"build_packages": [
|
||||||
|
"coronafs-server"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "plasmavmc",
|
||||||
|
"paths": [
|
||||||
|
"plasmavmc/**"
|
||||||
|
],
|
||||||
|
"build_packages": [
|
||||||
|
"plasmavmc-server"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "prismnet",
|
||||||
|
"paths": [
|
||||||
|
"prismnet/**"
|
||||||
|
],
|
||||||
|
"build_packages": [
|
||||||
|
"prismnet-server"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "flashdns",
|
||||||
|
"paths": [
|
||||||
|
"flashdns/**"
|
||||||
|
],
|
||||||
|
"build_packages": [
|
||||||
|
"flashdns-server"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "fiberlb",
|
||||||
|
"paths": [
|
||||||
|
"fiberlb/**"
|
||||||
|
],
|
||||||
|
"build_packages": [
|
||||||
|
"fiberlb-server"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "lightningstor",
|
||||||
|
"paths": [
|
||||||
|
"lightningstor/**"
|
||||||
|
],
|
||||||
|
"build_packages": [
|
||||||
|
"lightningstor-server",
|
||||||
|
"lightningstor-node"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "nightlight",
|
||||||
|
"paths": [
|
||||||
|
"nightlight/**"
|
||||||
|
],
|
||||||
|
"build_packages": [
|
||||||
|
"nightlight-server"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "creditservice",
|
||||||
|
"paths": [
|
||||||
|
"creditservice/**"
|
||||||
|
],
|
||||||
|
"build_packages": [
|
||||||
|
"creditservice-server"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "k8shost",
|
||||||
|
"paths": [
|
||||||
|
"k8shost/**"
|
||||||
|
],
|
||||||
|
"build_packages": [
|
||||||
|
"k8shost-server"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "apigateway",
|
||||||
|
"paths": [
|
||||||
|
"apigateway/**"
|
||||||
|
],
|
||||||
|
"build_packages": [
|
||||||
|
"apigateway-server"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "deployer",
|
||||||
|
"paths": [
|
||||||
|
"deployer/**"
|
||||||
|
],
|
||||||
|
"build_packages": [
|
||||||
|
"deployer-server",
|
||||||
|
"deployer-ctl",
|
||||||
|
"node-agent",
|
||||||
|
"nix-agent",
|
||||||
|
"plasmacloud-reconciler",
|
||||||
|
"fleet-scheduler"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "client-common",
|
||||||
|
"paths": [
|
||||||
|
"client-common/**"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "mtls-agent",
|
||||||
|
"paths": [
|
||||||
|
"mtls-agent/**"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
134
scripts/ci_changed_workspaces.py
Normal file
134
scripts/ci_changed_workspaces.py
Normal file
|
|
@ -0,0 +1,134 @@
|
||||||
|
#!/usr/bin/env python3
|
||||||
|
|
||||||
|
import argparse
|
||||||
|
import fnmatch
|
||||||
|
import json
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import Any
|
||||||
|
|
||||||
|
|
||||||
|
def load_changed_files(args: argparse.Namespace) -> list[str]:
    """Collect the list of changed paths from the CLI arguments.

    Paths come from two sources, in order:
      1. every ``--changed-files-file``: a file of newline-separated paths,
      2. every ``--changed-file``: a single path given directly.

    Blank entries and surrounding whitespace are dropped, so the result is a
    clean list of non-empty path strings (duplicates are preserved).
    """
    changed_files: list[str] = []

    for path in args.changed_files_file:
        # Git emits paths as UTF-8 on every platform; be explicit so this
        # CI helper does not depend on the runner's locale encoding.
        for line in Path(path).read_text(encoding="utf-8").splitlines():
            candidate = line.strip()
            if candidate:
                changed_files.append(candidate)

    changed_files.extend(path.strip() for path in args.changed_file if path.strip())
    return changed_files
|
||||||
|
|
||||||
|
|
||||||
|
def matches_any(path: str, patterns: list[str]) -> bool:
    """Return True when *path* matches at least one fnmatch-style pattern."""
    for pattern in patterns:
        # Case-sensitive match: repository paths are case-significant.
        if fnmatch.fnmatchcase(path, pattern):
            return True
    return False
|
||||||
|
|
||||||
|
|
||||||
|
def detect_changes(config: dict[str, Any], changed_files: list[str]) -> dict[str, Any]:
    """Map changed file paths onto the CI workspace inventory.

    A change under ``global_paths`` selects every workspace; otherwise only
    workspaces whose ``paths`` patterns match at least one changed file are
    selected.  Build targets are collected from the selected workspaces'
    ``build_packages`` (deduplicated, inventory order preserved).
    """
    def hits(patterns: list[str]) -> bool:
        # One match between any changed file and any pattern is enough.
        return any(
            fnmatch.fnmatchcase(path, pattern)
            for path in changed_files
            for pattern in patterns
        )

    workspaces: list[dict[str, Any]] = config["workspaces"]
    global_changed = hits(config["global_paths"])
    shared_crates_changed = hits(config["shared_crates_paths"])

    if global_changed:
        # Global triggers (flake, lockfile, workflow, ...) gate everything.
        changed_workspaces = [ws["name"] for ws in workspaces]
    else:
        changed_workspaces = [ws["name"] for ws in workspaces if hits(ws["paths"])]

    selected = set(changed_workspaces)
    build_targets: list[dict[str, str]] = []
    seen: set[tuple[str, str]] = set()

    for ws in workspaces:
        if ws["name"] not in selected:
            continue
        for package in ws.get("build_packages", []):
            key = (ws["name"], package)
            if key not in seen:
                seen.add(key)
                build_targets.append({"workspace": ws["name"], "package": package})

    return {
        "workspaces": changed_workspaces,
        "build_targets": build_targets,
        "any_changed": global_changed or bool(changed_workspaces),
        "build_changed": bool(build_targets),
        "global_changed": global_changed,
        "shared_crates_changed": shared_crates_changed,
    }
|
||||||
|
|
||||||
|
|
||||||
|
def write_github_output(path: Path, result: dict[str, Any]) -> None:
    """Append the detection result to *path* as GitHub Actions step outputs.

    List-valued outputs are serialized as compact single-line JSON (the
    ``GITHUB_OUTPUT`` format is one ``key=value`` pair per line); boolean
    outputs become the lowercase strings ``true``/``false`` that downstream
    ``if:`` expressions compare against.
    """
    def compact(value: Any) -> str:
        # No whitespace: the value must stay on a single output line.
        return json.dumps(value, separators=(",", ":"))

    lines = [
        f'workspaces={compact(result["workspaces"])}',
        f'build_targets={compact(result["build_targets"])}',
        f'any_changed={str(result["any_changed"]).lower()}',
        f'build_changed={str(result["build_changed"]).lower()}',
        f'global_changed={str(result["global_changed"]).lower()}',
        f'shared_crates_changed={str(result["shared_crates_changed"]).lower()}',
    ]

    with path.open("a", encoding="utf-8") as handle:
        handle.writelines(f"{line}\n" for line in lines)
|
||||||
|
|
||||||
|
|
||||||
|
def parse_args(argv: "list[str] | None" = None) -> argparse.Namespace:
    """Parse command-line arguments for the changed-workspace detector.

    Args:
        argv: Argument vector to parse.  Defaults to ``None``, which makes
            argparse read ``sys.argv[1:]`` as before, so existing callers are
            unaffected; passing an explicit list allows testing the parser
            without mutating global state.

    Returns:
        The populated namespace (``config``, ``changed_files_file``,
        ``changed_file``, ``github_output``).
    """
    parser = argparse.ArgumentParser(
        description="Map changed files to PhotonCloud CI workspaces."
    )
    parser.add_argument(
        "--config",
        required=True,
        help="Path to the JSON CI workspace inventory.",
    )
    parser.add_argument(
        "--changed-files-file",
        action="append",
        default=[],
        help="File containing newline-separated changed paths.",
    )
    parser.add_argument(
        "--changed-file",
        action="append",
        default=[],
        help="Single changed path. Can be repeated.",
    )
    parser.add_argument(
        "--github-output",
        help="Optional path to append GitHub Actions step outputs.",
    )
    return parser.parse_args(argv)
|
||||||
|
|
||||||
|
|
||||||
|
def main() -> int:
    """CLI entry point: load the inventory, detect changes, emit results.

    Prints the full detection result as pretty JSON for CI log readability
    and, when ``--github-output`` is given, appends step outputs for GitHub
    Actions.

    Returns:
        Process exit code (always 0; argparse exits non-zero on bad usage).
    """
    args = parse_args()
    # JSON is UTF-8 by specification; don't depend on the runner's locale.
    config = json.loads(Path(args.config).read_text(encoding="utf-8"))
    changed_files = load_changed_files(args)
    result = detect_changes(config, changed_files)

    if args.github_output:
        write_github_output(Path(args.github_output), result)

    print(json.dumps(result, indent=2, sort_keys=True))
    return 0
|
||||||
|
|
||||||
|
|
||||||
|
# Script entry point: SystemExit carries main()'s return code to the shell.
if __name__ == "__main__":
    raise SystemExit(main())
|
||||||
Loading…
Add table
Reference in a new issue