photoncloud-monorepo/prismnet/crates/prismnet-server/src/main.rs
centra d2149b6249 fix(lightningstor): Fix SigV4 canonicalization for AWS S3 auth
- Replace form_urlencoded with RFC 3986 compliant URI encoding
- Implement aws_uri_encode() matching AWS SigV4 spec exactly
- Unreserved chars (A-Z,a-z,0-9,-,_,.,~) not encoded
- All other chars percent-encoded with uppercase hex
- Preserve slashes in paths, encode in query params
- Normalize empty paths to '/' per AWS spec
- Fix test expectations (body hash, HMAC values)
- Add comprehensive SigV4 signature determinism test

This fixes the canonicalization mismatch that caused signature
validation failures in T047. Auth can now be enabled for production.

Refs: T058.S1
2025-12-12 06:23:46 +09:00

180 lines
5.9 KiB
Rust

//! PrismNET network management server binary
use anyhow::anyhow;
use clap::Parser;
use metrics_exporter_prometheus::PrometheusBuilder;
use prismnet_api::{
port_service_server::PortServiceServer,
security_group_service_server::SecurityGroupServiceServer,
subnet_service_server::SubnetServiceServer, vpc_service_server::VpcServiceServer,
};
use prismnet_server::{
NetworkMetadataStore, OvnClient, PortServiceImpl, SecurityGroupServiceImpl, ServerConfig,
SubnetServiceImpl, VpcServiceImpl,
};
use std::net::SocketAddr;
use std::path::PathBuf;
use std::sync::Arc;
use tonic::transport::{Certificate, Identity, Server, ServerTlsConfig};
use tonic_health::server::health_reporter;
use tracing_subscriber::EnvFilter;
// Command-line arguments parsed by clap's derive API.
// NOTE: the `///` doc comments below are NOT just documentation — clap turns
// them into the `--help` text shown to users, so they are runtime behavior
// and must not be reworded casually.
/// PrismNET network management server
#[derive(Parser, Debug)]
#[command(author, version, about, long_about = None)]
struct Args {
/// Configuration file path
// Defaults to ./prismnet.toml; if the file does not exist at startup,
// main() falls back to ServerConfig::default() rather than failing.
#[arg(short, long, default_value = "prismnet.toml")]
config: PathBuf,
/// gRPC API address (overrides config)
// Parsed into a SocketAddr in main(); an unparseable value aborts startup.
#[arg(long)]
grpc_addr: Option<String>,
/// ChainFire metadata endpoint (optional, uses in-memory if not set)
// When set (here or in config), metadata is persisted via ChainFire;
// otherwise an in-memory store with no persistence is used.
#[arg(long)]
chainfire_endpoint: Option<String>,
/// Log level (overrides config)
// Only used as a fallback filter when RUST_LOG is not set in the environment.
#[arg(short, long)]
log_level: Option<String>,
/// Metrics port for Prometheus scraping
// The exporter binds 0.0.0.0:<port> and serves /metrics over HTTP.
#[arg(long, default_value = "9096")]
metrics_port: u16,
}
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let args = Args::parse();

    // Load configuration from file, falling back to defaults when absent.
    //
    // The tracing subscriber is NOT installed yet at this point (it needs
    // config.log_level below), so any message about a missing config file
    // would be silently dropped if logged here. Record it and emit it after
    // the subscriber is up.
    let mut deferred_startup_log: Option<String> = None;
    let mut config = if args.config.exists() {
        let contents = tokio::fs::read_to_string(&args.config).await?;
        toml::from_str(&contents)?
    } else {
        deferred_startup_log = Some(format!(
            "Config file not found: {}, using defaults",
            args.config.display()
        ));
        ServerConfig::default()
    };

    // Apply command line overrides (CLI wins over file values).
    if let Some(grpc_addr_str) = args.grpc_addr {
        config.grpc_addr = grpc_addr_str.parse()?;
    }
    if let Some(log_level) = args.log_level {
        config.log_level = log_level;
    }
    if let Some(chainfire_endpoint) = args.chainfire_endpoint {
        config.chainfire_endpoint = Some(chainfire_endpoint);
    }

    // Initialize tracing: RUST_LOG takes precedence, otherwise the configured
    // log level is used as the filter.
    tracing_subscriber::fmt()
        .with_env_filter(
            EnvFilter::try_from_default_env()
                .unwrap_or_else(|_| EnvFilter::new(&config.log_level)),
        )
        .init();
    // Now that the subscriber exists, flush any message recorded before init.
    if let Some(msg) = deferred_startup_log {
        tracing::info!("{}", msg);
    }
    tracing::info!("Starting PrismNET server");
    tracing::info!("  gRPC: {}", config.grpc_addr);

    // Initialize Prometheus metrics exporter on 0.0.0.0:<metrics_port>.
    // Propagate failure through the Result return instead of panicking, in
    // line with the rest of this function's error handling.
    let metrics_addr = format!("0.0.0.0:{}", args.metrics_port);
    let builder = PrometheusBuilder::new();
    builder
        .with_http_listener(metrics_addr.parse::<std::net::SocketAddr>()?)
        .install()
        .map_err(|e| anyhow!("Failed to install Prometheus metrics exporter: {}", e))?;
    tracing::info!(
        "Prometheus metrics available at http://{}/metrics",
        metrics_addr
    );

    // Create metadata store: ChainFire-backed when an endpoint is configured,
    // otherwise a purely in-memory store (no persistence across restarts).
    let metadata = if let Some(endpoint) = &config.chainfire_endpoint {
        tracing::info!("  Metadata: ChainFire @ {}", endpoint);
        Arc::new(
            NetworkMetadataStore::new(Some(endpoint.clone()))
                .await
                .map_err(|e| anyhow!("Failed to init metadata store: {}", e))?,
        )
    } else {
        tracing::info!("  Metadata: in-memory (no persistence)");
        Arc::new(NetworkMetadataStore::new_in_memory())
    };

    // Initialize OVN client (backend selected from environment; default: mock).
    let ovn =
        Arc::new(OvnClient::from_env().map_err(|e| anyhow!("Failed to init OVN client: {}", e))?);

    // Create gRPC service implementations sharing the metadata store and OVN
    // client. SubnetService only needs metadata.
    let vpc_service = VpcServiceImpl::new(metadata.clone(), ovn.clone());
    let subnet_service = SubnetServiceImpl::new(metadata.clone());
    let port_service = PortServiceImpl::new(metadata.clone(), ovn.clone());
    let sg_service = SecurityGroupServiceImpl::new(metadata.clone(), ovn.clone());

    // Setup gRPC health checking and mark every service as SERVING.
    let (mut health_reporter, health_service) = health_reporter();
    health_reporter
        .set_serving::<VpcServiceServer<VpcServiceImpl>>()
        .await;
    health_reporter
        .set_serving::<SubnetServiceServer<SubnetServiceImpl>>()
        .await;
    health_reporter
        .set_serving::<PortServiceServer<PortServiceImpl>>()
        .await;
    health_reporter
        .set_serving::<SecurityGroupServiceServer<SecurityGroupServiceImpl>>()
        .await;

    let grpc_addr: SocketAddr = config.grpc_addr;

    // Configure TLS if enabled; optionally require client certificates (mTLS),
    // which needs a CA bundle to validate them against.
    let mut server = Server::builder();
    if let Some(tls_config) = &config.tls {
        tracing::info!("TLS enabled, loading certificates...");
        let cert = tokio::fs::read(&tls_config.cert_file).await?;
        let key = tokio::fs::read(&tls_config.key_file).await?;
        let server_identity = Identity::from_pem(cert, key);
        let tls = if tls_config.require_client_cert {
            tracing::info!("mTLS enabled");
            let ca_cert = tokio::fs::read(
                tls_config
                    .ca_file
                    .as_ref()
                    .ok_or("ca_file required for mTLS")?,
            )
            .await?;
            let ca = Certificate::from_pem(ca_cert);
            ServerTlsConfig::new()
                .identity(server_identity)
                .client_ca_root(ca)
        } else {
            ServerTlsConfig::new().identity(server_identity)
        };
        server = server.tls_config(tls)?;
    }

    // Start the gRPC server; this future completes only on shutdown or error.
    tracing::info!("gRPC server listening on {}", grpc_addr);
    server
        .add_service(health_service)
        .add_service(VpcServiceServer::new(vpc_service))
        .add_service(SubnetServiceServer::new(subnet_service))
        .add_service(PortServiceServer::new(port_service))
        .add_service(SecurityGroupServiceServer::new(sg_service))
        .serve(grpc_addr)
        .await?;
    Ok(())
}