# photoncloud-monorepo/baremetal/image-builder/examples/hardware-specific.nix
#
# Provenance (commit metadata from the repository browser, kept as a comment
# so the file remains valid Nix):
#   centra 5c6eb04a46 T036: Add VM cluster deployment configs for nixos-anywhere
#   - netboot-base.nix with SSH key auth
#   - Launch scripts for node01/02/03
#   - Node configuration.nix and disko.nix
#   - Nix modules for first-boot automation
#   Generated with Claude Code (https://claude.com/claude-code)
#   Co-Authored-By: Claude Sonnet 4.5 <noreply@anthropic.com>
#   2025-12-11 09:59:19 +09:00
{ config, pkgs, lib, ... }:
# ==============================================================================
# HARDWARE-SPECIFIC NETBOOT CONFIGURATION EXAMPLE
# ==============================================================================
# This example demonstrates hardware-specific configurations for common
# bare-metal server platforms. Use this as a template for your specific hardware.
#
# Common Server Platforms:
# - Dell PowerEdge (R640, R650, R750)
# - HP ProLiant (DL360, DL380, DL560)
# - Supermicro (X11, X12 series)
# - Generic whitebox servers
#
# Usage:
# 1. Copy relevant sections to your netboot configuration
# 2. Adjust based on your specific hardware
# 3. Test boot on target hardware
# ==============================================================================
{
imports = [
../netboot-base.nix
../../modules
];
# ============================================================================
# DELL POWEREDGE R640 CONFIGURATION
# ============================================================================
# Uncomment this section for Dell PowerEdge R640 servers
/*
# Hardware-specific kernel modules
boot.initrd.availableKernelModules = [
# Dell PERC RAID controller
"megaraid_sas"
# Intel X710 10GbE NIC
"i40e"
# NVMe drives
"nvme"
# Standard modules
"ahci"
"xhci_pci"
"usb_storage"
"sd_mod"
"sr_mod"
];
boot.kernelModules = [
"kvm-intel" # Intel VT-x
"ipmi_devintf" # IPMI interface
"ipmi_si" # IPMI system interface
];
# Dell-specific firmware
hardware.enableRedistributableFirmware = true;
hardware.cpu.intel.updateMicrocode = true;
# Network interface naming
# R640 typically has:
# - eno1, eno2: Onboard 1GbE (Intel i350)
# - ens1f0, ens1f1: PCIe 10GbE (Intel X710)
networking.interfaces = {
eno1 = { useDHCP = true; };
ens1f0 = {
useDHCP = false;
mtu = 9000; # Jumbo frames for 10GbE
};
};
# iDRAC/IPMI configuration
services.freeipmi.enable = true;
# Dell OpenManage tools (optional)
environment.systemPackages = with pkgs; [
ipmitool
freeipmi
];
*/
# ============================================================================
# HP PROLIANT DL360 GEN10 CONFIGURATION
# ============================================================================
# Uncomment this section for HP ProLiant DL360 Gen10 servers
/*
boot.initrd.availableKernelModules = [
# HP Smart Array controller
"hpsa"
# Broadcom/Intel NIC
"tg3"
"bnx2x"
"i40e"
# NVMe
"nvme"
# Standard
"ahci"
"xhci_pci"
"usb_storage"
"sd_mod"
];
boot.kernelModules = [
"kvm-intel"
"ipmi_devintf"
"ipmi_si"
];
hardware.enableRedistributableFirmware = true;
hardware.cpu.intel.updateMicrocode = true;
# HP-specific tools
environment.systemPackages = with pkgs; [
ipmitool
smartmontools
];
# iLO/IPMI
services.freeipmi.enable = true;
*/
# ============================================================================
# SUPERMICRO X11 SERIES CONFIGURATION
# ============================================================================
# Uncomment this section for Supermicro X11 series servers
/*
boot.initrd.availableKernelModules = [
# LSI/Broadcom RAID
"megaraid_sas"
"mpt3sas"
# Intel NIC (common on Supermicro)
"igb"
"ixgbe"
"i40e"
# NVMe
"nvme"
# Standard
"ahci"
"xhci_pci"
"ehci_pci"
"usb_storage"
"sd_mod"
];
boot.kernelModules = [
"kvm-intel" # Or kvm-amd for AMD CPUs
"ipmi_devintf"
"ipmi_si"
];
hardware.enableRedistributableFirmware = true;
# CPU-specific (adjust based on your CPU)
hardware.cpu.intel.updateMicrocode = true;
# hardware.cpu.amd.updateMicrocode = true; # For AMD CPUs
# IPMI configuration
services.freeipmi.enable = true;
environment.systemPackages = with pkgs; [
ipmitool
dmidecode
smartmontools
];
*/
# ============================================================================
# GENERIC HIGH-PERFORMANCE SERVER
# ============================================================================
# This configuration works for most modern x86_64 servers.
#
# NOTE(review): the original revision defined `boot.kernelParams` four times
# and `boot.kernelModules` twice inside this single attribute set, which is a
# Nix evaluation error ("attribute '...' already defined"). Each option is now
# defined exactly once, with the pieces merged below.

boot.initrd.availableKernelModules = [
  # SATA/AHCI
  "ahci"
  "ata_piix"
  # NVMe
  "nvme"
  # USB
  "xhci_pci"
  "ehci_pci"
  "usb_storage"
  "usbhid"
  # SCSI/SAS
  "sd_mod"
  "sr_mod"
  # Common RAID controllers
  "megaraid_sas" # LSI MegaRAID
  "mpt3sas" # LSI SAS3
  "hpsa" # HP Smart Array
  "aacraid" # Adaptec
  # Network
  "e1000e" # Intel GbE
  "igb" # Intel GbE
  "ixgbe" # Intel 10GbE
  "i40e" # Intel 10/25/40GbE
  "bnx2x" # Broadcom 10GbE
  "mlx4_core" # Mellanox ConnectX-3
  "mlx5_core" # Mellanox ConnectX-4/5
];

# Virtualization + IPMI/BMC modules (single merged definition; the IPMI
# modules previously lived in a second, conflicting definition below).
boot.kernelModules = [
  "kvm-intel" # Intel VT-x
  "kvm-amd" # AMD-V
  "ipmi_devintf" # IPMI character-device interface (used by ipmitool)
  "ipmi_si" # IPMI system interface
];

# Enable all redistributable firmware
hardware.enableRedistributableFirmware = true;

# CPU microcode updates (both enabled; only the matching vendor's microcode
# is applied at boot)
hardware.cpu.intel.updateMicrocode = true;
hardware.cpu.amd.updateMicrocode = true;

# ============================================================================
# NETWORK INTERFACE CONFIGURATION
# ============================================================================
# Predictable interface names are disabled in the base config, so interfaces
# appear as eth0, eth1, etc. For specific hardware you may prefer biosdevname
# or systemd naming.

# Example: bonded pair for redundancy (LACP)
/*
networking.bonds.bond0 = {
  interfaces = [ "eth0" "eth1" ];
  driverOptions = {
    mode = "802.3ad"; # LACP
    xmit_hash_policy = "layer3+4";
    lacp_rate = "fast";
    miimon = "100";
  };
};
networking.interfaces.bond0 = {
  useDHCP = true;
  mtu = 9000;
};
*/

# Example: VLAN configuration
/*
networking.vlans = {
  vlan100 = {
    id = 100;
    interface = "eth0";
  };
  vlan200 = {
    id = 200;
    interface = "eth0";
  };
};
networking.interfaces.vlan100 = {
  useDHCP = false;
  ipv4.addresses = [{
    address = "10.100.1.10";
    prefixLength = 24;
  }];
};
*/

# ============================================================================
# STORAGE CONFIGURATION
# ============================================================================
# Software RAID (mdadm)
boot.swraid.enable = true;
# NOTE(review): NixOS emits a warning (mdmonitor) unless mdadmConf also sets
# MAILADDR or PROGRAM; add e.g. "MAILADDR root" if you see that warning.
boot.swraid.mdadmConf = ''
ARRAY /dev/md0 level=raid1 num-devices=2
'';

# LVM support
services.lvm.enable = true;

# ZFS support (if needed)
# boot.supportedFilesystems = [ "zfs" ];
# boot.zfs.forceImportRoot = false;

# ============================================================================
# KERNEL COMMAND LINE
# ============================================================================
# Single merged definition (was four conflicting ones). The vendor-specific
# pieces are gated with lib.optionals on the microcode switches above, since
# per-definition lib.mkIf cannot be combined by plain list concatenation.
boot.kernelParams =
  # Intel-specific
  lib.optionals config.hardware.cpu.intel.updateMicrocode [
    "intel_pstate=active" # Use the Intel P-State frequency driver
    "intel_iommu=on" # Enable IOMMU for VT-d
  ]
  # AMD-specific
  ++ lib.optionals config.hardware.cpu.amd.updateMicrocode [
    "amd_iommu=on" # Enable IOMMU for AMD-Vi
  ]
  # Hugepages for high-performance applications (DPDK, databases)
  ++ [
    "hugepagesz=2M"
    "hugepages=1024" # 2GB of 2MB hugepages
    "default_hugepagesz=2M"
  ]
  # Latency tuning -- prioritizes performance over power efficiency.
  # Remove or adjust for non-latency-sensitive workloads.
  ++ [
    "processor.max_cstate=1" # Limit C-states
    "intel_idle.max_cstate=1" # Limit idle states
    "idle=poll" # Aggressive polling (high power usage!)
  ];

# ============================================================================
# MEMORY CONFIGURATION
# ============================================================================
boot.kernel.sysctl = {
  "vm.nr_hugepages" = 1024;
  # "vm.nr_overcommit_hugepages" = 512; # Additional hugepages if needed
};

# ============================================================================
# IPMI/BMC CONFIGURATION
# ============================================================================
# (The ipmi_devintf/ipmi_si kernel modules are listed in boot.kernelModules
# above.)
# NOTE(review): confirm `services.freeipmi` exists in your nixpkgs release
# before relying on it.
services.freeipmi.enable = true;
environment.systemPackages = with pkgs; [
  ipmitool # IPMI command-line tool
  freeipmi # Alternative IPMI tools
];
# Example: configure the BMC network manually over IPMI:
#   ipmitool lan set 1 ipaddr 10.0.100.10
#   ipmitool lan set 1 netmask 255.255.255.0
#   ipmitool lan set 1 defgw ipaddr 10.0.100.1

# ============================================================================
# PERFORMANCE TUNING
# ============================================================================
# Pin the CPU frequency governor to maximum clocks (pairs with the C-state
# limits on the kernel command line above).
powerManagement.cpuFreqGovernor = "performance";
# ============================================================================
# HARDWARE MONITORING
# ============================================================================
# Temperature/fan/voltage sensors (uncomment if required):
# services.lm_sensors.enable = true;

# S.M.A.R.T. disk health monitoring; autodetect scans all attached drives
# at daemon startup instead of requiring an explicit device list.
services.smartd.enable = true;
services.smartd.autodetect = true;

# ============================================================================
# GPU CONFIGURATION (if applicable)
# ============================================================================
# NVIDIA GPU example:
/*
hardware.nvidia = {
  modesetting.enable = true;
  powerManagement.enable = false;
  powerManagement.finegrained = false;
  open = false; # Use proprietary driver
  nvidiaSettings = false; # No GUI needed
};
services.xserver.videoDrivers = [ "nvidia" ];
# NVIDIA Container Runtime (for GPU containers)
hardware.nvidia-container-toolkit.enable = true;
environment.systemPackages = with pkgs; [
  cudaPackages.cudatoolkit
  nvidia-docker
];
*/

# AMD GPU example:
/*
boot.initrd.kernelModules = [ "amdgpu" ];
services.xserver.videoDrivers = [ "amdgpu" ];
*/

# ============================================================================
# INFINIBAND/RDMA (for high-performance networking)
# ============================================================================
/*
boot.kernelModules = [
  "ib_core"
  "ib_uverbs"
  "ib_umad"
  "rdma_cm"
  "rdma_ucm"
  "mlx5_core"
  "mlx5_ib"
];
environment.systemPackages = with pkgs; [
  rdma-core
  libfabric
  # perftest # RDMA performance tests
];
# Configure IPoIB (IP over InfiniBand)
networking.interfaces.ib0 = {
  useDHCP = false;
  ipv4.addresses = [{
    address = "192.168.100.10";
    prefixLength = 24;
  }];
  mtu = 65520; # Max for IPoIB connected mode
};
*/

# ============================================================================
# SYSTEM STATE VERSION
# ============================================================================
# Pin to the NixOS release this config was first deployed with; do not bump
# on upgrades.
system.stateVersion = "24.11";
}