experiment:
  name: "progressive_reasoning_gemma3_1b_cpu_offload"
  base_model: "google/gemma-3-1b-pt"  # Using Gemma 3 1B
  output_dir: "./outputs"
  use_wandb: true
  wandb_project: "matsuo-llm-comp-2025"

model:
  load_in_4bit: true  # Enable 4-bit quantization for QLoRA
  bnb_4bit_compute_dtype: "bfloat16"
  bnb_4bit_use_double_quant: true
  bnb_4bit_quant_type: "nf4"
  device_map: "auto"  # Let accelerate handle device placement
  max_memory:
    0: "5GB"       # Limit GPU memory to 5GB (leave room for CUDA kernels)
    "cpu": "32GB"  # Allow up to 32GB CPU RAM
  offload_folder: "./offload"  # Directory for disk offloading if needed
  gradient_checkpointing: true  # Trade compute for memory
  use_flash_attention_2: false
  use_eager_attention: true
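
# With device_map: "auto" and the max_memory caps above, accelerate keeps as
# many layers as fit within 5GB of VRAM, spills the remainder to CPU RAM (up
# to 32GB), and only then pages weights out to ./offload on disk. The 5GB cap
# assumes a small consumer GPU; adjust it to your hardware.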

progressive_stages:
  - name: "basic_cot"
    description: "Basic Chain-of-Thought reasoning"
    dataset_path: "./data/basic_cot/"
    adapter_config:
      r: 8  # Lower rank for memory efficiency
      lora_alpha: 16
      lora_dropout: 0.1
      target_modules: ["q_proj", "k_proj", "v_proj", "o_proj"]
      init_lora_weights: true
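    # Stage 1 adapts only the attention projections; later stages extend
    # target_modules to the MLP (gate/up/down) as task difficulty grows.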
    training:
      num_epochs: 2
      per_device_batch_size: 2  # Smaller batch size
      gradient_accumulation_steps: 8  # Compensate with gradient accumulation
      learning_rate: 5e-4
      warmup_steps: 50
      max_length: 512  # Shorter sequences for memory
      bf16: true
      max_grad_norm: 1.0
      weight_decay: 0.001
      save_steps: 100
      logging_steps: 10
      dataloader_num_workers: 0  # Disable multiprocessing to save memory
      optim: "paged_adamw_8bit"  # Use 8-bit optimizer
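    # Effective batch size: 2 per device × 8 accumulation steps = 16
    # sequences per optimizer update.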

  - name: "math_reasoning"
    description: "Mathematical reasoning with the OpenR1-Math-220k dataset"
    dataset_path: "open-r1/OpenR1-Math-220k"
    inherit_from: "basic_cot"
    adapter_config:
      r: 16
      lora_alpha: 32
      lora_dropout: 0.1
      target_modules: ["q_proj", "k_proj", "v_proj", "o_proj", "gate_proj", "up_proj", "down_proj"]
      init_lora_weights: true
    training:
      num_epochs: 1
      per_device_batch_size: 1  # Minimal batch size
      gradient_accumulation_steps: 16
      learning_rate: 3e-4
      warmup_steps: 100
      max_length: 1024
      bf16: true
      max_grad_norm: 1.0
      weight_decay: 0.001
      save_steps: 1000
      logging_steps: 100
      optim: "paged_adamw_8bit"
    dataset_config:
      streaming: true
      max_samples: 200000  # Reduced for testing
      split: "train"
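    # inherit_from presumably copies basic_cot's settings, with the keys above
    # acting as overrides. Effective batch size: 1 × 16 = 16 per update.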

  - name: "complex_reasoning"
    description: "Complex multi-step reasoning with Mixture-of-Thoughts"
    dataset_path: "open-r1/Mixture-of-Thoughts"  # HuggingFace dataset
    inherit_from: "math_reasoning"
    adapter_config:
      r: 32
      lora_alpha: 64
      lora_dropout: 0.1
      target_modules: ["q_proj", "k_proj", "v_proj", "o_proj", "gate_proj", "up_proj", "down_proj"]
      init_lora_weights: true
    training:
      num_epochs: 1
      per_device_batch_size: 1
      gradient_accumulation_steps: 32
      learning_rate: 2e-4
      warmup_steps: 200
      max_length: 2048
      bf16: true
      max_grad_norm: 1.0
      weight_decay: 0.001
      optim: "paged_adamw_8bit"
      save_steps: 500
      logging_steps: 50
    dataset_config:
      streaming: true
      max_samples: 300000  # Limited for CPU offload config
      split: "train"
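    # Across stages the adapter rank doubles (8 → 16 → 32), the learning rate
    # decays (5e-4 → 3e-4 → 2e-4), and sequences lengthen (512 → 1024 → 2048);
    # effective batch size here: 1 × 32 = 32 sequences per update.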

evaluation:
  benchmarks:
    - "HLE"
    - "Do-Not-Answer"
  save_results: true
  results_dir: "./outputs/evaluation_results"
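  # HLE = Humanity's Last Exam (frontier knowledge/reasoning benchmark);
  # Do-Not-Answer measures safe-refusal behaviour on harmful prompts.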

# DeepSpeed configuration for advanced CPU offloading (optional)
# Uncomment to use DeepSpeed ZeRO-2 with CPU offload
# deepspeed:
#   zero_optimization:
#     stage: 2
#     offload_optimizer:
#       device: "cpu"
#       pin_memory: true
#     offload_param:  # NOTE: parameter offload only takes effect under ZeRO-3
#       device: "cpu"
#       pin_memory: true
#     overlap_comm: true
#     contiguous_gradients: true
#     sub_group_size: 1e9
#     reduce_bucket_size: 1e6

# FSDP configuration for distributed training (optional)
# Uncomment to use FSDP with CPU offload
# fsdp:
#   sharding_strategy: "FULL_SHARD"
#   cpu_offload: true
#   auto_wrap_policy: "TRANSFORMER_BASED_WRAP"
#   transformer_layer_cls_to_wrap: "Gemma3DecoderLayer"  # Gemma 3's layer class (plain Gemma used GemmaDecoderLayer)
#   min_num_params: 1e6  # only consulted by SIZE_BASED_WRAP; ignored with the policy above
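
# Note: the two optional blocks above would be consumed by the training
# harness rather than by transformers directly; DeepSpeed configs are
# typically passed via TrainingArguments(deepspeed=...) and FSDP settings
# via accelerate launch / fsdp_config. Exact wiring depends on the
# (unshown) training script.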