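# Progressive reasoning fine-tuning of google/gemma-2-2b-it on 8 GPUs with
# DeepSpeed ZeRO-2, in three LoRA stages (basic CoT -> math -> complex reasoning).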
experiment:
  name: "progressive_reasoning_8gpu_deepspeed"
  base_model: "google/gemma-2-2b-it"
  output_dir: "./outputs"
  use_wandb: true
  wandb_project: "matsuo-llm-comp-2025"

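# Note: Gemma 2 applies attention logit soft-capping, and some Transformers
# releases recommend eager attention when training it; if use_flash_attention_2
# causes problems here, switching to use_eager_attention is the likely fallback.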
model:
  load_in_4bit: false
  device_map: null  # Let DeepSpeed handle device placement
  gradient_checkpointing: true
  use_flash_attention_2: true
  use_eager_attention: false

# DeepSpeed Configuration
deepspeed:
  zero_optimization:
    stage: 2  # ZeRO Stage 2 (partition optimizer states and gradients)
    allgather_partitions: true
    allgather_bucket_size: 200000000
    overlap_comm: true
    reduce_scatter: true
    reduce_bucket_size: 200000000
    contiguous_gradients: true
    cpu_offload: false  # Keep on GPU for speed

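  # Note: recent DeepSpeed releases configure offloading via
  # zero_optimization.offload_optimizer instead of the legacy cpu_offload
  # boolean above; use whichever form the installed DeepSpeed version accepts.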
  optimizer:
    type: "AdamW"
    params:
      lr: 3e-4
      betas: [0.9, 0.999]
      eps: 1e-8
      weight_decay: 0.001

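  # WarmupLR ramps the learning rate from warmup_min_lr to warmup_max_lr
  # (matching the AdamW lr of 3e-4) over warmup_num_steps, then holds it constant.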
  scheduler:
    type: "WarmupLR"
    params:
      warmup_min_lr: 0
      warmup_max_lr: 3e-4
      warmup_num_steps: 200

  fp16:
    enabled: false

  bf16:
    enabled: true

  gradient_clipping: 1.0

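  # Batch-size check (8 data-parallel GPUs, per the experiment name):
  # train_batch_size = train_micro_batch_size_per_gpu * gradient_accumulation * world_size
  #                  = 64 * 1 * 8 = 512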
  train_batch_size: 512  # Total batch size across all GPUs
  train_micro_batch_size_per_gpu: 64  # Per-GPU batch size

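# Progressive LoRA curriculum: adapter rank doubles each stage (r: 64 -> 128 -> 256,
# with lora_alpha kept at 2*r), context length doubles (2048 -> 4096 -> 8192), and
# the learning rate decreases (5e-4 -> 3e-4 -> 2e-4). Stage 1 adapts only the
# attention projections; later stages also adapt the MLP (gate/up/down) projections.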
progressive_stages:
  - name: "basic_cot"
    description: "Basic Chain-of-Thought reasoning"
    dataset_path: "./data/basic_cot/"
    adapter_config:
      r: 64
      lora_alpha: 128
      lora_dropout: 0.1
      target_modules: ["q_proj", "k_proj", "v_proj", "o_proj"]
      init_lora_weights: true
    training:
      num_epochs: 2
      per_device_batch_size: 64  # Large batch with DeepSpeed
      gradient_accumulation_steps: 1
      learning_rate: 5e-4
      warmup_steps: 100
      max_length: 2048
      bf16: true
      max_grad_norm: 1.0
      weight_decay: 0.001
      save_steps: 50
      logging_steps: 10
      dataloader_num_workers: 8

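  # The two later stages set inherit_from and only override what changes;
  # unlisted keys presumably fall back to the referenced stage's values,
  # depending on how the training script resolves inheritance.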
- name: "math_reasoning"
|
|
description: "Mathematical reasoning with OpenR1-Math-220k dataset"
|
|
dataset_path: "open-r1/OpenR1-Math-220k"
|
|
inherit_from: "basic_cot"
|
|
adapter_config:
|
|
r: 128
|
|
lora_alpha: 256
|
|
lora_dropout: 0.1
|
|
target_modules: ["q_proj", "k_proj", "v_proj", "o_proj", "gate_proj", "up_proj", "down_proj"]
|
|
init_lora_weights: true
|
|
training:
|
|
num_epochs: 1
|
|
per_device_batch_size: 32
|
|
gradient_accumulation_steps: 1
|
|
learning_rate: 3e-4
|
|
warmup_steps: 200
|
|
max_length: 4096
|
|
bf16: true
|
|
max_grad_norm: 1.0
|
|
weight_decay: 0.001
|
|
save_steps: 100
|
|
logging_steps: 20
|
|
dataloader_num_workers: 8
|
|
dataset_config:
|
|
streaming: true
|
|
max_samples: 200000
|
|
split: "train"
|
|
|
|
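  # Longest-context stage: 16 sequences per GPU x 2 accumulation steps
  # = 32 sequences per optimizer step per GPU at max_length 8192.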
- name: "complex_reasoning"
|
|
description: "Complex multi-step reasoning with Mixture-of-Thoughts"
|
|
dataset_path: "open-r1/Mixture-of-Thoughts"
|
|
inherit_from: "math_reasoning"
|
|
adapter_config:
|
|
r: 256
|
|
lora_alpha: 512
|
|
lora_dropout: 0.1
|
|
target_modules: ["q_proj", "k_proj", "v_proj", "o_proj", "gate_proj", "up_proj", "down_proj"]
|
|
init_lora_weights: true
|
|
training:
|
|
num_epochs: 1
|
|
per_device_batch_size: 16
|
|
gradient_accumulation_steps: 2
|
|
learning_rate: 2e-4
|
|
warmup_steps: 300
|
|
max_length: 8192
|
|
bf16: true
|
|
max_grad_norm: 1.0
|
|
weight_decay: 0.001
|
|
save_steps: 200
|
|
logging_steps: 50
|
|
dataloader_num_workers: 8
|
|
dataset_config:
|
|
streaming: true
|
|
max_samples: 100000
|
|
split: "train"
|
|
|
|
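# Post-training evaluation; benchmark names are presumably the identifiers
# understood by the evaluation harness (HLE for reasoning, Do-Not-Answer for safety).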
evaluation:
  benchmarks:
    - "HLE"
    - "Do-Not-Answer"
  save_results: true
  results_dir: "./outputs/evaluation_results"