experiment:
  name: "progressive_reasoning_70b"
  base_model: "meta-llama/Llama-3.2-70B"  # 70B model - requires significant resources
  output_dir: "./outputs"
  use_wandb: true
  wandb_project: "matsuo-llm-comp-2025"

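# The quantization keys below presumably map onto a Hugging Face BitsAndBytesConfig
# (a QLoRA-style setup): weights are loaded as 4-bit NF4 with double quantization,
# while forward/backward compute runs in bfloat16. Gradient checkpointing and
# FlashAttention-2 further reduce activation memory for long sequences.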
model:
  load_in_4bit: true
  bnb_4bit_compute_dtype: "bfloat16"
  bnb_4bit_use_double_quant: true
  bnb_4bit_quant_type: "nf4"
  device_map: "auto"
  gradient_checkpointing: true
  use_flash_attention_2: true

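# Each stage below presumably attaches a PEFT-style LoRA adapter on top of the frozen
# 4-bit base model. The rank doubles per stage (64 -> 128 -> 256) with lora_alpha kept
# at 2x the rank, so the effective LoRA scaling factor alpha/r stays constant at 2.
# Later stages also extend target_modules from the attention projections to the MLP
# projections (gate/up/down) and raise max_length as the reasoning tasks get longer.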
progressive_stages:
  - name: "basic_cot"
    description: "Basic Chain-of-Thought reasoning"
    dataset_path: "./data/basic_cot/"
    adapter_config:
      r: 64  # Even higher rank for 70B models
      lora_alpha: 128
      lora_dropout: 0.05
      target_modules: ["q_proj", "v_proj", "k_proj", "o_proj"]
      init_lora_weights: true
    training:
      num_epochs: 1
      per_device_batch_size: 1
      gradient_accumulation_steps: 64
      learning_rate: 5.0e-5  # Lower learning rate for stability; dotted form so YAML 1.1 loaders (e.g. PyYAML) parse a float
      warmup_steps: 200
      max_length: 2048
      bf16: true
      max_grad_norm: 0.3
      weight_decay: 0.001
      dataloader_num_workers: 2

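  # The two stages below reuse the previous stage's settings via inherit_from; keys not
  # repeated here (e.g. weight_decay from basic_cot) presumably carry over, depending on
  # how the training script resolves inheritance.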
- name: "math_reasoning"
|
|
description: "Mathematical reasoning with think tags"
|
|
dataset_path: "./data/math_reasoning/"
|
|
inherit_from: "basic_cot"
|
|
adapter_config:
|
|
r: 128
|
|
lora_alpha: 256
|
|
lora_dropout: 0.05
|
|
target_modules: ["q_proj", "v_proj", "k_proj", "o_proj", "gate_proj", "up_proj", "down_proj"]
|
|
init_lora_weights: true
|
|
training:
|
|
num_epochs: 1
|
|
per_device_batch_size: 1
|
|
gradient_accumulation_steps: 128
|
|
learning_rate: 3e-5
|
|
warmup_steps: 300
|
|
max_length: 4096
|
|
bf16: true
|
|
max_grad_norm: 0.3
|
|
dataloader_num_workers: 2
|
|
|
|
- name: "complex_reasoning"
|
|
description: "Complex multi-step reasoning"
|
|
dataset_path: "./data/complex_reasoning/"
|
|
inherit_from: "math_reasoning"
|
|
adapter_config:
|
|
r: 256 # Maximum rank for 70B models
|
|
lora_alpha: 512
|
|
lora_dropout: 0.05
|
|
target_modules: ["q_proj", "v_proj", "k_proj", "o_proj", "gate_proj", "up_proj", "down_proj"]
|
|
init_lora_weights: true
|
|
training:
|
|
num_epochs: 1
|
|
per_device_batch_size: 1
|
|
gradient_accumulation_steps: 256
|
|
learning_rate: 2e-5
|
|
warmup_steps: 500
|
|
max_length: 8192
|
|
bf16: true
|
|
max_grad_norm: 0.3
|
|
dataloader_num_workers: 2
|
|
|
|
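# Across the three stages above, the effective batch size per optimizer step is
# per_device_batch_size x gradient_accumulation_steps x data-parallel ranks: on a single
# rank that is 1 x 64 = 64 samples for basic_cot, 128 for math_reasoning and 256 for
# complex_reasoning, while the per-device micro-batch stays at 1 so the growing
# max_length (2048 -> 4096 -> 8192) still fits in memory.
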
evaluation:
  benchmarks:
    - "HLE"
    - "Do-Not-Answer"
  save_results: true
  results_dir: "./outputs/evaluation_results"

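# The keys below follow the Hugging Face TrainingArguments naming for FSDP (presumably
# what the training script builds): FULL_SHARD shards parameters, gradients and optimizer
# states across GPUs, with auto-wrapping at LlamaDecoderLayer granularity.
# use_reentrant: false selects the non-reentrant gradient-checkpointing implementation,
# the variant recommended for recent PyTorch releases.
# Note: FSDP auto-wrap normally uses either a transformer-layer policy or a
# min_num_params size policy; with both given here, which one applies depends on the
# training script.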
# Additional settings for 70B models
optimization:
  gradient_checkpointing: true
  gradient_checkpointing_kwargs:
    use_reentrant: false
  ddp_find_unused_parameters: false
  # Multi-GPU settings
  fsdp: "full_shard auto_wrap"
  fsdp_transformer_layer_cls_to_wrap: "LlamaDecoderLayer"
  fsdp_min_num_params: 1000000
  fsdp_config:
    min_num_params: 1000000
    sharding_strategy: "FULL_SHARD"
    cpu_offload: false