# progressive-llm/config/training_config_public.yaml

experiment:
  name: "progressive_reasoning_public_model"
  base_model: "microsoft/DialoGPT-medium"  # Public model, no authentication needed
  output_dir: "./outputs"
  use_wandb: false
  wandb_project: "matsuo-llm-comp-2025"

model:
  load_in_4bit: false  # DialoGPT is smaller, quantization not needed
  bnb_4bit_compute_dtype: "bfloat16"
  bnb_4bit_use_double_quant: true
  device_map: "auto"
  gradient_checkpointing: false

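# Across the three stages below, LoRA rank doubles (16 -> 32 -> 64) with
# lora_alpha = 2 * r, while the effective batch size stays at 16
# (per_device_batch_size * gradient_accumulation_steps) as max_length grows.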
progressive_stages:
  - name: "basic_cot"
    description: "Basic Chain-of-Thought reasoning"
    dataset_path: "./data/basic_cot/"
    adapter_config:
      r: 16
      lora_alpha: 32
      lora_dropout: 0.1
      target_modules: ["c_attn", "c_proj"]  # GPT-2 style attention modules
      init_lora_weights: true
    training:
      num_epochs: 2
      per_device_batch_size: 4
      gradient_accumulation_steps: 4
      learning_rate: 2e-4
      warmup_steps: 100
      max_length: 1024
      fp16: false
      bf16: false  # Use fp32 for smaller models
      max_grad_norm: 1.0
      weight_decay: 0.001
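
  # The two later stages set inherit_from; in this progressive setup that is taken
  # to mean the stage starts from the named stage's trained adapter rather than from
  # scratch (an assumption about the training code, not enforced by this file).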
- name: "math_reasoning"
description: "Mathematical reasoning with think tags"
dataset_path: "./data/math_reasoning/"
inherit_from: "basic_cot"
adapter_config:
r: 32
lora_alpha: 64
lora_dropout: 0.1
target_modules: ["c_attn", "c_proj"]
init_lora_weights: true
training:
num_epochs: 3
per_device_batch_size: 2
gradient_accumulation_steps: 8
learning_rate: 1e-4
warmup_steps: 200
max_length: 2048
bf16: false
max_grad_norm: 1.0
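
  # Caution: DialoGPT-medium is GPT-2-based with a 1024-token position limit, so the
  # max_length of 2048 above and 4096 below assume the training code truncates inputs
  # or extends the position embeddings.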
- name: "complex_reasoning"
description: "Complex multi-step reasoning"
dataset_path: "./data/complex_reasoning/"
inherit_from: "math_reasoning"
adapter_config:
r: 64
lora_alpha: 128
lora_dropout: 0.1
target_modules: ["c_attn", "c_proj"]
init_lora_weights: true
training:
num_epochs: 2
per_device_batch_size: 1
gradient_accumulation_steps: 16
learning_rate: 5e-5
warmup_steps: 300
max_length: 4096
bf16: false
max_grad_norm: 1.0
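
# "HLE" is presumably Humanity's Last Exam and "Do-Not-Answer" the safety-refusal
# benchmark; both names are resolved by the evaluation harness, not by this file.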
evaluation:
  benchmarks:
    - "HLE"
    - "Do-Not-Answer"
  save_results: true
  results_dir: "./outputs/evaluation_results"