experiment:
  name: "progressive_reasoning_experiment"
  base_model: "microsoft/DialoGPT-small"  # Lightweight model for testing
  output_dir: "./outputs"
  use_wandb: false
  wandb_project: "matsuo-llm-comp-2025"
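  # wandb_project is presumably ignored while use_wandb is false; it is
  # kept so Weights & Biases logging can be switched on without other changes.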

model:
  load_in_4bit: false  # Disable quantization for small model
  bnb_4bit_compute_dtype: "bfloat16"
  bnb_4bit_use_double_quant: true
  device_map: "auto"
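  # Assumption: the loader forwards the bnb_4bit_* options to a transformers
  # BitsAndBytesConfig, so they take effect only when load_in_4bit is true.
  # They are kept here so 4-bit QLoRA can be re-enabled for a larger base model.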

progressive_stages:
  - name: "basic_cot"
    description: "Basic Chain-of-Thought reasoning"
    dataset_path: "./data/basic_cot/"
    adapter_config:
      r: 8
      lora_alpha: 16
      lora_dropout: 0.1
      target_modules: ["c_attn", "c_proj"]
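      # "c_attn" and "c_proj" are the attention projection layers of the
      # GPT-2 architecture underlying DialoGPT; the LoRA adapters (rank 8,
      # alpha 16) are injected into these modules.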
    training:
      num_epochs: 2
      per_device_batch_size: 8  # Increase batch size for small model
      gradient_accumulation_steps: 2  # Reduce accumulation steps
      learning_rate: 5.0e-4  # Higher learning rate for faster training
      warmup_steps: 50
      max_length: 1024  # Shorter sequences
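      # Effective batch size: 8 per-device * 2 accumulation steps = 16
      # sequences per optimizer update on each device.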

evaluation:
  benchmarks:
    - "HLE"  # Humanity's Last Exam
    - "Do-Not-Answer"
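    # Do-Not-Answer is a safety benchmark of prompts that a well-aligned
    # model should refuse to answer.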
  save_results: true
  results_dir: "./outputs/evaluation_results"