# progressive-llm/config/training_config_gemma3_1b.yaml
experiment:
  name: "progressive_reasoning_gemma3_1b"
  base_model: "google/gemma-3-1b-pt" # Gemma 3 1B pretrained checkpoint
  output_dir: "./outputs"
  use_wandb: true
  wandb_project: "matsuo-llm-comp-2025"
model:
  load_in_4bit: false
  bnb_4bit_compute_dtype: "bfloat16"
  bnb_4bit_use_double_quant: true
  device_map: "auto"
  gradient_checkpointing: false # Not needed for small models
  use_flash_attention_2: false
  use_eager_attention: true
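# A minimal sketch of how these options might map onto transformers +
# bitsandbytes (load_base_model and the cfg dict are assumptions, not part
# of this repo; the bnb_4bit_* keys only take effect when load_in_4bit is true):
#
#   import torch
#   from transformers import AutoModelForCausalLM, BitsAndBytesConfig
#
#   def load_base_model(cfg):
#       quant = None
#       if cfg["load_in_4bit"]:  # false here, so no quantization is applied
#           quant = BitsAndBytesConfig(
#               load_in_4bit=True,
#               bnb_4bit_compute_dtype=torch.bfloat16,
#               bnb_4bit_use_double_quant=cfg["bnb_4bit_use_double_quant"],
#           )
#       return AutoModelForCausalLM.from_pretrained(
#           "google/gemma-3-1b-pt",
#           quantization_config=quant,
#           device_map=cfg["device_map"],
#           attn_implementation="eager" if cfg["use_eager_attention"] else "sdpa",
#       )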
progressive_stages:
- name: "basic_cot"
description: "Basic Chain-of-Thought reasoning"
dataset_path: "./data/basic_cot/"
adapter_config:
r: 8
lora_alpha: 16
lora_dropout: 0.1
target_modules: ["q_proj", "k_proj", "v_proj", "o_proj"] # Gemma attention modules
init_lora_weights: true
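    # These keys match peft's LoraConfig one-to-one; a hedged sketch of the
    # equivalent object (task_type is an assumption based on causal-LM tuning):
    #
    #   from peft import LoraConfig
    #   lora = LoraConfig(
    #       r=8,
    #       lora_alpha=16,
    #       lora_dropout=0.1,
    #       target_modules=["q_proj", "k_proj", "v_proj", "o_proj"],
    #       init_lora_weights=True,
    #       task_type="CAUSAL_LM",
    #   )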
    training:
      num_epochs: 2
      per_device_batch_size: 8
      gradient_accumulation_steps: 2
      learning_rate: 5e-4
      warmup_steps: 50
      max_length: 1024
      fp16: false
      bf16: true
      max_grad_norm: 1.0
      weight_decay: 0.001
      save_steps: 100
      logging_steps: 10
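    # These keys line up with transformers.TrainingArguments; a sketch assuming
    # a one-to-one mapping (max_length would be handled by the tokenizer or
    # collator, not TrainingArguments):
    #
    #   from transformers import TrainingArguments
    #   args = TrainingArguments(
    #       output_dir="./outputs",
    #       num_train_epochs=2,
    #       per_device_train_batch_size=8,  # effective batch = 8 * 2 = 16
    #       gradient_accumulation_steps=2,
    #       learning_rate=5e-4,
    #       warmup_steps=50,
    #       bf16=True,
    #       max_grad_norm=1.0,
    #       weight_decay=0.001,
    #       save_steps=100,
    #       logging_steps=10,
    #   )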
- name: "math_reasoning"
description: "Mathematical reasoning with OpenR1-Math-220k dataset"
dataset_path: "open-r1/OpenR1-Math-220k" # HuggingFace dataset
inherit_from: "basic_cot"
adapter_config:
r: 16
lora_alpha: 32
lora_dropout: 0.1
target_modules: ["q_proj", "k_proj", "v_proj", "o_proj", "gate_proj", "up_proj", "down_proj"]
init_lora_weights: true
training:
num_epochs: 1 # Large dataset, fewer epochs
per_device_batch_size: 4
gradient_accumulation_steps: 4
learning_rate: 3e-4
warmup_steps: 100
max_length: 2048
bf16: true
max_grad_norm: 1.0
weight_decay: 0.001
save_steps: 1000
logging_steps: 100
dataset_config:
# OpenR1-Math-220k specific settings
streaming: true # Use streaming for large dataset
max_samples: 200000 # Limit samples for faster training
split: "train"
- name: "complex_reasoning"
description: "Complex multi-step reasoning with Mixture-of-Thoughts"
dataset_path: "open-r1/Mixture-of-Thoughts" # HuggingFace dataset
inherit_from: "math_reasoning"
adapter_config:
r: 32
lora_alpha: 64
lora_dropout: 0.1
target_modules: ["q_proj", "k_proj", "v_proj", "o_proj", "gate_proj", "up_proj", "down_proj"]
init_lora_weights: true
training:
num_epochs: 1 # Large dataset, fewer epochs
per_device_batch_size: 2
gradient_accumulation_steps: 8
learning_rate: 2e-4
warmup_steps: 200
max_length: 4096
bf16: true
max_grad_norm: 1.0
weight_decay: 0.001
save_steps: 500
logging_steps: 50
dataset_config:
# Mixture-of-Thoughts specific settings
streaming: true # Use streaming for large dataset
max_samples: 30000 # Limit samples for faster training
split: "train"
evaluation:
  benchmarks:
    - "HLE"
    - "Do-Not-Answer"
  save_results: true
  results_dir: "./outputs/evaluation_results"