# progressive-llm/config/training_config_gemma3_1b_minimal.yaml
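# Minimal single-stage configuration for progressive reasoning training on
# google/gemma-3-1b-pt: rank-4 LoRA on q_proj only, 128-token sequences, and a
# single epoch, pared down for quick, low-memory runs.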
experiment:
  name: "progressive_reasoning_gemma3_1b_minimal"
  base_model: "google/gemma-3-1b-pt"
  output_dir: "./outputs"
  use_wandb: true
  wandb_project: "matsuo-llm-comp-2025"
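# FlashAttention-2 is disabled in favor of eager attention, the more
# conservative implementation commonly recommended when fine-tuning
# Gemma-family models.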
model:
  load_in_4bit: false
  device_map: "auto"
  gradient_checkpointing: true
  use_flash_attention_2: false
  use_eager_attention: true
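# Each stage pairs a LoRA adapter definition (keys mirroring peft.LoraConfig:
# r, lora_alpha, lora_dropout, target_modules, init_lora_weights) with its own
# training hyperparameters. This minimal config defines a single stage; further
# stages can presumably be appended to the list below.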
progressive_stages:
  - name: "basic_cot"
    description: "Basic Chain-of-Thought reasoning"
    dataset_path: "./data/basic_cot/"
    adapter_config:
      r: 4  # Extremely minimal rank
      lora_alpha: 8
      lora_dropout: 0.1
      target_modules: ["q_proj"]  # Only one module
      init_lora_weights: true
    training:
      num_epochs: 1  # Reduced epochs
      per_device_batch_size: 1
      gradient_accumulation_steps: 4
      learning_rate: 5.0e-4  # decimal point ensures PyYAML parses this as a float, not a string
      warmup_steps: 10
      max_length: 128  # Very short sequences
      bf16: true
      max_grad_norm: 1.0
      weight_decay: 0.001
      save_steps: 100
      logging_steps: 10
      dataloader_num_workers: 1
      dataloader_pin_memory: false
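# Post-training evaluation: HLE (presumably Humanity's Last Exam) for reasoning
# and Do-Not-Answer for safety/refusal behaviour.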
evaluation:
  benchmarks:
    - "HLE"
    - "Do-Not-Answer"
  save_results: true
  results_dir: "./outputs/evaluation_results"
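# A minimal sketch (not part of this file or the project code) of how the
# sections above might be consumed. The loading script and the surrounding
# calls are assumptions; only the YAML keys come from this config.
#
#   import yaml
#   from peft import LoraConfig, get_peft_model
#   from transformers import AutoModelForCausalLM, TrainingArguments
#
#   cfg = yaml.safe_load(open("config/training_config_gemma3_1b_minimal.yaml"))
#   stage = cfg["progressive_stages"][0]
#   train = stage["training"]
#
#   # Load the base model with eager attention (matches use_eager_attention: true)
#   model = AutoModelForCausalLM.from_pretrained(
#       cfg["experiment"]["base_model"],
#       attn_implementation="eager",
#   )
#
#   # Wrap the base model with the stage's LoRA adapter
#   model = get_peft_model(
#       model, LoraConfig(task_type="CAUSAL_LM", **stage["adapter_config"])
#   )
#
#   # Map the stage's training section onto Hugging Face TrainingArguments
#   args = TrainingArguments(
#       output_dir=cfg["experiment"]["output_dir"],
#       num_train_epochs=train["num_epochs"],
#       per_device_train_batch_size=train["per_device_batch_size"],
#       gradient_accumulation_steps=train["gradient_accumulation_steps"],
#       learning_rate=train["learning_rate"],
#       warmup_steps=train["warmup_steps"],
#       bf16=train["bf16"],
#       max_grad_norm=train["max_grad_norm"],
#       weight_decay=train["weight_decay"],
#       save_steps=train["save_steps"],
#       logging_steps=train["logging_steps"],
#   )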