experiment:
  name: "progressive_reasoning_large_model"
  base_model: "meta-llama/Llama-3.1-8B"  # Or other whitelisted models
  output_dir: "./outputs"
  use_wandb: true
  wandb_project: "matsuo-llm-comp-2025"

model:
  load_in_4bit: true  # Enable 4-bit quantization for memory efficiency
  bnb_4bit_compute_dtype: "bfloat16"
  bnb_4bit_use_double_quant: true
  bnb_4bit_quant_type: "nf4"
  device_map: "auto"
  # Additional memory optimizations
  gradient_checkpointing: true
  use_flash_attention_2: true  # If available

progressive_stages:
  - name: "basic_cot"
    description: "Basic Chain-of-Thought reasoning"
    dataset_path: "./data/basic_cot/"
    adapter_config:
      r: 16  # Larger rank for bigger models
      lora_alpha: 32
      lora_dropout: 0.05
      target_modules: ["q_proj", "v_proj", "k_proj", "o_proj"]
      init_lora_weights: true  # Identity initialization
    training:
      num_epochs: 1
      per_device_batch_size: 1  # Small batch size for large models
      gradient_accumulation_steps: 16  # Effective batch size = 16
      learning_rate: 2e-4
      warmup_steps: 100
      max_length: 2048
      fp16: false
      bf16: true
      max_grad_norm: 0.3
      weight_decay: 0.001
      save_steps: 50
      logging_steps: 10

  - name: "math_reasoning"
    description: "Mathematical reasoning with think tags"
    dataset_path: "./data/math_reasoning/"
    inherit_from: "basic_cot"
    adapter_config:
      r: 32  # Increase rank for more complex tasks
      lora_alpha: 64
      lora_dropout: 0.05
      target_modules: ["q_proj", "v_proj", "k_proj", "o_proj", "gate_proj", "up_proj", "down_proj"]
      init_lora_weights: true
    training:
      num_epochs: 2
      per_device_batch_size: 1
      gradient_accumulation_steps: 32  # Effective batch size = 32
      learning_rate: 1e-4
      warmup_steps: 200
      max_length: 4096
      bf16: true
      max_grad_norm: 0.3
      weight_decay: 0.001

  - name: "complex_reasoning"
    description: "Complex multi-step reasoning"
    dataset_path: "./data/complex_reasoning/"
    inherit_from: "math_reasoning"
    adapter_config:
      r: 64  # Maximum rank for the most complex tasks
      lora_alpha: 128
      lora_dropout: 0.05
      target_modules: ["q_proj", "v_proj", "k_proj", "o_proj", "gate_proj", "up_proj", "down_proj"]
      init_lora_weights: true
    training:
      num_epochs: 2
      per_device_batch_size: 1
      gradient_accumulation_steps: 64  # Effective batch size = 64
      learning_rate: 5e-5
      warmup_steps: 300
      max_length: 8192
      bf16: true
      max_grad_norm: 0.3
      weight_decay: 0.001

evaluation:
  benchmarks:
    - "HLE"
    - "Do-Not-Answer"
  save_results: true
  results_dir: "./outputs/evaluation_results"

# Memory optimization settings
optimization:
  gradient_checkpointing: true
  gradient_checkpointing_kwargs:
    use_reentrant: false
  ddp_find_unused_parameters: false
  fsdp: "full_shard auto_wrap"  # For multi-GPU setups
  fsdp_transformer_layer_cls_to_wrap: "LlamaDecoderLayer"
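
# ------------------------------------------------------------------
# Illustrative only, not part of the config: a minimal sketch (kept in
# comments so this file remains valid YAML) of how a training script
# might map the `model:` and per-stage `adapter_config:` blocks above
# onto transformers/peft objects. The config file path ("config.yaml")
# and the choice to load stage 0 are assumptions for illustration.
#
#   import torch, yaml
#   from transformers import AutoModelForCausalLM, BitsAndBytesConfig
#   from peft import LoraConfig, get_peft_model
#
#   with open("config.yaml") as f:
#       cfg = yaml.safe_load(f)
#
#   # 4-bit NF4 quantization as specified under `model:`
#   bnb = BitsAndBytesConfig(
#       load_in_4bit=cfg["model"]["load_in_4bit"],
#       bnb_4bit_compute_dtype=torch.bfloat16,
#       bnb_4bit_use_double_quant=cfg["model"]["bnb_4bit_use_double_quant"],
#       bnb_4bit_quant_type=cfg["model"]["bnb_4bit_quant_type"],
#   )
#   model = AutoModelForCausalLM.from_pretrained(
#       cfg["experiment"]["base_model"],
#       quantization_config=bnb,
#       device_map=cfg["model"]["device_map"],
#   )
#
#   # LoRA adapter for the first progressive stage ("basic_cot")
#   stage = cfg["progressive_stages"][0]
#   lora = LoraConfig(
#       r=stage["adapter_config"]["r"],
#       lora_alpha=stage["adapter_config"]["lora_alpha"],
#       lora_dropout=stage["adapter_config"]["lora_dropout"],
#       target_modules=stage["adapter_config"]["target_modules"],
#       task_type="CAUSAL_LM",
#   )
#   model = get_peft_model(model, lora)
# ------------------------------------------------------------------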