```yaml
bf16: true
cutoff_len: 4096
dataset: oit
dataset_dir: data
ddp_timeout: 180000000
do_train: true
double_quantization: true
enable_thinking: true
finetuning_type: lora
flash_attn: auto
gradient_accumulation_steps: 8
include_num_input_tokens_seen: true
learning_rate: 5.0e-05
logging_steps: 5
lora_alpha: 16
lora_dropout: 0
lora_rank: 8
lora_target: all
lr_scheduler_type: cosine
max_grad_norm: 1.0
max_samples: 100000
model_name_or_path: unsloth/gemma-3-1b-it
num_train_epochs: 3.0
optim: adamw_torch
output_dir: saves/Gemma-3-1B-Instruct/lora/train_2025-07-14-13-26-41
packing: false
per_device_train_batch_size: 3
plot_loss: true
preprocessing_num_workers: 16
quantization_bit: 4
quantization_method: bnb
report_to: none
save_steps: 100
stage: sft
template: gemma
trust_remote_code: true
warmup_steps: 0
```
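This config runs supervised fine-tuning (`stage: sft`) of Gemma-3-1B with a rank-8 LoRA adapter on top of 4-bit bitsandbytes quantization, i.e. a QLoRA-style setup. Note that with `per_device_train_batch_size: 3` and `gradient_accumulation_steps: 8`, the effective batch size is 3 × 8 = 24 per device per optimizer step. To launch training, save the YAML to a file and pass it to the LLaMA-Factory CLI. A minimal sketch, assuming LLaMA-Factory is installed and the config is saved as `gemma3_lora_sft.yaml` (the filename is illustrative):

```bash
# Launch LoRA SFT from the config above.
# llamafactory-cli is installed alongside LLaMA-Factory (pip install llamafactory).
llamafactory-cli train gemma3_lora_sft.yaml
```

Checkpoints land in the `output_dir` shown above every 100 steps (`save_steps: 100`), and the loss curve is written there as well since `plot_loss: true`.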