| Parameter | Value |
| --- | --- |
| cutoff_len | 2048 |
| dataset | treino_pt_rde |
| dataset_dir | data |
| ddp_timeout | 180000000 |
| do_train | true |
| finetuning_type | lora |
| flash_attn | auto |
| fp16 | true |
| gradient_accumulation_steps | 4 |
| learning_rate | 3.0e-05 |
| logging_steps | 10 |
| lora_alpha | 16 |
| lora_dropout | 0 |
| lora_rank | 8 |
| lora_target | all |
| lr_scheduler_type | cosine |
| max_grad_norm | 1.0 |
| max_samples | 3716 |
| model_name_or_path | google/gemma-2-9b-it |
| num_train_epochs | 3.0 |
| optim | adamw_torch |
| output_dir | saves/Gemma-2-9B-Instruct/lora/gemma2-9b-finetuned |
| packing | false |
| per_device_train_batch_size | 8 |
| plot_loss | true |
| preprocessing_num_workers | 16 |
| quantization_bit | 4 |
| quantization_method | bitsandbytes |
| report_to | none |
| save_steps | 1000 |
| stage | sft |
| template | alpaca |
| use_unsloth | true |
| warmup_steps | 0 |
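
For readers who want to approximate this setup outside LLaMA-Factory, the sketch below maps the main arguments onto the Hugging Face stack (transformers + peft). It is a minimal, hypothetical translation rather than the exact training entry point used here: dataset loading and the trainer wiring are omitted, and the `bnb_4bit_compute_dtype` choice is an assumption (derived from `fp16: true`), not a value taken from the config.

```python
# Minimal sketch, assuming transformers, peft, and bitsandbytes are installed
# and a CUDA GPU is available for 4-bit loading.
import torch
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    BitsAndBytesConfig,
    TrainingArguments,
)
from peft import LoraConfig

model_id = "google/gemma-2-9b-it"  # model_name_or_path

# quantization_bit: 4 with quantization_method: bitsandbytes
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_compute_dtype=torch.float16,  # assumption, mirroring fp16: true
)

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    quantization_config=bnb_config,
    device_map="auto",
)

# lora_rank: 8, lora_alpha: 16, lora_dropout: 0, lora_target: all
peft_config = LoraConfig(
    r=8,
    lora_alpha=16,
    lora_dropout=0.0,
    target_modules="all-linear",  # peft's equivalent of targeting all linear layers
    task_type="CAUSAL_LM",
)

# Optimizer / schedule / logging arguments from the table above
training_args = TrainingArguments(
    output_dir="saves/Gemma-2-9B-Instruct/lora/gemma2-9b-finetuned",
    per_device_train_batch_size=8,
    gradient_accumulation_steps=4,
    learning_rate=3.0e-5,
    lr_scheduler_type="cosine",
    num_train_epochs=3.0,
    max_grad_norm=1.0,
    warmup_steps=0,
    logging_steps=10,
    save_steps=1000,
    optim="adamw_torch",
    fp16=True,
    report_to="none",
)
```

Within LLaMA-Factory itself, these parameters are normally kept in a single YAML file and the run is typically launched with `llamafactory-cli train <config>.yaml`.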