bf16: true
cutoff_len: 2048
dataset: en_train_part_1,en_train_part_3
dataset_dir: data
ddp_timeout: 180000000
do_train: true
double_quantization: true
finetuning_type: lora
flash_attn: auto
gradient_accumulation_steps: 2
include_num_input_tokens_seen: true
learning_rate: 3.0e-05
logging_steps: 10
lora_alpha: 16
lora_dropout: 0
lora_rank: 8
lora_target: all
lr_scheduler_type: cosine
max_grad_norm: 1.0
max_samples: 1858
model_name_or_path: mistralai/Mistral-7B-Instruct-v0.3
num_train_epochs: 3.0
optim: adamw_torch
output_dir: saves/Mistral-7B-Instruct-v0.3/lora/2_part
packing: false
per_device_train_batch_size: 4
plot_loss: true
preprocessing_num_workers: 16
quantization_bit: 4
quantization_method: bitsandbytes
report_to: none
save_steps: 1000
stage: sft
template: alpaca
trust_remote_code: true
use_unsloth: true
warmup_steps: 0
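
# Usage note (an assumption, not confirmed by this file alone): the keys above
# (stage, finetuning_type, template, lora_target, use_unsloth) match the
# LLaMA-Factory trainer schema. Under that assumption, a minimal way to launch
# training is to pass this file to the CLI; the path below is a placeholder:
#
#   llamafactory-cli train path/to/this_config.yaml
#
# For reference on the settings above: the effective batch size is
# per_device_train_batch_size * gradient_accumulation_steps = 4 * 2 = 8 per
# device, and quantization_bit: 4 with quantization_method: bitsandbytes loads
# the frozen base model in 4-bit (QLoRA-style) while the LoRA adapters
# (rank 8, alpha 16, all linear targets) remain trainable in higher precision.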