bf16: true
cutoff_len: 2048
dataset: demo_data
dataset_dir: data
ddp_timeout: 180000000
do_train: true
double_quantization: true
eval_steps: 20
eval_strategy: steps
finetuning_type: lora
flash_attn: auto
gradient_accumulation_steps: 8
include_num_input_tokens_seen: true
learning_rate: 5.0e-05
logging_steps: 5
lora_alpha: 16
lora_dropout: 0.05
lora_rank: 8
lora_target: all
lr_scheduler_type: cosine
max_grad_norm: 1.0
max_samples: 100000
model_name_or_path: microsoft/Phi-3-medium-4k-instruct
num_train_epochs: 10.0
optim: adamw_torch
output_dir: saves/Phi-3-14B-8k-Instruct/lora/train_2025-04-09-16-08
packing: false
per_device_eval_batch_size: 2
per_device_train_batch_size: 2
plot_loss: true
preprocessing_num_workers: 16
quantization_bit: 8
quantization_method: bnb
report_to:
- tensorboard
save_steps: 20
stage: sft
template: phi
trust_remote_code: true
use_rslora: true
val_size: 0.115
warmup_steps: 0
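
# Note: the keys above appear to follow LLaMA-Factory's training-argument
# schema (an assumption inferred from names such as `stage`, `finetuning_type`,
# and `template`, and from the `saves/...` output path; verify against your
# installed version). A config like this is typically launched with:
#   llamafactory-cli train path/to/this_config.yaml
# With these values, the effective train batch size per optimizer step is
# per_device_train_batch_size (2) * gradient_accumulation_steps (8) = 16
# per device, multiplied by the number of devices.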