### model
model_name_or_path: Qwen/Qwen2.5-1.5B
trust_remote_code: true

### method
stage: sft
do_train: true
finetuning_type: lora
lora_rank: 64
lora_target: all

### dataset
dataset: QAtrain
eval_dataset: QAval
template: qwen
cutoff_len: 2048
# max_samples: 50
overwrite_cache: true
preprocessing_num_workers: 8

### output
resume_from_checkpoint: /kaggle/working/Model/last-checkpoint
output_dir: /kaggle/working/
logging_steps: 50
save_steps: 200
plot_loss: true
overwrite_output_dir: true

### train
per_device_train_batch_size: 1
gradient_accumulation_steps: 16  # effective batch size per device: 1 x 16 = 16
learning_rate: 1.0e-4
num_train_epochs: 3.0
lr_scheduler_type: cosine
warmup_ratio: 0.1
bf16: true  # full
ddp_timeout: 180000000
torch_compile: false

### eval
# val_size: 0.1
per_device_eval_batch_size: 1
eval_strategy: steps
eval_steps: 200
report_to: wandb
run_name: Qwennn
push_to_hub: true
hub_model_id: Youssef/QWEN_Arabic_Q&A  # note: "&" is not an allowed character in Hub repo ids
hub_private_repo: true
hub_strategy: checkpoint
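
# Dataset registration sketch: QAtrain and QAval are custom dataset names, so
# they are assumed to be registered in LLaMA-Factory's data/dataset_info.json
# (the section layout and keys such as stage/finetuning_type/lora_target match
# LLaMA-Factory's SFT examples). A minimal alpaca-format entry, with
# "qatrain.json" as a placeholder file name, might look like:
#
#   "QAtrain": {
#     "file_name": "qatrain.json",
#     "formatting": "alpaca",
#     "columns": { "prompt": "instruction", "query": "input", "response": "output" }
#   }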
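
# Usage sketch: assuming this file is saved as qwen2.5_lora_sft.yaml, training
# would be launched with LLaMA-Factory's CLI:
#
#   llamafactory-cli train qwen2.5_lora_sft.yaml
#
# Note that resume_from_checkpoint expects /kaggle/working/Model/last-checkpoint
# to contain trainer state saved by a previous run of this same config.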