### model
model_name_or_path: Qwen/Qwen3-8B-Base
trust_remote_code: true

### method
stage: sft
do_train: true
finetuning_type: full
deepspeed: /shared_workspace_mfs/ximing/LLaMA-Factory/examples/deepspeed/ds_z3_offload_config.json
# choices: [ds_z0_config.json, ds_z2_config.json, ds_z3_config.json, ds_z3_offload_config.json]
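# ds_z3_offload_config.json enables ZeRO stage 3 with optimizer and parameter
# offload to CPU, trading some throughput for much lower GPU memory; helpful
# for full-parameter SFT of an 8B model at long sequence lengths.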

### dataset
dataset: dolci_10k_with_tool_call_batch3
template: qwen3_nothink
cutoff_len: 65536
rope_scaling: yarn
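# yarn scaling extends Qwen3-8B's native 32768-token context window so the
# 65536-token cutoff above is usable.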
max_samples: 10000
overwrite_cache: true
preprocessing_num_workers: 16
dataloader_num_workers: 4

### output
output_dir: /shared_workspace_mfs/ximing/sft_dolci_10k_with_tool_call_batch3
logging_steps: 1
save_strategy: epoch
plot_loss: true
overwrite_output_dir: true
report_to: wandb

### train
per_device_train_batch_size: 4
gradient_accumulation_steps: 4
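# effective global batch size = 4 (per device) x 4 (accumulation) x world size,
# e.g. 128 sequences per optimizer step on an assumed 8-GPU node.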
learning_rate: 5e-5
num_train_epochs: 1
lr_scheduler_type: cosine
warmup_ratio: 0.1
bf16: true
ddp_timeout: 180000000
resume_from_checkpoint: null

### eval
# eval_dataset: alpaca_en_demo
# val_size: 0.1
# per_device_eval_batch_size: 1
# eval_strategy: steps
# eval_steps: 500
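
# usage sketch (the filename below is an assumption, not part of the repo):
#   llamafactory-cli train sft_dolci_10k_with_tool_call_batch3.yaml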