assistant_tag: assistant
bf16: true
content_tag: value
cutoff_len: 16384
dataset: /leonardo_work/EUHPC_E03_068/marianna/lf_datasets/mlfoundations-dev/extra_verified
dataset_dir: ONLINE
ddp_timeout: 180000000
deepspeed: dcft/train/zero3_offload.json
do_train: true
enable_liger_kernel: true
finetuning_type: full
formatting: sharegpt
global_batch_size: 384
gradient_accumulation_steps: 1
hub_model_id: mlfoundations-dev/mlfoundations-dev_extra_verified-32B
learning_rate: 1.0e-05
logging_steps: 1
lr_scheduler_type: cosine
max_samples: 1000000
messages: conversations
model_name_or_path: Qwen/Qwen2.5-32B-Instruct
num_train_epochs: 3.0
output_dir: /leonardo_scratch/large/userexternal/mnezhuri/dcft_checkpoints/saves/DCFT-Stratos-mlfoundations-dev_extra_verified-32B
overwrite_cache: true
per_device_train_batch_size: 1
plot_loss: true
preprocessing_num_workers: 16
push_to_db: false
push_to_hub: false
report_to: wandb
resume_from_checkpoint: /leonardo_scratch/large/userexternal/mnezhuri/dcft_checkpoints/saves/DCFT-Stratos-mlfoundations-dev_extra_verified-32B/checkpoint-850
role_tag: from
run_name: DCFT-Stratos-mlfoundations-dev_extra_verified-32B
save_steps: 50
stage: sft
template: qwen25
tokenized_path: /leonardo_work/EUHPC_E03_068/marianna/lf_datasets/mlfoundations-dev_extra_verified_tokenized
user_tag: user
warmup_ratio: 0.1
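
The keys above follow LLaMA-Factory's SFT configuration schema (stage: sft, template: qwen25, finetuning_type: full, formatting: sharegpt), while keys such as push_to_db and global_batch_size appear to come from the surrounding DCFT training wrapper rather than upstream LLaMA-Factory. Note the batch arithmetic: with per_device_train_batch_size 1 and gradient_accumulation_steps 1, a global batch size of 384 implies 384 data-parallel ranks if the global size is per_device x grad_accum x world_size. As a minimal launch sketch, assuming the block is saved as train_config.yaml (a hypothetical file name, not part of the original config) and LLaMA-Factory is installed, a run would typically be started like this:

# Launch sketch, not the verified original command. train_config.yaml is an
# assumed file name; FORCE_TORCHRUN=1 is LLaMA-Factory's documented way to
# force a torchrun (multi-GPU) launch, shown here for illustration.
FORCE_TORCHRUN=1 llamafactory-cli train train_config.yaml

Since resume_from_checkpoint points at checkpoint-850 inside output_dir, the same invocation would resume that run rather than start from scratch.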