# Multi-node FSDP launch: 8 processes total across 2 machines (4 GPUs per node).
# BACKENDAI_CLUSTER_LOCAL_RANK is the node rank supplied by the Backend.AI runtime;
# expandable_segments:True lets PyTorch's CUDA caching allocator grow segments,
# reducing fragmentation-related OOMs on long runs.
PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True accelerate launch --config_file "configs/fsdp_config.yaml" \
--main_process_ip "172.51.185.4" \
--main_process_port "29080" \
--machine_rank "$BACKENDAI_CLUSTER_LOCAL_RANK" \
--num_processes 8 \
--num_machines 2 \
--monitor_interval 10 \
--num_cpu_threads_per_process 8 \
train.py \
--seed 100 \
--model_name_or_path "meta-llama/Meta-Llama-3.1-8B-Instruct" \
--output_dir "results/mncai/Meta-Llama-3.1-8B-Instruct-v0.2" \
--dataset_name "mncai/ai2_arc,mncai/hellaswag,mncai/mbpp,mncai/openbookqa,mncai/winogrande,mncai/trivia_qa,mncai/KMMLU" \
--chat_template_format "llama3.1" \
--add_special_tokens False \
--append_concat_token False \
--splits "train,test" \
--max_seq_len 8192 \
--num_train_epochs 1 \
--logging_steps 5 \
--log_level "info" \
--logging_strategy "steps" \
--eval_strategy "steps" \
--eval_steps 200 \
--save_total_limit 2 \
--bf16 True \
--packing False \
--learning_rate 1e-4 \
--lr_scheduler_type "cosine" \
--weight_decay 1e-4 \
--warmup_ratio 0.0 \
--max_grad_norm 1.0 \
--per_device_train_batch_size 8 \
--per_device_eval_batch_size 8 \
--gradient_accumulation_steps 1 \
--gradient_checkpointing True \
--use_reentrant False \
--dataset_text_field "content" \
--use_flash_attn True \
--optim paged_adamw_32bit \
--report_to "wandb"
# NOTE: the last active flag must not end with "\". The continuation would splice the
# next commented line into the command, and the "#" would end the argument list there.
# Optional: push the trained model to the Hugging Face Hub. To re-enable, append
# these flags to the command above and restore the trailing backslash on --report_to.
# --hub_model_id "mncai/Meta-Llama-3.1-8B-Instruct-v0.2" \
# --push_to_hub True \
# --hub_private_repo True \
# --hub_strategy "end" \
# --hub_token $HF_WRITE_TOKEN \
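# The hub flags assume a write-scoped token is already exported in the environment,
# e.g. (placeholder value, not a real token):
# export HF_WRITE_TOKEN="hf_xxxxxxxxxxxxxxxxxxxx"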

# Optional: periodic checkpoint saving and PEFT-LoRA / 4-bit quantization flags.
# --save_strategy "steps" \
# --save_steps 200 \
# --use_peft_lora True \
# --lora_r 8 \
# --lora_alpha 16 \
# --lora_dropout 0.1 \
# --lora_target_modules "all-linear" \
# --use_4bit_quantization False
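
# For reference, a minimal sketch of what configs/fsdp_config.yaml could look like
# for this 2-machine run. The field values below are assumptions based on the usual
# `accelerate config` FSDP keys, not the repo's actual file; adjust to your cluster.
#
#   compute_environment: LOCAL_MACHINE
#   distributed_type: FSDP
#   fsdp_config:
#     fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
#     fsdp_sharding_strategy: FULL_SHARD
#     fsdp_state_dict_type: SHARDED_STATE_DICT
#     fsdp_backward_prefetch: BACKWARD_PRE
#     fsdp_cpu_ram_efficient_loading: true
#     fsdp_sync_module_states: true
#     fsdp_use_orig_params: true
#     fsdp_offload_params: false
#   machine_rank: 0        # overridden by --machine_rank at launch
#   num_machines: 2        # overridden by --num_machines
#   num_processes: 8       # overridden by --num_processes
#   mixed_precision: bf16
#   main_training_function: main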