#!/bin/bash
# Sequential fine-tuning runs via FastChat's train_mem.py (8 GPUs, torchrun),
# each followed by a background push of the resulting checkpoint to the Hub.
export WANDB_MODE=online
cd /workspace/medvicuna
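
# The &>> redirects below append to files under logs/; &>> creates the log
# file but not its parent directory, so make sure it exists first
# (assumption: the directory may not be present on a fresh workspace).
mkdir -p /workspace/medvicuna/logs

# Run 1: Vicuna-33B v1.3 on medqa_opt4.
# Effective global batch: 8 GPUs x 1 per-device x 16 grad-accum steps = 128.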
torchrun --nproc_per_node=8 --master_port=20001 /workspace/medvicuna/fastchat/train/train_mem.py \
    --model_name_or_path lmsys/vicuna-33b-v1.3 \
    --data_path /workspace/medvicuna/33b/medqa_opt4.json \
    --cache_dir /workspace/.cache \
    --push_to_hub False \
    --bf16 True \
    --output_dir output_vicuna_33b_medqa_opt4 \
    --num_train_epochs 8 \
    --per_device_train_batch_size 1 \
    --per_device_eval_batch_size 1 \
    --gradient_accumulation_steps 16 \
    --evaluation_strategy "steps" \
    --eval_steps 250 \
    --save_strategy "steps" \
    --save_steps 300 \
    --save_total_limit 100 \
    --learning_rate 2e-5 \
    --weight_decay 0. \
    --warmup_ratio 0.04 \
    --lr_scheduler_type "cosine" \
    --logging_steps 1 \
    --fsdp "full_shard auto_wrap" \
    --fsdp_transformer_layer_cls_to_wrap 'LlamaDecoderLayer' \
    --tf32 True \
    --model_max_length 2048 \
    --gradient_checkpointing True \
    --lazy_preprocess True &>> /workspace/medvicuna/logs/output_vicuna_33b_medqa_opt4.log
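
# Let the final checkpoint finish writing to disk, then push the output
# directory in the background. push_to_hub.sh is a local helper not shown
# here; it is assumed to take <hf_repo_url> <local_dir> and upload the
# checkpoint, e.g. roughly:
#   git clone "$1" repo && cp -r "$2"/* repo/ && cd repo && git add -A && git commit -m upload && git push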
sleep 120s
nohup bash /workspace/medvicuna/push_to_hub.sh https://huggingface.co/s1ghhh/vicuna_33b_medqa_opt4_0707 /workspace/medvicuna/output_vicuna_33b_medqa_opt4 &
cd /workspace/medvicuna
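
# Run 2: same data, but initialized from base LLaMA-33B rather than Vicuna
# (presumably to compare instruction-tuned vs. raw initialization).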
torchrun --nproc_per_node=8 --master_port=20001 /workspace/medvicuna/fastchat/train/train_mem.py \
    --model_name_or_path alexl83/LLaMA-33B-HF \
    --data_path /workspace/medvicuna/33b/medqa_opt4.json \
    --cache_dir /workspace/.cache \
    --push_to_hub False \
    --bf16 True \
    --output_dir output_llama_33b_medqa_opt4 \
    --num_train_epochs 8 \
    --lazy_preprocess True \
    --per_device_train_batch_size 1 \
    --per_device_eval_batch_size 1 \
    --gradient_accumulation_steps 16 \
    --evaluation_strategy "steps" \
    --eval_steps 250 \
    --save_strategy "steps" \
    --save_steps 300 \
    --save_total_limit 100 \
    --learning_rate 2e-5 \
    --weight_decay 0. \
    --warmup_ratio 0.04 \
    --lr_scheduler_type "cosine" \
    --logging_steps 1 \
    --fsdp "full_shard auto_wrap" \
    --fsdp_transformer_layer_cls_to_wrap 'LlamaDecoderLayer' \
    --tf32 True \
    --model_max_length 2048 \
    --gradient_checkpointing True &>> /workspace/medvicuna/logs/output_llama_33b_medqa_opt4.log
sleep 120s
nohup bash /workspace/medvicuna/push_to_hub.sh https://huggingface.co/s1ghhh/llama_33b_medqa_opt4_0707 /workspace/medvicuna/output_llama_33b_medqa_opt4 &
cd /workspace/medvicuna
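
# Run 3: Vicuna-33B v1.3 on the augmented variant of the dataset (medqa_opt4_aug).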
torchrun --nproc_per_node=8 --master_port=20001 /workspace/medvicuna/fastchat/train/train_mem.py \
    --model_name_or_path lmsys/vicuna-33b-v1.3 \
    --data_path /workspace/medvicuna/33b/medqa_opt4_aug.json \
    --cache_dir /workspace/.cache \
    --push_to_hub False \
    --lazy_preprocess True \
    --bf16 True \
    --output_dir output_vicuna_33b_medqa_opt4_aug \
    --num_train_epochs 8 \
    --per_device_train_batch_size 1 \
    --per_device_eval_batch_size 1 \
    --gradient_accumulation_steps 16 \
    --evaluation_strategy "steps" \
    --eval_steps 250 \
    --save_strategy "steps" \
    --save_steps 300 \
    --save_total_limit 100 \
    --learning_rate 2e-5 \
    --weight_decay 0. \
    --warmup_ratio 0.04 \
    --lr_scheduler_type "cosine" \
    --logging_steps 1 \
    --fsdp "full_shard auto_wrap" \
    --fsdp_transformer_layer_cls_to_wrap 'LlamaDecoderLayer' \
    --tf32 True \
    --model_max_length 2048 \
    --gradient_checkpointing True &>> /workspace/medvicuna/logs/output_vicuna_33b_medqa_opt4_aug.log
sleep 120s
nohup bash /workspace/medvicuna/push_to_hub.sh https://huggingface.co/s1ghhh/vicuna_33b_medqa_opt4_aug_0707 /workspace/medvicuna/output_vicuna_33b_medqa_opt4_aug &
cd /workspace/medvicuna
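
# Run 4: base LLaMA-33B on the augmented dataset.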
torchrun --nproc_per_node=8 --master_port=20001 /workspace/medvicuna/fastchat/train/train_mem.py \
    --model_name_or_path alexl83/LLaMA-33B-HF \
    --data_path /workspace/medvicuna/33b/medqa_opt4_aug.json \
    --cache_dir /workspace/.cache \
    --push_to_hub False \
    --bf16 True \
    --output_dir output_llama_33b_medqa_opt4_aug \
    --num_train_epochs 8 \
    --lazy_preprocess True \
    --per_device_train_batch_size 1 \
    --per_device_eval_batch_size 1 \
    --gradient_accumulation_steps 16 \
    --evaluation_strategy "steps" \
    --eval_steps 250 \
    --save_strategy "steps" \
    --save_steps 300 \
    --save_total_limit 100 \
    --learning_rate 2e-5 \
    --weight_decay 0. \
    --warmup_ratio 0.04 \
    --lr_scheduler_type "cosine" \
    --logging_steps 1 \
    --fsdp "full_shard auto_wrap" \
    --fsdp_transformer_layer_cls_to_wrap 'LlamaDecoderLayer' \
    --tf32 True \
    --model_max_length 2048 \
    --gradient_checkpointing True &>> /workspace/medvicuna/logs/output_llama_33b_medqa_opt4_aug.log
sleep 120s
nohup bash /workspace/medvicuna/push_to_hub.sh https://huggingface.co/s1ghhh/llama_33b_medqa_opt4_aug_0707 /workspace/medvicuna/output_llama_33b_medqa_opt4_aug &
cd /workspace/medvicuna
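
# Run 5: Vicuna-13B v1.1 on the 520k mixed (augmented + non-augmented) set.
# Per-device batch is 16 with no gradient accumulation, so the global batch
# is still 8 x 16 = 128.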
torchrun --nproc_per_node=8 --master_port=20001 /workspace/medvicuna/fastchat/train/train_mem.py \
    --model_name_or_path eachadea/vicuna-13b-1.1 \
    --data_path /workspace/medvicuna/medvicuna_v1.1_520k_augAndNoaug.json \
    --cache_dir /workspace/.cache \
    --push_to_hub False \
    --bf16 True \
    --lazy_preprocess True \
    --output_dir output_medvicuna_v1.1_13b \
    --num_train_epochs 8 \
    --per_device_train_batch_size 16 \
    --per_device_eval_batch_size 16 \
    --gradient_accumulation_steps 1 \
    --evaluation_strategy "steps" \
    --eval_steps 1150 \
    --save_strategy "steps" \
    --save_steps 1150 \
    --save_total_limit 100 \
    --learning_rate 2e-5 \
    --weight_decay 0. \
    --warmup_ratio 0.04 \
    --lr_scheduler_type "cosine" \
    --logging_steps 1 \
    --fsdp "full_shard auto_wrap" \
    --fsdp_transformer_layer_cls_to_wrap 'LlamaDecoderLayer' \
    --tf32 True \
    --model_max_length 2048 \
    --gradient_checkpointing True &>> /workspace/medvicuna/logs/output_medvicuna_v1.1_13b.log
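
# Give the final checkpoint time to settle before uploading (assumption:
# the same 120s delay used after the other runs applies here too).
sleep 120s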
nohup bash /workspace/medvicuna/push_to_hub.sh https://huggingface.co/s1ghhh/medvicuna_v1.1_13b_0707 /workspace/medvicuna/output_medvicuna_v1.1_13b &
cd /workspace/medvicuna
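
# Run 6: quick 7B test. With --evaluation_strategy "epoch", --eval_steps is
# ignored; with --save_strategy "no", --save_steps and --save_total_limit are
# inert, so no checkpoints are written. Global batch: 8 x 32 = 256.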
torchrun --nproc_per_node=8 --master_port=20001 fastchat/train/train_mem.py \
    --model_name_or_path eachadea/vicuna-7b-1.1 \
    --data_path /workspace/medvicuna/medvicuna_v1.1_520k_augAndNoaug.json \
    --push_to_hub False \
    --bf16 True \
    --output_dir medvicuna_7b_epoch8_test \
    --num_train_epochs 8 \
    --per_device_train_batch_size 32 \
    --per_device_eval_batch_size 16 \
    --gradient_accumulation_steps 1 \
    --evaluation_strategy "epoch" \
    --eval_steps 1894 \
    --save_strategy "no" \
    --save_steps 3787 \
    --save_total_limit 32 \
    --learning_rate 2e-5 \
    --weight_decay 0. \
    --warmup_ratio 0.02 \
    --lr_scheduler_type "cosine" \
    --logging_steps 1 \
    --fsdp "full_shard auto_wrap" \
    --fsdp_transformer_layer_cls_to_wrap 'LlamaDecoderLayer' \
    --tf32 True \
    --model_max_length 2048 \
    --gradient_checkpointing True \
    --lazy_preprocess True &>> medvicuna_7b_epoch8_test.log