s1ghhh commited on
Commit
da0a02b
·
1 Parent(s): 25cee03

Upload train_multi_0725.sh

Browse files
Files changed (1) hide show
  1. train_multi_0725.sh +170 -0
train_multi_0725.sh ADDED
@@ -0,0 +1,170 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
#!/usr/bin/env bash
#
# train_multi_0725.sh — sequential FastChat fine-tuning queue on MedQA data
# (Vicuna 13B / 33B). Each run logs to /workspace/medvicuna/logs/, waits two
# minutes for checkpoints to settle, then pushes them to the Hugging Face Hub
# in the background while the next run starts.
#
# Requires: bash (uses the `&>>` redirection), torchrun on PATH, the FastChat
# checkout at /workspace/medvicuna, and 7–8 local GPUs.
#
# NOTE(review): runs 1 and 3 both train vicuna-33b on medqa_opt4 into the SAME
# output dir and log file (output_vicuna_33b_medqa_opt4), differing only in
# per-device batch size (8 vs 4) — presumably a smaller-batch retry; confirm
# the overwrite is intended.

# Fail on unset variables. Deliberately no `set -e`: this is a best-effort
# queue — one failed run should not block the later runs.
set -u

export WANDB_MODE=online

# All data/output paths below are relative to this directory; abort if it is
# missing rather than launching torchrun from the wrong cwd.
cd /workspace/medvicuna || exit 1

# `&>> logs/...` fails if the directory does not exist yet.
mkdir -p /workspace/medvicuna/logs

# --- Run 1: vicuna-33b on medqa_opt4 (7 GPUs, bs=8, FSDP + CPU offload) -----
torchrun --nproc_per_node=7 --master_port=20001 fastchat/train/train_eval_mem.py \
    --model_name_or_path lmsys/vicuna-33b-v1.3 \
    --data_path /workspace/medvicuna/33b/medqa_opt4.json \
    --eval_data_path /workspace/medvicuna/33b/opt4_medQA_usmle_dev_plus_1272_vicunatplt.json \
    --cache_dir /workspace/.cache \
    --push_to_hub False \
    --bf16 True \
    --output_dir output_vicuna_33b_medqa_opt4 \
    --num_train_epochs 6 \
    --per_device_train_batch_size 8 \
    --per_device_eval_batch_size 1 \
    --gradient_accumulation_steps 2 \
    --evaluation_strategy "steps" \
    --eval_steps 150 \
    --save_strategy "steps" \
    --save_steps 300 \
    --save_total_limit 100 \
    --learning_rate 2e-5 \
    --weight_decay 0. \
    --warmup_ratio 0.04 \
    --lr_scheduler_type "cosine" \
    --logging_steps 1 \
    --fsdp "full_shard auto_wrap offload" \
    --fsdp_transformer_layer_cls_to_wrap 'LlamaDecoderLayer' \
    --tf32 True \
    --model_max_length 1024 \
    --gradient_checkpointing True \
    --lazy_preprocess True &>> /workspace/medvicuna/logs/output_vicuna_33b_medqa_opt4.log

# Let the final checkpoint finish writing before the hub push reads it.
sleep 120s

# Push run-1 checkpoints in the background while run 2 starts.
bash /workspace/medvicuna/push_to_hub.sh https://huggingface.co/s1ghhh/vicuna_33b_medqa_opt4_0725 /workspace/medvicuna/output_vicuna_33b_medqa_opt4 &

# --- Run 2: vicuna-13b on medqa_opt4 (8 GPUs, bs=16, FSDP, no offload) ------
torchrun --nproc_per_node=8 --master_port=20001 fastchat/train/train_eval_mem.py \
    --model_name_or_path lmsys/vicuna-13b-v1.3 \
    --data_path /workspace/medvicuna/33b/medqa_opt4.json \
    --eval_data_path /workspace/medvicuna/33b/opt4_medQA_usmle_dev_plus_1272_vicunatplt.json \
    --cache_dir /workspace/.cache \
    --push_to_hub False \
    --bf16 True \
    --output_dir output_vicuna_13b_medqa_opt4 \
    --num_train_epochs 6 \
    --per_device_train_batch_size 16 \
    --per_device_eval_batch_size 1 \
    --gradient_accumulation_steps 1 \
    --evaluation_strategy "steps" \
    --eval_steps 150 \
    --save_strategy "steps" \
    --save_steps 300 \
    --save_total_limit 100 \
    --learning_rate 2e-5 \
    --weight_decay 0. \
    --warmup_ratio 0.04 \
    --lr_scheduler_type "cosine" \
    --logging_steps 1 \
    --fsdp "full_shard auto_wrap" \
    --fsdp_transformer_layer_cls_to_wrap 'LlamaDecoderLayer' \
    --tf32 True \
    --model_max_length 1024 \
    --gradient_checkpointing True \
    --lazy_preprocess True &>> /workspace/medvicuna/logs/output_vicuna_13b_medqa_opt4.log

sleep 120s

bash /workspace/medvicuna/push_to_hub.sh https://huggingface.co/s1ghhh/vicuna_13b_medqa_opt4_0725 /workspace/medvicuna/output_vicuna_13b_medqa_opt4 &

# --- Run 3: vicuna-33b on medqa_opt4 again, smaller batch (bs=4, offload) ---
# Same output dir/log as run 1 — see NOTE(review) in the header.
torchrun --nproc_per_node=7 --master_port=20001 fastchat/train/train_eval_mem.py \
    --model_name_or_path lmsys/vicuna-33b-v1.3 \
    --data_path /workspace/medvicuna/33b/medqa_opt4.json \
    --eval_data_path /workspace/medvicuna/33b/opt4_medQA_usmle_dev_plus_1272_vicunatplt.json \
    --cache_dir /workspace/.cache \
    --push_to_hub False \
    --bf16 True \
    --output_dir output_vicuna_33b_medqa_opt4 \
    --num_train_epochs 6 \
    --per_device_train_batch_size 4 \
    --per_device_eval_batch_size 1 \
    --gradient_accumulation_steps 2 \
    --evaluation_strategy "steps" \
    --eval_steps 150 \
    --save_strategy "steps" \
    --save_steps 300 \
    --save_total_limit 100 \
    --learning_rate 2e-5 \
    --weight_decay 0. \
    --warmup_ratio 0.04 \
    --lr_scheduler_type "cosine" \
    --logging_steps 1 \
    --fsdp "full_shard auto_wrap offload" \
    --fsdp_transformer_layer_cls_to_wrap 'LlamaDecoderLayer' \
    --tf32 True \
    --model_max_length 1024 \
    --gradient_checkpointing True \
    --lazy_preprocess True &>> /workspace/medvicuna/logs/output_vicuna_33b_medqa_opt4.log

sleep 120s

bash /workspace/medvicuna/push_to_hub.sh https://huggingface.co/s1ghhh/vicuna_33b_medqa_opt4_0725 /workspace/medvicuna/output_vicuna_33b_medqa_opt4 &

# --- Run 4: vicuna-33b on augmented data (medqa_opt4_aug, max_length 2048) --
torchrun --nproc_per_node=7 --master_port=20001 fastchat/train/train_eval_mem.py \
    --model_name_or_path lmsys/vicuna-33b-v1.3 \
    --data_path /workspace/medvicuna/33b/medqa_opt4_aug.json \
    --eval_data_path /workspace/medvicuna/33b/opt4_medQA_usmle_dev_plus_1272_vicunatplt.json \
    --cache_dir /workspace/.cache \
    --push_to_hub False \
    --bf16 True \
    --output_dir output_vicuna_33b_medqa_aug_opt4 \
    --num_train_epochs 6 \
    --per_device_train_batch_size 4 \
    --per_device_eval_batch_size 1 \
    --gradient_accumulation_steps 2 \
    --evaluation_strategy "steps" \
    --eval_steps 150 \
    --save_strategy "steps" \
    --save_steps 300 \
    --save_total_limit 100 \
    --learning_rate 2e-5 \
    --weight_decay 0. \
    --warmup_ratio 0.04 \
    --lr_scheduler_type "cosine" \
    --logging_steps 1 \
    --fsdp "full_shard auto_wrap offload" \
    --fsdp_transformer_layer_cls_to_wrap 'LlamaDecoderLayer' \
    --tf32 True \
    --model_max_length 2048 \
    --gradient_checkpointing True \
    --lazy_preprocess True &>> /workspace/medvicuna/logs/output_vicuna_33b_medqa_aug_opt4.log

sleep 120s

# nohup: the final push must survive the script (and its terminal) exiting.
nohup bash /workspace/medvicuna/push_to_hub.sh https://huggingface.co/s1ghhh/vicuna_33b_medqa_opt4_aug_0725 /workspace/medvicuna/output_vicuna_33b_medqa_aug_opt4 &
#
# --- Disabled: vicuna-33b on the full 720k mixed dataset --------------------
#torchrun --nproc_per_node=7 --master_port=20001 fastchat/train/train_eval_mem.py \
#    --model_name_or_path lmsys/vicuna-33b-v1.3 \
#    --data_path /workspace/medvicuna/33b/medvicuna_720k_0722.json \
#    --eval_data_path /workspace/medvicuna/33b/opt4_medQA_usmle_dev_plus_1272_vicunatplt.json \
#    --cache_dir /workspace/.cache \
#    --push_to_hub False \
#    --bf16 True \
#    --output_dir output_vicuna_33b_medqa_opt4 \
#    --num_train_epochs 8 \
#    --per_device_train_batch_size 1 \
#    --per_device_eval_batch_size 1 \
#    --gradient_accumulation_steps 16 \
#    --evaluation_strategy "steps" \
#    --eval_steps 150 \
#    --save_strategy "no" \
#    --save_steps 300 \
#    --save_total_limit 100 \
#    --learning_rate 2e-5 \
#    --weight_decay 0. \
#    --warmup_ratio 0.04 \
#    --lr_scheduler_type "cosine" \
#    --logging_steps 1 \
#    --fsdp "full_shard auto_wrap" \
#    --fsdp_transformer_layer_cls_to_wrap 'LlamaDecoderLayer' \
#    --tf32 True \
#    --model_max_length 2048 \
#    --gradient_checkpointing True \
#    --lazy_preprocess True &>> /workspace/medvicuna/logs/output_vicuna_33b_720k.log
#
#
#nohup bash /workspace/medvicuna/push_to_hub.sh https://huggingface.co/s1ghhh/medvicuna_v1.1_13b_0725 /workspace/medvicuna/output_medvicuna_v1.1_13b &