s1ghhh committed on
Commit
3edac4e
·
1 Parent(s): 58f8d3f

Upload 2 files

Browse files
Files changed (2) hide show
  1. train_13b_0707.sh +29 -0
  2. train_multi_0707.sh +199 -0
train_13b_0707.sh ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/usr/bin/env bash
# Fine-tune eachadea/vicuna-13b-1.1 on the 520k aug+no-aug MedVicuna dataset
# across 8 GPUs with FSDP full sharding.
#
# Outputs:
#   - checkpoints under ./output_medvicuna_v1.1_13b (every 1150 steps)
#   - training log appended to ./output_medvicuna_v1.1_13b.log
#
# Requires bash (uses the `&>>` redirection operator).
set -euo pipefail

# Log metrics to Weights & Biases online (not offline/disabled).
export WANDB_MODE=online

torchrun --nproc_per_node=8 --master_port=20001 /workspace/medvicuna/fastchat/train/train_mem.py \
  --model_name_or_path eachadea/vicuna-13b-1.1 \
  --data_path /workspace/medvicuna/medvicuna_v1.1_520k_augAndNoaug.json \
  --cache_dir /workspace/.cache \
  --push_to_hub False \
  --bf16 True \
  --output_dir output_medvicuna_v1.1_13b \
  --num_train_epochs 8 \
  --per_device_train_batch_size 16 \
  --per_device_eval_batch_size 16 \
  --gradient_accumulation_steps 1 \
  --evaluation_strategy "steps" \
  --eval_steps 1150 \
  --save_strategy "steps" \
  --lazy_preprocess True \
  --save_steps 1150 \
  --save_total_limit 100 \
  --learning_rate 2e-5 \
  --weight_decay 0. \
  --warmup_ratio 0.04 \
  --lr_scheduler_type "cosine" \
  --logging_steps 1 \
  --fsdp "full_shard auto_wrap" \
  --fsdp_transformer_layer_cls_to_wrap 'LlamaDecoderLayer' \
  --tf32 True \
  --model_max_length 2048 \
  --gradient_checkpointing True &>> output_medvicuna_v1.1_13b.log
train_multi_0707.sh ADDED
@@ -0,0 +1,199 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/usr/bin/env bash
# Sequential multi-run training driver: fine-tunes four 33B models on the
# MedQA opt4 data (plain and augmented), then a 13B model on the 520k
# MedVicuna set, then a 7B throwaway test run. After each of the first five
# runs the resulting checkpoints are pushed to the Hugging Face Hub in the
# background while the next training starts.
#
# Requires bash (uses `&>>`). Intentionally NOT using `set -e`: if one
# training fails we still want the remaining jobs in the queue to run.
set -uo pipefail

# Log metrics to Weights & Biases online.
export WANDB_MODE=online

readonly MEDVICUNA=/workspace/medvicuna
readonly LOG_DIR="$MEDVICUNA/logs"

# Redirections below append into $LOG_DIR; make sure it exists up front,
# otherwise every `&>>` would fail.
mkdir -p -- "$LOG_DIR"

#######################################
# Run one 8-GPU FSDP fine-tuning job with the shared hyperparameters
# (8 epochs, lr 2e-5, cosine schedule, warmup 0.04, max length 2048).
# Arguments:
#   $1 - base model (HF hub id)
#   $2 - training data JSON path
#   $3 - output directory name (also used as log file stem)
#   $4 - per-device train/eval batch size
#   $5 - gradient accumulation steps
#   $6 - eval interval (steps)
#   $7 - checkpoint interval (steps)
# Outputs:
#   appends training log to $LOG_DIR/<output_dir>.log
#######################################
run_training() {
  local model=$1 data=$2 out=$3 bs=$4 accum=$5 eval_steps=$6 save_steps=$7
  cd "$MEDVICUNA" || return
  torchrun --nproc_per_node=8 --master_port=20001 "$MEDVICUNA/fastchat/train/train_mem.py" \
    --model_name_or_path "$model" \
    --data_path "$data" \
    --cache_dir /workspace/.cache \
    --push_to_hub False \
    --bf16 True \
    --lazy_preprocess True \
    --output_dir "$out" \
    --num_train_epochs 8 \
    --per_device_train_batch_size "$bs" \
    --per_device_eval_batch_size "$bs" \
    --gradient_accumulation_steps "$accum" \
    --evaluation_strategy "steps" \
    --eval_steps "$eval_steps" \
    --save_strategy "steps" \
    --save_steps "$save_steps" \
    --save_total_limit 100 \
    --learning_rate 2e-5 \
    --weight_decay 0. \
    --warmup_ratio 0.04 \
    --lr_scheduler_type "cosine" \
    --logging_steps 1 \
    --fsdp "full_shard auto_wrap" \
    --fsdp_transformer_layer_cls_to_wrap 'LlamaDecoderLayer' \
    --tf32 True \
    --model_max_length 2048 \
    --gradient_checkpointing True &>> "$LOG_DIR/$out.log" \
    || printf 'WARNING: training run %s exited non-zero; continuing with next job\n' "$out" >&2
}

#######################################
# Wait for the filesystem to settle, then push an output directory to the
# HF Hub in the background (next training may start immediately).
# Arguments:
#   $1 - target hub repo name (under s1ghhh/)
#   $2 - local output directory name under $MEDVICUNA
#######################################
push_checkpoints() {
  local repo=$1 out=$2
  sleep 120s
  nohup bash "$MEDVICUNA/push_to_hub.sh" "https://huggingface.co/s1ghhh/$repo" "$MEDVICUNA/$out" &
}

# --- 33B runs: batch 1, grad-accum 16, eval every 250 steps, save every 300 ---
run_training lmsys/vicuna-33b-v1.3  "$MEDVICUNA/33b/medqa_opt4.json"     output_vicuna_33b_medqa_opt4     1 16 250 300
push_checkpoints vicuna_33b_medqa_opt4_0707      output_vicuna_33b_medqa_opt4

run_training alexl83/LLaMA-33B-HF   "$MEDVICUNA/33b/medqa_opt4.json"     output_llama_33b_medqa_opt4      1 16 250 300
push_checkpoints llama_33b_medqa_opt4_0707       output_llama_33b_medqa_opt4

run_training lmsys/vicuna-33b-v1.3  "$MEDVICUNA/33b/medqa_opt4_aug.json" output_vicuna_33b_medqa_opt4_aug 1 16 250 300
push_checkpoints vicuna_33b_medqa_opt4_aug_0707  output_vicuna_33b_medqa_opt4_aug

run_training alexl83/LLaMA-33B-HF   "$MEDVICUNA/33b/medqa_opt4_aug.json" output_llama_33b_medqa_opt4_aug  1 16 250 300
push_checkpoints llama_33b_medqa_opt4_aug_0707   output_llama_33b_medqa_opt4_aug

# --- 13B run: batch 16, no accumulation, eval/save every 1150 steps ---
run_training eachadea/vicuna-13b-1.1 "$MEDVICUNA/medvicuna_v1.1_520k_augAndNoaug.json" output_medvicuna_v1.1_13b 16 1 1150 1150
# NOTE(review): the original script pushed this run without the 120s settle
# delay used everywhere else; made consistent here.
push_checkpoints medvicuna_v1.1_13b_0707 output_medvicuna_v1.1_13b

# --- 7B test run: differs from the shared template (per-epoch eval, no
# checkpoint saving, warmup 0.02, no cache_dir, log in cwd), so it is kept
# as an explicit invocation rather than forced through run_training. It is
# not pushed to the hub. ---
cd "$MEDVICUNA" || exit

torchrun --nproc_per_node=8 --master_port=20001 fastchat/train/train_mem.py \
  --model_name_or_path eachadea/vicuna-7b-1.1 \
  --data_path /workspace/medvicuna/medvicuna_v1.1_520k_augAndNoaug.json \
  --push_to_hub False \
  --bf16 True \
  --output_dir medvicuna_7b_epoch8_test \
  --num_train_epochs 8 \
  --per_device_train_batch_size 32 \
  --per_device_eval_batch_size 16 \
  --gradient_accumulation_steps 1 \
  --evaluation_strategy "epoch" \
  --eval_steps 1894 \
  --save_strategy "no" \
  --save_steps 3787 \
  --save_total_limit 32 \
  --learning_rate 2e-5 \
  --weight_decay 0. \
  --warmup_ratio 0.02 \
  --lr_scheduler_type "cosine" \
  --logging_steps 1 \
  --fsdp "full_shard auto_wrap" \
  --fsdp_transformer_layer_cls_to_wrap 'LlamaDecoderLayer' \
  --tf32 True \
  --model_max_length 2048 \
  --gradient_checkpointing True \
  --lazy_preprocess True &>> medvicuna_7b_epoch8_test.log