s1ghhh committed
Commit a248b8d · Parent: e8dac4c

Upload train_v0.4_0629_steps.sh

Files changed (1)
  1. train_v0.4_0629_steps.sh +55 -0
train_v0.4_0629_steps.sh ADDED
@@ -0,0 +1,55 @@
+ export WANDB_MODE=online
+
+ torchrun --nproc_per_node=8 --master_port=20001 /workspace/medvicuna/fastchat/train/train_mem.py \
+     --model_name_or_path eachadea/vicuna-7b-1.1 \
+     --data_path /workspace/medvicuna/medvicuna_v0.4_590k.json \
+     --push_to_hub False \
+     --bf16 True \
+     --output_dir output_medvicuna_v0.4_7b \
+     --num_train_epochs 8 \
+     --per_device_train_batch_size 32 \
+     --per_device_eval_batch_size 16 \
+     --gradient_accumulation_steps 1 \
+     --evaluation_strategy "steps" \
+     --eval_steps 1150 \
+     --save_strategy "steps" \
+     --save_steps 1150 \
+     --save_total_limit 100 \
+     --learning_rate 2e-5 \
+     --weight_decay 0. \
+     --warmup_ratio 0.04 \
+     --lr_scheduler_type "cosine" \
+     --logging_steps 1 \
+     --fsdp "full_shard auto_wrap" \
+     --fsdp_transformer_layer_cls_to_wrap 'LlamaDecoderLayer' \
+     --tf32 True \
+     --model_max_length 2048 \
+     --gradient_checkpointing True &>> output_medvicuna_v0.4_7b.log
+
+ sleep 1200s
+
+ torchrun --nproc_per_node=8 --master_port=20001 /workspace/medvicuna/fastchat/train/train_mem.py \
+     --model_name_or_path eachadea/vicuna-13b-1.1 \
+     --data_path /workspace/medvicuna/medvicuna_v0.4_590k.json \
+     --push_to_hub False \
+     --bf16 True \
+     --output_dir output_medvicuna_v0.4_13b \
+     --num_train_epochs 8 \
+     --per_device_train_batch_size 32 \
+     --per_device_eval_batch_size 16 \
+     --gradient_accumulation_steps 1 \
+     --evaluation_strategy "steps" \
+     --eval_steps 1150 \
+     --save_strategy "steps" \
+     --save_steps 1150 \
+     --save_total_limit 100 \
+     --learning_rate 2e-5 \
+     --weight_decay 0. \
+     --warmup_ratio 0.04 \
+     --lr_scheduler_type "cosine" \
+     --logging_steps 1 \
+     --fsdp "full_shard auto_wrap" \
+     --fsdp_transformer_layer_cls_to_wrap 'LlamaDecoderLayer' \
+     --tf32 True \
+     --model_max_length 2048 \
+     --gradient_checkpointing True &>> output_medvicuna_v0.4_13b.log
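
For context, the eval/save cadence appears tied to the dataset size; the arithmetic below is a sketch of my own, not something stated in the commit. With 8 processes, a per-device train batch of 32, and gradient_accumulation_steps 1, the global batch is 256, so the 590k-example dataset yields roughly 2300 optimizer steps per epoch, and --eval_steps 1150 / --save_steps 1150 then evaluate and checkpoint about twice per epoch (around 16 checkpoints across the 8 epochs, well under --save_total_limit 100).

    # Sketch of the assumed cadence arithmetic (not part of the committed script):
    echo $(( 32 * 8 * 1 ))          # global batch: 256 examples per optimizer step
    echo $(( 590000 / (32 * 8) ))   # ~2304 optimizer steps per epoch on the 590k dataset
    # 1150 is roughly 2304 / 2, i.e. evaluate and save about twice per epoch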