#!/bin/bash
# BASE_MODEL="facebook/opt-125m"  # smaller model, handy for quick debugging
BASE_MODEL="meta-llama/Llama-2-7b-hf"
# DATA_PATH="./data/MetaMathQA.json"
DATA_PATH="./data/MetaMathQA-40K.json"
OUTPUT="output/cp3e5"

export WANDB_PROJECT="HRA_MetaMath395"
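
# Optional sanity check (a sketch, not part of the original pipeline): fail
# fast with a clear message if the dataset file is missing, rather than
# erroring deep inside finetune_32.py.
if [ ! -f "$DATA_PATH" ]; then
    echo "Dataset not found at $DATA_PATH" >&2
    exit 1
fi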

# python finetune_32.py \
#     --model_name_or_path $BASE_MODEL \
#     --output_dir $OUTPUT \
#     --hrft_r 32 \
#     --init_a 1e-4 \
#     --eps 1e-4 \
#     --add_orth "none" \
#     --lamda 1e-4 \
#     --data_path $DATA_PATH \
#     --dataset_split "train[:100000]" \
#     --dataset_field query response \
#     --num_train_epochs 2 \
#     --per_device_train_batch_size 8 \
#     --gradient_accumulation_steps 4 \
#     --save_strategy "steps" \
#     --save_steps 0 \
#     --save_total_limit 1 \
#     --learning_rate 3e-5 \
#     --weight_decay 0. \
#     --warmup_ratio 0.005 \
#     --lr_scheduler_type "cosine" \
#     --logging_steps 1000 \
#     --bf16 True \
#     --tf32 True \
#     --report_to "none"
# wandb sync wandb/latest-run

# OUTPUT="output/cp1e5N"
# python finetune_32.py \
#     --model_name_or_path $BASE_MODEL \
#     --output_dir $OUTPUT \
#     --hrft_r 32 \
#     --init_a 1e-4 \
#     --eps 1e-4 \
#     --add_orth "none" \
#     --lamda 1e-4 \
#     --data_path $DATA_PATH \
#     --dataset_split "train[:100000]" \
#     --dataset_field query response \
#     --num_train_epochs 2 \
#     --per_device_train_batch_size 8 \
#     --gradient_accumulation_steps 4 \
#     --save_strategy "steps" \
#     --save_steps 0 \
#     --save_total_limit 1 \
#     --learning_rate 1e-5 \
#     --weight_decay 0. \
#     --warmup_ratio 0.005 \
#     --lr_scheduler_type "cosine" \
#     --logging_steps 1000 \
#     --bf16 True \
#     --tf32 True \
#     --report_to "wandb"
# wandb sync wandb/latest-run

# OUTPUT="output/cpr1"
# python finetune_32.py \
#     --model_name_or_path $BASE_MODEL \
#     --output_dir $OUTPUT \
#     --hrft_r 1 \
#     --init_a 1e-4 \
#     --eps 1e-4 \
#     --add_orth "none" \
#     --lamda 1e-4 \
#     --data_path $DATA_PATH \
#     --dataset_split "train" \
#     --dataset_field query response \
#     --num_train_epochs 2 \
#     --per_device_train_batch_size 32 \
#     --gradient_accumulation_steps 1 \
#     --save_strategy "steps" \
#     --save_steps 0 \
#     --save_total_limit 1 \
#     --learning_rate 3e-5 \
#     --weight_decay 0. \
#     --warmup_ratio 0.005 \
#     --lr_scheduler_type "cosine" \
#     --logging_steps 1000 \
#     --bf16 True \
#     --tf32 True \
#     --report_to "wandb"
# wandb sync wandb/latest-run

# OUTPUT="output/cpr2"
# python finetune_32.py \
#     --model_name_or_path $BASE_MODEL \
#     --output_dir $OUTPUT \
#     --hrft_r 1 \
#     --init_a 1e-4 \
#     --eps 1e-4 \
#     --add_orth "none" \
#     --lamda 1e-4 \
#     --data_path $DATA_PATH \
#     --dataset_split "train" \
#     --dataset_field query response \
#     --num_train_epochs 3 \
#     --per_device_train_batch_size 32 \
#     --gradient_accumulation_steps 1 \
#     --save_strategy "steps" \
#     --save_steps 0 \
#     --save_total_limit 1 \
#     --learning_rate 3e-5 \
#     --weight_decay 0. \
#     --warmup_ratio 0.005 \
#     --lr_scheduler_type "cosine" \
#     --logging_steps 200 \
#     --bf16 True \
#     --tf32 True \
#     --report_to "wandb"
# wandb sync wandb/latest-run
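
# Aside (sketch): the commented-out runs above differ only in OUTPUT, hrft_r,
# epochs, batch size, logging_steps, and learning_rate; a small sweep loop
# such as the following could replace the copy-pasting (flags abbreviated):
# for LR in 3e-5 1e-5; do
#     python finetune_32.py --model_name_or_path $BASE_MODEL \
#         --output_dir "output/cp_lr${LR}" --learning_rate "$LR" \
#         ... # remaining flags as in the blocks above
# done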

OUTPUT="output/cms3"
python finetune_32.py \
    --model_name_or_path $BASE_MODEL \
    --output_dir $OUTPUT \
    --hrft_r 32 \
    --init_a 1e-4 \
    --eps 1e-4 \
    --add_orth "none" \
    --lamda 1e-4 \
    --data_path $DATA_PATH \
    --dataset_split "train" \
    --dataset_field query response \
    --num_train_epochs 2 \
    --per_device_train_batch_size 8 \
    --gradient_accumulation_steps 4 \
    --save_strategy "steps" \
    --save_steps 0 \
    --save_total_limit 1 \
    --learning_rate 1e-5 \
    --weight_decay 0. \
    --warmup_ratio 0.005 \
    --lr_scheduler_type "cosine" \
    --logging_steps 200 \
    --bf16 True \
    --tf32 True \
    --report_to "wandb"
date +"%F %T"
# wandb sync wandb/latest-run
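
# Quick check (sketch, not in the original script): confirm the final
# checkpoint actually landed in $OUTPUT before syncing or evaluating.
ls -lh "$OUTPUT"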