#!/bin/bash
# Strict mode: abort on errors, unset variables, and mid-pipeline failures.
set -euo pipefail

export OMP_NUM_THREADS=20
export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7

# --- Training hyperparameters ---
lr=1e-5
base=QwQ-32B
tokenizer=QwQ-32B
# train_data=hopotqa_1217.json
train_data=no_error_data_871
bsz=1   # per-device train batch size
acc=8   # gradient accumulation steps

# Random job id in [0, 99999] so concurrent runs get distinct save paths.
JOB_ID=$(( RANDOM % 100000 ))
save_path="JOB:${JOB_ID}#LR:${lr}#BASE:${base}#TOKEN:${tokenizer}#BSZ:${bsz}#ACC:${acc}_${train_data}_mixed_math"

# Checkpoint output directory; the *_1 aliases are consumed by the
# downstream evaluation invocation at the bottom of this script.
output_dir="/capacity/userdata/ss/sft_search/${save_path}"
output_dir_1=${output_dir}
model_name_1=${base}
dataset_1=${train_data}
# Create the checkpoint directory; abort if it cannot be created
# (everything after this depends on it existing).
mkdir -p -- "$output_dir" || exit 1
printf '%s\n' "$output_dir"
# Launch distributed SFT training via DeepSpeed (ZeRO-3 with offload).
deepspeed_bin=/opt/aps/workdir/miniforge3/envs/ss_train/bin/deepspeed

# Launcher options: single-node rendezvous, no ssh fan-out.
launcher_args=(
  --hostfile=hostfile
  --no_ssh
  --node_rank=0
  --master_addr=172.19.164.116
  --master_port=9944
)

# Arguments forwarded to the training script itself.
train_args=(
  --deepspeed ds_zero3_offload.json
  --model_name_or_path "/capacity/userdata/models/${base}"
  --tokenizer_name_or_path "/capacity/userdata/models/${tokenizer}"
  --do_train
  --save_safetensors true
  --data_path "/opt/aps/workdir/sunshuang/deep_search/search_o1/sft_data/${train_data}.json"
  --lr_scheduler_type cosine
  --output_dir "$output_dir"
  --overwrite_output_dir
  --warmup_ratio 0.03
  --gradient_checkpointing true
  --per_device_train_batch_size "$bsz"
  --gradient_accumulation_steps "$acc"
  --logging_steps 1
  --learning_rate "$lr"
  --num_train_epochs 6
  --save_strategy epoch
  --save_only_model true
  --model_max_length 30000
  --save_total_limit 6
  --bf16
)

"$deepspeed_bin" "${launcher_args[@]}" sft_2_math.py "${train_args[@]}" || exit 1
# Run downstream evaluation on the freshly trained checkpoint.
# Quote the expansions: save_path embeds '#' and ':' and any future value
# with whitespace would otherwise be word-split (SC2086).
bash test_two_model_qwq.sh "$output_dir_1" "$model_name_1" "$dataset_1"
# bash test.sh $output_dir_2 $model_name_2
# bash test.sh $output_dir_3 $model_name_3
# bash test.sh $output_dir_2 $model_name_2
|