#!/usr/bin/env bash
#
# Pipeline for the Instruments dataset:
#   1. fine-tune an RQ-LLaMA checkpoint (8 GPUs, DeepSpeed ZeRO-2, fp16),
#   2. launch checkpoint conversion in the background,
#   3. evaluate the fine-tuned model on the seqrec task,
#   4. hand off to instruments_train.sh.
set -euo pipefail

# Disable W&B logging; keep CUDA launches asynchronous.
# (Original line ran both assignments in one `export`, which also exported a
# spurious variable literally named `export` — split into two commands.)
export WANDB_MODE=disabled
export CUDA_LAUNCH_BLOCKING=0

# ---------------------------------------------------------------------------
# Fine-tuning
# ---------------------------------------------------------------------------
DATASET=Instruments
CKPT_PATH=/datain/v-yinju/rq-llama/v3-train/Instruments/item_only_dv
DATA_PATH=/datain/v-yinju/rqvae-zzx/data
OUTPUT_DIR=$CKPT_PATH/finetune_0817
# Saved before CKPT_PATH is reassigned below; both torchrun calls use the same
# item-index mapping (the evaluation section previously hard-coded this path).
INDEX_FILE=$CKPT_PATH/indices.json

torchrun --nproc_per_node=8 fine-tune.py \
  --ckpt_path "$CKPT_PATH" \
  --output_dir "$OUTPUT_DIR" \
  --dataset "$DATASET" \
  --data_path "$DATA_PATH" \
  --per_device_batch_size 6 \
  --gradient_accumulation_steps 2 \
  --learning_rate 5e-4 \
  --epochs 20 \
  --weight_decay 0.01 \
  --save_and_eval_strategy epoch \
  --fp16 \
  --deepspeed ./config/ds_z2_fp16.json \
  --dataloader_num_workers 4 \
  --only_train_response \
  --re_index 29 \
  --tasks seqrec,itemsearch,preferenceobtain,item2index,index2item,fusionseqrec \
  --train_prompt_sample_num 1,1,1,1,1,1 \
  --train_data_sample_num 0,0,0,0,0,0 \
  --index_file "$INDEX_FILE"

# ---------------------------------------------------------------------------
# Convert the fine-tuned checkpoint in the background while evaluation runs.
# NOTE(review): evaluation below does NOT wait for this job — confirm that
# evaluate-finetuned.py needs none of the converted artifacts.
# ---------------------------------------------------------------------------
cd convert
nohup ./convert.sh "$OUTPUT_DIR" >convert.log 2>&1 &
cd ..

# ---------------------------------------------------------------------------
# Evaluation (sequential-recommendation task only, beam search width 20)
# ---------------------------------------------------------------------------
BASE_MODEL=/datain/v-yinju/llama-7b
# Same directory fine-tune.py just wrote (was hard-coded to the identical
# .../item_only_dv/finetune_0817 path before).
CKPT_PATH=$OUTPUT_DIR
RESULTS_FILE=$CKPT_PATH/eval_result.json

torchrun --nproc_per_node=8 evaluate-finetuned.py \
  --base_model "$BASE_MODEL" \
  --ckpt_path "$CKPT_PATH" \
  --dataset "$DATASET" \
  --data_path "$DATA_PATH" \
  --results_file "$RESULTS_FILE" \
  --test_batch_size 1 \
  --num_beams 20 \
  --test_prompt_ids all \
  --test_task seqrec \
  --index_file "$INDEX_FILE"

# NOTE(review): this sources instruments_train.sh to chain the next run — if
# THIS file *is* instruments_train.sh it recurses forever; confirm the
# intended hand-off target before relying on this step.
cd ~/rq-llama
source instruments_train.sh