#!/bin/bash
set -x
|
|
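# Launch topology: a single node with 8 GPUs.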
nnodes=1
num_gpus=8
|
|
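# Root directory holding the datasets and model weights (adjust to your environment).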
DATA_ROOT='/jizhicfs/bojoli'
|
|
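# LLaVA pretraining data: the 558K BLIP-captioned LAION/CC/SBU image-text pairs.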
DATA_PATH="${DATA_ROOT}/llava-pretrain/blip_laion_cc_sbu_558k.json"
IMAGE_FOLDER="${DATA_ROOT}/llava-pretrain/images"
|
|
|
|
|
|
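# Vision encoder (OpenAI CLIP ViT-L/14 at 336px) and LLM backbone (Vicuna-7B v1.5).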
VISION_TOWER_NAME='clip-vit-large-patch14-336'
MODEL_NAME='vicuna-7b-1.5'
|
|
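# Resolve local checkpoint paths and build a run name from the method, LLM, and vision tower.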
VISION_TOWER="${DATA_ROOT}/${VISION_TOWER_NAME}"
MODEL_PATH="${DATA_ROOT}/${MODEL_NAME}"
RUN_NAME="final_mmpe_${MODEL_NAME}_${VISION_TOWER_NAME}"
|
|
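# Feature-alignment pretraining stage with DeepSpeed ZeRO-2:
#   - only the MLP projector is trained (--tune_mm_mlp_adapter True); the vision tower stays frozen
#   - any-resolution image handling via --image_aspect_ratio anyres with the grid pinpoints below
#   - effective batch size = 8 GPUs x 8 per-device x 4 grad-accum steps = 256
#   - --use_mmpe True presumably enables this repo's MMPE option (a custom flag on top of upstream LLaVA)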
deepspeed --num_nodes ${nnodes} --num_gpus ${num_gpus} --master_port=10277 llava/train/train_mem.py \
    --deepspeed ./scripts/zero2.json \
    --model_name_or_path "${MODEL_PATH}" \
    --version plain \
    --data_path "${DATA_PATH}" \
    --image_folder "${IMAGE_FOLDER}" \
    --vision_tower "${VISION_TOWER}" \
    --mm_projector_type mlp2x_gelu \
    --tune_mm_mlp_adapter True \
    --unfreeze_mm_vision_tower False \
    --mm_vision_select_layer -2 \
    --mm_use_im_start_end False \
    --mm_use_im_patch_token False \
    --mm_patch_merge_type spatial_unpad \
    --image_aspect_ratio anyres \
    --group_by_modality_length False \
    --bf16 True \
    --output_dir "./checkpoints/${RUN_NAME}" \
    --num_train_epochs 1 \
    --per_device_train_batch_size 8 \
    --per_device_eval_batch_size 4 \
    --gradient_accumulation_steps 4 \
    --evaluation_strategy "no" \
    --use_mmpe True \
    --save_strategy "steps" \
    --save_steps 24000 \
    --save_total_limit 1 \
    --learning_rate 1e-3 \
    --weight_decay 0. \
    --warmup_ratio 0.03 \
    --lr_scheduler_type "cosine" \
    --logging_steps 1 \
    --tf32 True \
    --image_grid_pinpoints "[(672, 672), (336, 672), (672, 336), (336, 1008), (1008, 336), (336, 1344), (1344, 336)]" \
    --model_max_length 4096 \
    --gradient_checkpointing True \
    --dataloader_num_workers 4 \
    --lazy_preprocess True \
    --report_to none \
    --run_name "${RUN_NAME}"
|
|