#!/bin/bash
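# Fine-tunes LLaVA (Vicuna-7B v1.5 base) on mixtral_instruct_158K_V1 with
# DeepSpeed ZeRO-2: verifies Python dependencies, configures Weights & Biases
# logging, then launches training.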
|
|
cd /mnt/bn/vl-research/workspace/boli01/zzzprojects/LLaVA || exit 1
|
|
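# Ensure yolk3k is installed; it provides the `yolk` command used below to
# query PyPI for the latest release of a package.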
if ! pip show yolk3k > /dev/null 2>&1; then
    pip install yolk3k
fi
|
|
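# Upgrade transformers whenever PyPI has a newer release than the one installed.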
installed_version=$(pip show transformers | grep '^Version:' | cut -d ' ' -f 2)
latest_version=$(yolk -V transformers | cut -d ' ' -f 2)
if [ "$installed_version" != "$latest_version" ]; then
    pip install -U transformers
fi
|
|
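# Keep deepspeed pinned to 0.12.2; reinstall if a different version is present.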
installed_version=$(pip show deepspeed | grep '^Version:' | cut -d ' ' -f 2)
if [ "$installed_version" != "0.12.2" ]; then
    pip install deepspeed==0.12.2
fi
|
|
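# Install flash-attn if missing; --no-build-isolation lets its build find the
# already-installed torch.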
if ! pip show flash-attn > /dev/null 2>&1; then
    pip install flash-attn --no-build-isolation
fi
|
|
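# Conversation template and base model checkpoint.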
PROMPT_VERSION=v1
MODEL_VERSION="vicuna-7b-v1-5"
|
|
|
|
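# Pretraining run whose mm_projector weights are loaded for fine-tuning below.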
PROJECT_NAME="ds_llava-vicuna-7b-v1-5-mlp2x_gelu-pretrain_blip558k_plain"
|
|
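# Instruction-tuning data, expected at ./playground/data/$DATA_NAME.json.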
DATA_NAME="mixtral_instruct_158K_V1"
|
|
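# Weights & Biases setup. Supply the API key via the environment; never commit
# a real key to the repository.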
export WANDB_API_KEY="${WANDB_API_KEY:?set WANDB_API_KEY in the environment before running}"
wandb login "$WANDB_API_KEY"
|
|
export WANDB_NAME="$PROJECT_NAME--$MODEL_VERSION--$DATA_NAME"
export WANDB_PROJECT=LLaVA_Mixtral
export WANDB_MODE=online
|
|
# Launch fine-tuning with DeepSpeed ZeRO-2 (./scripts/zero2.json), initializing
# the multimodal projector from the pretraining run named above.
deepspeed --master_port 26000 \
    llava/train/train_mem.py \
    --deepspeed ./scripts/zero2.json \
    --model_name_or_path ./checkpoints/$MODEL_VERSION \
    --version $PROMPT_VERSION \
    --data_path ./playground/data/$DATA_NAME.json \
    --image_folder /mnt/bn/vl-research/workspace/boli01/data/playground/data/coco/train2017 \
    --vision_tower openai/clip-vit-large-patch14 \
    --pretrain_mm_mlp_adapter ./checkpoints/$PROJECT_NAME/mm_projector.bin \
    --mm_vision_select_layer -2 \
    --mm_projector_type mlp2x_gelu \
    --mm_use_im_start_end False \
    --mm_use_im_patch_token False \
    --bf16 True \
    --output_dir ./checkpoints/llava--$PROJECT_NAME--$MODEL_VERSION--$DATA_NAME--finetune \
    --num_train_epochs 1 \
    --per_device_train_batch_size 16 \
    --per_device_eval_batch_size 4 \
    --gradient_accumulation_steps 1 \
    --evaluation_strategy "no" \
    --save_strategy "steps" \
    --save_steps 50000 \
    --save_total_limit 1 \
    --learning_rate 2e-5 \
    --weight_decay 0. \
    --warmup_ratio 0.03 \
    --lr_scheduler_type "cosine" \
    --logging_steps 1 \
    --tf32 True \
    --model_max_length 2048 \
    --gradient_checkpointing True \
    --dataloader_num_workers 16 \
    --lazy_preprocess True \
    --report_to wandb
|
|