#!/bin/bash
#
# Launch base-model VLN evaluations for 8 models in parallel, one model per
# GPU, as detached background "lanes". Each lane activates its own conda env,
# writes a per-lane log, and saves results under a timestamped output root.
#
# NOTE(review): -e is not enabled — presumably so one failed lane launch does
# not abort the remaining launches; confirm this is intentional.
set -uo pipefail
|
# --- Paths and experiment layout -------------------------------------------
PROJECT_DIR="/mnt/sfs_turbo_new/R11181/project_vlm"
EXP_DIR="${PROJECT_DIR}/exp_v5"
LOG_DIR="${EXP_DIR}/logs"
mkdir -p "${LOG_DIR}"
CONDA_ROOT="/mnt/sfs_turbo/R11181/miniconda3"
MODEL_DIR="${PROJECT_DIR}/model"
VAL_JSONL="${EXP_DIR}/data/exp1/exp1_val_1160.jsonl"

# One timestamp shared by every lane, so all logs and outputs from this
# invocation group together.
ts="$(date +%Y%m%d_%H%M%S)"

# Root directory collecting every lane's output for this run.
BASE_OUT_ROOT="${EXP_DIR}/output/base_eval_${ts}"
mkdir -p "${BASE_OUT_ROOT}"
|
|
#######################################
# Launch one vLLM-backed eval lane as a detached background job.
# Globals:   BASE_OUT_ROOT, LOG_DIR, ts, CONDA_ROOT, PROJECT_DIR, VAL_JSONL (read)
# Arguments: $1 GPU index, $2 model path, $3 lane name, $4 gpu_memory_utilization
# Outputs:   lane stdout/stderr -> per-lane log file; launch notice -> stdout
# Returns:   0 once the lane is launched (lane failures land in its log)
#######################################
run_vllm() {
  local gpu="$1" model_path="$2" name="$3" gmu="$4"
  local outdir="${BASE_OUT_ROOT}/${name}"
  local log="${LOG_DIR}/eval_base_${name}_${ts}.log"
  mkdir -p "${outdir}"
  {
    source "${CONDA_ROOT}/etc/profile.d/conda.sh"
    conda activate vllm_eval
    export VLLM_ALLOW_LONG_MAX_MODEL_LEN=1
    export PYTHONUNBUFFERED=1
    echo "[$(date '+%F %T')] ${name} (BASE) eval on GPU ${gpu} (vLLM, gmu=${gmu})"
    CUDA_VISIBLE_DEVICES="${gpu}" python3 "${PROJECT_DIR}/eval_vln_vllm.py" \
      --model_path "${model_path}" \
      --val_path "${VAL_JSONL}" \
      --output_dir "${outdir}" \
      --gpu_memory_utilization "${gmu}" \
      --batch_size 32 \
      --save_raw
    echo "[$(date '+%F %T')] ${name} DONE"
  } > "${log}" 2>&1 &
  # Capture the PID before disown so the reported value cannot be clobbered
  # by any later background command.
  local pid=$!
  disown
  echo "[OK] ${name} (vLLM) on GPU ${gpu}, pid=${pid}"
}
|
|
#######################################
# Launch one transformers-backed eval lane (for models without vLLM support)
# as a detached background job.
# Globals:   BASE_OUT_ROOT, LOG_DIR, ts, CONDA_ROOT, PROJECT_DIR, VAL_JSONL (read)
# Arguments: $1 GPU index, $2 model path, $3 lane name
# Outputs:   lane stdout/stderr -> per-lane log file; launch notice -> stdout
# Returns:   0 once the lane is launched (lane failures land in its log)
#######################################
run_transformers() {
  local gpu="$1" model_path="$2" name="$3"
  local outdir="${BASE_OUT_ROOT}/${name}"
  local log="${LOG_DIR}/eval_base_${name}_${ts}.log"
  mkdir -p "${outdir}"
  {
    source "${CONDA_ROOT}/etc/profile.d/conda.sh"
    conda activate vlm_train
    export PYTHONUNBUFFERED=1
    echo "[$(date '+%F %T')] ${name} (BASE) eval on GPU ${gpu} (transformers)"
    CUDA_VISIBLE_DEVICES="${gpu}" python3 "${PROJECT_DIR}/eval_vln_transformers.py" \
      --model_path "${model_path}" \
      --val_path "${VAL_JSONL}" \
      --output_dir "${outdir}" \
      --model_type internvl
    echo "[$(date '+%F %T')] ${name} DONE"
  } > "${log}" 2>&1 &
  # Capture the PID before disown so the reported value cannot be clobbered
  # by any later background command.
  local pid=$!
  disown
  echo "[OK] ${name} (transformers) on GPU ${gpu}, pid=${pid}"
}
|
|
# --- Fan out: one lane per GPU ---------------------------------------------
# GPUs 0-5 and 7 use the vLLM runner; GPU 6 (InternVL) uses transformers.
run_vllm 0 "${MODEL_DIR}/Qwen3.5-0.8B" "Qwen3.5-0.8B-base" 0.7
run_vllm 1 "${MODEL_DIR}/Qwen3.5-2B" "Qwen3.5-2B-base" 0.7
run_vllm 2 "${MODEL_DIR}/Qwen3.5-9B" "Qwen3.5-9B-base" 0.7
run_vllm 3 "${MODEL_DIR}/Qwen3-VL-2B-Instruct" "Qwen3-VL-2B-base" 0.7
run_vllm 4 "${MODEL_DIR}/Qwen3-VL-8B-Instruct" "Qwen3-VL-8B-base" 0.7
run_vllm 5 "${MODEL_DIR}/GLM-4.6V-Flash" "GLM-4.6V-Flash-base" 0.7
run_transformers 6 "${MODEL_DIR}/InternVL3_5-8B-HF" "InternVL3.5-8B-base"
run_vllm 7 "${MODEL_DIR}/Gemma-4-E4B-it" "Gemma-4-E4B-base" 0.7
|
|
| echo "" |
| echo "============================================================" |
| echo "8 base eval lanes launched." |
| echo "Results dir: ${BASE_OUT_ROOT}" |
| echo "Logs: ${LOG_DIR}/eval_base_*_${ts}.log" |
| echo "============================================================" |
|
|