#!/usr/bin/env bash
# =============================================================================
# serve_judge.sh — launches the vLLM server for Qwen3.5-35B-A3B (the judge)
# Model: Qwen/Qwen3.5-35B-A3B (MoE: 35B total weights / 3B active params)
# Port: 8001
# VRAM usage: ~70 GB (all weights in bf16)
#
# ⚠️ Memory note:
#   Running both servers on a single A100 80GB:
#   - TeenEmo (serve_test.sh, GPU_UTIL=0.10) + judge (GPU_UTIL=0.88) = ~98%
#   - If you hit OOM, stop serve_test.sh first, then launch this server and
#     fall back to sequential mode (follow the instructions in
#     setup_eqbench_run.sh); an illustrative stop command is sketched below
#
# Qwen3.5-35B-A3B is a VLM, so --language-model-only is required
# (pipeline_tag: image-text-to-text, HF: https://huggingface.co/Qwen/Qwen3.5-35B-A3B)
# =============================================================================
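
# Illustrative only (not part of the original workflow): one way to stop the
# TeenEmo test server before launching the judge. The process-match pattern
# assumes serve_test.sh also starts its model via `vllm serve`:
#   pkill -f "vllm serve" || true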
set -euo pipefail
JUDGE_MODEL="${JUDGE_MODEL:-Qwen/Qwen3.5-35B-A3B}"
PORT="${JUDGE_PORT:-8001}"
HOST="${VLLM_HOST:-0.0.0.0}"
GPU_UTIL="${JUDGE_GPU_UTIL:-0.88}"
MAX_MODEL_LEN="${JUDGE_MAX_MODEL_LEN:-8192}"
DTYPE="${VLLM_DTYPE:-auto}"
MAX_NUM_SEQS="${VLLM_MAX_NUM_SEQS:-16}"
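
# Optional pre-flight check (a sketch, not in the original script): warn if
# free VRAM looks too low for this GPU_UTIL on an 80 GB card. The 72000 MiB
# threshold is a rough assumption (~88% of 80 GB); the check is skipped if
# nvidia-smi is missing or returns something non-numeric.
if command -v nvidia-smi >/dev/null 2>&1; then
  FREE_MIB="$(nvidia-smi --query-gpu=memory.free --format=csv,noheader,nounits | head -n1)"
  if [ "${FREE_MIB:-0}" -lt 72000 ] 2>/dev/null; then
    echo "⚠️  Only ${FREE_MIB} MiB of VRAM free; stop serve_test.sh first if the judge OOMs" >&2
  fi
fi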
echo "=== Qwen3.5-35B-A3B 採点者サーバー起動 ==="
echo " モデル : ${JUDGE_MODEL}"
echo " ポート : ${PORT}"
echo " GPU 使用率 : ${GPU_UTIL}"
echo " ⚠️ TeenEmoと同時起動の場合は serve_test.sh の GPU_UTIL=0.10 を確認してください"
echo ""
python -c "import vllm; print(f'  vLLM version : {vllm.__version__}')" 2>/dev/null || true
echo ""
exec vllm serve "${JUDGE_MODEL}" \
  --host "${HOST}" \
  --port "${PORT}" \
  --dtype "${DTYPE}" \
  --gpu-memory-utilization "${GPU_UTIL}" \
  --max-model-len "${MAX_MODEL_LEN}" \
  --tensor-parallel-size 1 \
  --max-num-seqs "${MAX_NUM_SEQS}" \
  --language-model-only \
  --enable-prefix-caching \
  --trust-remote-code
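
# =============================================================================
# Usage sketch (run from another shell once the server is up; these paths
# follow vLLM's OpenAI-compatible API, and the model name must match
# JUDGE_MODEL):
#   curl -s http://localhost:8001/v1/models
#   curl -s http://localhost:8001/v1/chat/completions \
#     -H "Content-Type: application/json" \
#     -d '{"model": "Qwen/Qwen3.5-35B-A3B", "messages": [{"role": "user", "content": "ping"}]}'
# =============================================================================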