sync: eqbench-vast/serve_test_vast.sh
eqbench-vast/serve_test_vast.sh
ADDED
@@ -0,0 +1,56 @@
+#!/usr/bin/env bash
+# =============================================================================
+# serve_test_vast.sh — TeenEmo candidate (test-taker) server (Vast.ai L4/RTX4090/L40S)
+#
+# Fixes for problems identified in this session:
+# [Fix-1] --max-lora-rank 32: the Unsloth LoRA has rank=32, so the default of 16 fails at startup
+# [Fix-2] --max-model-len 32768: EQ-Bench3 requests max_tokens=12000, so
+#         max_model_len >= 12000 + prompt length is required
+# [Fix-3] --gpu-memory-utilization 0.95: the instance is dedicated to TeenEmo, so use the whole GPU
+#
+# The judge runs on HF Inference Providers (novita/gpt-oss-120b), so no VRAM
+# needs to be reserved for it.
+#
+# Target GPUs: L4 (24GB) / RTX 4090 (24GB) / L40S (48GB)
+# LFM2.5-1.2B-Base (bf16 ~2.5GB) + LoRA overhead → 24GB is plenty
+# =============================================================================
+
+set -euo pipefail
+
+BASE_MODEL="${TEST_BASE_MODEL:-LiquidAI/LFM2.5-1.2B-Base}"
+LORA_REPO="${TEST_LORA_REPO:-YUGOROU/TeenEmo-LFM2.5-1.2B-DPO}"
+LORA_NAME="${TEST_LORA_NAME:-teenemo-dpo}"
+PORT="${TEST_PORT:-8000}"
+HOST="${VLLM_HOST:-0.0.0.0}"
+# [Fix-3] Dedicated TeenEmo instance, so use the whole GPU
+GPU_UTIL="${TEST_GPU_UTIL:-0.95}"
+# [Fix-2] EQ-Bench3 requests max_tokens=12000; 32768 leaves room for the prompt
+MAX_MODEL_LEN="${TEST_MAX_MODEL_LEN:-32768}"
+DTYPE="${VLLM_DTYPE:-auto}"
+HF_TOKEN="${HF_TOKEN:-}"
+
+echo "=== Starting TeenEmo candidate server (Vast.ai only) ==="
+echo " Base model   : ${BASE_MODEL}"
+echo " LoRA adapter : ${LORA_REPO} (name=${LORA_NAME})"
+echo " Port         : ${PORT}"
+echo " GPU util     : ${GPU_UTIL} (dedicated to TeenEmo)"
+echo " Max context  : ${MAX_MODEL_LEN} (covers EQ-Bench3 max_tokens=12000)"
+echo ""
+
+python -c "import vllm; print(f' vLLM: {vllm.__version__}')" 2>/dev/null || true
+echo ""
+
+exec vllm serve "${BASE_MODEL}" \
+  --host "${HOST}" \
+  --port "${PORT}" \
+  --dtype "${DTYPE}" \
+  --gpu-memory-utilization "${GPU_UTIL}" \
+  --max-model-len "${MAX_MODEL_LEN}" \
+  --tensor-parallel-size 1 \
+  --max-num-seqs 16 \
+  --enable-prefix-caching \
+  --enable-lora \
+  --max-lora-rank 32 \
+  --lora-modules "${LORA_NAME}=${LORA_REPO}" \
+  --trust-remote-code \
+  ${HF_TOKEN:+--hf-token "${HF_TOKEN}"}
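Every setting above is read from an environment variable with a fallback default, so a one-off run can override it without editing the script. A minimal sketch (the values are illustrative only; per Fix-2, keep TEST_MAX_MODEL_LEN at 12000 plus prompt length or more):

    # Serve on a different port with slightly less aggressive VRAM use
    TEST_PORT=8080 TEST_GPU_UTIL=0.90 bash eqbench-vast/serve_test_vast.sh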
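Once the server is up, vLLM's OpenAI-compatible API exposes the LoRA adapter under the name given to --lora-modules, so it can be smoke-tested with curl. A sketch assuming the script's default port and adapter name; if the base model ships without a chat template, query /v1/completions instead:

    # The adapter should be listed alongside the base model
    curl -s http://localhost:8000/v1/models

    # Route a request to the adapter by passing its name as "model"
    curl -s http://localhost:8000/v1/chat/completions \
      -H "Content-Type: application/json" \
      -d '{"model": "teenemo-dpo", "messages": [{"role": "user", "content": "Hello"}], "max_tokens": 64}'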