fix: sync eqbench-ja-run/serve_test.sh
eqbench-ja-run/serve_test.sh (changed: +15 -7)
@@ -1,29 +1,35 @@
 #!/usr/bin/env bash
 # =============================================================================
 # serve_test.sh — start the vLLM server for TeenEmo (the examinee model)
-#
+#
+# TeenEmo publishes only a LoRA adapter (88.9MB) on HF, on top of
+# LiquidAI/LFM2.5-1.2B-Base, so vLLM serves the base model and loads the
+# adapter dynamically via --enable-lora.
+#
 # Port: 8000
-# VRAM usage: ~3GB (1.2B bf16)
+# VRAM usage: ~3GB (1.2B bf16; GPU_UTIL=0.10 leaves the rest for the judge)
 # =============================================================================
 
 set -euo pipefail
 
-
+BASE_MODEL="${TEST_BASE_MODEL:-LiquidAI/LFM2.5-1.2B-Base}"
+LORA_REPO="${TEST_LORA_REPO:-YUGOROU/TeenEmo-LFM2.5-1.2B-DPO}"
+LORA_NAME="${TEST_LORA_NAME:-teenemo-dpo}"
 PORT="${TEST_PORT:-8000}"
 HOST="${VLLM_HOST:-0.0.0.0}"
-GPU_UTIL="${TEST_GPU_UTIL:-0.10}"  # 1.2B is lightweight
+GPU_UTIL="${TEST_GPU_UTIL:-0.10}"  # 1.2B is lightweight; leave VRAM for the judge
 MAX_MODEL_LEN="${TEST_MAX_MODEL_LEN:-4096}"
 DTYPE="${VLLM_DTYPE:-auto}"
 HF_TOKEN="${HF_TOKEN:-}"
 
 echo "=== Starting TeenEmo examinee server ==="
-echo "  Model        : ${TEST_MODEL}"
+echo "  Base model   : ${BASE_MODEL}"
+echo "  LoRA adapter : ${LORA_REPO} (name=${LORA_NAME})"
 echo "  Port         : ${PORT}"
 echo "  GPU util     : ${GPU_UTIL} (VRAM reserved for the judge)"
 echo ""
 
-
-exec vllm serve "${TEST_MODEL}" \
+exec vllm serve "${BASE_MODEL}" \
   --host "${HOST}" \
   --port "${PORT}" \
   --dtype "${DTYPE}" \
@@ -32,5 +38,7 @@ exec vllm serve "${TEST_MODEL}" \
   --tensor-parallel-size 1 \
   --max-num-seqs 16 \
   --enable-prefix-caching \
+  --enable-lora \
+  --lora-modules "${LORA_NAME}=${LORA_REPO}" \
   --trust-remote-code \
   ${HF_TOKEN:+--hf-token "${HF_TOKEN}"}
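
Once the server is up, vLLM's OpenAI-compatible API exposes the adapter under the name registered via --lora-modules, so clients select it through the "model" field of a request. A minimal smoke-test sketch, assuming the script's defaults (PORT=8000, LORA_NAME=teenemo-dpo) and an illustrative prompt that is not part of the repo:

# Both the base model id and "teenemo-dpo" should appear in this listing.
curl -s http://localhost:8000/v1/models | python3 -m json.tool

# Route a request through the LoRA adapter by naming it in "model".
curl -s http://localhost:8000/v1/chat/completions \
  -H "Content-Type: application/json" \
  -d '{
        "model": "teenemo-dpo",
        "messages": [{"role": "user", "content": "Hello"}],
        "max_tokens": 128
      }'

Note that /v1/chat/completions only works if the served checkpoint ships a chat template; if it does not, fall back to /v1/completions with a plain prompt string.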