YUGOROU committed on
Commit
3a885c1
·
verified ·
1 Parent(s): 77a18ce

sync: eqbench-ja-run/serve_test.sh

Browse files
Files changed (1) hide show
  1. eqbench-ja-run/serve_test.sh +36 -0
eqbench-ja-run/serve_test.sh ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
#!/usr/bin/env bash
# =============================================================================
# serve_test.sh — launch the vLLM server for the TeenEmo (examinee) model.
# Model: YUGOROU/TeenEmo-LFM2.5-1.2B-DPO (LoRA adapter)
# Port : 8000
# VRAM : ~3GB (1.2B bf16)
#
# Overridable environment variables:
#   TEST_MODEL, TEST_PORT, VLLM_HOST, TEST_GPU_UTIL, TEST_MAX_MODEL_LEN,
#   VLLM_DTYPE, HF_TOKEN
# =============================================================================

set -euo pipefail

TEST_MODEL="${TEST_MODEL:-YUGOROU/TeenEmo-LFM2.5-1.2B-DPO}"
PORT="${TEST_PORT:-8000}"
HOST="${VLLM_HOST:-0.0.0.0}"
# The 1.2B model is lightweight, so cap GPU usage at 10% to leave VRAM free
# for the judge model running on the same GPU.
GPU_UTIL="${TEST_GPU_UTIL:-0.10}"
MAX_MODEL_LEN="${TEST_MAX_MODEL_LEN:-4096}"
DTYPE="${VLLM_DTYPE:-auto}"
HF_TOKEN="${HF_TOKEN:-}"

echo "=== TeenEmo 受験者サーバー起動 ==="
echo " モデル : ${TEST_MODEL}"
echo " ポート : ${PORT}"
echo " GPU 使用率 : ${GPU_UTIL} (judge用にVRAMを確保)"
echo ""

# Assemble arguments in an array instead of relying on the unquoted
# ${HF_TOKEN:+...} expansion — this is word-splitting-safe and ShellCheck-clean.
serve_args=(
  --host "${HOST}"
  --port "${PORT}"
  --dtype "${DTYPE}"
  --gpu-memory-utilization "${GPU_UTIL}"
  --max-model-len "${MAX_MODEL_LEN}"
  --tensor-parallel-size 1
  --max-num-seqs 16
  --enable-prefix-caching
  --trust-remote-code
)
# Pass the HF token only when one is set. NOTE(review): the token will be
# visible on the process argv (ps); if that matters, export HF_TOKEN and let
# vLLM/huggingface_hub read it from the environment instead.
if [[ -n "${HF_TOKEN}" ]]; then
  serve_args+=(--hf-token "${HF_TOKEN}")
fi

# NOTE(review): the header calls this model a LoRA adapter, but no
# --enable-lora/--lora-modules flags are passed — confirm the HF repo contains
# a full (merged) model, otherwise vLLM will fail to load it.
# The weights are downloaded automatically from Hugging Face.
exec vllm serve "${TEST_MODEL}" "${serve_args[@]}"