#!/usr/bin/env bash
# Launch a vLLM OpenAI-compatible API server in the background.
#
# Usage:
#   HF_HOME=/data/adaptai/hf_cache \
#   ./scripts/vllm_launch.sh /data/models/Qwen/Qwen3-4B-Instruct-2507 \
#     qwen3_4b_ins_2507 18083 "--gpu-memory-utilization 0.90 --trust-remote-code"
#
# Arguments:
#   $1 - model path (local directory or HF repo id)
#   $2 - served model alias (--served-model-name)
#   $3 - port to bind on 127.0.0.1
#   $4 - optional extra CLI flags, passed as one space-separated string
#
# Environment:
#   HF_HOME             - HF cache root (default: /data/adaptai/hf_cache)
#   TRANSFORMERS_CACHE  - defaults to HF_HOME
#
# Output: server log is written to /tmp/vllm-$ALIAS.log
set -euo pipefail

MODEL_PATH=${1:?model path}
ALIAS=${2:?served alias}
PORT=${3:?port}
EXTRA_ARGS=${4:-"--gpu-memory-utilization 0.90 --trust-remote-code"}

HF_HOME=${HF_HOME:-/data/adaptai/hf_cache}
TRANSFORMERS_CACHE=${TRANSFORMERS_CACHE:-$HF_HOME}

CMD=(/usr/bin/python3 -m vllm.entrypoints.openai.api_server
  --model "$MODEL_PATH"
  --served-model-name "$ALIAS"
  --host 127.0.0.1 --port "$PORT"
)
# Intentional word-splitting: EXTRA_ARGS is a flat string of additional flags.
# shellcheck disable=SC2206
CMD+=( $EXTRA_ARGS )

echo "[vllm] starting $ALIAS on :$PORT using $MODEL_PATH"
env HF_HOME="$HF_HOME" TRANSFORMERS_CACHE="$TRANSFORMERS_CACHE" \
  nohup "${CMD[@]}" >"/tmp/vllm-$ALIAS.log" 2>&1 &
PID=$!

# Give the process a moment to either bind or crash, then verify it is alive.
# (The old `pgrep … || true` always exited 0, masking immediate crashes.)
sleep 2
if kill -0 "$PID" 2>/dev/null; then
  echo "[vllm] $ALIAS running (pid $PID); log: /tmp/vllm-$ALIAS.log"
else
  echo "[vllm] $ALIAS failed to start; last log lines:" >&2
  tail -n 20 "/tmp/vllm-$ALIAS.log" >&2 || true
  exit 1
fi