set -euo pipefail

# --- Config (all overridable via environment) --------------------------------
: "${MODEL_PATH:=/data/adaptai/platform/aiml/checkpoints/qwen3-8b-elizabeth-sft}"
: "${HF_ORG:=LevelUp2x}"
: "${MODEL_NAME:=qwen3-8b-elizabeth-checkpoints}"
: "${PREFER_HF:=0}"
# Default the HF cache dirs: under 'set -u', expanding them unset would abort
# the script before '|| true' could rescue the mkdir below.
: "${HF_HOME:=${HOME}/.cache/huggingface}"
: "${HF_MODULES_CACHE:=${HF_HOME}/modules}"

echo "[entrypoint] Starting Elizabeth vLLM container"
echo "[entrypoint] MODEL_PATH=${MODEL_PATH} PREFER_HF=${PREFER_HF} HF_ORG=${HF_ORG} MODEL_NAME=${MODEL_NAME}"

# Ensure caches exist (non-fatal: serving from a local model dir still works).
mkdir -p -- "$HF_HOME" "$HF_MODULES_CACHE" || true

# If the model is not present locally (neither the safetensors index nor the
# tokenizer file exists), try to seed the parent checkpoints dir from a peer.
if [[ ! -f "$MODEL_PATH/model.safetensors.index.json" && ! -f "$MODEL_PATH/tokenizer.json" ]]; then
  echo "[entrypoint] MODEL_PATH missing expected files; attempting to seed"
  if [[ -n "${SEED_HOST:-}" ]]; then
    echo "[entrypoint] Seeding from SEED_HOST=$SEED_HOST"
    # Best-effort by design: on rsync failure we fall through to the HF
    # download path below instead of dying here.
    rsync -aH --partial --progress "${SEED_HOST}:${MODEL_PATH%/*}/" "${MODEL_PATH%/*}/" || true
  fi
fi

# If still missing and HF is preferred, download from the Hugging Face Hub.
if [[ ! -f "$MODEL_PATH/model.safetensors.index.json" && "$PREFER_HF" == "1" ]]; then
  token="${HF_TOKEN:-${HUGGING_FACE_API_KEY:-}}"
  if [[ -z "$token" ]]; then
    echo "[entrypoint] ERROR: HF_TOKEN or HUGGING_FACE_API_KEY must be set to download from private HF repos" >&2
    exit 1
  fi
  echo "[entrypoint] Downloading from HF: ${HF_ORG}/${MODEL_NAME}"
  mkdir -p -- "$MODEL_PATH"
  # Pass the token via the environment (honored by the 'hf' CLI) rather than
  # on argv, so it cannot leak through 'ps' output or 'set -x' traces.
  HF_TOKEN="$token" hf download "${HF_ORG}/${MODEL_NAME}" \
    --repo-type model \
    --include '**' \
    --local-dir "$MODEL_PATH" \
    --revision main
fi

echo "[entrypoint] Launching vLLM"
exec bash ./serve_elizabeth_vllm.sh