#!/usr/bin/env bash
#
# Setup script for Terminator-Qwen3-14B.
#
# Steps performed:
#   [1/3] Create a conda/micromamba Python 3.12 environment.
#   [2/3] Install packages (uv, vllm, openai, accelerate).
#   [3/3] Download Qwen3-14B base weights via setup_model_dir.py
#         (skipped if already cached).
#
# Environment variables:
#   TERMINATOR_ENV_NAME - name of the env to create (default: "terminator")
#
# Strict mode: abort on errors, unset variables, and pipeline failures.
set -euo pipefail
# Directory containing this script (resolved to an absolute path) and the
# name of the environment to create (overridable via TERMINATOR_ENV_NAME).
script_path="${BASH_SOURCE[0]}"
SCRIPT_DIR="$(cd "$(dirname "$script_path")" && pwd)"
ENV_NAME="${TERMINATOR_ENV_NAME:-terminator}"

# Banner.
printf '\n=====================================\n'
printf ' Terminator-Qwen3-14B Setup\n'
printf '=====================================\n\n'
| | |
| | if command -v micromamba &>/dev/null; then |
| | CONDA_CMD="micromamba" |
| | elif command -v conda &>/dev/null; then |
| | CONDA_CMD="conda" |
| | else |
| | echo "ERROR: Neither conda nor micromamba found." |
| | echo "" |
| | echo "Install micromamba:" |
| | echo ' "${SHELL}" <(curl -L micro.mamba.pm/install.sh)' |
| | echo "" |
| | echo "Or install conda:" |
| | echo " https://docs.conda.io/en/latest/miniconda.html" |
| | exit 1 |
| | fi |
| |
|
echo "[1/3] Setting up Python environment..."

# Create the env only if it does not already exist. The listing format
# differs between tools: conda prints "name  /path", micromamba may print
# only "/path/envs/name", so match either "^name " or a trailing "/name".
# Use -E (ERE): the original BRE pattern relied on "\|" alternation, which
# is a GNU extension and silently fails on BSD grep (e.g. macOS), causing
# a spurious re-create attempt.
if $CONDA_CMD env list 2>/dev/null | grep -qE "^${ENV_NAME} |/${ENV_NAME}\$"; then
  echo " Environment '${ENV_NAME}' already exists. Activating..."
else
  echo " Creating environment '${ENV_NAME}' with Python 3.12..."
  $CONDA_CMD create -n "${ENV_NAME}" python=3.12 -y
fi
# Execute a command inside the target environment without activating it in
# the caller's shell.
#   Globals:   CONDA_CMD (read), ENV_NAME (read)
#   Arguments: command and its arguments
run_in_env() {
  case "$CONDA_CMD" in
    micromamba)
      # micromamba supports one-shot execution directly.
      micromamba run -n "${ENV_NAME}" "$@"
      ;;
    *)
      # conda path: activate inside a subshell so the activation does not
      # leak into the current shell.
      (eval "$(conda shell.bash hook 2>/dev/null)" && conda activate "${ENV_NAME}" && "$@")
      ;;
  esac
}
# Sanity check: report the interpreter version inside the environment.
printf '%s\n' " Python: $(run_in_env python --version)"
printf '\n%s\n' "[2/3] Installing packages..."

# Print a progress line, then run the install command in the env.
step() {
  printf '%s\n' "$1"
  shift
  run_in_env "$@"
}

step " Installing uv..." pip install --upgrade uv --quiet
step " Installing vllm (this may take a few minutes)..." uv pip install vllm --torch-backend=auto
step " Installing openai (for client)..." uv pip install openai
step " Installing accelerate (for HF inference)..." uv pip install accelerate

printf '%s\n' " Done."
# Fetch the model weights; setup_model_dir.py is a no-op when cached.
cat <<EOF

[3/3] Setting up model directory...
 This downloads Qwen3-14B base weights (~28GB) from HuggingFace.
 (Skipped if already cached.)

EOF

cd "$SCRIPT_DIR"
run_in_env python setup_model_dir.py
# Final instructions; the here-doc expands $CONDA_CMD / $ENV_NAME /
# $SCRIPT_DIR exactly as the individual echo lines did.
cat <<EOF

=====================================
 Setup Complete!
=====================================

To start the server:
 $CONDA_CMD activate ${ENV_NAME}
 cd $SCRIPT_DIR
 ./start_server.sh

Then in another terminal:
 $CONDA_CMD activate ${ENV_NAME}
 cd $SCRIPT_DIR
 python client.py --interactive

See README.md for configuration options (GPU memory, context length, etc.)
EOF