#!/usr/bin/env bash

set -e
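
# Mirror Pond installer for Linux/macOS with GPU offload.
# Usage: bash <this script> [MODEL_PATH] [PORT] [GPU_LAYERS]
#   MODEL_PATH  path to a GGUF model        (default: ./your_model.gguf)
#   PORT        port to serve on            (default: 7777)
#   GPU_LAYERS  layers to offload to GPU    (default: -1 = as many as possible)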
|
|
|
|
|
MODEL_PATH="${1:-./your_model.gguf}"
PORT="${2:-7777}"
GPU_LAYERS="${3:--1}"

echo "🪞 Mirror Pond - Linux/macOS GPU Installer"
echo "Model: $MODEL_PATH"
echo "Port : $PORT"
echo "GPU : $GPU_LAYERS layers (-1 = as many as possible)"
echo ""
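
# A system Python 3 interpreter is required to create the virtualenv.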
|
|
|
|
|
if ! command -v python3 >/dev/null 2>&1; then
    echo "❌ Python3 not found. Please install Python 3.9+."
    exit 1
fi
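
# Create a project-local virtualenv in ./.venv (safe to re-run over an existing one).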
|
|
|
|
|
echo "📦 Creating virtualenv .venv..."
python3 -m venv .venv

echo "📦 Activating venv..."
source .venv/bin/activate

echo "⬆️ Upgrading pip..."
pip install --upgrade pip
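
# Install the app's Python dependencies; requirements.txt must be in the current directory.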
|
|
|
|
|
if [ ! -f requirements.txt ]; then
    echo "❌ requirements.txt missing in current directory."
    exit 1
fi

echo "📥 Installing base dependencies from requirements.txt..."
pip install -r requirements.txt

echo "🔧 Enforcing GPU build of llama.cpp (CUDA 12.1 wheel if possible)..."
pip uninstall -y llama-cpp-python || true
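
# Prefer the prebuilt CUDA 12.1 wheel index; CMAKE_ARGS requests a CUDA build
# if pip has to compile llama.cpp from source instead. Assumes an NVIDIA driver
# compatible with CUDA 12.1 is installed.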
|
|
|
|
|
|
|
|
export CMAKE_ARGS="-DGGML_CUDA=on"

if pip install --force-reinstall --no-cache-dir \
    llama-cpp-python \
    --extra-index-url https://abetlen.github.io/llama-cpp-python/whl/cu121; then
    echo "✅ Installed llama-cpp-python with CUDA (cu121 wheel)."
else
    echo "⚠️ Failed to install CUDA wheel; falling back to CPU build."
    pip install --force-reinstall --no-cache-dir llama-cpp-python
fi

echo ""
echo "✨ Setup complete."
echo "You can run manually later with:"
echo " source .venv/bin/activate"
echo " python mirror_pond.py --model \"$MODEL_PATH\" --port $PORT --gpu-layers $GPU_LAYERS"
echo ""
echo "🚀 Launching Mirror Pond now..."
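
# Runs in the foreground; stop it with Ctrl+C.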
|
|
python mirror_pond.py \
    --model "$MODEL_PATH" \
    --port "$PORT" \
    --gpu-layers "$GPU_LAYERS"