File size: 2,597 Bytes
1fd72d1
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
# setup.ps1 β€” GPU-first installer for Mirror Pond (Windows, NVIDIA CUDA)

param(
    [string]$ModelPath = ".\your_model.gguf",
    [int]$Port = 7777,
    [int]$GpuLayers = -1
)

# Banner: echo the effective configuration so a wrong path/port is visible immediately.
Write-Host "πŸͺž Mirror Pond β€” Windows GPU Installer" -ForegroundColor Green
Write-Host "Model: $ModelPath"
Write-Host "Port : $Port"
Write-Host "GPU  : $GpuLayers layers (-1 = as many as possible)"
Write-Host ""

# Abort early unless a 'python' command is resolvable on PATH.
$pythonCmd = Get-Command python -ErrorAction SilentlyContinue
if ($null -eq $pythonCmd) {
    Write-Host "❌ Python not found. Please install Python 3.9+ and ensure 'python' is in PATH." -ForegroundColor Red
    exit 1
}

Write-Host "πŸ“¦ Creating virtualenv .venv..." -ForegroundColor Cyan
python -m venv .venv
# External commands do not throw in PowerShell; check the exit code explicitly
# so we do not continue with a half-created (or missing) environment.
if ($LASTEXITCODE -ne 0) {
    Write-Host "❌ Failed to create virtualenv .venv." -ForegroundColor Red
    exit 1
}

# The venv is created at .\.venv (see above), so look there first.
# Keep .\venv as a legacy fallback in case an older layout already exists.
$venvActivation = ".\.venv\Scripts\Activate.ps1"
if (-not (Test-Path $venvActivation)) {
    $venvActivation = ".\venv\Scripts\Activate.ps1"
}
if (-not (Test-Path $venvActivation)) {
    Write-Host "❌ Could not find virtualenv activation script." -ForegroundColor Red
    exit 1
}

Write-Host "πŸ“¦ Activating venv..." -ForegroundColor Cyan
# Dot-source so the activation script modifies THIS session's PATH.
. $venvActivation

Write-Host "⬆️  Upgrading pip..." -ForegroundColor Cyan
# On Windows pip cannot replace its own running pip.exe; invoking it through
# the interpreter ('python -m pip') is the documented way to self-upgrade.
python -m pip install --upgrade pip

if (-not (Test-Path ".\requirements.txt")) {
    Write-Host "❌ requirements.txt missing in current directory." -ForegroundColor Red
    exit 1
}

Write-Host "πŸ“₯ Installing base dependencies from requirements.txt..." -ForegroundColor Cyan
pip install -r requirements.txt
# A failed base install would make every later step misleading; stop here.
if ($LASTEXITCODE -ne 0) {
    Write-Host "❌ Failed to install dependencies from requirements.txt." -ForegroundColor Red
    exit 1
}

Write-Host "🧠 Enforcing GPU build of llama.cpp (CUDA 12.1 wheel if possible)..." -ForegroundColor Cyan

# Remove any existing CPU build
pip uninstall -y llama-cpp-python | Out-Null

# Request a CUDA-enabled build in case pip falls back to compiling from source.
$env:CMAKE_ARGS = "-DGGML_CUDA=on"

# NOTE: try/catch cannot detect a pip failure here β€” external commands signal
# failure via a non-zero exit code, not a terminating error, so the previous
# catch-based fallback was dead code. Inspect $LASTEXITCODE instead.
pip install --force-reinstall --no-cache-dir `
    llama-cpp-python `
    --extra-index-url https://abetlen.github.io/llama-cpp-python/whl/cu121
if ($LASTEXITCODE -eq 0) {
    Write-Host "βœ… Installed llama-cpp-python with CUDA (cu121 wheel)." -ForegroundColor Green
}
else {
    Write-Host "⚠️ Failed to install CUDA wheel; falling back to CPU build." -ForegroundColor Yellow
    pip install --force-reinstall --no-cache-dir llama-cpp-python
    if ($LASTEXITCODE -ne 0) {
        Write-Host "❌ CPU fallback install of llama-cpp-python also failed." -ForegroundColor Red
        exit 1
    }
}

# Show the manual invocation once (so the user can re-run later), then launch.
$manualRun = "  python mirror_pond.py --model `"$ModelPath`" --port $Port --gpu-layers $GpuLayers"

Write-Host ""
Write-Host "✨ Setup complete." -ForegroundColor Green
Write-Host "You can run manually later with:" -ForegroundColor Green
Write-Host "  .\.venv\Scripts\activate" -ForegroundColor Yellow
Write-Host $manualRun -ForegroundColor Yellow
Write-Host ""
Write-Host "πŸš€ Launching Mirror Pond now..." -ForegroundColor Green

# Splatting keeps each argument a distinct token, identical to the inline form.
$launchArgs = @("--model", "$ModelPath", "--port", $Port, "--gpu-layers", $GpuLayers)
python mirror_pond.py @launchArgs