#!/bin/bash
# Launch script: start an Ollama server in the background, wait until it is
# ready, pull the required models, then exec the uvicorn proxy in the
# foreground.
#
# Env overrides (fall back to the original hard-coded defaults):
#   OLLAMA_HOST   - host:port the Ollama server binds to
#   OLLAMA_MODELS - directory where Ollama stores model blobs
set -euo pipefail

export OLLAMA_HOST="${OLLAMA_HOST:-127.0.0.1:11434}"
export OLLAMA_MODELS="${OLLAMA_MODELS:-/home/user/.ollama/models}"

echo "==> Starting Ollama..."
ollama serve &
OLLAMA_PID=$!
# Best-effort cleanup: if the script dies before the final exec, don't leave
# the background server orphaned. (exec replaces the shell, so the trap does
# not fire on the success path.)
trap 'kill "$OLLAMA_PID" 2>/dev/null || true' EXIT
echo "==> Waiting for Ollama to be ready..."
# Poll the version endpoint with a bounded retry budget so a broken server
# fails the script instead of hanging forever. Use OLLAMA_HOST rather than a
# second hard-coded address so the probe can never drift from where the
# server actually binds. -f makes curl fail on HTTP error statuses too.
MAX_RETRIES=30
COUNT=0
until curl -fsS "http://${OLLAMA_HOST:-127.0.0.1:11434}/api/version" > /dev/null 2>&1; do
  COUNT=$((COUNT + 1))
  if [ "$COUNT" -ge "$MAX_RETRIES" ]; then
    # Diagnostics belong on stderr.
    echo "ERROR: Ollama did not start." >&2
    exit 1
  fi
  echo "  ... attempt $COUNT/$MAX_RETRIES"
  sleep 2
done
echo "==> Ollama ready!"
# Pull the required models up front so the proxy never blocks on a cold pull.
# Single list instead of copy-pasted pull steps: adding a model is one line.
MODELS=(
  "deepseek-r1:latest"
  "qwen3-vl:latest"
)
for model in "${MODELS[@]}"; do
  echo "==> Pulling ${model}..."
  ollama pull "$model"
done

echo "==> Models loaded:"
ollama list

echo "==> Starting proxy on :7860..."
# exec replaces this shell so uvicorn becomes the process of record and
# receives container signals (SIGTERM etc.) directly.
exec uvicorn proxy:app --host 0.0.0.0 --port 7860