#!/usr/bin/env bash
# Entrypoint: start the Ollama server, pull the required models, then
# launch the Python backend service on port 7860.
#
# Environment variables (all optional, defaults applied below):
#   OLLAMA_MODEL            chat model to pull      (default: qwen3:0.6b)
#   OLLAMA_EMBEDDING_MODEL  embedding model to pull (default: embeddinggemma:300m)
set -euo pipefail

echo "Starting Ollama..."
ollama serve &
OLLAMA_PID=$!

# Tear down the background Ollama server on any exit path; the original
# captured the PID but never used it, leaking the process on failure.
cleanup() {
  kill "$OLLAMA_PID" 2>/dev/null || true
}
trap cleanup EXIT

# Wait for Ollama's HTTP API to come up; give up after ~60s instead of
# spinning forever if the server failed to start.
attempts=0
until curl -s http://localhost:11434/api/tags > /dev/null; do
  attempts=$((attempts + 1))
  if (( attempts > 30 )); then
    echo "Ollama did not become ready in time" >&2
    exit 1
  fi
  echo "Waiting for Ollama..."
  sleep 2
done

echo "Pulling models..."
# Pull models based on environment variables or defaults
OLLAMA_MODEL=${OLLAMA_MODEL:-"qwen3:0.6b"}
OLLAMA_EMBEDDING_MODEL=${OLLAMA_EMBEDDING_MODEL:-"embeddinggemma:300m"}

echo "Pulling model: $OLLAMA_MODEL"
ollama pull "$OLLAMA_MODEL"
echo "Pulling embedding model: $OLLAMA_EMBEDDING_MODEL"
ollama pull "$OLLAMA_EMBEDDING_MODEL"
echo "Ollama ready."

echo "Starting Backend Service on port 7860..."
# Set variables for the python service
export HOST=0.0.0.0
export PORT=7860
export OLLAMA_BASE_URL=http://localhost:11434
# Ensure logs are visible (flush python stdout immediately)
export PYTHONUNBUFFERED=1

# Start the python service in the foreground; when it exits, the EXIT
# trap above shuts down the Ollama server as well.
uv run python run_service.py