#!/usr/bin/env bash
#
# Entrypoint: start an Ollama server in the background, wait for its API to
# come up, pull the configured models, then launch the backend service on
# port 7860 in the foreground.
#
# Environment variables:
#   OLLAMA_MODEL            chat model to pull    (default: qwen3:0.6b)
#   OLLAMA_EMBEDDING_MODEL  embedding model       (default: embeddinggemma:300m)

set -euo pipefail

echo "Starting Ollama..."
ollama serve &
OLLAMA_PID=$!

# Stop the background Ollama server on any exit path (success or failure).
cleanup() {
  kill "$OLLAMA_PID" 2>/dev/null || true
}
trap cleanup EXIT

# Wait for the Ollama API to answer, but bail out if the server process
# died or after ~2 minutes instead of looping forever.
retries=0
until curl -fs http://localhost:11434/api/tags > /dev/null; do
  if ! kill -0 "$OLLAMA_PID" 2>/dev/null; then
    echo "Ollama process exited unexpectedly" >&2
    exit 1
  fi
  if (( retries >= 60 )); then
    echo "Timed out waiting for Ollama" >&2
    exit 1
  fi
  echo "Waiting for Ollama..."
  sleep 2
  retries=$((retries + 1))
done

echo "Pulling models..."

OLLAMA_MODEL=${OLLAMA_MODEL:-"qwen3:0.6b"}
OLLAMA_EMBEDDING_MODEL=${OLLAMA_EMBEDDING_MODEL:-"embeddinggemma:300m"}

echo "Pulling model: $OLLAMA_MODEL"
ollama pull "$OLLAMA_MODEL"

echo "Pulling embedding model: $OLLAMA_EMBEDDING_MODEL"
ollama pull "$OLLAMA_EMBEDDING_MODEL"

echo "Ollama ready."

echo "Starting Backend Service on port 7860..."

export HOST=0.0.0.0
export PORT=7860
export OLLAMA_BASE_URL=http://localhost:11434

# Stream Python logs immediately (disable stdout/stderr buffering).
export PYTHONUNBUFFERED=1

uv run python run_service.py