#!/bin/bash
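# Send stderr to stdout so all script output lands in one log stream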
exec 2>&1
echo "!!! [SYSTEM]: Starting Ollama..."
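# Start the Ollama server in the background, discarding its own log output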
ollama serve > /dev/null 2>&1 &
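# Poll the API every 2s until the server answers (the tags endpoint doubles
# as a readiness probe); note this waits indefinitely if the server never starts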
until curl -s http://localhost:11434/api/tags > /dev/null; do
    sleep 2
done
# ✅ Router model
if ! ollama list | grep -q "qwen3:0.6b"; then
    echo "!!! [SYSTEM]: Pulling router model qwen3:0.6b..."
    ollama pull qwen3:0.6b
else
    echo "!!! [SYSTEM]: Router model qwen3:0.6b ready ✅"
fi
# ✅ Core model: pull if missing
if ! ollama list | grep -q "qwen3.5:4b"; then
    echo "!!! [SYSTEM]: Pulling core model qwen3.5:4b (first boot, ~2.5GB)..."
    # Pull tag must match the grep above, or the model is re-downloaded every boot
    ollama pull qwen3.5:4b
    echo "!!! [SYSTEM]: Core model ready ✅"
else
    echo "!!! [SYSTEM]: Core model qwen3.5:4b ready ✅"
fi
# Optional llama.cpp backend: install llama-cpp-python if requested and missing
if [ "$USE_LLAMA_CPP" = "true" ]; then
    if ! python3 -c "import llama_cpp" 2>/dev/null; then
        echo "!!! [SYSTEM]: Installing llama-cpp-python..."
        # Try the prebuilt CPU wheel index first to skip compilation;
        # fall back to a plain pip install (source build) if that fails
        pip3 install llama-cpp-python --no-cache-dir --break-system-packages \
            --extra-index-url https://abetlen.github.io/llama-cpp-python/whl/cpu \
            || pip3 install llama-cpp-python --no-cache-dir --break-system-packages
    fi
else
    echo "!!! [SYSTEM]: USE_LLAMA_CPP=false; using Ollama"
fi
echo "!!! [SYSTEM]: Starting Brain App..."
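# exec replaces the shell with the app so it receives signals directly;
# -u disables Python output buffering so logs stream live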
exec python3 -u app.py