#!/bin/bash
# Container entrypoint: start the Ollama backend, wait until its HTTP API
# answers, pre-pull default models (best-effort), then replace this shell
# with Open WebUI on port 7860.
set -euo pipefail

echo "🚀 Starting Ollama backend..."
ollama serve &
OLLAMA_PID=$!

# Wait for Ollama to be ready. Bail out if the server process dies during
# startup or we exceed the startup budget, instead of looping forever.
echo "⏳ Waiting for Ollama to initialize..."
readonly MAX_WAIT=120
waited=0
until curl -s http://localhost:11434/api/tags > /dev/null 2>&1; do
  if ! kill -0 "$OLLAMA_PID" 2>/dev/null; then
    echo "❌ Ollama exited during startup" >&2
    exit 1
  fi
  if (( waited >= MAX_WAIT )); then
    echo "❌ Timed out after ${MAX_WAIT}s waiting for Ollama" >&2
    exit 1
  fi
  sleep 2
  waited=$(( waited + 2 ))
  echo "   still waiting for Ollama..."
done
echo "✅ Ollama is up!"

# Auto-pull a lightweight default model (CPU-friendly). Pull failures are
# deliberately non-fatal: models can be pulled from the UI later.
echo "📥 Pulling default model: phi3:mini (~2.2GB)..."
ollama pull phi3:mini || echo "⚠️ Pull failed, continuing anyway (pull from UI)"

# Also pull mistral as a general purpose model (best-effort, see above).
echo "📥 Pulling mistral:latest (~4.4GB)..."
ollama pull mistral:latest || echo "⚠️ mistral pull failed, continuing"

# exec so Open WebUI takes over this PID (correct signal handling as a
# container main process); the background Ollama server keeps running.
echo "🌐 Starting Open WebUI on port 7860..."
exec open-webui serve \
  --host 0.0.0.0 \
  --port 7860 \
  --data-dir /data/webui \
  --ollama-base-url http://localhost:11434