#!/bin/bash
# /home/mrdbo/projects/moltbot-hybrid-engine/start.sh
# Moltbot Hybrid Engine - Multi-service Startup
# Starts: Ollama (background, optional) + FastAPI/uvicorn (foreground on port 7860)
# Build: 2026-02-08 v6.0
# v6: Ollama is optional - HF Inference API provides fallback
#
# Change log (was stray commit text sitting ABOVE the shebang, where bash
# executed it as commands and ignored the shebang entirely):
#   dboa9   - Fix gateway proxy 7->8 chars + set Ollama context to 16384
#   e7883c7
# Startup banner: version, timestamp, and Ollama binary diagnostics.
RULE="============================================================"
printf '%s\n' "$RULE"
printf '%s\n' " Moltbot Hybrid Engine v7.1.0 - Starting..."
printf '%s\n' "$RULE"
printf '%s\n' " Timestamp: $(date '+%Y-%m-%d %H:%M:%S')"
printf '%s\n' " User: $(whoami) | Home: $HOME"
printf '%s\n' " BUILD_MARKER=2026-03-02-fix-hf-ollama-openclaw"
printf '%s\n' ""
printf '%s\n' "[DEBUG] Ollama check: $(command -v ollama 2>/dev/null || echo MISSING)"
printf '%s\n' "[DEBUG] /usr/local/bin/ollama: $(ls -lh /usr/local/bin/ollama 2>/dev/null || echo NOT_FOUND)"
printf '%s\n' "[DEBUG] Binary type: $(file /usr/local/bin/ollama 2>/dev/null || echo N/A)"
printf '%s\n' ""

# Cap math/BLAS thread pools for the HF Spaces Free tier (2 CPU, 16GB RAM).
export OMP_NUM_THREADS=2 MKL_NUM_THREADS=2
# 1. Check if Ollama binary exists and is valid.
# Sets OLLAMA_OK=true only when the binary is a plausible x86-64 ELF of
# sane size; everything else falls through to the HF Inference API.
echo "[1/5] Checking Ollama installation..."
OLLAMA_OK=false
if command -v ollama &> /dev/null; then
  # 'command -v' is the POSIX-portable replacement for the deprecated 'which'.
  OLLAMA_PATH="$(command -v ollama)"
  OLLAMA_SIZE=$(stat -c%s "$OLLAMA_PATH" 2>/dev/null || echo 0)
  echo " Ollama path: $OLLAMA_PATH (size: ${OLLAMA_SIZE} bytes)"
  # Verify binary is executable, right arch, and not truncated (>1MB expected)
  if file "$OLLAMA_PATH" 2>/dev/null | grep -q "ELF.*x86-64" && [ "$OLLAMA_SIZE" -gt 1000000 ]; then
    echo " ✅ Ollama binary verified (x86-64 ELF, ${OLLAMA_SIZE} bytes)"
    OLLAMA_OK=true
  else
    echo " ⚠️ Ollama binary exists but may be corrupt or wrong architecture"
    echo " Binary type: $(file "$OLLAMA_PATH" 2>/dev/null)"
    echo " Size: ${OLLAMA_SIZE} bytes (expected >1MB)"
    echo " → Will use HF Inference API fallback"
  fi
elif [ -f /usr/local/bin/ollama ]; then
  echo " ⚠️ Ollama at /usr/local/bin/ollama but not in PATH"
  echo " Adding to PATH..."
  export PATH="/usr/local/bin:$PATH"
  # Apply the same arch + size validation as the PATH branch above
  # (previously a truncated binary could slip through here unchecked).
  OLLAMA_SIZE=$(stat -c%s /usr/local/bin/ollama 2>/dev/null || echo 0)
  if file /usr/local/bin/ollama 2>/dev/null | grep -q "ELF.*x86-64" && [ "$OLLAMA_SIZE" -gt 1000000 ]; then
    echo " ✅ Ollama now accessible"
    OLLAMA_OK=true
  fi
else
  echo " ⚠️ Ollama binary not found at any standard location"
  echo " Checked: PATH, /usr/local/bin/ollama"
  echo " → Will use HF Inference API fallback (this is normal on free tier)"
fi
# 2. Start Ollama server in background (if binary is OK).
if [ "$OLLAMA_OK" = true ]; then
  echo "[2/5] Starting Ollama server..."
  # OLLAMA_CONTEXT_LENGTH is the env var documented by current Ollama
  # releases for the default context window; OLLAMA_NUM_CTX is kept for
  # any older build that honored it. Both set the intended 16k context.
  OLLAMA_CONTEXT_LENGTH=16384 OLLAMA_NUM_CTX=16384 ollama serve &
  OLLAMA_PID=$!
  echo " Ollama PID: $OLLAMA_PID"

  # 3. Wait for Ollama to be ready (up to 20 seconds); poll the tags API.
  echo "[3/5] Waiting for Ollama to be ready..."
  MAX_WAIT=20
  WAITED=0
  while [ "$WAITED" -lt "$MAX_WAIT" ]; do
    if curl -s http://localhost:11434/api/tags > /dev/null 2>&1; then
      echo " ✅ Ollama ready after ${WAITED}s"
      break
    fi
    sleep 1
    WAITED=$((WAITED + 1))
  done

  if [ "$WAITED" -ge "$MAX_WAIT" ]; then
    echo " ⚠️ Ollama not ready after ${MAX_WAIT}s"
    echo " → LLM will use HF Inference API fallback"
  else
    # Pull small model in background (1.5b for free tier); the pull is
    # detached so FastAPI startup below is not blocked by the download.
    echo " Checking for qwen2.5:1.5b model..."
    if ! ollama list 2>/dev/null | grep -q "qwen2.5"; then
      echo " ⏳ Model not found, pulling qwen2.5:1.5b in background..."
      echo " (Takes 1-2 minutes, model cached after first pull)"
      nohup ollama pull qwen2.5:1.5b > /tmp/ollama_pull.log 2>&1 &
      PULL_PID=$!
      echo " Pull PID: $PULL_PID"
    else
      echo " ✅ Model already available"
    fi
  fi
else
  echo "[2/5] Skipping Ollama (not available)"
  echo "[3/5] Skipping Ollama model pull"
fi
echo ""
# The hosted HF endpoint backs every LLM call Ollama cannot serve.
echo " 💡 HF Inference API fallback is always available"
echo " (Uses Qwen/Qwen2.5-7B-Instruct hosted by HuggingFace)"
echo ""

# 3b. Start OpenClaw/Clawdbot gateway in background (port 18789) if available.
# Guard-clause layout: bail out early when the binary or config is absent.
echo "[3b/5] Starting OpenClaw (Clawdbot) gateway..."
if ! command -v openclaw &> /dev/null; then
  echo " ⚠️ openclaw binary not found — skip Clawdbot gateway"
else
  export OPENCLAW_HOME="${HOME:-/home/user}/.openclaw"
  if [ ! -d "$OPENCLAW_HOME" ] && [ ! -f "$OPENCLAW_HOME/openclaw.json" ]; then
    echo " ⚠️ OpenClaw config not found at $OPENCLAW_HOME — skip gateway"
  else
    # Pin the gateway port to a numeric literal and scrub the config too
    # (guards against "Invalid port: '18789y'"-style typos in the JSON).
    GATEWAY_PORT=18789
    if [ -f "$OPENCLAW_HOME/openclaw.json" ]; then
      sed -i 's/"port":[^,}]*/"port": 18789/' "$OPENCLAW_HOME/openclaw.json" 2>/dev/null || true
    fi
    nohup openclaw gateway --port "$GATEWAY_PORT" > /tmp/openclaw-gateway.log 2>&1 &
    OPENCLAW_PID=$!
    echo " ✅ Clawdbot gateway started (PID $OPENCLAW_PID, port 18789)"
    echo " → Control UI / WebChat available via this Space at /gateway (see app)"
  fi
fi
echo ""
# 4. Start FastAPI (foreground - keeps container alive).
echo "[4/5] Starting FastAPI on port 7860..."
echo "============================================================"
echo ""
# 'exec' replaces this shell with uvicorn, so the server becomes the
# container's foreground process and receives SIGTERM/SIGINT directly on
# shutdown instead of leaving an intermediate shell that swallows signals.
exec python -m uvicorn app:app --host 0.0.0.0 --port 7860