#!/bin/bash
# /home/mrdbo/projects/moltbot-hybrid-engine/start.sh
# Moltbot Hybrid Engine - Multi-service Startup
# Starts: Ollama (background, optional) + OpenClaw gateway (background, optional)
#         + FastAPI/uvicorn (foreground on port 7860)
# Build: 2026-03-02 v7.1.0
# v6: Ollama is optional - HF Inference API provides fallback
# v7: OpenClaw/Clawdbot gateway startup added
echo "============================================================"
echo " Moltbot Hybrid Engine v7.1.0 - Starting..."
echo "============================================================"
echo " Timestamp: $(date '+%Y-%m-%d %H:%M:%S')"
echo " User: $(whoami) | Home: $HOME"
echo " BUILD_MARKER=2026-03-02-fix-hf-ollama-openclaw"
echo ""
echo "[DEBUG] Ollama check: $(command -v ollama 2>/dev/null || echo MISSING)"
echo "[DEBUG] /usr/local/bin/ollama: $(ls -lh /usr/local/bin/ollama 2>/dev/null || echo NOT_FOUND)"
echo "[DEBUG] Binary type: $(file /usr/local/bin/ollama 2>/dev/null || echo N/A)"
echo ""
# Optimize for HF Spaces Free tier (2 CPU, 16GB RAM)
export OMP_NUM_THREADS=2
export MKL_NUM_THREADS=2
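# OMP_NUM_THREADS / MKL_NUM_THREADS cap the OpenMP and Intel MKL thread pools
# so numeric libraries (numpy, torch, etc.) don't oversubscribe the 2 vCPUs.
# If the app happens to use HF tokenizers (an assumption about its
# dependencies, not confirmed here), this also quiets fork warnings:
# export TOKENIZERS_PARALLELISM=false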
# 1. Check if Ollama binary exists and is valid
echo "[1/4] Checking Ollama installation..."
OLLAMA_OK=false
if command -v ollama &> /dev/null; then
OLLAMA_PATH="$(command -v ollama)"
OLLAMA_SIZE=$(stat -c%s "$OLLAMA_PATH" 2>/dev/null || echo 0)
echo " Ollama path: $OLLAMA_PATH (size: ${OLLAMA_SIZE} bytes)"
# Verify binary is executable, right arch, and not truncated (>1MB expected)
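# Rationale: a real Ollama build is orders of magnitude larger than 1 MB, so
# the size floor is a loose heuristic that mainly catches truncated downloads
# or an HTML error page saved in place of the binary by a failed install.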
if file "$OLLAMA_PATH" 2>/dev/null | grep -q "ELF.*x86-64" && [ "$OLLAMA_SIZE" -gt 1000000 ]; then
echo " ✅ Ollama binary verified (x86-64 ELF, ${OLLAMA_SIZE} bytes)"
OLLAMA_OK=true
else
echo " ⚠️ Ollama binary exists but may be corrupt or wrong architecture"
echo " Binary type: $(file "$OLLAMA_PATH" 2>/dev/null)"
echo " Size: ${OLLAMA_SIZE} bytes (expected >1MB)"
echo " → Will use HF Inference API fallback"
fi
elif [ -f /usr/local/bin/ollama ]; then
echo " ⚠️ Ollama at /usr/local/bin/ollama but not in PATH"
echo " Adding to PATH..."
export PATH="/usr/local/bin:$PATH"
if file /usr/local/bin/ollama 2>/dev/null | grep -q "ELF.*x86-64"; then
echo " ✅ Ollama now accessible"
OLLAMA_OK=true
fi
else
echo " ⚠️ Ollama binary not found at any standard location"
echo " Checked: PATH, /usr/local/bin/ollama"
echo " → Will use HF Inference API fallback (this is normal on free tier)"
fi
# 2. Start Ollama server in background (if binary is OK)
if [ "$OLLAMA_OK" = true ]; then
echo "[2/4] Starting Ollama server..."
OLLAMA_CONTEXT_LENGTH=16384 ollama serve &
OLLAMA_PID=$!
echo " Ollama PID: $OLLAMA_PID"
# 3. Wait for Ollama to be ready (up to 20 seconds)
echo "[3/4] Waiting for Ollama to be ready..."
MAX_WAIT=20
WAITED=0
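# Poll /api/tags (Ollama's lightweight model-list endpoint) once per second;
# any response means the server socket is up. A one-off manual check:
#   curl -s http://localhost:11434/api/tags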
while [ $WAITED -lt $MAX_WAIT ]; do
if curl -s http://localhost:11434/api/tags > /dev/null 2>&1; then
echo " ✅ Ollama ready after ${WAITED}s"
break
fi
sleep 1
WAITED=$((WAITED + 1))
done
if [ $WAITED -ge $MAX_WAIT ]; then
echo " ⚠️ Ollama not ready after ${MAX_WAIT}s"
echo " → LLM will use HF Inference API fallback"
else
# Pull small model in background (1.5b for free tier)
echo " Checking for qwen2.5:1.5b model..."
if ! ollama list 2>/dev/null | grep -q "qwen2.5:1.5b"; then
echo " ⏳ Model not found, pulling qwen2.5:1.5b in background..."
echo " (Takes 1-2 minutes, model cached after first pull)"
nohup ollama pull qwen2.5:1.5b > /tmp/ollama_pull.log 2>&1 &
PULL_PID=$!
echo " Pull PID: $PULL_PID"
else
echo " ✅ Model already available"
fi
fi
else
echo "[2/4] Skipping Ollama (not available)"
echo "[3/4] Skipping Ollama model pull"
fi
echo ""
echo " 💡 HF Inference API fallback is always available"
echo " (Uses Qwen/Qwen2.5-7B-Instruct hosted by HuggingFace)"
echo ""
# 4. Start OpenClaw/Clawdbot gateway in background (port 18789) if available
echo "[4/5] Starting OpenClaw (Clawdbot) gateway..."
if command -v openclaw &> /dev/null; then
export OPENCLAW_HOME="${HOME:-/home/user}/.openclaw"
if [ -d "$OPENCLAW_HOME" ] || [ -f "$OPENCLAW_HOME/openclaw.json" ]; then
# Force numeric gateway port (fixes "Invalid port: '18789y'" if env/config had a typo)
GATEWAY_PORT=18789
if [ -f "$OPENCLAW_HOME/openclaw.json" ]; then
sed -i 's/"port":[^,}]*/"port": 18789/' "$OPENCLAW_HOME/openclaw.json" 2>/dev/null || true
fi
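# The sed above is a blunt text edit, not a JSON-aware one. A stricter
# rewrite, if jq happens to be installed in the image (not guaranteed on
# HF Spaces base images), would be:
#   jq '.port = 18789' "$OPENCLAW_HOME/openclaw.json" > /tmp/oc.json \
#     && mv /tmp/oc.json "$OPENCLAW_HOME/openclaw.json"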
nohup openclaw gateway --port "$GATEWAY_PORT" > /tmp/openclaw-gateway.log 2>&1 &
OPENCLAW_PID=$!
echo " ✅ Clawdbot gateway started (PID $OPENCLAW_PID, port 18789)"
echo " → Control UI / WebChat available via this Space at /gateway (see app)"
else
echo " ⚠️ OpenClaw config not found at $OPENCLAW_HOME — skip gateway"
fi
else
echo " ⚠️ openclaw binary not found — skip Clawdbot gateway"
fi
echo ""
# 5. Start FastAPI (foreground - keeps container alive)
echo "[5/5] Starting FastAPI on port 7860..."
echo "============================================================"
echo ""
exec python -m uvicorn app:app --host 0.0.0.0 --port 7860