#!/usr/bin/env bash
# ============================================================================
# LAION-Tunes Backend Startup Script
# ============================================================================
# Starts the FastAPI search server with EmbeddingGemma 300M loaded directly
# in Python (CPU), plus a cloudflared tunnel for public HTTPS access.
#
# Usage:
# ./start-backend.sh # default: port 7860
# ./start-backend.sh --port 7861 # custom FastAPI port
# ./start-backend.sh --skip-download # skip LFS data download
#
# Prerequisites:
# - Python 3.10+ with pip
# - cloudflared binary at ../cloudflared
# - ~20 GB disk for search indices
# - ~20 GB RAM for loaded indices + embedder
# ============================================================================
# Strict mode: abort on command failure, unset variables, and pipeline errors.
set -euo pipefail
# Resolve the script's own directory (works regardless of invocation CWD)
# and run everything relative to it.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
cd "$SCRIPT_DIR"
# ── Configuration ────────────────────────────────────────────────────────────
FASTAPI_PORT="${FASTAPI_PORT:-7860}"   # overridable via env or --port
VENV_DIR="$SCRIPT_DIR/.venv"
CLOUDFLARED="$SCRIPT_DIR/../cloudflared"
LOG_DIR="$SCRIPT_DIR/logs"
SKIP_DOWNLOAD=false
NO_WHISPER=false  # Whisper encoder works on CPU too; disable with --no-whisper

# Parse args.
# BUGFIX: previously only --with-whisper existed and it set NO_WHISPER=false,
# which is already the default — no flag could ever set NO_WHISPER=true, so the
# --no-whisper path in Step 3 was unreachable. --with-whisper is kept as a
# backward-compatible no-op.
while [[ $# -gt 0 ]]; do
  case "$1" in
    --port)
      if [[ $# -lt 2 ]]; then
        echo "ERROR: --port requires a value" >&2
        exit 1
      fi
      FASTAPI_PORT="$2"; shift 2 ;;
    --skip-download) SKIP_DOWNLOAD=true; shift ;;
    --no-whisper)    NO_WHISPER=true; shift ;;
    --with-whisper)  NO_WHISPER=false; shift ;;  # default; kept for back-compat
    *) echo "Unknown arg: $1" >&2; exit 1 ;;
  esac
done
mkdir -p "$LOG_DIR"

# Print the startup banner showing the effective configuration.
rule='============================================================'
printf '%s\n' \
  "$rule" \
  " LAION-Tunes Backend Startup" \
  "$rule" \
  " FastAPI port: $FASTAPI_PORT" \
  " Venv: $VENV_DIR" \
  " Skip download: $SKIP_DOWNLOAD" \
  " No whisper: $NO_WHISPER" \
  "$rule"
# ── Step 1: Download LFS data (search indices) if needed ─────────────────────
# Tries, in order: `git lfs pull` inside an existing clone at ./repo-source,
# then `huggingface-cli download`; aborts with setup instructions otherwise.
if [ "$SKIP_DOWNLOAD" = false ]; then
echo ""
echo "[1/4] Checking search index data..."
mkdir -p "$SCRIPT_DIR/search_index"
# Check if metadata.db exists and is a real file (not LFS pointer)
# A git-lfs pointer stub is ~130 bytes, so >1000 bytes implies real data.
# NOTE(review): `stat -c%s` is GNU coreutils syntax (Linux only; macOS/BSD
# would need `stat -f%z`) — confirm this script targets Linux exclusively.
if [ -f "$SCRIPT_DIR/search_index/metadata.db" ] && \
[ "$(stat -c%s "$SCRIPT_DIR/search_index/metadata.db" 2>/dev/null || echo 0)" -gt 1000 ]; then
echo " Search indices already downloaded."
else
echo " Downloading search indices from HuggingFace (this may take a while ~16 GB)..."
if [ -d "$SCRIPT_DIR/repo-source/.git" ]; then
echo " Using git-lfs pull in cloned repo..."
cd "$SCRIPT_DIR/repo-source"
# Only show the last few lines of progress output to keep the log readable.
git lfs pull --include="search_index/*" 2>&1 | tail -5
# Copy real files over
# (errors deliberately ignored: best-effort sync into the top-level dir)
cp -f search_index/* "$SCRIPT_DIR/search_index/" 2>/dev/null || true
cd "$SCRIPT_DIR"
elif command -v huggingface-cli &>/dev/null; then
echo " Using huggingface-cli..."
# --local-dir places files under $SCRIPT_DIR/search_index/ per --include.
huggingface-cli download laion/laion-tunes \
--repo-type dataset \
--include "search_index/*" \
--local-dir "$SCRIPT_DIR" \
2>&1 | tail -5
else
echo " ERROR: No method to download LFS data."
echo " Either clone the repo first:"
echo " GIT_LFS_SKIP_SMUDGE=1 git clone https://huggingface.co/datasets/laion/laion-tunes repo-source"
echo " Or install huggingface-cli:"
echo " pip install huggingface-hub"
exit 1
fi
echo " Download complete."
fi
else
echo ""
echo "[1/4] Skipping data download (--skip-download)"
fi
# ── Step 2: Set up Python virtual environment ────────────────────────────────
# Creates (if missing) and activates .venv, then installs all Python
# dependencies. Also resolves an HF token for the gated embedding model.
echo ""
echo "[2/4] Setting up Python environment..."
if [ ! -d "$VENV_DIR" ]; then
  echo " Creating virtual environment..."
  python3 -m venv "$VENV_DIR"
fi
source "$VENV_DIR/bin/activate"

# Install dependencies
echo " Installing Python dependencies..."
pip install --quiet --upgrade pip

# Set HF token for gated model access (google/embeddinggemma-300m is gated).
# Prefer an explicit HF_TOKEN env var; otherwise fall back to the token files
# written by `huggingface-cli login` (current cache path first, then legacy).
# Assignment and export are split so a `cat` failure isn't masked (SC2155).
if [ -z "${HF_TOKEN:-}" ]; then
  if [ -f "$HOME/.cache/huggingface/token" ]; then
    HF_TOKEN="$(cat "$HOME/.cache/huggingface/token")"
    export HF_TOKEN
    echo " HF token loaded from ~/.cache/huggingface/token"
  elif [ -f "$HOME/.huggingface/token" ]; then
    HF_TOKEN="$(cat "$HOME/.huggingface/token")"
    export HF_TOKEN
    echo " HF token loaded from ~/.huggingface/token"
  else
    echo " WARNING: No HF_TOKEN set. google/embeddinggemma-300m is a gated model."
    echo " Set HF_TOKEN env var or run: huggingface-cli login"
  fi
fi

# Install CPU-only PyTorch first (smaller, no CUDA)
pip install --quiet torch --index-url https://download.pytorch.org/whl/cpu

# Install everything else.
# BUGFIX: bracket expressions must be quoted, otherwise the shell treats them
# as glob patterns and may expand them against files in the CWD —
# 'uvicorn[standard]' was quoted but optimum[onnxruntime] was not.
pip install --quiet \
  fastapi \
  'uvicorn[standard]' \
  faiss-cpu \
  numpy \
  pandas \
  scipy \
  tqdm \
  requests \
  pydantic \
  python-multipart \
  sentence-transformers \
  transformers \
  'optimum[onnxruntime]' \
  onnxruntime \
  librosa \
  soundfile
echo " Python environment ready."
# ── Step 3: Start FastAPI server (with direct Python EmbeddingGemma) ─────────
# Launches server.py in the background and polls /api/stats until it answers.
echo ""
echo "[3/4] Starting FastAPI search server on port $FASTAPI_PORT..."
echo " EmbeddingGemma 300M will be loaded directly in Python (CPU)."
echo " First query may take ~500ms for embedding; subsequent ones ~430ms."

# Collect optional server flags in an array: an empty array contributes zero
# words, whereas the previous unquoted scalar $WHISPER_FLAG relied on
# word-splitting (ShellCheck SC2086) and a quoted scalar would pass a bogus
# empty "" argument.
SERVER_EXTRA_ARGS=()
if [ "$NO_WHISPER" = true ]; then
  SERVER_EXTRA_ARGS+=(--no-whisper)
fi

# Kill any existing server on this port (best-effort; ignore if none running)
fuser -k "${FASTAPI_PORT}/tcp" 2>/dev/null || true
sleep 1

# Run WITHOUT --tei-url so server.py loads EmbeddingGemma via SentenceTransformer
# HF_TOKEN is inherited from the environment for gated model access.
# The ${arr[@]+...} guard keeps `set -u` happy on bash < 4.4 when the array
# is empty.
nohup env HF_TOKEN="${HF_TOKEN:-}" "$VENV_DIR/bin/python" server.py \
  --port "$FASTAPI_PORT" \
  --host 0.0.0.0 \
  --gpu 0 \
  ${SERVER_EXTRA_ARGS[@]+"${SERVER_EXTRA_ARGS[@]}"} \
  > "$LOG_DIR/fastapi.log" 2>&1 &
FASTAPI_PID=$!
echo " FastAPI PID: $FASTAPI_PID"
echo "$FASTAPI_PID" > "$LOG_DIR/fastapi.pid"

# Wait for FastAPI to be ready: poll for up to ~600s (1s interval), bailing
# out immediately if the server process dies.
echo " Waiting for FastAPI to start (loading indices ~20 GB RAM + downloading EmbeddingGemma)..."
FASTAPI_READY=false
for ((i = 1; i <= 600; i++)); do
  if curl -sf "http://localhost:${FASTAPI_PORT}/api/stats" >/dev/null 2>&1; then
    FASTAPI_READY=true
    echo " FastAPI ready after ${i}s"
    break
  fi
  # Check if process is still alive
  if ! kill -0 "$FASTAPI_PID" 2>/dev/null; then
    echo " ERROR: FastAPI process died. Check logs:"
    tail -50 "$LOG_DIR/fastapi.log"
    exit 1
  fi
  # Show progress every 30s
  if (( i % 30 == 0 )); then
    echo " Still loading... (${i}s elapsed)"
    tail -1 "$LOG_DIR/fastapi.log" 2>/dev/null | sed 's/^/ /'
  fi
  sleep 1
done
if [ "$FASTAPI_READY" = false ]; then
  echo " WARNING: FastAPI not ready after 600s. Check: tail -f $LOG_DIR/fastapi.log"
fi
# ── Step 4: Start cloudflared tunnel for public HTTPS ────────────────────────
# Launches an ephemeral trycloudflare.com tunnel pointing at the local server,
# scrapes the public URL out of its log, and prints the final summary.
echo ""
echo "[4/4] Starting cloudflared tunnel for public HTTPS..."

# Kill existing tunnel if running (best-effort; stale PID files are ignored)
if [ -f "$LOG_DIR/cloudflared-backend.pid" ]; then
  kill "$(cat "$LOG_DIR/cloudflared-backend.pid")" 2>/dev/null || true
fi

nohup "$CLOUDFLARED" tunnel --url "http://localhost:${FASTAPI_PORT}" \
  > "$LOG_DIR/cloudflared-backend.log" 2>&1 &
CF_PID=$!
echo "$CF_PID" > "$LOG_DIR/cloudflared-backend.pid"

# Wait for the tunnel URL to appear in the log (up to ~35s).
# PORTABILITY FIX: use POSIX ERE (-E) instead of PCRE (-P) — the pattern
# needs no PCRE features and -P is a GNU extension absent from BSD/macOS grep.
sleep 5
TUNNEL_URL=""
for i in $(seq 1 30); do
  TUNNEL_URL=$(grep -oE 'https://[a-z0-9-]+\.trycloudflare\.com' "$LOG_DIR/cloudflared-backend.log" 2>/dev/null | head -1)
  if [ -n "$TUNNEL_URL" ]; then
    break
  fi
  sleep 1
done

# Final summary for the operator.
echo ""
echo "============================================================"
echo " LAION-Tunes Backend is running!"
echo "============================================================"
echo ""
echo " Local: http://localhost:${FASTAPI_PORT}"
if [ -n "$TUNNEL_URL" ]; then
  echo " Public: $TUNNEL_URL"
else
  echo " Public: (tunnel URL pending, check: grep trycloudflare $LOG_DIR/cloudflared-backend.log)"
fi
echo ""
echo " Logs:"
echo " FastAPI: tail -f $LOG_DIR/fastapi.log"
echo " Cloudflared: tail -f $LOG_DIR/cloudflared-backend.log"
echo ""
echo " Stop everything:"
echo " kill $FASTAPI_PID $CF_PID"
echo " # or: ./stop-all.sh"
echo "============================================================"