#!/usr/bin/env python3 """ LAION-Tunes Search Server ========================== FastAPI server providing semantic search, BM25 text search, music similarity search, and metadata filtering over the LAION-Tunes music dataset. Features: - Vector similarity search via FAISS (tag/lyric/mood/caption/transcription embeddings) - Music audio similarity search via Whisper encoder mean-pooled embeddings - BM25 text search (tags, caption, transcription, privacy-hashed lyrics) - Combined search: vector search + BM25 + filter by aesthetics/subset + re-rank - Real-time query embedding via EmbeddingGemma 300M - Audio upload → Whisper encoder embedding → FAISS similarity search - Dark-mode HTML frontend with audio players Usage: python server.py [--port 7860] [--gpu 0] [--host 0.0.0.0] Requires: search_index/ directory built by build_search_index.py """ import os import sys import json import time import hmac import hashlib import pickle import re import sqlite3 import logging import argparse import tempfile import io import threading from pathlib import Path from typing import Optional from contextlib import asynccontextmanager import numpy as np import faiss from fastapi import FastAPI, Query, UploadFile, File, Form, Request from fastapi.responses import HTMLResponse, JSONResponse from fastapi.middleware.cors import CORSMiddleware from pydantic import BaseModel # Import BM25Index from the build script (needed for pickle deserialization) sys.path.insert(0, str(Path(__file__).parent)) from build_search_index import BM25Index # ── Paths ──────────────────────────────────────────────────────────────────── BASE_DIR = Path(__file__).parent INDEX_DIR = BASE_DIR / "search_index" HTML_PATH = BASE_DIR / "index.html" LYRICS_HMAC_KEY = b"laion-tunes-search-2026-secret-key" log = logging.getLogger("server") logging.basicConfig(level=logging.INFO, format="%(asctime)s [%(levelname)s] %(message)s", datefmt="%H:%M:%S") # ── Global state (loaded at startup) 
# ── Global state (loaded at startup) ────────────────────────────────────────
# Single mutable registry shared by all request handlers; populated once in
# the FastAPI lifespan hook before the server starts accepting traffic.
state = {
    "faiss": {},        # field -> faiss.Index
    "id_maps": {},      # field -> numpy array of row_ids
    "bm25": {},         # field -> BM25Index
    "db_path": None,    # SQLite path
    "embedder": None,   # SentenceTransformer model (or TEIEmbedder)
    "total_tracks": 0,
    # Whisper encoder for music similarity
    "whisper_processor": None,
    "whisper_encoder": None,
    "whisper_device": None,
    "whisper_rid_to_idx": {},  # row_id -> faiss_idx for reverse lookup
    "audio_emb_cache": {},     # (ip, filename) -> {"embedding": np.array, "expires": timestamp}
}

# ── Audio Embedding Cache ────────────────────────────────────────────────────
CACHE_TTL = 3600          # 1 hour
_CACHE_MAX_ENTRIES = 1000  # soft cap on cached audio embeddings
_cache_lock = threading.Lock()


def cache_get(ip, filename):
    """Get cached audio embedding for (ip, filename). Returns numpy array or None.

    Expired entries are removed lazily on lookup.
    """
    key = (ip, filename)
    with _cache_lock:
        entry = state["audio_emb_cache"].get(key)
        if entry and entry["expires"] > time.time():
            return entry["embedding"]
        # Expired or missing
        if entry:
            del state["audio_emb_cache"][key]
    return None


def cache_set(ip, filename, embedding):
    """Cache an audio embedding under (ip, filename) with a TTL.

    When the cache exceeds _CACHE_MAX_ENTRIES, expired entries are purged.
    FIX: if nothing had expired yet, the original implementation let the
    cache grow without bound; we now also evict the soonest-to-expire
    entries to enforce the cap.
    """
    key = (ip, filename)
    with _cache_lock:
        cache = state["audio_emb_cache"]
        cache[key] = {
            "embedding": embedding,
            "expires": time.time() + CACHE_TTL,
        }
        if len(cache) > _CACHE_MAX_ENTRIES:
            now = time.time()
            # First drop anything already expired.
            for k in [k for k, v in cache.items() if v["expires"] < now]:
                del cache[k]
            # Still over the cap -> evict entries closest to expiry.
            overflow = len(cache) - _CACHE_MAX_ENTRIES
            if overflow > 0:
                victims = sorted(cache.items(), key=lambda kv: kv[1]["expires"])[:overflow]
                for k, _ in victims:
                    del cache[k]


# ── Tokenizer (must match build_search_index.py) ─────────────────────────────
_TOKEN_RE = re.compile(r"[a-z0-9]+")


def tokenize(text):
    """Lowercase alnum tokenizer; drops tokens shorter than 2 chars.

    Must stay byte-identical to the tokenizer used at index-build time,
    otherwise BM25 lookups will miss.
    """
    if not text or not isinstance(text, str):
        return []
    return [t for t in _TOKEN_RE.findall(text.lower()) if len(t) >= 2]


def tokenize_and_hash(text, secret_key=LYRICS_HMAC_KEY):
    """Tokenize then HMAC-SHA256-hash each token (truncated to 16 hex chars).

    Used for the privacy-hashed lyrics BM25 index so raw lyric tokens are
    never stored or compared in the clear.
    """
    tokens = tokenize(text)
    return [hmac.new(secret_key, t.encode(), hashlib.sha256).hexdigest()[:16]
            for t in tokens]


# ── Startup / Shutdown ───────────────────────────────────────────────────────
def load_indices():
    """Load all search indices (FAISS, BM25 pickles, SQLite metadata) into memory."""
    log.info("Loading search indices...")
    # FAISS indices (including whisper)
    for field in ["tag", "lyric", "mood", "caption", "transcription", "whisper"]:
        idx_path = INDEX_DIR / f"faiss_{field}.index"
        map_path = INDEX_DIR / f"idmap_{field}.npy"
        if idx_path.exists() and map_path.exists():
            state["faiss"][field] = faiss.read_index(str(idx_path))
            state["id_maps"][field] = np.load(str(map_path))
            log.info(f" FAISS {field}: {state['faiss'][field].ntotal:,} vectors")
        else:
            log.info(f" FAISS {field}: not found (skipped)")
    # Build reverse mapping for whisper (row_id -> faiss_idx)
    if "whisper" in state["id_maps"]:
        idmap = state["id_maps"]["whisper"]
        state["whisper_rid_to_idx"] = {int(rid): idx for idx, rid in enumerate(idmap)}
        log.info(f" Whisper reverse map: {len(state['whisper_rid_to_idx']):,} entries")
    # BM25 indices (pickled BM25Index objects from build_search_index.py)
    for field in ["tags", "caption", "transcription", "lyrics_hashed"]:
        pkl_path = INDEX_DIR / f"bm25_{field}.pkl"
        if pkl_path.exists():
            with open(pkl_path, "rb") as f:
                state["bm25"][field] = pickle.load(f)
            log.info(f" BM25 {field}: {len(state['bm25'][field].row_ids):,} documents")
        else:
            log.info(f" BM25 {field}: not found (skipped)")
    # SQLite (only the path is kept; each request opens its own connection)
    state["db_path"] = str(INDEX_DIR / "metadata.db")
    conn = sqlite3.connect(state["db_path"])
    state["total_tracks"] = conn.execute("SELECT COUNT(*) FROM tracks").fetchone()[0]
    conn.close()
    log.info(f" SQLite: {state['total_tracks']:,} tracks")


class TEIEmbedder:
    """Wraps Hugging Face Text Embeddings Inference HTTP API to match
    SentenceTransformer interface."""

    def __init__(self, url):
        self.url = url.rstrip("/")
        import requests as _req
        self._session = _req.Session()
        # Warm up / verify the endpoint and discover the embedding dimension
        r = self._session.post(f"{self.url}/embed", json={"inputs": "test"})
        r.raise_for_status()
        self.dim = len(r.json()[0])
        log.info(f" TEI connected at {self.url} (dim={self.dim})")

    def encode(self, text, normalize_embeddings=True):
        """Embed a single string; optionally L2-normalize (matches ST's kwarg)."""
        r = self._session.post(f"{self.url}/embed", json={"inputs": text})
        r.raise_for_status()
        emb = np.array(r.json()[0], dtype=np.float32)
        if normalize_embeddings:
            norm = np.linalg.norm(emb)
            if norm > 0:
                emb = emb / norm
        return emb


def load_embedder(gpu_id):
    """Load EmbeddingGemma 300M for real-time query embedding.

    Supports TEI backend (--embedder tei --tei-url URL) or SentenceTransformer.
    Tries the non-gated ONNX quantized build first (CPU), falling back to the
    gated PyTorch weights on GPU when available.
    """
    tei_url = getattr(app.state, "tei_url", None)
    if tei_url:
        log.info(f"Connecting to TEI backend at {tei_url}...")
        state["embedder"] = TEIEmbedder(tei_url)
        return
    hf_cache = str(BASE_DIR / ".hf_cache")
    os.environ["HF_HOME"] = hf_cache
    os.environ["TRANSFORMERS_CACHE"] = hf_cache
    import torch
    from sentence_transformers import SentenceTransformer
    # Try ONNX quantized version first (non-gated, faster on CPU)
    try:
        log.info("Loading EmbeddingGemma 300M (ONNX quantized, CPU)...")
        model = SentenceTransformer(
            "onnx-community/embeddinggemma-300m-ONNX",
            backend="onnx",
            model_kwargs={"file_name": "onnx/model_quantized.onnx"},
        )
        state["embedder"] = model
        log.info(" EmbeddingGemma ONNX (q8) loaded on CPU")
        return
    except Exception as e:
        log.warning(f" ONNX load failed ({e}), falling back to PyTorch...")
    # Fallback: PyTorch version (gated, requires HF token)
    log.info(f"Loading EmbeddingGemma 300M (PyTorch) on GPU {gpu_id}...")
    device = f"cuda:{gpu_id}" if torch.cuda.is_available() else "cpu"
    model = SentenceTransformer(
        "google/embeddinggemma-300m",
        device=device,
        model_kwargs={"torch_dtype": torch.bfloat16},
    )
    state["embedder"] = model
    log.info(f" EmbeddingGemma loaded on {device}")


def load_whisper_encoder(gpu_id):
    """Load laion/music-whisper encoder for audio similarity search.

    Skipped entirely when the whisper FAISS index is absent (nothing to
    search against). Only the encoder half of the model is kept; fp16 on GPU.
    """
    if "whisper" not in state["faiss"]:
        log.info(" Whisper FAISS index not found, skipping encoder load")
        return
    cache_dir = str(BASE_DIR / ".hf_cache_embeddings")
    os.environ["HF_HOME"] = cache_dir
    os.environ["TRANSFORMERS_CACHE"] = cache_dir
    import torch
    from transformers import WhisperProcessor, WhisperModel
    device = f"cuda:{gpu_id}" if torch.cuda.is_available() else "cpu"
    log.info(f"Loading Music-Whisper encoder on {device}...")
    processor = WhisperProcessor.from_pretrained("laion/music-whisper", cache_dir=cache_dir)
    model = WhisperModel.from_pretrained("laion/music-whisper", cache_dir=cache_dir)
    if device != "cpu":
        encoder = model.encoder.to(device).half().eval()
    else:
        encoder = model.encoder.eval()
    del model  # free the decoder half immediately
    if torch.cuda.is_available():
        torch.cuda.empty_cache()
    state["whisper_processor"] = processor
    state["whisper_encoder"] = encoder
    state["whisper_device"] = device
    log.info(f" Music-Whisper encoder loaded on {device}")
    # Restore HF_HOME for EmbeddingGemma
    hf_cache = str(BASE_DIR / ".hf_cache")
    os.environ["HF_HOME"] = hf_cache
    os.environ["TRANSFORMERS_CACHE"] = hf_cache


@asynccontextmanager
async def lifespan(app: FastAPI):
    """FastAPI lifespan hook: load indices and models before serving."""
    # Startup
    load_indices()
    load_embedder(gpu_id=app.state.gpu_id)
    if not getattr(app.state, "no_whisper", False):
        load_whisper_encoder(gpu_id=app.state.gpu_id)
    else:
        log.info("Whisper encoder loading skipped (--no-whisper)")
    yield
    # Shutdown (nothing to clean up)


# ── FastAPI App ──────────────────────────────────────────────────────────────
app = FastAPI(title="LAION-Tunes Search", lifespan=lifespan)
app.add_middleware(CORSMiddleware, allow_origins=["*"], allow_methods=["*"],
                   allow_headers=["*"])


# ── Request/Response models ──────────────────────────────────────────────────
class SearchRequest(BaseModel):
    query: str
    negative_query: Optional[str] = None  # Negative prompt for vector search (subtracted from query)
    search_type: str = "bm25"             # "vector" | "bm25" | "combined"
    vector_field: str = "caption"         # "tag" | "lyric" | "mood" | "caption" | "transcription"
    bm25_field: str = "caption"           # "tags" | "caption" | "transcription" | "lyrics_hashed"
    rank_by: str = "similarity"           # "similarity" | "aesthetics" | "plays" | "likes"
    min_aesthetics: Optional[float] = None   # Filter: minimum score_average
    min_similarity: Optional[float] = None   # Filter: minimum cosine similarity
    subset_filter: Optional[str] = None      # Filter: "suno" | "udio" etc
    vocal_filter: Optional[str] = None       # "instrumental" | "vocals" | None (all)
    min_duration: Optional[float] = 60.0     # Minimum duration in seconds (default 1 min)
    languages: Optional[list[str]] = None    # List of language codes to include, None = all
    negative_weight: float = 0.7             # Weight for negative query subtraction (0-1)
    nsfw_filter: Optional[str] = None        # None=all, "sfw_only", "nsfw_only"
    top_k: int = 50
    # Two-stage search
    stage2_enabled: bool = False
    stage2_query: Optional[str] = None
    stage2_search_type: str = "vector"       # "vector" | "bm25"
    stage2_vector_field: str = "caption"
    stage2_bm25_field: str = "caption"
    stage2_top_k: int = 50


class SimilarSearchRequest(BaseModel):
    row_id: int
    top_k: int = 50
    rank_by: str = "similarity"
    min_aesthetics: Optional[float] = None
    subset_filter: Optional[str] = None
    vocal_filter: Optional[str] = None
    min_duration: Optional[float] = 60.0
    languages: Optional[list[str]] = None
    nsfw_filter: Optional[str] = None
    # Stage 2
    stage2_enabled: bool = False
    stage2_query: Optional[str] = None
    stage2_search_type: str = "vector"
    stage2_vector_field: str = "caption"
    stage2_bm25_field: str = "caption"
    stage2_top_k: int = 50


# ── Helper: fetch metadata from SQLite ───────────────────────────────────────
# SQLITE_MAX_VARIABLE_NUMBER defaults to 999 on older SQLite builds; stay
# safely below it when binding one "?" per row_id.
_SQLITE_IN_CHUNK = 900


def fetch_tracks(row_ids, conn):
    """Fetch track metadata for given row_ids from SQLite.

    Returns {row_id: row-dict}. FIX: the original bound every row_id into a
    single IN (...) clause; candidate sets from whisper search reach 20k+
    ids, which exceeds SQLite's bound-variable limit and raises
    "too many SQL variables". Queries are now chunked.
    """
    if len(row_ids) == 0:
        return {}
    ids = [int(r) for r in row_ids]
    result = {}
    for start in range(0, len(ids), _SQLITE_IN_CHUNK):
        chunk = ids[start:start + _SQLITE_IN_CHUNK]
        placeholders = ",".join("?" * len(chunk))
        cursor = conn.execute(
            f"SELECT * FROM tracks WHERE row_id IN ({placeholders})",
            chunk,
        )
        columns = [desc[0] for desc in cursor.description]
        for row in cursor:
            d = dict(zip(columns, row))
            result[d["row_id"]] = d
    return result


def format_result(track, score=None, score_type="similarity", has_whisper_emb=False):
    """Format a track dict for API response.

    Missing/NULL columns are normalized to safe defaults so the frontend
    never sees None where it expects a string/number.
    """
    # Parse JSON array fields (stored as JSON text in SQLite)
    genre = json.loads(track.get("genre_tags") or "[]")
    scene = json.loads(track.get("scene_tags") or "[]")
    emotion = json.loads(track.get("emotion_tags") or "[]")
    return {
        "row_id": track["row_id"],
        "title": track.get("title") or "Untitled",
        "audio_url": track.get("audio_url") or "",
        "subset": track.get("subset") or "",
        "tags_text": track.get("tags_text") or "",
        "mood_text": track.get("mood_text") or "",
        "genre_tags": genre,
        "scene_tags": scene,
        "emotion_tags": emotion,
        "score_average": track.get("score_average"),
        "score_coherence": track.get("score_coherence"),
        "score_musicality": track.get("score_musicality"),
        "score_memorability": track.get("score_memorability"),
        "score_clarity": track.get("score_clarity"),
        "score_naturalness": track.get("score_naturalness"),
        "play_count": track.get("play_count") or 0,
        "upvote_count": track.get("upvote_count") or 0,
        "duration_seconds": track.get("duration_seconds"),
        "music_whisper_caption": track.get("music_whisper_caption") or "",
        "has_caption": bool(track.get("has_caption")),
        "has_transcription": bool(track.get("has_transcription")),
        "is_instrumental": bool(track.get("is_instrumental")),
        "language": track.get("language") or "unknown",
        "score": round(float(score), 4) if score is not None else None,
        "score_type": score_type,
        "has_whisper_emb": has_whisper_emb,
        # NSFW safety labels
        "nsfw_overall_label": track.get("nsfw_overall_label") or "likely_sfw",
        "nsfw_gore_label": track.get("nsfw_gore_label") or "likely_sfw",
        "nsfw_sexual_label": track.get("nsfw_sexual_label") or "likely_sfw",
        "nsfw_hate_label": track.get("nsfw_hate_label") or "likely_sfw",
        "nsfw_gore_sim": round(float(track["nsfw_gore_sim"]), 4)
        if track.get("nsfw_gore_sim") is not None else None,
        "nsfw_sexual_sim": round(float(track["nsfw_sexual_sim"]), 4)
        if track.get("nsfw_sexual_sim") is not None else None,
        "nsfw_hate_sim": round(float(track["nsfw_hate_sim"]), 4)
        if track.get("nsfw_hate_sim") is not None else None,
    }


# ── Search logic ─────────────────────────────────────────────────────────────
def vector_search(query_embedding, field, top_k):
    """Search FAISS index, return (row_ids, similarities).

    Returns empty arrays when the index is missing or empty.
    """
    if field not in state["faiss"]:
        return np.array([]), np.array([])
    index = state["faiss"][field]
    id_map = state["id_maps"][field]
    # Reshape for FAISS (expects a 2-D float32 batch)
    qvec = query_embedding.reshape(1, -1).astype(np.float32)
    k = min(top_k, index.ntotal)
    if k == 0:
        return np.array([]), np.array([])
    similarities, indices = index.search(qvec, k)
    similarities = similarities[0]
    indices = indices[0]
    # Map FAISS indices to row_ids; FAISS pads missing results with -1
    valid = indices >= 0
    row_ids = id_map[indices[valid]]
    sims = similarities[valid]
    return row_ids, sims


def bm25_search(query_text, field, top_k):
    """BM25 text search, return (row_ids, scores)."""
    if field not in state["bm25"]:
        return np.array([]), np.array([])
    # Hash tokens for lyrics search (index stores HMAC-hashed tokens)
    if field == "lyrics_hashed":
        tokens = tokenize_and_hash(query_text)
    else:
        tokens = tokenize(query_text)
    return state["bm25"][field].search(tokens, top_k=top_k)


def apply_filters(tracks_dict, min_aesthetics=None, subset_filter=None,
                  min_similarity=None, scores=None, vocal_filter=None,
                  min_duration=None, languages=None, nsfw_filter=None):
    """Filter tracks by criteria. Returns filtered row_ids.

    nsfw_filter: None=all, 'sfw_only'=exclude NSFW, 'nsfw_only'=only NSFW
    Tracks with unknown duration (NULL) intentionally pass the duration filter.
    """
    filtered = []
    for row_id, track in tracks_dict.items():
        if min_aesthetics is not None:
            avg = track.get("score_average")
            if avg is None or avg < min_aesthetics:
                continue
        if subset_filter:
            # "no_riffusion" is a pseudo-subset meaning "everything but riffusion"
            if subset_filter == "no_riffusion":
                if track.get("subset") == "riffusion":
                    continue
            elif track.get("subset") != subset_filter:
                continue
        if min_similarity is not None and scores is not None:
            sim = scores.get(row_id, 0)
            if sim < min_similarity:
                continue
        if vocal_filter == "instrumental":
            if not track.get("is_instrumental"):
                continue
        elif vocal_filter == "vocals":
            if track.get("is_instrumental"):
                continue
        if min_duration is not None:
            dur = track.get("duration_seconds")
            if dur is not None and dur < min_duration:
                continue
        if languages:
            lang = track.get("language") or "unknown"
            if lang not in languages:
                continue
        if nsfw_filter == "sfw_only":
            label = track.get("nsfw_overall_label") or "likely_sfw"
            if label != "likely_sfw":
                continue
        elif nsfw_filter == "nsfw_only":
            label = track.get("nsfw_overall_label") or "likely_sfw"
            if label == "likely_sfw":
                continue
        filtered.append(row_id)
    return filtered


def rank_results(row_ids, tracks_dict, rank_by, sim_scores=None):
    """Re-rank row_ids by the specified field (descending).

    Falls back to the input order when rank_by is unrecognized or
    sim_scores is empty.
    """
    if rank_by in ("similarity", "music_similarity") and sim_scores:
        return sorted(row_ids, key=lambda r: sim_scores.get(r, 0), reverse=True)
    elif rank_by == "aesthetics":
        return sorted(row_ids,
                      key=lambda r: tracks_dict.get(r, {}).get("score_average") or 0,
                      reverse=True)
    elif rank_by == "plays":
        return sorted(row_ids,
                      key=lambda r: tracks_dict.get(r, {}).get("play_count") or 0,
                      reverse=True)
    elif rank_by == "likes":
        return sorted(row_ids,
                      key=lambda r: tracks_dict.get(r, {}).get("upvote_count") or 0,
                      reverse=True)
    return row_ids


# ── Audio embedding helper ───────────────────────────────────────────────────
def compute_audio_embedding(audio_bytes):
    """Compute mean-pooled Whisper encoder embedding from audio bytes.

    Returns L2-normalized 768-dim float32 numpy array.
    Raises RuntimeError if the encoder is not loaded, ValueError if the
    audio is shorter than 0.1 s.
    """
    import torch
    import librosa
    import soundfile as sf
    processor = state["whisper_processor"]
    encoder = state["whisper_encoder"]
    device = state["whisper_device"]
    if processor is None or encoder is None:
        raise RuntimeError("Whisper encoder not loaded")
    # Write bytes to temp file, load with librosa.
    # NOTE(review): re-opening a NamedTemporaryFile by name while it is held
    # open fails on Windows; fine on POSIX — confirm deployment target.
    with tempfile.NamedTemporaryFile(suffix=".audio", delete=True) as tmp:
        tmp.write(audio_bytes)
        tmp.flush()
        try:
            audio, sr = librosa.load(tmp.name, sr=16000, mono=True)
        except Exception:
            # Try soundfile as fallback, then downmix/resample manually
            audio, sr = sf.read(tmp.name)
            if len(audio.shape) > 1:
                audio = audio.mean(axis=1)
            if sr != 16000:
                audio = librosa.resample(audio, orig_sr=sr, target_sr=16000)
    # Trim to first 30 seconds (Whisper's native window)
    max_samples = 30 * 16000
    audio = audio[:max_samples].astype(np.float32)
    if len(audio) < 1600:  # less than 0.1s
        raise ValueError("Audio too short (< 0.1 seconds)")
    # Process through Whisper encoder
    inputs = processor(audio, sampling_rate=16000, return_tensors="pt")
    input_features = inputs.input_features.to(device)
    if device != "cpu":
        input_features = input_features.half()  # encoder was loaded in fp16 on GPU
    with torch.no_grad():
        outputs = encoder(input_features)
        hidden = outputs.last_hidden_state        # (1, seq, 768)
        mean_pooled = hidden.mean(dim=1)          # (1, 768)
        mean_pooled = torch.nn.functional.normalize(mean_pooled, p=2, dim=1)
    embedding = mean_pooled.cpu().float().numpy()[0]  # (768,)
    return embedding


def search_by_whisper_embedding(query_embedding, top_k, rank_by, min_aesthetics,
                                subset_filter, vocal_filter, min_duration,
                                languages, nsfw_filter=None):
    """Search whisper FAISS index with a query embedding, apply filters,
    return formatted results.

    FIX 1: nsfw_filter now counts as a filter when sizing fetch_k (it also
    shrinks the candidate pool, so it needs the wider fetch).
    FIX 2: the SQLite connection is closed in a finally block so it cannot
    leak when filtering/formatting raises.
    """
    t0 = time.time()
    has_filters = (subset_filter or vocal_filter or languages or nsfw_filter
                   or (min_aesthetics and min_aesthetics > 0)
                   or (min_duration and min_duration > 0))
    # Over-fetch so enough candidates survive post-filtering
    fetch_k = max(top_k * 100, 20000) if has_filters else max(top_k * 10, 2000)
    # Search FAISS whisper index
    row_ids, sims = vector_search(query_embedding, "whisper", fetch_k)
    sim_scores = {}
    candidate_row_ids = set()
    for rid, sim in zip(row_ids, sims):
        rid = int(rid)
        sim_scores[rid] = float(sim)
        candidate_row_ids.add(rid)
    # Fetch metadata
    conn = sqlite3.connect(state["db_path"])
    conn.row_factory = sqlite3.Row
    try:
        tracks_dict = fetch_tracks(list(candidate_row_ids), conn)
        # Apply filters
        filtered_ids = apply_filters(
            tracks_dict,
            min_aesthetics=min_aesthetics,
            subset_filter=subset_filter,
            vocal_filter=vocal_filter,
            min_duration=min_duration,
            languages=languages,
            nsfw_filter=nsfw_filter,
        )
        # Rank
        ranked_ids = rank_results(filtered_ids, tracks_dict, rank_by, sim_scores)
        final_ids = ranked_ids[:top_k]
        # Build response; score field follows the ranking criterion
        whisper_rid_set = state["whisper_rid_to_idx"]
        results = []
        for rid in final_ids:
            track = tracks_dict.get(rid)
            if not track:
                continue
            score = sim_scores.get(rid, 0)
            score_type = "cosine_similarity"
            if rank_by == "aesthetics":
                score = track.get("score_average") or 0
                score_type = "aesthetics"
            elif rank_by == "plays":
                score = track.get("play_count") or 0
                score_type = "play_count"
            elif rank_by == "likes":
                score = track.get("upvote_count") or 0
                score_type = "upvote_count"
            r = format_result(dict(track), score=score, score_type=score_type,
                              has_whisper_emb=(rid in whisper_rid_set))
            results.append(r)
    finally:
        conn.close()
    search_time_ms = (time.time() - t0) * 1000
    return {
        "results": results,
        "total_candidates": len(candidate_row_ids),
        "total_filtered": len(filtered_ids),
        "total_tracks": state["total_tracks"],
        "search_time_ms": round(search_time_ms, 1),
        "search_type": "music_similarity",
        "vector_field": "whisper",
    }


# ── API Endpoints ────────────────────────────────────────────────────────────
@app.get("/", response_class=HTMLResponse)
async def serve_frontend():
    """Serve the search frontend."""
    return HTML_PATH.read_text(encoding="utf-8")


@app.get("/nsfw-report", response_class=HTMLResponse)
async def serve_nsfw_report():
    """Serve the NSFW safety analysis report."""
    report = BASE_DIR / "nsfw_safety_report.html"
    if report.exists():
        return report.read_text(encoding="utf-8")
    # NOTE(review): the source chunk was truncated inside this literal;
    # reconstructed as a plain 404 fallback — confirm against the original file.
    return HTMLResponse("<h1>NSFW report not found</h1>", status_code=404)