"""
Video Intelligence Platform β€” REST API
FastAPI server exposing all platform capabilities as REST endpoints.

Run:
    uvicorn video_intelligence.api:app --host 0.0.0.0 --port 8000

All endpoints return JSON. Upload videos as multipart/form-data.
A React/Next.js frontend can call these endpoints directly with fetch().
"""
import os
import shutil
import tempfile
from typing import Optional, List
from pathlib import Path

from fastapi import FastAPI, UploadFile, File, HTTPException, Query
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel

from .config import Config
from .pipeline import IndexingPipeline
from .query_engine import QueryEngine
from .akinator import AkinatorRefiner


# ── State ───────────────────────────────────────────────────────────────────
# Initialized on first /init call. Stays alive for the server lifetime.
state = {
    "pipeline": None,
    "query_engine": None,
    "akinator": None,
    "initialized": False,
    "_last_results": [],  # most recent search results, consumed by POST /rag
}


# ── App ─────────────────────────────────────────────────────────────────────
app = FastAPI(
    title="Video Intelligence Platform",
    description="Akinator-style video search with RAG, boolean queries, and tree refinement",
    version="1.0.0",
    docs_url="/docs",       # Swagger UI at /docs
    redoc_url="/redoc",     # ReDoc at /redoc
)

# CORS β€” allow your React frontend to call this API
# In production, replace ["*"] with your actual frontend domain
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],          # e.g. ["http://localhost:3000", "https://yourdomain.com"]
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)


# ── Request/Response Models ─────────────────────────────────────────────────

class InitRequest(BaseModel):
    gemini_api_key: str
    device: str = "cpu"

class InitResponse(BaseModel):
    status: str
    message: str

class SearchRequest(BaseModel):
    query: str
    top_k: int = 20

class SearchResult(BaseModel):
    frame_id: int
    timestamp_sec: float
    time_str: str
    score: float
    caption: str
    detections: List[str]
    match_source: str

class SearchResponse(BaseModel):
    query: str
    results: List[SearchResult]
    count: int
    akinator_active: bool = False
    akinator_question: Optional[str] = None
    akinator_options: Optional[List[str]] = None

class RefineRequest(BaseModel):
    choice: str
    query: str

class RefineResponse(BaseModel):
    status: str  # "refining" or "done"
    count: int
    results: Optional[List[dict]] = None
    question: Optional[str] = None
    options: Optional[List[str]] = None
    history: Optional[List[dict]] = None

class RAGRequest(BaseModel):
    query: str

class RAGResponse(BaseModel):
    query: str
    answer: str

class IndexResponse(BaseModel):
    status: str
    frames: int
    detections: int
    visual_vectors: int
    caption_vectors: int
    elapsed_sec: float

class HealthResponse(BaseModel):
    status: str
    initialized: bool
    version: str


# ── Endpoints ───────────────────────────────────────────────────────────────

@app.get("/health", response_model=HealthResponse)
def health():
    """Health check β€” use for container readiness/liveness probes."""
    return HealthResponse(
        status="ok",
        initialized=state["initialized"],
        version="1.0.0",
    )
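
# Illustrative readiness check (assumes the server is reachable on localhost:8000
# and the `requests` package is available):
#   requests.get("http://localhost:8000/health").json()
#   # -> {'status': 'ok', 'initialized': False, 'version': '1.0.0'} until POST /init succeeds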


@app.post("/init", response_model=InitResponse)
def initialize(req: InitRequest):
    """
    Initialize models with your Gemini API key.
    Call once before indexing/searching. Takes ~30-60s to load models.
    """
    try:
        config = Config(
            gemini_api_key=req.gemini_api_key,
            device=req.device,
        )

        pipeline = IndexingPipeline(config)
        query_engine = QueryEngine(
            index=pipeline.index,
            gemini=pipeline.gemini,
            siglip=pipeline.siglip,
            top_k=20,
        )
        akinator = AkinatorRefiner(
            index=pipeline.index,
            gemini=pipeline.gemini,
            threshold=10,
        )

        state["pipeline"] = pipeline
        state["query_engine"] = query_engine
        state["akinator"] = akinator
        state["initialized"] = True

        return InitResponse(status="ok", message="Models loaded successfully")

    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
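
# Illustrative call (the key value is a placeholder; assumes localhost:8000):
#   requests.post("http://localhost:8000/init",
#                 json={"gemini_api_key": "YOUR_GEMINI_KEY", "device": "cpu"})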


@app.post("/index", response_model=IndexResponse)
async def index_video(
    video: UploadFile = File(...),
    caption_every_n: int = Query(default=3, ge=1, le=20),
):
    """
    Upload and index a video. Extracts frames, runs detection, 
    generates embeddings and captions.
    
    Send as multipart/form-data with field name "video".
    """
    if not state["initialized"]:
        raise HTTPException(status_code=400, detail="Not initialized. Call POST /init first.")

    # Save uploaded video to temp file
    suffix = Path(video.filename or "").suffix or ".mp4"
    with tempfile.NamedTemporaryFile(delete=False, suffix=suffix) as tmp:
        shutil.copyfileobj(video.file, tmp)
        tmp_path = tmp.name

    try:
        stats = state["pipeline"].index_video(
            tmp_path,
            caption_every_n=caption_every_n,
            detect_every_n=1,
        )
        return IndexResponse(
            status="ok",
            frames=stats["frames"],
            detections=stats["detections"],
            visual_vectors=stats["visual_vectors"],
            caption_vectors=stats["caption_vectors"],
            elapsed_sec=stats["elapsed_sec"],
        )
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
    finally:
        os.unlink(tmp_path)
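
# Illustrative upload (assumes localhost:8000; "clip.mp4" is a placeholder path):
#   with open("clip.mp4", "rb") as f:
#       requests.post("http://localhost:8000/index",
#                     params={"caption_every_n": 3},
#                     files={"video": f})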


@app.post("/search", response_model=SearchResponse)
def search(req: SearchRequest):
    """
    Search the indexed video with natural language.
    Supports boolean: "red car AND person", "dog OR cat"
    """
    if not state["initialized"]:
        raise HTTPException(status_code=400, detail="Not initialized. Call POST /init first.")

    try:
        results = state["query_engine"].search(req.query, top_k=req.top_k)

        search_results = [
            SearchResult(
                frame_id=r.frame_id,
                timestamp_sec=r.timestamp_sec,
                time_str=r.time_str,
                score=round(r.score, 4),
                caption=r.caption or "",
                detections=r.detections,
                match_source=r.match_source,
            )
            for r in results
        ]

        # Store for RAG/Akinator
        state["_last_results"] = results

        # Check if Akinator refinement is needed
        akinator_active = False
        akinator_question = None
        akinator_options = None

        if len(results) > 10 and state["akinator"]:
            ak_result = state["akinator"].start(results, req.query)
            if ak_result["status"] == "refining":
                akinator_active = True
                akinator_question = ak_result["question"]
                akinator_options = ak_result["options"]

        return SearchResponse(
            query=req.query,
            results=search_results,
            count=len(search_results),
            akinator_active=akinator_active,
            akinator_question=akinator_question,
            akinator_options=akinator_options,
        )

    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
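
# Illustrative search (boolean operators as described in the docstring; assumes localhost:8000):
#   requests.post("http://localhost:8000/search",
#                 json={"query": "red car AND person", "top_k": 20})
#   # If the response has akinator_active=True, follow up with POST /refine.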


@app.post("/refine", response_model=RefineResponse)
def refine(req: RefineRequest):
    """
    Answer an Akinator refinement question to narrow results.
    Send the chosen option from the previous search/refine response.
    """
    if not state["akinator"]:
        raise HTTPException(status_code=400, detail="No active refinement session")

    try:
        result = state["akinator"].answer(req.choice, req.query)
        return RefineResponse(
            status=result["status"],
            count=result["count"],
            results=result.get("results"),
            question=result.get("question"),
            options=result.get("options"),
            history=result.get("history"),
        )
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
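
# Illustrative refinement step (only meaningful after /search has returned
# akinator_active=True; the choice below is a placeholder for one of the
# returned akinator_options):
#   requests.post("http://localhost:8000/refine",
#                 json={"choice": "<one of akinator_options>", "query": "red car AND person"})
#   # Repeat while the response status is "refining".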


@app.post("/rag", response_model=RAGResponse)
def rag_answer(req: RAGRequest):
    """
    Generate a RAG answer from the last search results.
    Cites specific timestamps in the response.
    """
    if not state["initialized"]:
        raise HTTPException(status_code=400, detail="Not initialized. Call POST /init first.")

    last_results = state.get("_last_results", [])
    if not last_results:
        raise HTTPException(status_code=400, detail="No search results. Call POST /search first.")

    try:
        contexts = [r.to_dict() for r in last_results[:15]]
        answer = state["pipeline"].gemini.generate_rag_answer(req.query, contexts)
        return RAGResponse(query=req.query, answer=answer)
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
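
# Illustrative RAG call (requires a prior POST /search, since the answer is
# generated over those stored results; the question is a placeholder):
#   requests.post("http://localhost:8000/rag",
#                 json={"query": "When does the red car appear?"})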


@app.get("/stats")
def stats():
    """Get current index statistics."""
    if not state["initialized"]:
        raise HTTPException(status_code=400, detail="Not initialized.")
    return state["pipeline"].index.stats()