|
|
|
|
|
""" |
|
|
GraphRAG service: combines vector search + graph expansion to produce context. |
|
|
Environment: |
|
|
- VECTOR_BASE: http://host:7010 |
|
|
- GRAPH_BASE: http://host:7011 |
|
|
Endpoints: |
|
|
- GET /health |
|
|
- POST /graphrag {query_vector, seed_ids?, top_k?, depth?} |
|
|
- POST /query (alias for /graphrag) |
|
|
""" |
|
|
|
|
|
import os |
|
|
from typing import Dict, Any, List |
|
|
|
|
|
import requests |
|
|
from fastapi import FastAPI, Request |
|
|
from fastapi.middleware.cors import CORSMiddleware |
|
|
from fastapi.responses import JSONResponse |
|
|
from prometheus_client import Counter, Histogram, make_asgi_app |
|
|
import uvicorn |
|
|
|
|
|
# Port the service binds to; defaults to 7012 for local development.
PORT = int(os.getenv("PORT", "7012"))

# Upstream service base URLs. Trailing slashes are stripped so that
# f"{BASE}/path" joins never produce a double slash.
VECTOR_BASE = os.getenv("VECTOR_BASE", "http://127.0.0.1:7010").rstrip("/")

GRAPH_BASE = os.getenv("GRAPH_BASE", "http://127.0.0.1:7011").rstrip("/")

RANKER_BASE = os.getenv("RANKER_BASE", "http://127.0.0.1:7014").rstrip("/")


app = FastAPI(title="Nova GraphRAG", version="0.1.0")

# Fully permissive CORS for development use.
# NOTE(review): browsers reject `allow_origins=["*"]` combined with
# `allow_credentials=True`; if credentialed cross-origin calls are needed,
# list explicit origins — confirm against actual clients.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Prometheus metrics, labeled per route ("health", "graphrag").
REQUESTS = Counter("graphrag_requests_total", "GraphRAG requests", ["route"])

LATENCY = Histogram("graphrag_request_latency_seconds", "Latency", ["route"])
|
|
|
|
|
|
|
|
@app.get("/health")
def health():
    """Liveness probe: reports status, bound port, and upstream base URLs."""
    REQUESTS.labels(route="health").inc()
    payload = {
        "status": "ok",
        "port": PORT,
        "vector": VECTOR_BASE,
        "graph": GRAPH_BASE,
    }
    return payload
|
|
|
|
|
|
|
|
def _vector_search(collection: str, query_vector: List[float], top_k: int) -> Dict[str, Any]:
    """Best-effort vector search against VECTOR_BASE.

    Returns the upstream JSON body, or {"results": []} when the vector
    service is unreachable or returns malformed JSON.
    """
    try:
        vr = requests.post(
            f"{VECTOR_BASE}/search",
            json={"collection": collection, "query_vector": query_vector, "top_k": top_k},
            timeout=10,
        )
        return vr.json()
    except (requests.RequestException, ValueError):
        # Deliberate degradation: a downstream outage must not fail the request.
        # (Narrowed from a bare `except Exception` so programming errors surface.)
        return {"results": []}


def _expand_neighbors(seed_ids: List[Any], depth: int) -> List[Dict[str, Any]]:
    """Collect graph neighbors for each seed id from GRAPH_BASE.

    Seeds whose lookup fails (network error or bad JSON) are skipped.
    """
    neighbors: List[Dict[str, Any]] = []
    for sid in seed_ids:
        try:
            nr = requests.post(
                f"{GRAPH_BASE}/neighbors",
                json={"id": sid, "depth": depth},
                timeout=5,
            )
            neighbors.extend(nr.json().get("neighbors", []))
        except (requests.RequestException, ValueError):
            continue
    return neighbors


def _rerank(vector_top: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
    """Reorder vector hits via the external ranker; fall back to the input order.

    The ranker only returns ids, so results are mapped back to the original
    hit dicts. Hits the ranker drops are dropped here too (pre-existing
    contract of this endpoint).
    """
    try:
        rr = requests.post(
            f"{RANKER_BASE}/rerank",
            json={
                "items": [
                    {
                        "id": r.get("id"),
                        "score": r.get("score", 0.0),
                        "recency": 0.5,
                        "authority": 0.5,
                        "coverage": 0.5,
                    }
                    for r in vector_top
                ]
            },
            timeout=5,
        )
        if rr.status_code != 200:
            return vector_top
        order = [it["id"] for it in rr.json().get("items", [])]
        by_id = {r["id"]: r for r in vector_top}
        return [by_id[i] for i in order if i in by_id]
    except (requests.RequestException, ValueError, KeyError, TypeError):
        # Ranker unavailable or returned an unexpected shape: keep vector order.
        return vector_top


@app.post("/graphrag")
async def graphrag(req: Request) -> JSONResponse:
    """Combine vector search with graph expansion to build a retrieval context.

    Request body (JSON):
        query_vector: optional embedding; when present, VECTOR_BASE is queried.
        seed_ids:     optional explicit seed node ids (take precedence in order).
        collection:   vector collection name (default "default").
        top_k:        max vector hits / seeds expanded (default 5).
        depth:        graph expansion depth per seed (default 1).

    Returns {"seed_ids", "vector_top", "neighbors"}; every upstream call is
    best-effort, so a downstream outage degrades the context instead of
    failing the request.
    """
    with LATENCY.labels(route="graphrag").time():
        REQUESTS.labels(route="graphrag").inc()
        body = await req.json()
        qv = body.get("query_vector")
        seeds = body.get("seed_ids", [])
        top_k = int(body.get("top_k", 5))
        depth = int(body.get("depth", 1))

        vec_res: Dict[str, Any] = {"results": []}
        if qv is not None:
            vec_res = _vector_search(body.get("collection", "default"), qv, top_k)

        # Merge explicit seeds with vector hits, preserving order and dropping
        # duplicates. Set-based dedupe (was O(n^2) list membership); assumes
        # ids are hashable (strings/ints), as the upstream JSON suggests.
        ids = list(seeds)
        seen = set(ids)
        for r in vec_res.get("results", []):
            rid = r.get("id")
            if rid not in seen:
                seen.add(rid)
                ids.append(rid)

        # Only the first top_k seeds are expanded (pre-existing behavior).
        neighbors = _expand_neighbors(ids[:top_k], depth)
        vector_top = _rerank(vec_res.get("results", [])[:top_k])

        context = {
            "seed_ids": ids,
            "vector_top": vector_top,
            "neighbors": neighbors,
        }
        return JSONResponse(status_code=200, content=context)
|
|
|
|
|
|
|
|
@app.post("/query")
async def query(req: Request) -> JSONResponse:
    """Backward-compatible alias: delegates the request to the /graphrag handler."""
    response = await graphrag(req)
    return response
|
|
|
|
|
|
|
|
|
|
|
# Expose Prometheus metrics as a mounted ASGI sub-application at /metrics.
metrics_app = make_asgi_app()

app.mount("/metrics", metrics_app)


if __name__ == "__main__":
    # Bind to all interfaces so the service is reachable from containers
    # and other hosts, on the configured PORT.
    uvicorn.run(app, host="0.0.0.0", port=PORT)
|
|
|