import logging
import traceback
import torch
from fastapi import FastAPI, Request
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import JSONResponse
from adapters.api.routers import analysis, insights, admin
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s [%(levelname)s] %(name)s: %(message)s",
)
logger = logging.getLogger(__name__)
# ASGI application instance; title/description/version feed the generated
# OpenAPI schema and the interactive /docs page.
app = FastAPI(
    title="NLP Intelligence API",
    description="Social media content analysis: NER, Topic Modeling, Sentiment Analysis, Network Analysis",
    version="1.0.0",
)
@app.exception_handler(Exception)
async def global_exception_handler(request: Request, exc: Exception):
    """Catch-all handler: log the full traceback, return a generic 500 JSON body.

    Registered for bare ``Exception`` so any unhandled error in a route is
    logged server-side while the client gets a structured JSON error.
    """
    # Build the traceback from the handler's own `exc` argument rather than
    # traceback.format_exc(): format_exc() reads sys.exc_info(), which is only
    # populated while an `except` block is active — an implementation detail
    # of how the framework invokes this handler. This form is always correct.
    tb = "".join(traceback.format_exception(type(exc), exc, exc.__traceback__))
    # Lazy %-style args so the message is only formatted if the record is emitted.
    logger.error("Unhandled exception on %s %s\n%s", request.method, request.url, tb)
    return JSONResponse(
        status_code=500,
        content={"detail": f"{type(exc).__name__}: {exc}"},
    )
# CORS for the frontend.
# NOTE(review): the CORS spec forbids a wildcard origin together with
# credentials — browsers will reject credentialed responses to "*". The
# middleware may compensate by echoing the request's Origin, but this is
# effectively "allow everything with cookies"; restrict allow_origins to the
# real frontend origin(s) before production.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],  # temporarily for testing
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
# Register routers: analysis and insights share the /api prefix; admin
# endpoints are namespaced under /api/admin. Tags group them in /docs.
app.include_router(analysis.router, prefix="/api", tags=["Analysis"])
app.include_router(insights.router, prefix="/api", tags=["Insights"])
app.include_router(admin.router, prefix="/api/admin", tags=["Admin"])
@app.get("/")
async def root():
    """Service landing payload: API name, version, and an endpoint directory."""
    # Human-readable index of the routes registered on this app.
    endpoint_map = {
        "health": "GET /api/health",
        "upload": "POST /api/upload",
        "analyze": "POST /api/analyze",
        "network": "POST /api/network",
        "insights": "POST /api/insights",
        "admin_entries": "GET/POST /api/admin/knowledge",
        "admin_labels": "GET/POST /api/admin/labels",
        "admin_stopwords": "GET/POST /api/admin/stopwords",
    }
    return {
        "name": "NLP Intelligence API",
        "version": "1.0.0",
        "endpoints": endpoint_map,
    }
@app.get("/api/health")
async def health():
    """
    Quick health check used by the frontend on page load.
    Returns GPU availability and which NLP models are loaded.
    """
    # Imported inside the handler rather than at module top — presumably to
    # avoid an import cycle or to defer model-service initialization; verify
    # against adapters.api.services.
    from adapters.api import services
    gpu = torch.cuda.is_available()
    # Reports only CUDA device 0, even on multi-GPU hosts.
    gpu_name = torch.cuda.get_device_name(0) if gpu else None
    return {
        "status": "ok",
        "gpu": gpu,
        "gpu_name": gpu_name,
        # NOTE(review): reaches into private attributes of the service
        # singletons (_pipeline / _model). Assumes each is None until its
        # model finishes loading — confirm that contract in services, and
        # consider exposing a public is_loaded() instead.
        "models": {
            "ner": services.ner._pipeline is not None,
            "sentiment": services.sentiment._pipeline is not None,
            "topic": services.topic._model is not None,
        },
    }