""" ╔══════════════════════════════════════════════════════════════════════════╗ ║ MoodLens · Sentiment Intelligence Platform · v2.0 ║ ╚══════════════════════════════════════════════════════════════════════════╝ Run: uvicorn app:app --host 0.0.0.0 --port 8000 --reload """ import sys, time, logging from pathlib import Path from datetime import datetime from contextlib import asynccontextmanager from fastapi import FastAPI, Request from fastapi.responses import JSONResponse, HTMLResponse, FileResponse from fastapi.middleware.cors import CORSMiddleware from fastapi.responses import JSONResponse, HTMLResponse from fastapi.openapi.docs import get_swagger_ui_html from pydantic import BaseModel # ══════════════════════════════════════════════════════════════════════════ # PATH — exact same as original # ══════════════════════════════════════════════════════════════════════════ ROOT = Path(__file__).resolve().parent.parent sys.path.append(str(ROOT / "python")) # ══════════════════════════════════════════════════════════════════════════ # ML IMPORT — direct, no guard, same as original # ══════════════════════════════════════════════════════════════════════════ from roberta_predict import predict, compare_all_models # ══════════════════════════════════════════════════════════════════════════ # LOGGER # ══════════════════════════════════════════════════════════════════════════ RESET = "\033[0m"; BOLD = "\033[1m"; DIM = "\033[2m" CYAN = "\033[36m"; GREEN = "\033[32m"; YELLOW = "\033[33m" RED = "\033[31m" class _Fmt(logging.Formatter): C = {"DEBUG": DIM, "INFO": GREEN, "WARNING": YELLOW, "ERROR": RED, "CRITICAL": f"{BOLD}{RED}"} def format(self, r): ts = datetime.now().strftime("%H:%M:%S") lc = self.C.get(r.levelname, "") return f"{DIM}{ts}{RESET} {lc}{r.levelname:<8}{RESET} {CYAN}{r.name}{RESET} {r.getMessage()}" _h = logging.StreamHandler(); _h.setFormatter(_Fmt()) logging.root.handlers = [_h]; logging.root.setLevel(logging.INFO) log = logging.getLogger("moodlens") # 
# ══════════════════════════════════════════════════════════════════════════
# LIFESPAN
# ══════════════════════════════════════════════════════════════════════════
@asynccontextmanager
async def lifespan(app: FastAPI):
    """Print the startup banner when the server boots; say goodbye on shutdown."""
    print(f"""
{YELLOW}{BOLD}
 ╔═══════════════════════════════════════════════════════╗
 ║                                                       ║
 ║   ███╗   ███╗ ██████╗  ██████╗ ██████╗                ║
 ║   ████╗ ████║██╔═══██╗██╔═══██╗██╔══██╗               ║
 ║   ██╔████╔██║██║   ██║██║   ██║██║  ██║               ║
 ║   ██║╚██╔╝██║██║   ██║██║   ██║██║  ██║               ║
 ║   ██║ ╚═╝ ██║╚██████╔╝╚██████╔╝██████╔╝               ║
 ║   ╚═╝     ╚═╝ ╚═════╝  ╚═════╝ ╚═════╝  LENS v2.0     ║
 ║                                                       ║
 ╚═══════════════════════════════════════════════════════╝
{RESET}
   {YELLOW}◆{RESET} Splash  → http://127.0.0.1:{YELLOW}8000{RESET}
   {YELLOW}◆{RESET} Swagger → http://127.0.0.1:{YELLOW}8000/docs{RESET}
   {YELLOW}◆{RESET} Health  → http://127.0.0.1:{YELLOW}8000/health{RESET}

   {GREEN}✓{RESET} ML Engine → {GREEN}{BOLD}READY{RESET}
   {GREEN}✓{RESET} Dataset   → Zomato Reviews Corpus
   {GREEN}✓{RESET} Models    → RoBERTa · DistilRoBERTa · BERT · ALBERT
""")
    yield
    print(f"\n   {YELLOW}◆{RESET} MoodLens offline {DIM}· bye 👋{RESET}\n")


# ══════════════════════════════════════════════════════════════════════════
# APP
# ══════════════════════════════════════════════════════════════════════════
app = FastAPI(
    title="MoodLens · Sentiment Intelligence API",
    description="""
## MoodLens — Multi-Model NLP Sentiment Engine

Enterprise-grade sentiment analysis powered by four transformer models,
fine-tuned on the **Zomato Reviews** corpus.

### Models

| Model | Hugging Face ID | Strength |
|---|---|---|
| **RoBERTa** | `cardiffnlp/twitter-roberta-base-sentiment-latest` | General sentiment · Default |
| **DistilRoBERTa** | `mrm8488/distilroberta-finetuned-...` | Faster · Financial/review domain |
| **BERT** | `nlptown/bert-base-multilingual-uncased-sentiment` | Multilingual · 5-star scale |
| **ALBERT** | `textattack/albert-base-v2-yelp-polarity` | Efficient · Yelp polarity |

### Quick Start

```bash
curl -X POST http://localhost:8000/predict \\
  -H "Content-Type: application/json" \\
  -d '{"text": "Best biryani I have ever had!"}'
```
""",
    version="2.0.0",
    docs_url=None,           # default Swagger disabled; served dark-themed at /docs below
    redoc_url="/redoc",
    lifespan=lifespan,
    openapi_tags=[
        {"name": "Inference", "description": "Sentiment prediction endpoints."},
        {"name": "System", "description": "Health and diagnostics."},
    ],
)

# NOTE(review): wildcard origins together with allow_credentials=True is the
# permissive dev setup — tighten allow_origins before exposing publicly.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)


@app.middleware("http")
async def _timer(req: Request, call_next):
    """Log every request with a status-coloured line and stamp X-Response-Time."""
    t0 = time.perf_counter()
    res = await call_next(req)
    ms = (time.perf_counter() - t0) * 1000
    # green < 400, yellow 4xx, red 5xx
    c = GREEN if res.status_code < 400 else YELLOW if res.status_code < 500 else RED
    log.info(
        f"{c}{req.method:<6}{RESET} {req.url.path:<22} "
        f"{c}{res.status_code}{RESET} {DIM}{ms:.1f}ms{RESET}"
    )
    res.headers["X-Response-Time"] = f"{ms:.2f}ms"
    return res


# ══════════════════════════════════════════════════════════════════════════
# SCHEMAS
# ══════════════════════════════════════════════════════════════════════════
class TextInput(BaseModel):
    # Free-form review text to classify. No length constraint is imposed here;
    # the model layer truncates to 512 tokens.
    text: str


# ══════════════════════════════════════════════════════════════════════════
# ROUTES
# ══════════════════════════════════════════════════════════════════════════
@app.get("/favicon.ico", include_in_schema=False)
async def favicon():
    """Serve the favicon that sits next to this module."""
    return FileResponse(Path(__file__).parent / "favicon.ico")


@app.get("/", response_class=HTMLResponse, include_in_schema=False)
def root():
    """Serve the HTML splash page."""
    return HTMLResponse(_splash())


@app.get("/docs", response_class=HTMLResponse, include_in_schema=False)
def dark_docs():
    """Serve Swagger UI manually (docs_url=None above) with custom theming."""
    return get_swagger_ui_html(
        openapi_url=app.openapi_url,
        title="MoodLens · API Docs",
        swagger_js_url="https://cdn.jsdelivr.net/npm/swagger-ui-dist@5/swagger-ui-bundle.js",
        swagger_css_url="https://cdn.jsdelivr.net/npm/swagger-ui-dist@5/swagger-ui.css",
        swagger_favicon_url="data:,",  # blank favicon for docs too
        swagger_ui_parameters={
            "syntaxHighlight.theme": "monokai",
            "tryItOutEnabled": True,
            "displayRequestDuration": True,
            "defaultModelsExpandDepth": -1,
        },
    )


@app.get(
    "/health",
    summary="Server & ML Health Check",
    description="""
Returns real-time status of the API server and ML engine.

**Use this endpoint to:**
- ✅ Verify all 4 transformer models are loaded and ready
- ✅ Confirm server is reachable before sending inference requests
- ✅ Monitor uptime in CI/CD pipelines or dashboards
- ✅ Check UTC timestamp for server clock sync
""",
    tags=["System"],
)
def health():
    """Static liveness payload plus a UTC timestamp."""
    # Local import keeps this fix self-contained; datetime.utcnow() is
    # deprecated (3.12+) and naive — use an aware UTC time, emitted with the
    # same trailing-"Z" ISO-8601 shape as before.
    from datetime import timezone

    now = datetime.now(timezone.utc).isoformat().replace("+00:00", "Z")
    return {
        "status": "ok",
        "version": "2.0.0",
        "timestamp": now,
        "models": ["roberta", "distilroberta", "bert", "albert"],
        "dataset": "Zomato Reviews — Food & Dining Corpus",
    }


@app.post(
    "/predict",
    summary="Predict Sentiment (RoBERTa)",
    description="""
Runs the **default RoBERTa model** on your input text.

### How it works
1. Text tokenised and truncated to **512 tokens**
2. RoBERTa runs a single forward pass
3. Softmax scores mapped → **Positive / Neutral / Negative**
4. Highest-probability class returned as `prediction`

### Response fields
| Field | Type | Description |
|---|---|---|
| `prediction` | string | `Positive`, `Neutral`, or `Negative` |
| `confidence` | float | Winning class score (0.0 – 1.0) |
| `positive` | float | Raw probability — Positive class |
| `neutral` | float | Raw probability — Neutral class |
| `negative` | float | Raw probability — Negative class |
""",
    tags=["Inference"],
)
def get_prediction(data: TextInput):
    """Run the default model; probs index order is [negative, neutral, positive]."""
    label, probs = predict(data.text)
    return {
        "prediction": label,
        "confidence": float(max(probs)),
        "negative": float(probs[0]),
        "neutral": float(probs[1]),
        "positive": float(probs[2]),
    }


@app.post(
    "/compare",
    summary="Compare All 4 Models",
    description="""
Runs **all four models** on the same input and returns results
sorted by confidence.

### Models compared
| Model | Strength |
|---|---|
| **RoBERTa** | General-purpose · highest accuracy |
| **DistilRoBERTa** | 40% faster · financial/review domain |
| **BERT** | Multilingual · 5-star scale |
| **ALBERT** | Lightweight · Yelp polarity |

### How it works
1. All 4 models process the input **independently**
2. Raw labels normalised → `Positive / Neutral / Negative`
3. Results **sorted by confidence** — best model first
4. Use `/predict` for speed · `/compare` for cross-validation
""",
    tags=["Inference"],
)
def compare_models(data: TextInput):
    """Fan the text out to every model via the ML layer's comparison helper."""
    result = compare_all_models(data.text)
    return {"comparison": result}


@app.exception_handler(404)
async def not_found(_, __):
    """Friendly JSON 404 listing the real routes instead of FastAPI's default."""
    return JSONResponse(
        status_code=404,
        content={
            "error": "Route not found",
            "routes": {
                "GET": ["/", "/health", "/docs", "/redoc"],
                "POST": ["/predict", "/compare"],
            },
            "docs": "http://127.0.0.1:8000/docs",
        },
    )


# ══════════════════════════════════════════════════════════════════════════
# SPLASH
#   DARK  → Yellow #FFD449 + Zomato Red #E23744
#   LIGHT → Uber Navy #09091A + Uber Blue #276EF1
#   Font  → JetBrains Mono everywhere
#   Favicon → none
# ══════════════════════════════════════════════════════════════════════════
def _splash() -> str:
    """Return the splash page served at `/`.

    NOTE(review): in the copy under review the HTML markup of this page
    appears to have been stripped — only the text content survives.  The
    visible text is preserved verbatim below; restore the full HTML
    (styles per the palette comment above) from version control.
    """
    return r"""
Sentiment Intelligence Platform
Enterprise NLP engine · RoBERTa · DistilRoBERTa · BERT · ALBERT
Fine-tuned on 2.9M+ Zomato Reviews · Three-class · Sub-second inference
API Endpoints
Click "Try it live" on any card — opens Swagger directly at that endpoint, ready to test with one click.
Transformer Models
Each model brings a unique specialisation — together they form a robust, cross-validated ensemble.
"""