"""
╔══════════════════════════════════════════════════════════════════════════╗
║           MoodLens · Sentiment Intelligence Platform · v2.0              ║
╚══════════════════════════════════════════════════════════════════════════╝

Run: uvicorn app:app --host 0.0.0.0 --port 8000 --reload
"""
import sys
import time
import logging
from pathlib import Path
from datetime import datetime
from contextlib import asynccontextmanager

from fastapi import FastAPI, Request
from fastapi.responses import JSONResponse, HTMLResponse, FileResponse
from fastapi.middleware.cors import CORSMiddleware
from fastapi.openapi.docs import get_swagger_ui_html
from pydantic import BaseModel

# NOTE(review): the original imported JSONResponse/HTMLResponse a second time;
# the duplicate line was removed — the import above already covers both.

# ══════════════════════════════════════════════════════════════════════════
# PATH — make the sibling `python/` directory importable (ML code lives there)
# ══════════════════════════════════════════════════════════════════════════
ROOT = Path(__file__).resolve().parent.parent
sys.path.append(str(ROOT / "python"))

# ══════════════════════════════════════════════════════════════════════════
# ML IMPORT — direct, no guard: a missing model module should fail fast at boot
# ══════════════════════════════════════════════════════════════════════════
from roberta_predict import predict, compare_all_models

# ══════════════════════════════════════════════════════════════════════════
# LOGGER — colourised single-line console output
# ══════════════════════════════════════════════════════════════════════════
RESET = "\033[0m"; BOLD = "\033[1m"; DIM = "\033[2m"
CYAN = "\033[36m"; GREEN = "\033[32m"; YELLOW = "\033[33m"
RED = "\033[31m"


class _Fmt(logging.Formatter):
    """Format records as `HH:MM:SS LEVEL logger message` with ANSI colours."""

    # Per-level colour prefix; CRITICAL gets bold red.
    C = {"DEBUG": DIM, "INFO": GREEN, "WARNING": YELLOW,
         "ERROR": RED, "CRITICAL": f"{BOLD}{RED}"}

    def format(self, r):
        ts = datetime.now().strftime("%H:%M:%S")
        lc = self.C.get(r.levelname, "")
        return f"{DIM}{ts}{RESET} {lc}{r.levelname:<8}{RESET} {CYAN}{r.name}{RESET} {r.getMessage()}"


# Replace the root handlers wholesale so uvicorn's defaults don't double-log.
_h = logging.StreamHandler()
_h.setFormatter(_Fmt())
logging.root.handlers = [_h]
logging.root.setLevel(logging.INFO)
log = logging.getLogger("moodlens")
══════════════════════════════════════════════════════════════════════════ # LIFESPAN # ══════════════════════════════════════════════════════════════════════════ @asynccontextmanager async def lifespan(app: FastAPI): print(f""" {YELLOW}{BOLD} ╔═══════════════════════════════════════════════════════╗ ║ ║ ║ ███╗ ███╗ ██████╗ ██████╗ ██████╗ ║ ║ ████╗ ████║██╔═══██╗██╔═══██╗██╔══██╗ ║ ║ ██╔████╔██║██║ ██║██║ ██║██║ ██║ ║ ║ ██║╚██╔╝██║██║ ██║██║ ██║██║ ██║ ║ ║ ██║ ╚═╝ ██║╚██████╔╝╚██████╔╝██████╔╝ ║ ║ ╚═╝ ╚═╝ ╚═════╝ ╚═════╝ ╚═════╝ LENS v2.0 ║ ║ ║ ╚═══════════════════════════════════════════════════════╝ {RESET} {YELLOW}◆{RESET} Splash → http://127.0.0.1:{YELLOW}8000{RESET} {YELLOW}◆{RESET} Swagger → http://127.0.0.1:{YELLOW}8000/docs{RESET} {YELLOW}◆{RESET} Health → http://127.0.0.1:{YELLOW}8000/health{RESET} {GREEN}✓{RESET} ML Engine → {GREEN}{BOLD}READY{RESET} {GREEN}✓{RESET} Dataset → Zomato Reviews Corpus {GREEN}✓{RESET} Models → RoBERTa · DistilRoBERTa · BERT · ALBERT """) yield print(f"\n {YELLOW}◆{RESET} MoodLens offline {DIM}· bye 👋{RESET}\n") # ══════════════════════════════════════════════════════════════════════════ # APP # ══════════════════════════════════════════════════════════════════════════ app = FastAPI( title = "MoodLens · Sentiment Intelligence API", description = """ ## MoodLens — Multi-Model NLP Sentiment Engine Enterprise-grade sentiment analysis powered by four transformer models, fine-tuned on the **Zomato Reviews** corpus. 
### Models | Model | Hugging Face ID | Strength | |---|---|---| | **RoBERTa** | `cardiffnlp/twitter-roberta-base-sentiment-latest` | General sentiment · Default | | **DistilRoBERTa** | `mrm8488/distilroberta-finetuned-...` | Faster · Financial/review domain | | **BERT** | `nlptown/bert-base-multilingual-uncased-sentiment` | Multilingual · 5-star scale | | **ALBERT** | `textattack/albert-base-v2-yelp-polarity` | Efficient · Yelp polarity | ### Quick Start ```bash curl -X POST http://localhost:8000/predict \\ -H "Content-Type: application/json" \\ -d '{"text": "Best biryani I have ever had!"}' ``` """, version = "2.0.0", docs_url = None, redoc_url = "/redoc", lifespan = lifespan, openapi_tags= [ {"name": "Inference", "description": "Sentiment prediction endpoints."}, {"name": "System", "description": "Health and diagnostics."}, ], ) app.add_middleware( CORSMiddleware, allow_origins=["*"], allow_credentials=True, allow_methods=["*"], allow_headers=["*"], ) @app.middleware("http") async def _timer(req: Request, call_next): t0 = time.perf_counter() res = await call_next(req) ms = (time.perf_counter() - t0) * 1000 c = GREEN if res.status_code < 400 else YELLOW if res.status_code < 500 else RED log.info(f"{c}{req.method:<6}{RESET} {req.url.path:<22} {c}{res.status_code}{RESET} {DIM}{ms:.1f}ms{RESET}") res.headers["X-Response-Time"] = f"{ms:.2f}ms" return res # ══════════════════════════════════════════════════════════════════════════ # SCHEMAS # ══════════════════════════════════════════════════════════════════════════ class TextInput(BaseModel): text: str # ══════════════════════════════════════════════════════════════════════════ # ROUTES # ══════════════════════════════════════════════════════════════════════════ from fastapi.responses import FileResponse @app.get("/favicon.ico", include_in_schema=False) async def favicon(): return FileResponse(Path(__file__).parent / "favicon.ico") @app.get("/", response_class=HTMLResponse, include_in_schema=False) def root(): return 
HTMLResponse(_splash()) @app.get("/docs", response_class=HTMLResponse, include_in_schema=False) def dark_docs(): return get_swagger_ui_html( openapi_url = app.openapi_url, title = "MoodLens · API Docs", swagger_js_url = "https://cdn.jsdelivr.net/npm/swagger-ui-dist@5/swagger-ui-bundle.js", swagger_css_url = "https://cdn.jsdelivr.net/npm/swagger-ui-dist@5/swagger-ui.css", swagger_favicon_url = "data:,", # blank favicon for docs too swagger_ui_parameters={ "syntaxHighlight.theme" : "monokai", "tryItOutEnabled" : True, "displayRequestDuration" : True, "defaultModelsExpandDepth": -1, }, ) @app.get( "/health", summary="Server & ML Health Check", description=""" Returns real-time status of the API server and ML engine. **Use this endpoint to:** - ✅ Verify all 4 transformer models are loaded and ready - ✅ Confirm server is reachable before sending inference requests - ✅ Monitor uptime in CI/CD pipelines or dashboards - ✅ Check UTC timestamp for server clock sync """, tags=["System"], ) def health(): return { "status" : "ok", "version" : "2.0.0", "timestamp": datetime.utcnow().isoformat() + "Z", "models" : ["roberta", "distilroberta", "bert", "albert"], "dataset" : "Zomato Reviews — Food & Dining Corpus", } @app.post( "/predict", summary="Predict Sentiment (RoBERTa)", description=""" Runs the **default RoBERTa model** on your input text. ### How it works 1. Text tokenised and truncated to **512 tokens** 2. RoBERTa runs a single forward pass 3. Softmax scores mapped → **Positive / Neutral / Negative** 4. 
Highest-probability class returned as `prediction` ### Response fields | Field | Type | Description | |---|---|---| | `prediction` | string | `Positive`, `Neutral`, or `Negative` | | `confidence` | float | Winning class score (0.0 – 1.0) | | `positive` | float | Raw probability — Positive class | | `neutral` | float | Raw probability — Neutral class | | `negative` | float | Raw probability — Negative class | """, tags=["Inference"], ) def get_prediction(data: TextInput): label, probs = predict(data.text) return { "prediction": label, "confidence": float(max(probs)), "negative" : float(probs[0]), "neutral" : float(probs[1]), "positive" : float(probs[2]), } @app.post( "/compare", summary="Compare All 4 Models", description=""" Runs **all four models** on the same input and returns results sorted by confidence. ### Models compared | Model | Strength | |---|---| | **RoBERTa** | General-purpose · highest accuracy | | **DistilRoBERTa** | 40% faster · financial/review domain | | **BERT** | Multilingual · 5-star scale | | **ALBERT** | Lightweight · Yelp polarity | ### How it works 1. All 4 models process the input **independently** 2. Raw labels normalised → `Positive / Neutral / Negative` 3. Results **sorted by confidence** — best model first 4. 
Use `/predict` for speed · `/compare` for cross-validation """, tags=["Inference"], ) def compare_models(data: TextInput): result = compare_all_models(data.text) return {"comparison": result} @app.exception_handler(404) async def not_found(_, __): return JSONResponse(status_code=404, content={ "error" : "Route not found", "routes": {"GET": ["/", "/health", "/docs", "/redoc"], "POST": ["/predict", "/compare"]}, "docs" : "http://127.0.0.1:8000/docs", }) # ══════════════════════════════════════════════════════════════════════════ # SPLASH # DARK → Yellow #FFD449 + Zomato Red #E23744 # LIGHT → Uber Navy #09091A + Uber Blue #276EF1 # Font → JetBrains Mono everywhere # Favicon → none # ══════════════════════════════════════════════════════════════════════════ def _splash() -> str: return r""" MoodLens · API
Zomato Dataset  ·  4 Models  ·  v2.0

Sentiment Intelligence Platform

Decode Sentiment.

Enterprise NLP engine · RoBERTa · DistilRoBERTa · BERT · ALBERT
Fine-tuned on 2.9M+ Zomato Reviews · Three-class · Sub-second inference

Explore API Docs Health Check
4
Transformers
3
Classes
2.9M+
Reviews
512
Max Tokens
● API Online | RoBERTa Ready |
moodlens · api demo
$ curl -X POST http://localhost:8000/predict \
     -H "Content-Type: application/json" \
     -d '{"text": "Best biryani I have ever had!"}'

# 200 OK · 284ms
{
  "prediction": "Positive",
  "confidence": 0.978,
  "positive":  0.978,
  "neutral":   0.015,
  "negative":  0.007
}

$

API Endpoints

Four Routes.
Zero Confusion.

Click "Try it live" on any card — opens Swagger directly at that endpoint, ready to test with one click.

POST
/predict
Single-model RoBERTa sentiment prediction
  • Text is tokenised and truncated to 512 tokens, passed through RoBERTa in a single forward pass
  • Softmax output mapped to Positive / Neutral / Negative with raw probability for each class
  • Returns prediction label, confidence score, all three class probabilities in one clean JSON
  • Best for high-throughput pipelines where one best-in-class model is sufficient
InferenceRoBERTaJSON
Try it live
POST
/compare
All 4 models — parallel inference & consensus
  • Runs RoBERTa, DistilRoBERTa, BERT, and ALBERT on the same text independently
  • Each model's raw labels normalised to the same three-class schema before comparison
  • Results sorted by confidence — highest-confidence model ranked first in the response array
  • Use when you need cross-model validation or the most reliable possible prediction
InferenceMulti-ModelEnsemble
Try it live
GET
/health
Server status & ML engine diagnostics
  • Returns server liveness, API version, and active model list in a single JSON response
  • Lists all four active model names and training dataset for at-a-glance verification
  • Ideal for CI/CD pipelines, uptime monitors, and pre-flight checks before batch inference jobs
  • Returns UTC timestamp to verify server clock is correctly synchronised
SystemMonitoringDevOps
View live
GET
/docs
Interactive Swagger UI — test every endpoint live
  • Full OpenAPI 3.1 schema auto-generated from Pydantic models — every field typed and described
  • "Try it out" lets you fire real requests to /predict and /compare without writing any code
  • Every request and response schema documented inline with constraints and live example values
  • Dark-themed Swagger with Monokai syntax highlighting and request duration display
DocsOpenAPISwagger
Open Docs

Transformer Models

Four Engines.
One Verdict.

Each model brings a unique specialisation — together they form a robust, cross-validated ensemble.

🧠
RoBERTa
cardiffnlp/twitter-roberta-base-sentiment-latest
Default model. Trained on 124M tweets. Best general-purpose accuracy across review types and domains.
⭐ DEFAULT
DistilRoBERTa
mrm8488/distilroberta-finetuned-financial-news-sentiment-analysis
40% faster than RoBERTa. Fine-tuned on financial news and consumer reviews. Low-latency inference.
FAST
🌍
BERT
nlptown/bert-base-multilingual-uncased-sentiment
Multilingual BERT fine-tuned in 6 languages. 5-star scale mapped to Positive / Neutral / Negative.
MULTILINGUAL
🎯
ALBERT
textattack/albert-base-v2-yelp-polarity
Parameter-efficient architecture fine-tuned on Yelp reviews. Excellent on short, punchy restaurant feedback.
EFFICIENT
"""


# ══════════════════════════════════════════════════════════════════════════
if __name__ == "__main__":
    # Direct-run entry point (the docstring's uvicorn CLI is the usual path).
    # PORT defaults to 7860 — the port Hugging Face Spaces expects.
    import os
    import uvicorn
    port = int(os.environ.get("PORT", 7860))
    # Bind on all interfaces so the server is reachable inside a container.
    uvicorn.run("app:app", host="0.0.0.0", port=port)