|
|
from fastapi import FastAPI, HTTPException |
|
|
from pydantic import BaseModel |
|
|
import torch |
|
|
from transformers import AutoTokenizer, AutoModelForSequenceClassification |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Hugging Face model ID: DistilRoBERTa fine-tuned for English emotion
# classification (7 labels per the model card).
MODEL_NAME = "j-hartmann/emotion-english-distilroberta-base"

# Run on GPU when one is visible to torch, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Load tokenizer and weights once at import time so request handlers only
# pay for inference.
# NOTE(review): from_pretrained downloads weights on first run — assumes
# network access (or a warm cache) at startup.
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)

model = AutoModelForSequenceClassification.from_pretrained(MODEL_NAME)

# Place the model on the chosen device and switch to evaluation mode
# (disables dropout; gradient tracking is disabled separately per request).
model.to(device)

model.eval()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# ASGI application instance; the title appears in the auto-generated OpenAPI docs.
app = FastAPI(title="Emotion Detection API")
|
|
|
|
|
|
|
|
@app.get("/")
def health():
    """Liveness probe.

    Hugging Face Spaces pings this route to wake a sleeping Space;
    it always answers with a fixed OK payload.
    """
    return dict(status="ok")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class EmotionRequest(BaseModel):
    """Request body for POST /emotion."""

    # Raw input text to classify; pydantic validates it is a string.
    text: str
|
|
|
|
|
|
|
|
|
|
|
|
|
|
@app.post("/emotion")
def classify_emotion(payload: EmotionRequest):
    """Classify the dominant emotion in a piece of English text.

    Returns the model's top label together with its softmax probability
    rounded to four decimal places. Raises a 400 error when the input
    text is empty after stripping whitespace.
    """
    cleaned = payload.text.strip()
    if not cleaned:
        raise HTTPException(status_code=400, detail="Text cannot be empty")

    # Tokenize (bounded at 128 tokens) and move every tensor to the
    # same device the model lives on.
    encoded = tokenizer(
        cleaned,
        return_tensors="pt",
        truncation=True,
        padding=True,
        max_length=128,
    )
    encoded = {name: tensor.to(device) for name, tensor in encoded.items()}

    # Forward pass without gradient tracking.
    with torch.no_grad():
        logits = model(**encoded).logits

    scores = torch.softmax(logits, dim=-1)
    best = torch.argmax(scores, dim=-1).item()

    return {
        "emotion": model.config.id2label[best],
        "confidence": round(scores[0][best].item(), 4),
    }
|
|
|