# Emotion Detector API — FastAPI service wrapping English, Sinhala and Tamil emotion models.
from fastapi import FastAPI
from pydantic import BaseModel
import joblib
import logging
import json
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import pipeline
# Application-wide logging: INFO level with a timestamped format so the
# model-download progress below is visible in the Space logs.
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s - %(levelname)s - %(message)s",
)

app = FastAPI()

# Lazily-initialized globals, populated exactly once by load_models():
models = None          # tuple of English/Sinhala predictors + Tamil pipeline
en_emotion_map = None  # English id -> emotion-name mapping from emotion_map.json
def load_models():
    """Download and initialize all three language models (idempotent).

    Populates the module-level ``models`` tuple and ``en_emotion_map``.
    Calling it again after a successful load is a no-op.
    """
    global models, en_emotion_map

    # Already loaded — nothing to do.
    if models is not None:
        return

    logging.info("Loading models...")

    # --- English: TF-IDF vectorizer + logistic-regression classifier ---
    en_repo = "E-motionAssistant/English_LR_Model_New"
    en_vectorizer = joblib.load(hf_hub_download(en_repo, "tfidf_vectorizer.joblib"))
    en_classifier = joblib.load(hf_hub_download(en_repo, "logreg_model.joblib"))
    en_label_encoder = joblib.load(hf_hub_download(en_repo, "label_encoder.joblib"))

    # Optional id -> label mapping shipped alongside the English model.
    # A failure here is non-fatal: predict() falls back to the label encoder.
    try:
        map_path = hf_hub_download(en_repo, "emotion_map.json")
        with open(map_path, "r", encoding="utf-8") as f:
            en_emotion_map = json.load(f)
        logging.info("✅ emotion_map.json loaded successfully for English")
    except Exception as e:
        logging.warning(f"Failed to load emotion_map.json: {e}")
        en_emotion_map = None

    # --- Sinhala: TF-IDF vectorizer + logistic-regression classifier ---
    si_repo = "E-motionAssistant/Sinhala_Text_Emotion_Model_LR"
    si_vectorizer = joblib.load(hf_hub_download(si_repo, "tfidf_vectorizer.joblib"))
    si_classifier = joblib.load(hf_hub_download(si_repo, "logreg_model.joblib"))
    si_label_encoder = joblib.load(hf_hub_download(si_repo, "label_encoder.joblib"))

    # --- Tamil: transformers text-classification pipeline, CPU only ---
    tamil_pipe = pipeline(
        "text-classification",
        model="E-motionAssistant/Tamil_Emotion_Recognition_Model",
        device=-1,
    )

    models = (
        en_vectorizer, en_classifier, en_label_encoder,
        si_vectorizer, si_classifier, si_label_encoder,
        tamil_pipe,
    )
    logging.info("✅ All models loaded successfully.")
@app.on_event("startup")
def startup_event():
    """Eagerly download and initialize all models when the server boots."""
    load_models()
class PredictRequest(BaseModel):
    """Request body for POST /predict."""

    # The text whose emotion should be classified.
    text: str
    # Language selector; predict() matches it case-insensitively against
    # "english", "sinhala" and "tamil".
    language: str
@app.get("/")
def root():
    """Health-check endpoint confirming the API is running."""
    return {"status": "ok", "message": "Emotion Detector API is running"}
@app.post("/predict")
def predict(req: PredictRequest):
    """Classify the emotion of ``req.text`` for the requested language.

    Returns a dict with the predicted ``emotion`` and ``language`` (plus a
    ``confidence`` score for Tamil), or an ``{"error": ...}`` dict for empty
    text, an unsupported language, or an internal failure.
    """
    if not req.text.strip():
        return {"error": "Text cannot be empty"}

    # BUGFIX: the original unpacked `models` unconditionally; if a request
    # arrived before the startup hook finished (or in a test context), the
    # unpack of None raised an unhandled TypeError. Load lazily instead.
    if models is None:
        load_models()

    en_vec, en_clf, en_le, si_vec, si_clf, si_le, tamil_pipe = models
    try:
        # Normalize the selector; also tolerate stray surrounding whitespace.
        lang = req.language.strip().lower()
        if lang == "english":
            X = en_vec.transform([req.text])
            pred = int(en_clf.predict(X)[0])  # numpy int -> plain int for JSON
            # Prefer the shipped emotion_map.json; fall back to the encoder.
            if en_emotion_map is not None:
                emotion = en_emotion_map.get(str(pred), f"unknown_{pred}")
            else:
                emotion = en_le.inverse_transform([pred])[0]
            return {"emotion": emotion, "language": "English"}
        elif lang == "sinhala":
            X = si_vec.transform([req.text])
            pred = si_clf.predict(X)[0]
            emotion = si_le.inverse_transform([pred])[0]
            return {"emotion": str(emotion), "language": "Sinhala"}
        elif lang == "tamil":
            # pipeline() returns a list of {"label", "score"} dicts; take top-1.
            res = tamil_pipe(req.text)[0]
            return {
                "emotion": res["label"],
                "confidence": round(float(res["score"]), 3),
                "language": "Tamil",
            }
        else:
            return {"error": f"Unsupported language: {req.language}"}
    except Exception as e:
        # Boundary handler: log the details, return a generic client message.
        logging.error(f"Prediction error: {type(e).__name__} - {e}")
        return {"error": "Prediction failed. Please try again."}