# (Hugging Face Spaces page residue — "Spaces: Running" status header;
#  not part of the application code.)
import logging

import joblib
from fastapi import FastAPI
from huggingface_hub import hf_hub_download
from pydantic import BaseModel
from transformers import pipeline
# Configure root-logger output once at import time so every logging call in
# this module shares the same timestamped format.
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s - %(levelname)s - %(message)s",
)

app = FastAPI()

# Lazily populated tuple of every loaded model artifact; stays None until
# load_models() has run.
models = None
def _load_sklearn_bundle(repo_id):
    """Download and deserialize one language's sklearn artifacts from the Hub.

    Each LR model repo publishes the same three joblib files; fetching them
    here removes the three-fold copy/paste the original had per language.

    Returns:
        tuple: (tfidf_vectorizer, logreg_classifier, label_encoder)
    """
    return tuple(
        joblib.load(hf_hub_download(repo_id, filename))
        for filename in (
            "tfidf_vectorizer.joblib",
            "logreg_model.joblib",
            "label_encoder.joblib",
        )
    )


def load_models():
    """Populate the module-level ``models`` tuple exactly once.

    Idempotent: calls after the first are no-ops, so it is safe to invoke
    both from the startup hook and defensively per request.
    """
    global models
    if models is not None:
        return  # already loaded — avoid re-downloading on every call

    logging.info("Loading models...")
    # NOTE(review): "Englsih" typo is part of the actual Hub repo id — do not "fix".
    en_vectorizer, en_classifier, en_label_encoder = _load_sklearn_bundle(
        "E-motionAssistant/Englsih_Trained_Model_LR"
    )
    si_vectorizer, si_classifier, si_label_encoder = _load_sklearn_bundle(
        "E-motionAssistant/Sinhala_Text_Emotion_Model_LR"
    )
    # Tamil is a transformer fine-tune served via the HF pipeline API;
    # device=-1 forces CPU inference.
    tamil_pipe = pipeline(
        "text-classification",
        model="E-motionAssistant/Tamil_Emotion_Recognition_Model",
        device=-1,
    )
    models = (en_vectorizer, en_classifier, en_label_encoder,
              si_vectorizer, si_classifier, si_label_encoder, tamil_pipe)
    logging.info("✅ All models loaded.")
@app.on_event("startup")
def startup_event():
    """FastAPI startup hook: eagerly load all models before serving traffic.

    NOTE(review): the decorator was missing in the extracted source; without
    it the hook is never registered and the models are never loaded.
    """
    load_models()
class PredictRequest(BaseModel):
    """Request payload for the prediction endpoint."""

    text: str      # raw input text to classify
    language: str  # one of "English", "Sinhala" or "Tamil"
@app.get("/")
def root():
    """Health-check endpoint confirming the API process is up.

    NOTE(review): the route decorator was absent in the extracted source;
    restored so the endpoint is actually registered.
    """
    return {"status": "ok", "message": "Emotion Detector API is running"}
@app.post("/predict")
def predict(req: PredictRequest):
    """Classify the emotion of ``req.text`` in the requested language.

    Returns a dict with the predicted ``emotion`` (plus ``confidence`` for
    Tamil, whose transformer pipeline reports a score) or an ``error`` key.
    Errors are reported in the response body rather than via HTTP status
    codes to stay compatible with existing clients.

    NOTE(review): the route decorator was absent in the extracted source;
    restored so the endpoint is actually registered.
    """
    if not req.text.strip():
        return {"error": "Text cannot be empty"}
    # Defensive lazy load: the original unpacked ``models`` unconditionally,
    # so a request arriving before the startup hook ran raised a TypeError
    # on ``None``. load_models() is idempotent, so this is a cheap no-op
    # once the models are in place.
    load_models()
    en_vec, en_clf, en_le, si_vec, si_clf, si_le, tamil_pipe = models
    try:
        if req.language == "English":
            X = en_vec.transform([req.text])
            pred = en_clf.predict(X)[0]
            emotion = en_le.inverse_transform([pred])[0]
            return {"emotion": emotion, "language": "English"}
        elif req.language == "Sinhala":
            X = si_vec.transform([req.text])
            pred = si_clf.predict(X)[0]
            emotion = si_le.inverse_transform([pred])[0]
            return {"emotion": emotion, "language": "Sinhala"}
        elif req.language == "Tamil":
            # Pipeline returns a list of {"label", "score"} dicts; take top-1.
            res = tamil_pipe(req.text)[0]
            return {"emotion": res["label"], "confidence": round(res["score"], 3), "language": "Tamil"}
        else:
            return {"error": f"Unsupported language: {req.language}"}
    except Exception as e:
        # Broad catch is acceptable at the API boundary; logging.exception
        # records the full traceback (logging.error with an f-string did not).
        logging.exception("Prediction error")
        return {"error": str(e)}