Spaces:
Sleeping
Sleeping
| import gradio as gr | |
| from transformers import AutoTokenizer, AutoModelForSequenceClassification, TextClassificationPipeline | |
| import torch | |
# Device setup
# transformers pipelines take a device index: 0 = first CUDA GPU, -1 = CPU.
DEVICE = 0 if torch.cuda.is_available() else -1

# Models configuration
# Hugging Face Hub model ids, keyed by the short display name used in the UI
# and in the result dicts returned by run_models().
MODELS = {
    "FABSA": "Anudeep-Narala/fabsa-roberta-sentiment",
    "MoodMeter": "Priyanshuchaudhary2425/MoodMeter-sentimental-analysis",
    "Twitter": "cardiffnlp/twitter-roberta-base-sentiment-latest",
}
# Build one text-classification pipeline per configured model, once at startup.
# Loading is best-effort: a model that fails to download or initialize is
# recorded as None so the rest of the app can report the failure gracefully
# instead of crashing on import.
pipes = {}
for model_name, model_id in MODELS.items():
    try:
        slow_tokenizer = AutoTokenizer.from_pretrained(model_id, use_fast=False)
        classifier = AutoModelForSequenceClassification.from_pretrained(model_id)
        # top_k=None returns scores for every class, which normalize() expects.
        pipes[model_name] = TextClassificationPipeline(
            model=classifier,
            tokenizer=slow_tokenizer,
            device=DEVICE,
            top_k=None,
        )
    except Exception as load_err:
        print(f"Error loading model {model_name}: {load_err}")
        pipes[model_name] = None  # Indicate failure to load
def normalize(scores):
    """Collapse raw pipeline label/score entries into a fixed 3-class dict.

    Accepts labels that are either semantic ("NEGATIVE", "neutral",
    "Positive", any case) or generic ("LABEL_0"/"LABEL_1"/"LABEL_2",
    mapped to negative/neutral/positive respectively).

    Returns a tuple ``(predicted_label, probs)`` where ``probs`` always has
    the keys "negative", "neutral" and "positive"; classes the model did not
    report stay at 0.0, and unrecognized labels are silently ignored.
    """
    probs = {"negative": 0.0, "neutral": 0.0, "positive": 0.0}
    for entry in scores:
        label = entry["label"].lower()
        value = float(entry["score"])
        if "neg" in label or label == "label_0":
            probs["negative"] = value
        elif "neu" in label or label == "label_1":
            probs["neutral"] = value
        elif "pos" in label or label == "label_2":
            probs["positive"] = value
    winner = max(probs, key=probs.get)
    return winner, probs
def run_models(text: str):
    """Run every loaded sentiment pipeline on *text* and add an ensemble verdict.

    Returns a dict with one entry per model name in ``pipes`` mapping to
    ``{"label": ..., "scores": {...}}``, plus ``"Ensemble"`` (label only) and
    the echoed ``"text"``. Blank or whitespace-only input yields "N/A" labels
    everywhere. Load failures and per-model inference errors are reported in
    the corresponding model's "label" field rather than raised.
    """
    text = (text or "").strip()
    if not text:
        # Empty input: placeholder entries are derived from `pipes` instead of
        # hardcoding model names, so adding a model to MODELS keeps this
        # branch consistent automatically.
        res = {name: {"label": "N/A", "scores": {}} for name in pipes}
        res["Ensemble"] = {"label": "N/A"}
        res["text"] = ""
        return res

    res = {}
    for name, pipe in pipes.items():
        if pipe is None:
            # Model failed to load at startup; surface that in the result.
            res[name] = {"label": "Error: Model failed to load", "scores": {}}
            continue
        try:
            raw = pipe(text)[0]  # top_k=None -> list of {label, score} dicts
            pred, probs = normalize(raw)
            res[name] = {"label": pred, "scores": probs}
        except Exception as e:
            # Keep serving the other models even if one inference fails.
            res[name] = {"label": f"Error during inference: {e}", "scores": {}}

    # Ensemble rule: trust FABSA whenever it says "negative", otherwise defer
    # to the Twitter model's prediction. Only computed when both models
    # produced a real (non-error, non-N/A) label.
    fabsa_label = res.get("FABSA", {}).get("label", "N/A")
    twitter_label = res.get("Twitter", {}).get("label", "N/A")
    both_usable = (
        fabsa_label != "N/A"
        and twitter_label != "N/A"
        and "Error" not in fabsa_label
        and "Error" not in twitter_label
    )
    if both_usable:
        # NOTE: the original `"neutral" if twitter == "neutral" else twitter`
        # was a redundant conditional — both branches yield twitter_label.
        ensemble = "negative" if fabsa_label == "negative" else twitter_label
    else:
        ensemble = "N/A"

    res["Ensemble"] = {"label": ensemble}
    res["text"] = text
    return res
def ui_fn(text: str):
    """Adapter between the Gradio textbox input and run_models().

    Always returns exactly four dicts — FABSA, MoodMeter, Twitter, Ensemble —
    so each gr.JSON output component receives a well-formed dictionary even
    when a model result is missing or has an unexpected type.
    """
    results = run_models(text)

    def _coerce(value, with_scores=True):
        # Force anything non-dict into the {"label": ..., "scores": {}} shape.
        if isinstance(value, dict):
            return value
        shaped = {"label": str(value)}
        if with_scores:
            shaped["scores"] = {}
        return shaped

    fabsa = _coerce(results.get("FABSA", {"label": "N/A", "scores": {}}))
    mood = _coerce(results.get("MoodMeter", {"label": "N/A", "scores": {}}))
    twitter = _coerce(results.get("Twitter", {"label": "N/A", "scores": {}}))
    ensemble = _coerce(results.get("Ensemble", {"label": "N/A"}), with_scores=False)
    return fabsa, mood, twitter, ensemble
# Assemble the Gradio app: one free-text input, four JSON result panels
# (one per model plus the ensemble verdict).
_json_panels = [
    gr.JSON(label="FABSA"),
    gr.JSON(label="MoodMeter"),
    gr.JSON(label="Twitter"),
    gr.JSON(label="Ensemble"),
]
demo = gr.Interface(
    fn=ui_fn,
    inputs=gr.Textbox(label="Enter text", placeholder="Type something emotional…"),
    outputs=_json_panels,
    title="Mental Health Sentiment Analyzer (UI Only)",
    description="Gradio UI for Mental Health Sentiment Analysis.",
)

if __name__ == "__main__":
    # Local entry point only; on Hugging Face Spaces (sdk: gradio) the
    # platform serves `demo` itself and this block is never reached.
    demo.launch()