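"""Gradio demo that runs three Hugging Face sentiment models side by side and
combines the FABSA and Twitter predictions into a simple rule-based ensemble."""
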
import gradio as gr
from transformers import AutoTokenizer, AutoModelForSequenceClassification, TextClassificationPipeline
import torch

# Device setup
# Use GPU if available, otherwise CPU (-1)
DEVICE = 0 if torch.cuda.is_available() else -1

# Models configuration
MODELS = {
    "FABSA": "Anudeep-Narala/fabsa-roberta-sentiment",
    "MoodMeter": "Priyanshuchaudhary2425/MoodMeter-sentimental-analysis",
    "Twitter": "cardiffnlp/twitter-roberta-base-sentiment-latest",
}
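
# All three checkpoints are expected to emit negative/neutral/positive (or
# LABEL_0/1/2) labels; normalize() below maps either scheme onto a common one.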

# Load models and pipelines once when the application starts
pipes = {}
for name, mid in MODELS.items():
    try:
        tok = AutoTokenizer.from_pretrained(mid, use_fast=False)
        mdl = AutoModelForSequenceClassification.from_pretrained(mid)
        pipes[name] = TextClassificationPipeline(model=mdl, tokenizer=tok, device=DEVICE, top_k=None)
    except Exception as e:
        print(f"Error loading model {name}: {e}")
        pipes[name] = None # Indicate failure to load

def normalize(scores):
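    """Collapse a pipeline's raw [{label, score}, ...] output into fixed
    negative/neutral/positive buckets and return (top_label, score_dict)."""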
    out = {"negative": 0.0, "neutral": 0.0, "positive": 0.0}
    for e in scores:
        lbl = e["label"].lower(); s = float(e["score"])
        if "neg" in lbl or lbl == "label_0": out["negative"] = s
        elif "neu" in lbl or lbl == "label_1": out["neutral"] = s
        elif "pos" in lbl or lbl == "label_2": out["positive"] = s
    pred = max(out, key=out.get)
    return pred, out
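
# Example: normalize([{"label": "LABEL_0", "score": 0.1},
#                     {"label": "LABEL_1", "score": 0.2},
#                     {"label": "LABEL_2", "score": 0.7}])
# -> ("positive", {"negative": 0.1, "neutral": 0.2, "positive": 0.7})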

def run_models(text: str):
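    """Run every loaded pipeline on `text` and attach a rule-based ensemble label."""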
    text = (text or "").strip()
    if not text:  # Handle empty text input
        return {
            "FABSA": {"label": "N/A", "scores": {}},
            "MoodMeter": {"label": "N/A", "scores": {}},
            "Twitter": {"label": "N/A", "scores": {}},
            "Ensemble": {"label": "N/A"},
            "text": "",
        }

    res = {}
    for name, pipe in pipes.items():
        if pipe is None:  # Skip models that failed to load
            res[name] = {"label": "Error: Model failed to load", "scores": {}}
            continue
        try:
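            # top_k=None was set at construction, so the pipeline returns every
            # label's score; [0] picks the score list for this single input.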
            raw = pipe(text)[0]
            pred, probs = normalize(raw)
            res[name] = {"label": pred, "scores": probs}
        except Exception as e:
            # Handle potential errors during inference
            res[name] = {"label": f"Error during inference: {e}", "scores": {}}


    # Ensure Ensemble key exists even if other models had errors
    fabsa_label = res.get("FABSA", {}).get("label", "N/A")
    twitter_label = res.get("Twitter", {}).get("label", "N/A")

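    # Simple rule: a negative call from FABSA wins; otherwise defer to the Twitter model.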
    if (fabsa_label != "N/A" and twitter_label != "N/A"
            and "Error" not in fabsa_label and "Error" not in twitter_label):
        ensemble = "negative" if fabsa_label == "negative" else twitter_label
    else:
        ensemble = "N/A"

    res["Ensemble"] = {"label": ensemble}
    res["text"] = text
    return res

def ui_fn(text: str):
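    """Adapt run_models() output to the four gr.JSON components, with safe defaults."""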
    r = run_models(text)
    # Ensure all expected keys are present in the returned dictionary with default structures
    fabsa_output = r.get("FABSA", {"label": "N/A", "scores": {}})
    moodmeter_output = r.get("MoodMeter", {"label": "N/A", "scores": {}})
    twitter_output = r.get("Twitter", {"label": "N/A", "scores": {}})
    ensemble_output = r.get("Ensemble", {"label": "N/A"})

    # Ensure outputs are dictionaries for gr.JSON
    if not isinstance(fabsa_output, dict):
        fabsa_output = {"label": str(fabsa_output), "scores": {}}
    if not isinstance(moodmeter_output, dict):
        moodmeter_output = {"label": str(moodmeter_output), "scores": {}}
    if not isinstance(twitter_output, dict):
        twitter_output = {"label": str(twitter_output), "scores": {}}
    if not isinstance(ensemble_output, dict):
        ensemble_output = {"label": str(ensemble_output)}

    return fabsa_output, moodmeter_output, twitter_output, ensemble_output

demo = gr.Interface(
    fn=ui_fn,
    inputs=gr.Textbox(label="Enter text", placeholder="Type something emotional…"),
    outputs=[
        gr.JSON(label="FABSA"),
        gr.JSON(label="MoodMeter"),
        gr.JSON(label="Twitter"),
        gr.JSON(label="Ensemble"),
    ],
    title="Mental Health Sentiment Analyzer (UI Only)",
    description="Gradio UI for Mental Health Sentiment Analysis.",
)

if __name__ == "__main__":
    # For local runs; on Hugging Face Spaces (sdk: gradio) this block can be ignored
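    # Gradio serves on http://127.0.0.1:7860 by default.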
    demo.launch()