import gradio as gr
from transformers import AutoTokenizer, AutoModelForSequenceClassification
from transformers import pipeline
import torch


# NOTE(review): `pipe` is never used anywhere below — the same checkpoint is
# loaded a second time via AutoTokenizer/AutoModelForSequenceClassification,
# so this line doubles startup time and memory. Consider deleting it (or
# using the pipeline instead of the manual forward pass in predict_semantics).
pipe = pipeline("text-classification", model="s1143700/Internship")

# Tokenizer and classification model used by preprocess_text() and
# predict_semantics() below.
tokenizer = AutoTokenizer.from_pretrained("s1143700/Internship")
model = AutoModelForSequenceClassification.from_pretrained("s1143700/Internship")


def preprocess_text(text):
    """Tokenize *text* into model-ready PyTorch tensors.

    The sequence is truncated or padded to a fixed length of 100 tokens
    so every input has the same shape for the forward pass.
    """
    encoding = tokenizer(
        text,
        max_length=100,
        padding="max_length",
        truncation=True,
        return_tensors="pt",
    )
    return encoding

def predict_semantics(text):
    """Classify *text* and return a label→probability dict for gr.Label.

    Runs a single forward pass of the sequence-classification model and
    softmaxes the logits into a probability distribution over six emotions.
    """
    # Turn the raw string into padded/truncated tensors.
    encoded = preprocess_text(text)

    # Inference only — disable gradient tracking.
    with torch.no_grad():
        logits = model(**encoded).logits

    # Probability distribution over the classes for the single input.
    probs = torch.softmax(logits, dim=1)[0]

    # Map each class index to a human-readable emotion name.
    # NOTE(review): this ordering is assumed to match the fine-tuned model's
    # class indices — verify against model.config.id2label.
    emotion_labels = ["sadness", "joy", "love", "anger", "fear", "surprise"]
    return {label: float(probs[idx]) for idx, label in enumerate(emotion_labels)}

# Build the Gradio UI around the prediction function.
demo_examples = [
    ["I'm so happy today!"],
    ["This situation makes me anxious"],
    ["I feel loved by my family"],
    ["That movie scared me"],
]

iface = gr.Interface(
    fn=predict_semantics,
    inputs=gr.Textbox(label="Input Text", placeholder="Enter your text here..."),
    outputs=gr.Label(label="Emotion Probabilities"),
    title="Emotion Analysis",
    description="Enter text to analyze its emotional content (sadness, joy, love, anger, fear, surprise).",
    examples=demo_examples,
)

# Start the local web server for the demo.
iface.launch()