"""Gradio app: classify the emotion of English text with a DistilRoBERTa model."""

import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

# Model name (you can swap this for another emotion model if you like),
# e.g. "minoosh/finetuned_bert-base-on-IEMOCAP_1"
model_name = "j-hartmann/emotion-english-distilroberta-base"

# Run on GPU when available, otherwise CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Load tokenizer and model once at startup; eval() disables dropout etc.
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name).to(device)
model.eval()


def predict_emotion(text: str) -> dict[str, float]:
    """Return an emotion-label -> probability mapping for *text*.

    The result is sorted by descending probability so the Gradio Label
    component shows the most likely emotion first.
    """
    # Handle empty input. NOTE: gr.Label casts dict values to float, so
    # the original `{"Error": "<message>"}` (a string value) would crash
    # at render time; return a {label: float} dict instead.
    if not text or not text.strip():
        return {"Please enter some text.": 0.0}

    # Tokenize and move tensors to the model's device.
    inputs = tokenizer(
        text,
        return_tensors="pt",
        truncation=True,
        padding=True,
        max_length=256,  # you can adjust this if needed
    ).to(device)

    with torch.no_grad():
        outputs = model(**inputs)
        probs = outputs.logits.softmax(dim=-1)[0]

    # Map class id -> human-readable label using the model config.
    id2label = model.config.id2label
    scores = {id2label[i]: float(probs[i]) for i in range(len(probs))}

    # Sort by highest probability first (optional but nice in the UI).
    return dict(sorted(scores.items(), key=lambda kv: kv[1], reverse=True))


# Gradio interface
demo = gr.Interface(
    fn=predict_emotion,
    inputs=gr.Textbox(lines=4, label="Enter text"),
    outputs=gr.Label(label="Emotion Probabilities"),
    title="Emotion Classifier",
    description="Enter a sentence and see the predicted emotion distribution.",
    flagging_mode="never",
)

if __name__ == "__main__":
    demo.launch()