# Emotion Chatbot — Gradio Space (page-status residue "Spaces: Sleeping" removed)
import gradio as gr
import torch
import torch.nn as nn
import pandas as pd
import numpy as np
import re
from sklearn.preprocessing import LabelEncoder
from collections import Counter
from textblob import TextBlob
import random
# 1. Sample Data
# Five tiny (text, label) training examples, one per emotion class.
_SAMPLES = [
    ("I'm so happy today!", "joy"),
    ("This makes me really angry", "anger"),
    ("Feeling the love", "love"),
    ("I'm so sad right now", "sadness"),
    ("Just a normal day", "neutral"),
]
df = pd.DataFrame(_SAMPLES, columns=["text", "label"])
# 2. Simple Model
class EmotionModel(nn.Module):
    """Bag-of-embeddings classifier: embed token ids, mean-pool, project to logits."""

    def __init__(self, vocab_size=100, embed_dim=32, num_classes=5):
        super().__init__()
        self.embedding = nn.Embedding(vocab_size, embed_dim)
        self.fc = nn.Linear(embed_dim, num_classes)

    def forward(self, x):
        # Average the token embeddings over the sequence axis, then map the
        # pooled vector to one logit per emotion class.
        pooled = self.embedding(x).mean(dim=1)
        return self.fc(pooled)
# 3. Initialize Components
# Compiled once at module load; matches every character that is not a
# lowercase letter or whitespace.
_NON_LETTER = re.compile(r'[^a-z\s]')

def clean_text(text):
    """Lowercase *text* and strip every character except a-z and whitespace."""
    return _NON_LETTER.sub('', str(text).lower())
# Build a word -> index vocabulary from the training texts.
# Indices 0 and 1 are reserved for the padding and unknown-word tokens.
_words = set()
for _text in df['text']:
    _words.update(clean_text(_text).split())
vocab = {word: idx + 2 for idx, word in enumerate(_words)}
vocab['<PAD>'] = 0
vocab['<UNK>'] = 1

# Fit the label encoder on the five emotion names so class indices map
# back to string labels at prediction time.
le = LabelEncoder()
le.fit(df['label'])

# NOTE(review): the model is randomly initialised and never trained here,
# so predictions are arbitrary — presumably a demo placeholder.
model = EmotionModel(len(vocab), num_classes=len(le.classes_))
model.eval()
# 4. Prediction
def predict(text):
    """Return the predicted emotion label (string) for *text*.

    The text is cleaned, truncated to 32 tokens, right-padded with the
    <PAD> id, and run through the model with gradients disabled.
    """
    unk = vocab['<UNK>']
    ids = [vocab.get(word, unk) for word in clean_text(text).split()[:32]]
    ids += [0] * (32 - len(ids))  # pad to the fixed sequence length
    batch = torch.tensor([ids], dtype=torch.long)
    with torch.no_grad():
        logits = model(batch)
    return le.classes_[logits.argmax().item()]
# 5. Chat Interface
# Static reply options per emotion; hoisted out of chat() so the table is
# built once rather than on every message.
RESPONSES = {
    "joy": ["Great to hear you're happy!", "😊"],
    "anger": ["I hear your frustration", "Would you like to talk about it?"],
    "love": ["Love is wonderful!", "❤️"],
    "sadness": ["I'm here for you", "It's okay to feel sad"],
    "neutral": ["I see", "Tell me more"]
}

def chat(message, history, waiting):
    """Handle one chat turn.

    Args:
        message: raw user input from the textbox.
        history: list of (user, bot) message pairs shown in the Chatbot.
        waiting: True while the bot is paused awaiting the word 'continue'.

    Returns:
        ("", new_history, new_waiting) — the empty string clears the textbox.
    """
    text = (message or "").strip()
    if not text:
        # Fix: a blank submission previously fell through to predict("")
        # and produced a reply to an empty message; ignore it instead.
        return "", history, waiting
    cmd = text.lower()
    if isinstance(waiting, bool) and waiting:
        # Paused: only the word 'continue' resumes the conversation.
        if cmd == "continue":
            return "", history + [(message, "Ready to continue!")], False
        return "", history + [(message, "Still waiting... say 'continue'")], True
    if cmd in ("wait", "pause"):
        # Fix: match on stripped/lowercased text so "wait " is recognised.
        return "", history + [(message, "I'll wait. Say 'continue' when ready")], True
    emotion = predict(text)
    reply = random.choice(RESPONSES.get(emotion, ["I understand"]))
    return "", history + [(message, reply)], False
# 6. Gradio App
with gr.Blocks() as app:
    gr.Markdown("# Emotion Chatbot")
    chatbot = gr.Chatbot()
    message = gr.Textbox(label="Your Message")
    state = gr.State(value=False)  # tracks whether the bot is in "waiting" mode
    # Both the textbox submit and the Send button trigger the same handler
    # with identical input/output wiring.
    wiring = dict(
        fn=chat,
        inputs=[message, chatbot, state],
        outputs=[message, chatbot, state],
    )
    message.submit(**wiring)
    gr.Button("Send").click(**wiring)
app.launch()