opinder2906 commited on
Commit
9377b1e
·
verified ·
1 Parent(s): 76b1088

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +219 -0
app.py CHANGED
@@ -0,0 +1,219 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import torch.nn as nn
3
+ import torch.nn.functional as F
4
+ import random
5
+ from textblob import TextBlob
6
+ import pandas as pd
7
+ import requests
8
+ from io import StringIO
9
+ import gradio as gr
10
+ import speech_recognition as sr
11
+ import json
12
+ import re
13
+ from sklearn.preprocessing import LabelEncoder
14
+ from collections import Counter
15
+ import numpy as np
16
+
17
# ---------------------------
# Prepare dummy vocab, label encoder, and model (replace with real model if you want)
# Token-to-id lookup used by preprocess_input; <PAD>=0 and <UNK>=1 are reserved ids.
vocab = {'<PAD>': 0, '<UNK>': 1, 'i': 2, 'am': 3, 'feeling': 4, 'sad': 5, 'happy': 6, 'angry': 7, 'love': 8, 'stressed': 9, 'anxious': 10}
# Fixed sequence length: inputs are truncated/padded to exactly this many tokens.
MAX_LEN = 16
21
+
22
class DummyLabelEncoder:
    """Minimal stand-in for sklearn's LabelEncoder with a fixed emotion label set."""

    def __init__(self):
        # Order matters: model output indices map directly into this list.
        self.classes_ = ['sadness', 'anger', 'love', 'happiness', 'neutral']

    def transform(self, labels):
        """Map label strings to their integer indices."""
        return [self.classes_.index(label) for label in labels]

    def inverse_transform(self, indices):
        """Map integer indices back to label strings."""
        return [self.classes_[idx] for idx in indices]
27
+
28
# Shared label-encoder instance; len(le.classes_) also fixes the model's output size.
le = DummyLabelEncoder()
29
+
30
class DummyModel(nn.Module):
    """Tiny bag-of-embeddings classifier used as a placeholder for a real model."""

    def __init__(self):
        super().__init__()
        # 8-dim token embeddings, mean-pooled, then one linear layer over the labels.
        self.embedding = nn.Embedding(len(vocab), 8)
        self.fc = nn.Linear(8, len(le.classes_))

    def forward(self, x):
        # x: (batch, seq) token ids -> (batch, n_classes) logits via mean pooling.
        pooled = self.embedding(x).mean(dim=1)
        return self.fc(pooled)
39
+
40
# Module-level model instance used by preprocess_input (for device) and get_emotion.
model = DummyModel()
41
+
42
def preprocess_input(text):
    """Tokenize, index, and pad *text* into a (1, MAX_LEN) LongTensor on the model's device."""
    ids = [vocab.get(tok, vocab['<UNK>']) for tok in text.lower().split()]
    # Truncate, then right-pad with <PAD> up to the fixed length.
    ids = ids[:MAX_LEN]
    ids += [vocab['<PAD>']] * (MAX_LEN - len(ids))
    device = next(model.parameters()).device
    return torch.tensor([ids], dtype=torch.long).to(device)
47
+
48
# ---------------------------
# Load solutions CSV from URL (you can update link if you want)
file_id = "1yVJh_NVL4Y4YqEXGym47UCK5ZNZgVZYv"
url = f"https://drive.google.com/uc?export=download&id={file_id}"
# Fix: a timeout so a hung download can't block app start-up forever, and
# raise_for_status() so HTTP errors fail loudly instead of being parsed as CSV.
response = requests.get(url, timeout=30)
response.raise_for_status()
csv_text = response.text

# Google Drive returns an HTML error page (starting with '<') when the file
# is not shared publicly — fail fast with a clear message in that case.
if csv_text.strip().startswith('<'):
    raise Exception("ERROR: Google Drive link is not returning CSV! Check your sharing settings.")

solutions_df = pd.read_csv(StringIO(csv_text), header=0, on_bad_lines='skip')

# Tracks which suggestions have already been shown per emotion, so repeats are
# avoided until that emotion's pool is exhausted (see get_unique_solution).
used_solutions = {emotion: set() for emotion in solutions_df['emotion'].unique()}
61
+
62
# Keyword list consulted by is_negative_input; any hit short-circuits
# get_emotion straight to the "sadness" path.
negative_words = [
    "not", "bad", "sad", "anxious", "anxiety", "depressed", "upset", "shit", "stress",
    "worried", "unwell", "struggling", "low", "down", "terrible", "awful",
    "nervous", "panic", "afraid", "scared", "tense", "overwhelmed", "fear", "uneasy"
]
67
+
68
# Canned empathetic openers per emotion label; emoti_chat picks one at random
# and falls back to the "neutral" list for any unknown emotion.
responses = {
    "sadness": [
        "It’s okay to feel down sometimes. I’m here to support you.",
        "I'm really sorry you're going through this. Want to talk more about it?",
        "You're not alone — I’m here for you."
    ],
    "anger": [
        "That must have been frustrating. Want to vent about it?",
        "It's okay to feel this way. I'm listening.",
        "Would it help to talk through it?"
    ],
    "love": [
        "That’s beautiful to hear! What made you feel that way?",
        "It’s amazing to experience moments like that.",
        "Sounds like something truly meaningful."
    ],
    "happiness": [
        "That's awesome! What’s bringing you joy today?",
        "I love hearing good news. 😊",
        "Yay! Want to share more about it?"
    ],
    "neutral": [
        "Got it. I’m here if you want to dive deeper.",
        "Thanks for sharing that. Tell me more if you’d like.",
        "I’m listening. How else can I support you?"
    ]
}
95
+
96
def get_unique_solution(emotion):
    """Return a random, not-recently-used solution string for *emotion*.

    Cycles without repeats until every solution for the emotion has been
    shown, then resets and starts over. Mutates module-level used_solutions.

    Fix: uses setdefault so an emotion absent from the CSV (e.g. the
    "neutral" fallback in emoti_chat) no longer raises KeyError here.
    """
    seen = used_solutions.setdefault(emotion, set())
    available = solutions_df[solutions_df['emotion'] == emotion]
    unused = available[~available['solution'].isin(seen)]
    if unused.empty:
        # Pool exhausted (or emotion unknown): reset and allow repeats.
        seen.clear()
        unused = available
    solution_row = unused.sample(1).iloc[0]
    seen.add(solution_row['solution'])
    return solution_row['solution']
105
+
106
def correct_spelling(text):
    """Return *text* with TextBlob's best-guess spelling corrections applied."""
    corrected = TextBlob(text).correct()
    return str(corrected)
108
+
109
def get_sentiment(text):
    """Return TextBlob's polarity score for *text* (range -1.0 to 1.0)."""
    return TextBlob(text).sentiment.polarity
112
+
113
def is_negative_input(text):
    """Return True if *text* contains any whole word from negative_words.

    Fix: the original used substring matching (`word in text_lower`), so
    e.g. "low" matched "follow" and "down" matched "download". Tokenizing
    and comparing whole words avoids those false positives.
    """
    tokens = set(re.findall(r"[a-z']+", text.lower()))
    return any(word in tokens for word in negative_words)
116
+
117
def get_emotion(user_input):
    """Classify *user_input* into one of le.classes_, with heuristic overrides."""
    # Keyword check wins outright: any negative-vocabulary hit means "sadness".
    if is_negative_input(user_input):
        return "sadness"
    sentiment = get_sentiment(user_input)
    batch = preprocess_input(user_input)
    # train() keeps any dropout active so the 5 passes can differ
    # (MC-dropout-style averaging); no_grad() still disables autograd.
    model.train()
    with torch.no_grad():
        passes = [F.softmax(model(batch), dim=1) for _ in range(5)]
        avg_probs = torch.stack(passes).mean(dim=0)
        prob, idx = avg_probs.max(dim=1)
    pred_emotion = le.classes_[idx.item()]
    # Low confidence defaults to neutral; strong sentiment can flip the
    # happy/sad predictions when they disagree with TextBlob's polarity.
    if prob.item() < 0.6:
        return "neutral"
    if sentiment < -0.25 and pred_emotion == "happiness":
        return "sadness"
    if sentiment > 0.25 and pred_emotion == "sadness":
        return "happiness"
    return pred_emotion
135
+
136
def audio_to_text(audio_file):
    """Transcribe an audio file via Google's free recognizer; '' if absent or on failure."""
    if audio_file is None:
        return ""
    recognizer = sr.Recognizer()
    with sr.AudioFile(audio_file) as source:
        captured = recognizer.record(source)
    try:
        return recognizer.recognize_google(captured)
    except Exception:
        # Best-effort: unrecognized speech or API errors degrade to empty text,
        # which emoti_chat treats as "no input".
        return ""
147
+
148
# Chatbot conversation history and feedback state
# List of dicts with keys user_input/emotion/bot_support/bot_suggestion/feedback,
# appended to by emoti_chat; only the last 5 entries are surfaced to the UI.
GLOBAL_CONVO_HISTORY = []
# Per-user dict {"emotion": str|None, "pending": bool}; "pending" means the bot
# is waiting for a yes/no/skip answer to its last suggestion.
USER_FEEDBACK_STATE = {}
151
+
152
def emoti_chat(audio, text, history_json=""):
    """Main chat handler wired into the Gradio interface.

    Returns a 3-tuple: (bot reply, JSON dump of the last 5 history entries,
    value for the third output — "" normally, a gr.update on exit).
    Mutates module-level GLOBAL_CONVO_HISTORY and USER_FEEDBACK_STATE.
    history_json is the hidden state textbox's value and is currently unused.
    """
    # Typed text takes priority over the audio clip; otherwise transcribe it.
    if text and text.strip():
        user_input = text
    elif audio is not None:
        user_input = audio_to_text(audio)
    else:
        user_input = ""
    if not user_input.strip():
        return "Please say something or type your message.", json.dumps(GLOBAL_CONVO_HISTORY[-5:], indent=2), ""

    user_input = correct_spelling(user_input)

    # Exit phrases
    exit_phrases = ["exit", "quit", "goodbye", "bye", "close"]
    if user_input.lower().strip() in exit_phrases:
        return "Take care! I’m here whenever you want to talk. 👋", json.dumps(GLOBAL_CONVO_HISTORY[-5:], indent=2), gr.update(visible=False)

    # Feedback logic
    # Single-user app: a fixed id keys the shared feedback state.
    user_id = "default_user"
    state = USER_FEEDBACK_STATE.get(user_id, {"emotion": None, "pending": False})

    if state["pending"]:
        # We previously asked "Did this help?" — interpret this turn as the answer.
        feedback = user_input.lower().strip()
        GLOBAL_CONVO_HISTORY[-1]["feedback"] = feedback
        if feedback == "no":
            # Offer another suggestion for the same emotion and keep waiting.
            suggestion = get_unique_solution(state["emotion"])
            reply = f"Here's another suggestion for you: {suggestion}\nDid this help? (yes/no/skip)"
            USER_FEEDBACK_STATE[user_id]["pending"] = True
            return reply, json.dumps(GLOBAL_CONVO_HISTORY[-5:], indent=2), ""
        else:
            # "yes"/"skip"/anything else resolves the pending question.
            USER_FEEDBACK_STATE[user_id] = {"emotion": None, "pending": False}
            return "How can I help you further?", json.dumps(GLOBAL_CONVO_HISTORY[-5:], indent=2), ""

    # Normal turn: classify, pick an empathetic opener, and attach a suggestion.
    pred_emotion = get_emotion(user_input)
    support = random.choice(responses.get(pred_emotion, responses["neutral"]))
    try:
        suggestion = get_unique_solution(pred_emotion)
    except Exception:
        # No solutions for this emotion — fall back to the neutral pool.
        suggestion = get_unique_solution("neutral")

    reply = f"{support}\n\nHere's a suggestion for you: {suggestion}\nDid this help? (yes/no/skip)"
    GLOBAL_CONVO_HISTORY.append({
        "user_input": user_input,
        "emotion": pred_emotion,
        "bot_support": support,
        "bot_suggestion": suggestion,
        "feedback": ""
    })
    USER_FEEDBACK_STATE[user_id] = {"emotion": pred_emotion, "pending": True}
    return reply, json.dumps(GLOBAL_CONVO_HISTORY[-5:], indent=2), ""
202
+
203
# Gradio Interface
# Fix: emoti_chat returns THREE values on every code path, but only two output
# components were declared, so Gradio errors at call time. A third hidden
# Textbox output is added to match the function's return arity.
iface = gr.Interface(
    fn=emoti_chat,
    inputs=[
        gr.Audio(type="filepath", label="🎤 Speak your message"),
        gr.Textbox(lines=2, placeholder="Or type your message here...", label="💬 Type message"),
        gr.Textbox(lines=1, value="", visible=False)  # Hidden history state
    ],
    outputs=[
        gr.Textbox(label="EmotiBot Reply"),
        gr.Textbox(label="Hidden", visible=False),
        gr.Textbox(label="Hidden state", visible=False)  # receives emoti_chat's third return value
    ],
    title="EmotiBot Connect",
    description="Talk to EmotiBot using your voice or by typing. Detects your emotion and offers dynamic support."
)

iface.launch()