opinder2906 committed on
Commit
7d9841e
·
verified ·
1 Parent(s): fe0d65e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +25 -46
app.py CHANGED
@@ -10,7 +10,7 @@ import gradio as gr
10
  import speech_recognition as sr
11
  import json
12
 
13
- # --- Dummy vocab and label encoder ---
14
  vocab = {'<PAD>': 0, '<UNK>': 1, 'i': 2, 'am': 3, 'feeling': 4, 'sad': 5, 'happy': 6,
15
  'angry': 7, 'love': 8, 'stressed': 9, 'anxious': 10}
16
  MAX_LEN = 16
@@ -42,50 +42,36 @@ def preprocess_input(text):
42
  padded = encoded[:MAX_LEN] + [vocab['<PAD>']] * max(0, MAX_LEN - len(encoded))
43
  return torch.tensor([padded], dtype=torch.long)
44
 
45
- # --- Load solutions CSV from Google Drive ---
46
- file_id = "1yVJh_NVL4Y4YqEXGym47UCK5ZNZgVZYv" # Replace with your file ID
47
  url = f"https://drive.google.com/uc?export=download&id={file_id}"
48
  response = requests.get(url)
49
  csv_text = response.text
50
-
51
  if csv_text.strip().startswith('<'):
52
- raise Exception("ERROR: Google Drive link is not returning CSV! Check your sharing settings.")
53
-
54
  solutions_df = pd.read_csv(StringIO(csv_text), header=0, on_bad_lines='skip')
55
  used_solutions = {emotion: set() for emotion in solutions_df['emotion'].unique()}
56
 
57
- negative_words = [
58
- "not", "bad", "sad", "anxious", "anxiety", "depressed", "upset", "shit", "stress",
59
- "worried", "unwell", "struggling", "low", "down", "terrible", "awful",
60
- "nervous", "panic", "afraid", "scared", "tense", "overwhelmed", "fear", "uneasy"
61
- ]
62
 
63
  responses = {
64
- "sadness": [
65
- "It’s okay to feel down sometimes. I’m here to support you.",
66
- "I'm really sorry you're going through this. Want to talk more about it?",
67
- "You're not alone I’m here for you."
68
- ],
69
- "anger": [
70
- "That must have been frustrating. Want to vent about it?",
71
- "It's okay to feel this way. I'm listening.",
72
- "Would it help to talk through it?"
73
- ],
74
- "love": [
75
- "That’s beautiful to hear! What made you feel that way?",
76
- "Its amazing to experience moments like that.",
77
- "Sounds like something truly meaningful."
78
- ],
79
- "happiness": [
80
- "That's awesome! What’s bringing you joy today?",
81
- "I love hearing good news. 😊",
82
- "Yay! Want to share more about it?"
83
- ],
84
- "neutral": [
85
- "Got it. I’m here if you want to dive deeper.",
86
- "Thanks for sharing that. Tell me more if you’d like.",
87
- "I’m listening. How else can I support you?"
88
- ]
89
  }
90
 
91
  def get_unique_solution(emotion):
@@ -125,8 +111,7 @@ def audio_to_text(audio_file):
125
  with sr.AudioFile(audio_file) as source:
126
  audio = recog.record(source)
127
  try:
128
- text = recog.recognize_google(audio)
129
- return text
130
  except Exception:
131
  return ""
132
 
@@ -134,7 +119,6 @@ GLOBAL_CONVO_HISTORY = []
134
  USER_FEEDBACK_STATE = {}
135
 
136
  def emoti_chat(audio, text, history_json=""):
137
- # Get user input from voice or text
138
  if text and text.strip():
139
  user_input = text
140
  elif audio is not None:
@@ -147,11 +131,9 @@ def emoti_chat(audio, text, history_json=""):
147
 
148
  user_input = correct_spelling(user_input)
149
 
150
- # Exit phrases
151
  if user_input.lower().strip() in ["exit", "quit", "goodbye", "bye", "close"]:
152
  return "Take care! I’m here whenever you want to talk. 👋", json.dumps(GLOBAL_CONVO_HISTORY[-5:], indent=2), gr.update(visible=False)
153
 
154
- # Feedback handling
155
  user_id = "default_user"
156
  state = USER_FEEDBACK_STATE.get(user_id, {"emotion": None, "pending": False})
157
 
@@ -167,7 +149,6 @@ def emoti_chat(audio, text, history_json=""):
167
  USER_FEEDBACK_STATE[user_id] = {"emotion": None, "pending": False}
168
  return "How can I help you further?", json.dumps(GLOBAL_CONVO_HISTORY[-5:], indent=2), ""
169
 
170
- # Normal user message
171
  pred_emotion = get_emotion(user_input)
172
  support = random.choice(responses.get(pred_emotion, responses["neutral"]))
173
  try:
@@ -187,21 +168,19 @@ def emoti_chat(audio, text, history_json=""):
187
  USER_FEEDBACK_STATE[user_id] = {"emotion": pred_emotion, "pending": True}
188
  return reply, json.dumps(GLOBAL_CONVO_HISTORY[-5:], indent=2), ""
189
 
190
- import gradio as gr
191
-
192
  iface = gr.Interface(
193
  fn=emoti_chat,
194
  inputs=[
195
  gr.Audio(type="filepath", label="🎤 Speak your message"),
196
  gr.Textbox(lines=2, placeholder="Or type your message here...", label="💬 Type message"),
197
- gr.Textbox(lines=1, value="", visible=False) # Hidden, conversation history
198
  ],
199
  outputs=[
200
  gr.Textbox(label="EmotiBot Reply"),
201
  gr.Textbox(label="Conversation History (JSON)", visible=False)
202
  ],
203
  title="EmotiBot Connect",
204
- description="Talk to EmotiBot using your voice or by typing. Detects your emotion, gives dynamic suggestions, and keeps conversation history!"
205
  )
206
 
207
  iface.launch()
 
10
  import speech_recognition as sr
11
  import json
12
 
13
+ # Dummy vocab and label encoder
14
  vocab = {'<PAD>': 0, '<UNK>': 1, 'i': 2, 'am': 3, 'feeling': 4, 'sad': 5, 'happy': 6,
15
  'angry': 7, 'love': 8, 'stressed': 9, 'anxious': 10}
16
  MAX_LEN = 16
 
42
  padded = encoded[:MAX_LEN] + [vocab['<PAD>']] * max(0, MAX_LEN - len(encoded))
43
  return torch.tensor([padded], dtype=torch.long)
44
 
45
+ # Load solutions CSV from Google Drive
46
+ file_id = "1yVJh_NVL4Y4YqEXGym47UCK5ZNZgVZYv" # replace with your CSV file ID
47
  url = f"https://drive.google.com/uc?export=download&id={file_id}"
48
  response = requests.get(url)
49
  csv_text = response.text
 
50
  if csv_text.strip().startswith('<'):
51
+ raise Exception("ERROR: Google Drive link is not returning CSV! Check sharing settings.")
 
52
  solutions_df = pd.read_csv(StringIO(csv_text), header=0, on_bad_lines='skip')
53
  used_solutions = {emotion: set() for emotion in solutions_df['emotion'].unique()}
54
 
55
+ negative_words = ["not", "bad", "sad", "anxious", "anxiety", "depressed", "upset", "shit", "stress",
56
+ "worried", "unwell", "struggling", "low", "down", "terrible", "awful",
57
+ "nervous", "panic", "afraid", "scared", "tense", "overwhelmed", "fear", "uneasy"]
 
 
58
 
59
  responses = {
60
+ "sadness": ["It’s okay to feel down sometimes. I’m here to support you.",
61
+ "I'm really sorry you're going through this. Want to talk more about it?",
62
+ "You're not alone I’m here for you."],
63
+ "anger": ["That must have been frustrating. Want to vent about it?",
64
+ "It's okay to feel this way. I'm listening.",
65
+ "Would it help to talk through it?"],
66
+ "love": ["That’s beautiful to hear! What made you feel that way?",
67
+ "Its amazing to experience moments like that.",
68
+ "Sounds like something truly meaningful."],
69
+ "happiness": ["That's awesome! What’s bringing you joy today?",
70
+ "I love hearing good news. 😊",
71
+ "Yay! Want to share more about it?"],
72
+ "neutral": ["Got it. Im here if you want to dive deeper.",
73
+ "Thanks for sharing that. Tell me more if you’d like.",
74
+ "I’m listening. How else can I support you?"]
 
 
 
 
 
 
 
 
 
 
75
  }
76
 
77
  def get_unique_solution(emotion):
 
111
  with sr.AudioFile(audio_file) as source:
112
  audio = recog.record(source)
113
  try:
114
+ return recog.recognize_google(audio)
 
115
  except Exception:
116
  return ""
117
 
 
119
  USER_FEEDBACK_STATE = {}
120
 
121
  def emoti_chat(audio, text, history_json=""):
 
122
  if text and text.strip():
123
  user_input = text
124
  elif audio is not None:
 
131
 
132
  user_input = correct_spelling(user_input)
133
 
 
134
  if user_input.lower().strip() in ["exit", "quit", "goodbye", "bye", "close"]:
135
  return "Take care! I’m here whenever you want to talk. 👋", json.dumps(GLOBAL_CONVO_HISTORY[-5:], indent=2), gr.update(visible=False)
136
 
 
137
  user_id = "default_user"
138
  state = USER_FEEDBACK_STATE.get(user_id, {"emotion": None, "pending": False})
139
 
 
149
  USER_FEEDBACK_STATE[user_id] = {"emotion": None, "pending": False}
150
  return "How can I help you further?", json.dumps(GLOBAL_CONVO_HISTORY[-5:], indent=2), ""
151
 
 
152
  pred_emotion = get_emotion(user_input)
153
  support = random.choice(responses.get(pred_emotion, responses["neutral"]))
154
  try:
 
168
  USER_FEEDBACK_STATE[user_id] = {"emotion": pred_emotion, "pending": True}
169
  return reply, json.dumps(GLOBAL_CONVO_HISTORY[-5:], indent=2), ""
170
 
 
 
171
  iface = gr.Interface(
172
  fn=emoti_chat,
173
  inputs=[
174
  gr.Audio(type="filepath", label="🎤 Speak your message"),
175
  gr.Textbox(lines=2, placeholder="Or type your message here...", label="💬 Type message"),
176
+ gr.Textbox(lines=1, value="", visible=False)
177
  ],
178
  outputs=[
179
  gr.Textbox(label="EmotiBot Reply"),
180
  gr.Textbox(label="Conversation History (JSON)", visible=False)
181
  ],
182
  title="EmotiBot Connect",
183
+ description="Talk to EmotiBot using your voice or by typing. Detects your emotion, gives suggestions, and keeps history!"
184
  )
185
 
186
  iface.launch()