opinder2906 commited on
Commit
fe0d65e
·
verified ·
1 Parent(s): 8ef5231

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +87 -85
app.py CHANGED
@@ -1,16 +1,18 @@
1
- import streamlit as st
2
  import torch
 
3
  import torch.nn.functional as F
 
4
  from textblob import TextBlob
5
  import pandas as pd
6
  import requests
7
  from io import StringIO
 
8
  import speech_recognition as sr
9
- import random
10
-
11
- # --- Your Dummy Model and Helpers (same as before) ---
12
 
13
- vocab = {'<PAD>': 0, '<UNK>': 1, 'i': 2, 'am': 3, 'feeling': 4, 'sad': 5, 'happy': 6, 'angry': 7, 'love': 8, 'stressed': 9, 'anxious': 10}
 
 
14
  MAX_LEN = 16
15
 
16
  class DummyLabelEncoder:
@@ -21,17 +23,18 @@ class DummyLabelEncoder:
21
 
22
  le = DummyLabelEncoder()
23
 
24
- class DummyModel(torch.nn.Module):
25
  def __init__(self):
26
  super().__init__()
27
- self.embedding = torch.nn.Embedding(len(vocab), 8)
28
- self.fc = torch.nn.Linear(8, len(le.classes_))
29
  def forward(self, x):
30
  x = self.embedding(x)
31
  x = x.mean(dim=1)
32
  return self.fc(x)
33
 
34
  model = DummyModel()
 
35
 
36
  def preprocess_input(text):
37
  tokens = text.lower().split()
@@ -39,26 +42,16 @@ def preprocess_input(text):
39
  padded = encoded[:MAX_LEN] + [vocab['<PAD>']] * max(0, MAX_LEN - len(encoded))
40
  return torch.tensor([padded], dtype=torch.long)
41
 
42
- # Load CSV from Google Drive
43
- file_id = "1yVJh_NVL4Y4qEXGym47UCK5ZNZgVZYv"
44
  url = f"https://drive.google.com/uc?export=download&id={file_id}"
45
  response = requests.get(url)
46
  csv_text = response.text
47
 
48
- # Read CSV and clean column names
49
- solutions_df = pd.read_csv(StringIO(csv_text), header=0, on_bad_lines='skip')
50
- solutions_df.columns = solutions_df.columns.str.strip().str.lower() # clean columns
51
-
52
- # Show debug info in Streamlit UI (remove later if you want)
53
- st.write("Columns found in CSV:", list(solutions_df.columns))
54
- st.write("First few rows of CSV:")
55
- st.write(solutions_df.head())
56
-
57
- # Stop if 'emotion' column missing
58
- if 'emotion' not in solutions_df.columns:
59
- st.error("CSV is missing the 'emotion' column. Please check your file or rename the column.")
60
- st.stop()
61
 
 
62
  used_solutions = {emotion: set() for emotion in solutions_df['emotion'].unique()}
63
 
64
  negative_words = [
@@ -108,10 +101,6 @@ def get_unique_solution(emotion):
108
  def correct_spelling(text):
109
  return str(TextBlob(text).correct())
110
 
111
- def get_sentiment(text):
112
- blob = TextBlob(text)
113
- return blob.sentiment.polarity
114
-
115
  def is_negative_input(text):
116
  text_lower = text.lower()
117
  return any(word in text_lower for word in negative_words)
@@ -119,20 +108,14 @@ def is_negative_input(text):
119
  def get_emotion(user_input):
120
  if is_negative_input(user_input):
121
  return "sadness"
122
- sentiment = get_sentiment(user_input)
123
  x = preprocess_input(user_input)
124
- model.eval()
125
  with torch.no_grad():
126
- probs = torch.stack([F.softmax(model(x), dim=1) for _ in range(5)])
127
- avg_probs = probs.mean(dim=0)
128
- prob, idx = torch.max(avg_probs, dim=1)
129
  pred_emotion = le.classes_[idx.item()]
130
  if prob.item() < 0.6:
131
  return "neutral"
132
- if sentiment < -0.25 and pred_emotion == "happiness":
133
- return "sadness"
134
- if sentiment > 0.25 and pred_emotion == "sadness":
135
- return "happiness"
136
  return pred_emotion
137
 
138
  def audio_to_text(audio_file):
@@ -147,59 +130,78 @@ def audio_to_text(audio_file):
147
  except Exception:
148
  return ""
149
 
150
- # --- Streamlit UI and Logic ---
 
151
 
152
- st.title("EmotiBot Connect (Streamlit)")
153
-
154
- if "history" not in st.session_state:
155
- st.session_state.history = []
 
 
 
 
156
 
157
- # User input
158
- audio_input = st.file_uploader("🎤 Upload audio message (wav, mp3)", type=["wav", "mp3"])
159
- text_input = st.text_input("💬 Or type your message here")
160
 
161
- if st.button("Send"):
162
 
163
- # Get text from audio or text input
164
- if text_input.strip():
165
- user_input = text_input
166
- elif audio_input is not None:
167
- user_input = audio_to_text(audio_input)
168
- if not user_input:
169
- st.warning("Sorry, could not recognize speech from audio.")
170
- user_input = ""
171
- else:
172
- user_input = ""
173
 
174
- if user_input.strip() == "":
175
- st.warning("Please say something or type your message.")
176
- else:
177
- # Correct spelling
178
- user_input_corrected = correct_spelling(user_input)
179
 
180
- # Handle exit phrases
181
- if user_input_corrected.lower() in ["exit", "quit", "goodbye", "bye", "close"]:
182
- st.success("Take care! I’m here whenever you want to talk. 👋")
 
 
 
 
 
183
  else:
184
- # Get emotion and response
185
- pred_emotion = get_emotion(user_input_corrected)
186
- support = random.choice(responses.get(pred_emotion, responses["neutral"]))
187
- try:
188
- suggestion = get_unique_solution(pred_emotion)
189
- except Exception:
190
- suggestion = get_unique_solution("neutral")
191
-
192
- reply = f"{support}\n\nHere's a suggestion for you: {suggestion}\nDid this help? (yes/no/skip)"
193
-
194
- # Save in history
195
- st.session_state.history.append({
196
- "user": user_input_corrected,
197
- "emotion": pred_emotion,
198
- "bot": reply,
199
- "feedback": ""
200
- })
201
-
202
- # Show conversation history (last 5)
203
- for chat in st.session_state.history[-5:]:
204
- st.markdown(f"**You:** {chat['user']}")
205
- st.markdown(f"**EmotiBot:** {chat['bot']}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  import torch
2
+ import torch.nn as nn
3
  import torch.nn.functional as F
4
+ import random
5
  from textblob import TextBlob
6
  import pandas as pd
7
  import requests
8
  from io import StringIO
9
+ import gradio as gr
10
  import speech_recognition as sr
11
+ import json
 
 
12
 
13
# --- Dummy vocab and label encoder ---
# Token-to-index map for the toy classifier; '<PAD>' and '<UNK>' are
# reserved slots used by preprocess_input for padding / out-of-vocab words.
vocab = {'<PAD>': 0, '<UNK>': 1, 'i': 2, 'am': 3, 'feeling': 4, 'sad': 5, 'happy': 6,
         'angry': 7, 'love': 8, 'stressed': 9, 'anxious': 10}
# Fixed sequence length: inputs are truncated/padded to exactly this many tokens.
MAX_LEN = 16
17
 
18
  class DummyLabelEncoder:
 
23
 
24
  le = DummyLabelEncoder()
25
 
26
class DummyModel(nn.Module):
    """Toy bag-of-words classifier: mean-pooled token embeddings -> linear logits."""

    def __init__(self):
        super().__init__()
        # One 8-dimensional embedding per vocabulary token.
        self.embedding = nn.Embedding(len(vocab), 8)
        # Linear head mapping the pooled embedding to one logit per emotion class.
        self.fc = nn.Linear(8, len(le.classes_))

    def forward(self, x):
        # x: (batch, seq_len) token ids -> (batch, 8) by averaging over the
        # sequence dimension, then project to class logits.
        pooled = self.embedding(x).mean(dim=1)
        return self.fc(pooled)

model = DummyModel()
model.eval()  # inference-only: make the eval/train mode explicit up front
 
39
  def preprocess_input(text):
40
  tokens = text.lower().split()
 
42
  padded = encoded[:MAX_LEN] + [vocab['<PAD>']] * max(0, MAX_LEN - len(encoded))
43
  return torch.tensor([padded], dtype=torch.long)
44
 
45
# --- Load solutions CSV from Google Drive ---
file_id = "1yVJh_NVL4Y4YqEXGym47UCK5ZNZgVZYv"  # Replace with your file ID
url = f"https://drive.google.com/uc?export=download&id={file_id}"
# Bounded timeout so a hung Drive endpoint cannot stall app start-up forever.
response = requests.get(url, timeout=30)
response.raise_for_status()
csv_text = response.text

# Drive serves an HTML page (starting with '<') instead of CSV when the file
# is not shared publicly — fail fast with an actionable message.
if csv_text.strip().startswith('<'):
    raise RuntimeError("ERROR: Google Drive link is not returning CSV! Check your sharing settings.")

solutions_df = pd.read_csv(StringIO(csv_text), header=0, on_bad_lines='skip')
# Normalize headers so the 'emotion' lookup below is case/whitespace-insensitive.
solutions_df.columns = solutions_df.columns.str.strip().str.lower()
if 'emotion' not in solutions_df.columns:
    raise RuntimeError("CSV is missing the 'emotion' column. Please check your file or rename the column.")

# Per-emotion sets tracking which suggestions have already been served,
# so get_unique_solution can avoid repeating itself.
used_solutions = {emotion: set() for emotion in solutions_df['emotion'].unique()}
56
 
57
  negative_words = [
 
101
def correct_spelling(text):
    """Return *text* with TextBlob's best-guess spelling corrections applied."""
    corrected = TextBlob(text).correct()
    return str(corrected)
103
 
 
 
 
 
104
def is_negative_input(text):
    """True when any configured negative keyword appears in *text* (case-insensitive substring match)."""
    lowered = text.lower()
    return any(keyword in lowered for keyword in negative_words)
 
108
def get_emotion(user_input):
    """Classify *user_input* into an emotion label.

    A hit on the negative-keyword list short-circuits to "sadness".
    Otherwise the dummy model's softmax prediction is used, falling back
    to "neutral" when the top class probability is below 0.6.
    """
    if is_negative_input(user_input):
        return "sadness"
    token_ids = preprocess_input(user_input)
    with torch.no_grad():
        distribution = F.softmax(model(token_ids), dim=1)
        confidence, best = torch.max(distribution, dim=1)
    # Low-confidence predictions are reported as neutral rather than guessed.
    if confidence.item() < 0.6:
        return "neutral"
    return le.classes_[best.item()]
120
 
121
  def audio_to_text(audio_file):
 
130
  except Exception:
131
  return ""
132
 
133
# Module-level conversation state, shared by every request (single-user demo).
GLOBAL_CONVO_HISTORY = []
USER_FEEDBACK_STATE = {}

def _history_json():
    """Render the last five conversation turns as pretty-printed JSON."""
    return json.dumps(GLOBAL_CONVO_HISTORY[-5:], indent=2)

def emoti_chat(audio, text, history_json=""):
    """Handle one chat turn from voice or text input.

    Returns a ``(bot_reply, history_json)`` pair — exactly two values, matching
    the two Gradio outputs declared on the Interface. (The previous version
    returned three values, which Gradio rejects.) The *history_json* parameter
    is unused and kept only for input-signature compatibility.
    """
    # Prefer typed text; fall back to speech recognition on the audio file.
    if text and text.strip():
        user_input = text
    elif audio is not None:
        user_input = audio_to_text(audio)
    else:
        user_input = ""

    if not user_input.strip():
        return "Please say something or type your message.", _history_json()

    user_input = correct_spelling(user_input)

    # Exit phrases end the conversation politely.
    if user_input.lower().strip() in ["exit", "quit", "goodbye", "bye", "close"]:
        return "Take care! I’m here whenever you want to talk. 👋", _history_json()

    # Single shared feedback slot (demo has no per-user sessions).
    user_id = "default_user"
    state = USER_FEEDBACK_STATE.get(user_id, {"emotion": None, "pending": False})

    if state["pending"]:
        # This turn answers the previous "Did this help? (yes/no/skip)" prompt.
        feedback = user_input.lower().strip()
        if GLOBAL_CONVO_HISTORY:  # guard: avoid IndexError on an empty history
            GLOBAL_CONVO_HISTORY[-1]["feedback"] = feedback
        if feedback == "no":
            # Offer another suggestion for the same emotion and stay pending.
            suggestion = get_unique_solution(state["emotion"])
            reply = f"Here's another suggestion for you: {suggestion}\nDid this help? (yes/no/skip)"
            USER_FEEDBACK_STATE[user_id]["pending"] = True
            return reply, _history_json()
        # "yes"/"skip"/anything else clears the pending feedback state.
        USER_FEEDBACK_STATE[user_id] = {"emotion": None, "pending": False}
        return "How can I help you further?", _history_json()

    # Normal message: detect emotion, pick a supportive line and a suggestion.
    pred_emotion = get_emotion(user_input)
    support = random.choice(responses.get(pred_emotion, responses["neutral"]))
    try:
        suggestion = get_unique_solution(pred_emotion)
    except Exception:
        # Best-effort fallback when the emotion has no remaining suggestions.
        suggestion = get_unique_solution("neutral")

    reply = f"{support}\n\nHere's a suggestion for you: {suggestion}\nDid this help? (yes/no/skip)"

    GLOBAL_CONVO_HISTORY.append({
        "user_input": user_input,
        "emotion": pred_emotion,
        "bot_support": support,
        "bot_suggestion": suggestion,
        "feedback": ""
    })
    USER_FEEDBACK_STATE[user_id] = {"emotion": pred_emotion, "pending": True}
    return reply, _history_json()
189
+
190
# --- Gradio UI wiring ---
# NOTE: the redundant mid-file `import gradio as gr` was removed; gradio is
# already imported as `gr` at the top of the file.
iface = gr.Interface(
    fn=emoti_chat,
    inputs=[
        gr.Audio(type="filepath", label="🎤 Speak your message"),
        gr.Textbox(lines=2, placeholder="Or type your message here...", label="💬 Type message"),
        # Hidden slot kept only so emoti_chat's 3-parameter signature is satisfied.
        gr.Textbox(lines=1, value="", visible=False),
    ],
    outputs=[
        gr.Textbox(label="EmotiBot Reply"),
        # History is produced but hidden; flip visible=True to inspect it.
        gr.Textbox(label="Conversation History (JSON)", visible=False),
    ],
    title="EmotiBot Connect",
    description="Talk to EmotiBot using your voice or by typing. Detects your emotion, gives dynamic suggestions, and keeps conversation history!"
)

iface.launch()