opinder2906 committed on
Commit
9744d58
·
verified ·
1 Parent(s): 240342b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +63 -89
app.py CHANGED
@@ -1,21 +1,16 @@
 
1
  import torch
2
- import torch.nn as nn
3
  import torch.nn.functional as F
4
- import random
5
  from textblob import TextBlob
6
  import pandas as pd
7
  import requests
8
  from io import StringIO
9
- import gradio as gr
10
  import speech_recognition as sr
11
  import json
12
- import re
13
- from sklearn.preprocessing import LabelEncoder
14
- from collections import Counter
15
- import numpy as np
16
 
17
- # ---------------------------
18
- # Prepare dummy vocab, label encoder, and model (replace with real model if you want)
19
  vocab = {'<PAD>': 0, '<UNK>': 1, 'i': 2, 'am': 3, 'feeling': 4, 'sad': 5, 'happy': 6, 'angry': 7, 'love': 8, 'stressed': 9, 'anxious': 10}
20
  MAX_LEN = 16
21
 
@@ -27,11 +22,11 @@ class DummyLabelEncoder:
27
 
28
  le = DummyLabelEncoder()
29
 
30
- class DummyModel(nn.Module):
31
  def __init__(self):
32
  super().__init__()
33
- self.embedding = nn.Embedding(len(vocab), 8)
34
- self.fc = nn.Linear(8, len(le.classes_))
35
  def forward(self, x):
36
  x = self.embedding(x)
37
  x = x.mean(dim=1)
@@ -43,20 +38,14 @@ def preprocess_input(text):
43
  tokens = text.lower().split()
44
  encoded = [vocab.get(token, vocab['<UNK>']) for token in tokens]
45
  padded = encoded[:MAX_LEN] + [vocab['<PAD>']] * max(0, MAX_LEN - len(encoded))
46
- return torch.tensor([padded], dtype=torch.long).to(next(model.parameters()).device)
47
 
48
- # ---------------------------
49
- # Load solutions CSV from URL (you can update link if you want)
50
- file_id = "1yVJh_NVL4Y4YqEXGym47UCK5ZNZgVZYv"
51
  url = f"https://drive.google.com/uc?export=download&id={file_id}"
52
  response = requests.get(url)
53
  csv_text = response.text
54
-
55
- if csv_text.strip().startswith('<'):
56
- raise Exception("ERROR: Google Drive link is not returning CSV! Check your sharing settings.")
57
-
58
  solutions_df = pd.read_csv(StringIO(csv_text), header=0, on_bad_lines='skip')
59
-
60
  used_solutions = {emotion: set() for emotion in solutions_df['emotion'].unique()}
61
 
62
  negative_words = [
@@ -119,7 +108,7 @@ def get_emotion(user_input):
119
  return "sadness"
120
  sentiment = get_sentiment(user_input)
121
  x = preprocess_input(user_input)
122
- model.train()
123
  with torch.no_grad():
124
  probs = torch.stack([F.softmax(model(x), dim=1) for _ in range(5)])
125
  avg_probs = probs.mean(dim=0)
@@ -145,75 +134,60 @@ def audio_to_text(audio_file):
145
  except Exception:
146
  return ""
147
 
148
- # Chatbot conversation history and feedback state
149
- GLOBAL_CONVO_HISTORY = []
150
- USER_FEEDBACK_STATE = {}
 
 
 
 
 
 
 
151
 
152
- def emoti_chat(audio, text, history_json=""):
153
- if text and text.strip():
154
- user_input = text
155
- elif audio is not None:
156
- user_input = audio_to_text(audio)
 
 
 
 
 
157
  else:
158
  user_input = ""
159
- if not user_input.strip():
160
- return "Please say something or type your message.", json.dumps(GLOBAL_CONVO_HISTORY[-5:], indent=2), ""
161
-
162
- user_input = correct_spelling(user_input)
163
-
164
- # Exit phrases
165
- exit_phrases = ["exit", "quit", "goodbye", "bye", "close"]
166
- if user_input.lower().strip() in exit_phrases:
167
- return "Take care! I’m here whenever you want to talk. 👋", json.dumps(GLOBAL_CONVO_HISTORY[-5:], indent=2), gr.update(visible=False)
168
-
169
- # Feedback logic
170
- user_id = "default_user"
171
- state = USER_FEEDBACK_STATE.get(user_id, {"emotion": None, "pending": False})
172
-
173
- if state["pending"]:
174
- feedback = user_input.lower().strip()
175
- GLOBAL_CONVO_HISTORY[-1]["feedback"] = feedback
176
- if feedback == "no":
177
- suggestion = get_unique_solution(state["emotion"])
178
- reply = f"Here's another suggestion for you: {suggestion}\nDid this help? (yes/no/skip)"
179
- USER_FEEDBACK_STATE[user_id]["pending"] = True
180
- return reply, json.dumps(GLOBAL_CONVO_HISTORY[-5:], indent=2), ""
181
- else:
182
- USER_FEEDBACK_STATE[user_id] = {"emotion": None, "pending": False}
183
- return "How can I help you further?", json.dumps(GLOBAL_CONVO_HISTORY[-5:], indent=2), ""
184
 
185
- pred_emotion = get_emotion(user_input)
186
- support = random.choice(responses.get(pred_emotion, responses["neutral"]))
187
- try:
188
- suggestion = get_unique_solution(pred_emotion)
189
- except Exception:
190
- suggestion = get_unique_solution("neutral")
191
-
192
- reply = f"{support}\n\nHere's a suggestion for you: {suggestion}\nDid this help? (yes/no/skip)"
193
- GLOBAL_CONVO_HISTORY.append({
194
- "user_input": user_input,
195
- "emotion": pred_emotion,
196
- "bot_support": support,
197
- "bot_suggestion": suggestion,
198
- "feedback": ""
199
- })
200
- USER_FEEDBACK_STATE[user_id] = {"emotion": pred_emotion, "pending": True}
201
- return reply, json.dumps(GLOBAL_CONVO_HISTORY[-5:], indent=2), ""
202
-
203
- # Gradio Interface
204
- iface = gr.Interface(
205
- fn=emoti_chat,
206
- inputs=[
207
- gr.Audio(type="filepath", label="🎤 Speak your message"),
208
- gr.Textbox(lines=2, placeholder="Or type your message here...", label="💬 Type message"),
209
- gr.Textbox(lines=1, value="", visible=False) # Hidden history state
210
- ],
211
- outputs=[
212
- gr.Textbox(label="EmotiBot Reply"),
213
- gr.Textbox(label="Hidden", visible=False)
214
- ],
215
- title="EmotiBot Connect",
216
- description="Talk to EmotiBot using your voice or by typing. Detects your emotion and offers dynamic support."
217
- )
218
 
219
- iface.launch()
 
1
+ import streamlit as st
2
  import torch
 
3
  import torch.nn.functional as F
 
4
  from textblob import TextBlob
5
  import pandas as pd
6
  import requests
7
  from io import StringIO
 
8
  import speech_recognition as sr
9
  import json
10
+ import random
11
+
12
+ # --- Your Dummy Model and Helpers (same as before) ---
 
13
 
 
 
14
  vocab = {'<PAD>': 0, '<UNK>': 1, 'i': 2, 'am': 3, 'feeling': 4, 'sad': 5, 'happy': 6, 'angry': 7, 'love': 8, 'stressed': 9, 'anxious': 10}
15
  MAX_LEN = 16
16
 
 
22
 
23
  le = DummyLabelEncoder()
24
 
25
+ class DummyModel(torch.nn.Module):
26
  def __init__(self):
27
  super().__init__()
28
+ self.embedding = torch.nn.Embedding(len(vocab), 8)
29
+ self.fc = torch.nn.Linear(8, len(le.classes_))
30
  def forward(self, x):
31
  x = self.embedding(x)
32
  x = x.mean(dim=1)
 
38
  tokens = text.lower().split()
39
  encoded = [vocab.get(token, vocab['<UNK>']) for token in tokens]
40
  padded = encoded[:MAX_LEN] + [vocab['<PAD>']] * max(0, MAX_LEN - len(encoded))
41
+ return torch.tensor([padded], dtype=torch.long)
42
 
43
+ # Load CSV from Google Drive
44
+ file_id = "1yVJh_NVL4Y4YqEXGym47UCK5ZNZgVZYv"
 
45
  url = f"https://drive.google.com/uc?export=download&id={file_id}"
46
  response = requests.get(url)
47
  csv_text = response.text
 
 
 
 
48
  solutions_df = pd.read_csv(StringIO(csv_text), header=0, on_bad_lines='skip')
 
49
  used_solutions = {emotion: set() for emotion in solutions_df['emotion'].unique()}
50
 
51
  negative_words = [
 
108
  return "sadness"
109
  sentiment = get_sentiment(user_input)
110
  x = preprocess_input(user_input)
111
+ model.eval()
112
  with torch.no_grad():
113
  probs = torch.stack([F.softmax(model(x), dim=1) for _ in range(5)])
114
  avg_probs = probs.mean(dim=0)
 
134
  except Exception:
135
  return ""
136
 
137
+ # --- Streamlit UI and Logic ---
138
+
139
+ st.title("EmotiBot Connect (Streamlit)")
140
+
141
+ if "history" not in st.session_state:
142
+ st.session_state.history = []
143
+
144
+ # User input
145
+ audio_input = st.file_uploader("🎤 Upload audio message (wav, mp3)", type=["wav", "mp3"])
146
+ text_input = st.text_input("💬 Or type your message here")
147
 
148
+ if st.button("Send"):
149
+
150
+ # Get text from audio or text input
151
+ if text_input.strip():
152
+ user_input = text_input
153
+ elif audio_input is not None:
154
+ user_input = audio_to_text(audio_input)
155
+ if not user_input:
156
+ st.warning("Sorry, could not recognize speech from audio.")
157
+ user_input = ""
158
  else:
159
  user_input = ""
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
160
 
161
+ if user_input.strip() == "":
162
+ st.warning("Please say something or type your message.")
163
+ else:
164
+ # Correct spelling
165
+ user_input_corrected = correct_spelling(user_input)
166
+
167
+ # Handle exit phrases
168
+ if user_input_corrected.lower() in ["exit", "quit", "goodbye", "bye", "close"]:
169
+ st.success("Take care! I’m here whenever you want to talk. 👋")
170
+ else:
171
+ # Get emotion and response
172
+ pred_emotion = get_emotion(user_input_corrected)
173
+ support = random.choice(responses.get(pred_emotion, responses["neutral"]))
174
+ try:
175
+ suggestion = get_unique_solution(pred_emotion)
176
+ except Exception:
177
+ suggestion = get_unique_solution("neutral")
178
+
179
+ reply = f"{support}\n\nHere's a suggestion for you: {suggestion}\nDid this help? (yes/no/skip)"
180
+
181
+ # Save in history
182
+ st.session_state.history.append({
183
+ "user": user_input_corrected,
184
+ "emotion": pred_emotion,
185
+ "bot": reply,
186
+ "feedback": ""
187
+ })
188
+
189
+ # Show conversation history
190
+ for chat in st.session_state.history[-5:]:
191
+ st.markdown(f"**You:** {chat['user']}")
192
+ st.markdown(f"**EmotiBot:** {chat['bot']}")
 
193