shanusherly committed on
Commit
cb7ccbd
·
verified ·
1 Parent(s): 4f4a419

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +34 -41
app.py CHANGED
@@ -1,6 +1,5 @@
1
  import os
2
  import time
3
- import json
4
  import requests
5
  import gradio as gr
6
  import google.generativeai as genai
@@ -25,9 +24,9 @@ gemini_model = genai.GenerativeModel("gemini-2.5-flash")
25
  # Simple in-memory chat memory
26
  # -----------------------
27
  class SimpleMemory:
28
- def __init__(self, max_messages=20):
29
  self.max_messages = max_messages
30
- self.history = [] # list of tuples (role, text) with role in {"user","bot"}
31
 
32
  def add(self, role, text):
33
  self.history.append((role, text))
@@ -43,7 +42,7 @@ class SimpleMemory:
43
  lines.append(f"Chatbot: {txt}")
44
  return "\n".join(lines)
45
 
46
- memory = SimpleMemory(max_messages=20)
47
 
48
  # -----------------------
49
  # Prompt template
@@ -54,14 +53,14 @@ User: {user_message}
54
  Chatbot:"""
55
 
56
  # -----------------------
57
- # Robust Gemini generator (tries multiple message formats)
58
  # Returns: (text_or_None, error_message_or_None)
59
  # -----------------------
60
  def generate_text_with_gemini(user_message):
61
  chat_history_text = memory.as_prompt_text()
62
  full_prompt = PROMPT_TEMPLATE.format(chat_history=chat_history_text, user_message=user_message)
63
 
64
- # 1) Try the simplest call (raw prompt)
65
  try:
66
  resp = gemini_model.generate_content(full_prompt)
67
  text = getattr(resp, "text", None)
@@ -69,10 +68,10 @@ def generate_text_with_gemini(user_message):
69
  text = str(resp)
70
  return text, None
71
  except ResourceExhausted as e:
72
- print("Gemini quota exhausted (raw prompt):", e)
73
  return None, "Gemini quota exceeded. Please try again later."
74
  except Exception as e1:
75
- print("generate_content(raw) failed, will retry with messages format:", repr(e1))
76
 
77
  # 2) Try messages with content as plain string
78
  try:
@@ -89,9 +88,9 @@ def generate_text_with_gemini(user_message):
89
  print("Gemini quota exhausted (messages):", e)
90
  return None, "Gemini quota exceeded. Please try again later."
91
  except Exception as e2:
92
- print("generate_content(messages) failed, will retry with typed content:", repr(e2))
93
 
94
- # 3) Try messages where content is a list of typed chunks
95
  try:
96
  messages2 = [
97
  {"role": "system", "content": [{"type": "text", "text": "You are a helpful assistant."}]},
@@ -106,11 +105,11 @@ def generate_text_with_gemini(user_message):
106
  print("Gemini quota exhausted (messages2):", e)
107
  return None, "Gemini quota exceeded. Please try again later."
108
  except Exception as efinal:
109
- print("Gemini generate_content all attempts failed:", repr(efinal))
110
  return None, f"Gemini error: {repr(efinal)}"
111
 
112
  # -----------------------
113
- # ElevenLabs HTTP TTS (fallback, robust)
114
  # Returns: (output_path_or_empty, error_or_empty)
115
  # -----------------------
116
  def generate_audio_elevenlabs_http(text):
@@ -156,71 +155,65 @@ def generate_audio_elevenlabs_http(text):
156
  print(err)
157
  return "", err
158
 
 
 
 
 
 
 
 
 
 
 
 
159
  # -----------------------
160
  # Combined workflow
 
161
  # -----------------------
162
  def process_user_message(user_message):
163
- # 1) Generate text (robust)
164
  text, gen_err = generate_text_with_gemini(user_message)
165
  if gen_err:
166
  # safe fallback: store user and friendly message
167
  memory.add("user", user_message)
168
  fallback = "Sorry — the assistant is temporarily unavailable: " + gen_err
169
  memory.add("bot", fallback)
170
- history_pairs = convert_memory_to_pairs(memory.history)
171
- return history_pairs, "", gen_err
172
 
173
  # 2) update memory
174
  memory.add("user", user_message)
175
  memory.add("bot", text)
176
 
177
- # 3) try audio (HTTP fallback)
178
  audio_path, audio_err = generate_audio_elevenlabs_http(text)
179
  if audio_err:
180
  print("Audio generation error:", audio_err)
181
 
182
- history_pairs = convert_memory_to_pairs(memory.history)
183
- return history_pairs, audio_path or "", audio_err or ""
184
-
185
def convert_memory_to_pairs(history):
    """
    Flatten memory tuples into (user_text, bot_text) pairs for gr.Chatbot.

    ``history`` is a list of ``(role, text)`` tuples where roles alternate
    between "user" and "bot"; each bot message is paired with the user
    message that immediately preceded it (empty string if none pending).
    """
    pairs = []
    pending_user = None
    for speaker, message in history:
        if speaker != "user":  # a bot turn closes out the pending pair
            pairs.append((pending_user or "", message))
            pending_user = None
        else:
            pending_user = message
    return pairs
200
 
201
  # -----------------------
202
- # Gradio UI
203
  # -----------------------
204
  with gr.Blocks() as demo:
205
  gr.Markdown("## 🤖 Gemini + ElevenLabs Chatbot (Text + Audio replies)")
206
- chatbot = gr.Chatbot()
207
  with gr.Row():
208
  txt = gr.Textbox(show_label=False, placeholder="Type your message and press Enter")
209
  send_btn = gr.Button("Send")
210
  audio_player = gr.Audio(label="Last reply audio (if available)", visible=False)
211
 
212
  def submit_message(message):
213
- # Run the combined workflow
214
- pairs, audio_path, err = process_user_message(message)
215
- # The chat UI expects list of pairs (user, bot)
216
  if audio_path:
217
- return pairs, gr.update(value=audio_path, visible=True)
218
  else:
219
- return pairs, gr.update(value=None, visible=False)
220
 
221
  send_btn.click(fn=submit_message, inputs=[txt], outputs=[chatbot, audio_player])
222
  txt.submit(fn=submit_message, inputs=[txt], outputs=[chatbot, audio_player])
223
 
224
  # Launch app
225
  if __name__ == "__main__":
 
226
  demo.launch(debug=True)
 
1
  import os
2
  import time
 
3
  import requests
4
  import gradio as gr
5
  import google.generativeai as genai
 
24
  # Simple in-memory chat memory
25
  # -----------------------
26
  class SimpleMemory:
27
+ def __init__(self, max_messages=40):
28
  self.max_messages = max_messages
29
+ self.history = [] # list of tuples (role, text) where role in {"user","bot"}
30
 
31
  def add(self, role, text):
32
  self.history.append((role, text))
 
42
  lines.append(f"Chatbot: {txt}")
43
  return "\n".join(lines)
44
 
45
+ memory = SimpleMemory(max_messages=40)
46
 
47
  # -----------------------
48
  # Prompt template
 
53
  Chatbot:"""
54
 
55
  # -----------------------
56
+ # Robust Gemini generator (tries several message formats)
57
  # Returns: (text_or_None, error_message_or_None)
58
  # -----------------------
59
  def generate_text_with_gemini(user_message):
60
  chat_history_text = memory.as_prompt_text()
61
  full_prompt = PROMPT_TEMPLATE.format(chat_history=chat_history_text, user_message=user_message)
62
 
63
+ # 1) Try raw prompt string
64
  try:
65
  resp = gemini_model.generate_content(full_prompt)
66
  text = getattr(resp, "text", None)
 
68
  text = str(resp)
69
  return text, None
70
  except ResourceExhausted as e:
71
+ print("Gemini quota exhausted (raw):", e)
72
  return None, "Gemini quota exceeded. Please try again later."
73
  except Exception as e1:
74
+ print("generate_content(raw) failed, will try messages:", repr(e1))
75
 
76
  # 2) Try messages with content as plain string
77
  try:
 
88
  print("Gemini quota exhausted (messages):", e)
89
  return None, "Gemini quota exceeded. Please try again later."
90
  except Exception as e2:
91
+ print("generate_content(messages) failed, will try typed content:", repr(e2))
92
 
93
+ # 3) Try messages where content is list of typed parts
94
  try:
95
  messages2 = [
96
  {"role": "system", "content": [{"type": "text", "text": "You are a helpful assistant."}]},
 
105
  print("Gemini quota exhausted (messages2):", e)
106
  return None, "Gemini quota exceeded. Please try again later."
107
  except Exception as efinal:
108
+ print("Gemini all attempts failed:", repr(efinal))
109
  return None, f"Gemini error: {repr(efinal)}"
110
 
111
  # -----------------------
112
+ # ElevenLabs HTTP TTS
113
  # Returns: (output_path_or_empty, error_or_empty)
114
  # -----------------------
115
  def generate_audio_elevenlabs_http(text):
 
155
  print(err)
156
  return "", err
157
 
158
+ # -----------------------
159
+ # Convert memory -> messages list (dicts) for Gradio
160
+ # Each message is {"role":"user"/"assistant", "content": "..."}
161
+ # -----------------------
162
def convert_memory_to_messages(history):
    """Translate memory tuples into Gradio role/content message dicts.

    ``history`` is a list of ``(role, text)`` tuples; "bot" maps to the
    "assistant" role and every other role maps to "user".
    """
    return [
        {"role": "assistant" if speaker == "bot" else "user", "content": text}
        for speaker, text in history
    ]
168
+
169
  # -----------------------
170
  # Combined workflow
171
+ # Returns: (messages_list, audio_path, error_message)
172
  # -----------------------
173
def process_user_message(user_message):
    """Run one full chat turn: generate text, update memory, attempt TTS.

    Returns ``(messages_list, audio_path, error_message)`` where
    ``messages_list`` is the full conversation in role/content dict form,
    ``audio_path`` is "" when TTS was unavailable, and ``error_message``
    is "" on success.
    """
    reply, gen_err = generate_text_with_gemini(user_message)
    if gen_err:
        # Generation failed: still record the user's turn plus a friendly
        # fallback so the conversation stays coherent in the UI.
        memory.add("user", user_message)
        memory.add("bot", "Sorry — the assistant is temporarily unavailable: " + gen_err)
        return convert_memory_to_messages(memory.history), "", gen_err

    # Record the successful exchange.
    memory.add("user", user_message)
    memory.add("bot", reply)

    # Audio is best-effort: a TTS failure is logged but the text reply
    # is still returned to the caller.
    audio_path, audio_err = generate_audio_elevenlabs_http(reply)
    if audio_err:
        print("Audio generation error:", audio_err)

    return convert_memory_to_messages(memory.history), audio_path or "", audio_err or ""
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
193
 
194
  # -----------------------
195
+ # Gradio UI (Blocks)
196
  # -----------------------
197
  with gr.Blocks() as demo:
198
  gr.Markdown("## 🤖 Gemini + ElevenLabs Chatbot (Text + Audio replies)")
199
+ chatbot = gr.Chatbot() # expects messages as dicts with role/content
200
  with gr.Row():
201
  txt = gr.Textbox(show_label=False, placeholder="Type your message and press Enter")
202
  send_btn = gr.Button("Send")
203
  audio_player = gr.Audio(label="Last reply audio (if available)", visible=False)
204
 
205
  def submit_message(message):
206
+ messages, audio_path, err = process_user_message(message)
207
+ # Return messages list (dicts) for the chatbot; audio shown if available
 
208
  if audio_path:
209
+ return messages, gr.update(value=audio_path, visible=True)
210
  else:
211
+ return messages, gr.update(value=None, visible=False)
212
 
213
  send_btn.click(fn=submit_message, inputs=[txt], outputs=[chatbot, audio_player])
214
  txt.submit(fn=submit_message, inputs=[txt], outputs=[chatbot, audio_player])
215
 
216
  # Launch app
217
  if __name__ == "__main__":
218
+ # debug=True recommended while testing
219
  demo.launch(debug=True)