raz-135 committed on
Commit
cbc6a41
·
verified ·
1 Parent(s): 4f5e7bd

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +18 -23
app.py CHANGED
@@ -5,21 +5,18 @@ from gtts import gTTS
5
  from groq import Groq
6
 
7
  GROQ_API_KEY = os.getenv("GORQ_API_KEY")
8
- client = Groq(api_key= GROQ_API_KEY)
9
 
10
  # Load Whisper model
11
  model = whisper.load_model("base")
12
 
13
- def chatbot(audio=None, text_input=""):
14
- if audio is None and not text_input.strip():
15
- return "No input detected. Please provide either an audio or text input.", None
16
 
17
- if audio:
18
- # Transcribe the audio input using Whisper
19
- transcription = model.transcribe(audio)
20
- user_input = transcription.get("text", "")
21
- else:
22
- user_input = text_input.strip()
23
 
24
  if not user_input:
25
  return "Could not understand input.", None
@@ -42,27 +39,25 @@ def chatbot(audio=None, text_input=""):
42
 
43
  # Create a custom interface
44
  def build_interface():
45
- with gr.Blocks() as demo:
46
  gr.Markdown(
47
  """
48
  <h1 style="text-align: center; color: #000000;">VoiceToVoice Chatbot</h1>
49
  """
50
  )
51
  with gr.Row():
52
- with gr.Column(scale=1):
53
- audio_input = gr.Audio(type="filepath", label="Record Your Voice")
54
- with gr.Column(scale=2):
55
- text_input = gr.Textbox(label="Enter your prompt")
56
- chatbot_output_text = gr.Textbox(label="Chatbot Response")
57
- chatbot_output_audio = gr.Audio(label="Audio Response")
58
 
59
- submit_button = gr.Button("Submit")
 
60
 
61
- submit_button.click(
62
- fn=chatbot,
63
- inputs=[audio_input, text_input],
64
- outputs=[chatbot_output_text, chatbot_output_audio]
65
- )
66
 
67
  return demo
68
 
 
5
  from groq import Groq
6
 
7
  GROQ_API_KEY = os.getenv("GORQ_API_KEY")
8
+ client = Groq(api_key=GROQ_API_KEY)
9
 
10
  # Load Whisper model
11
  model = whisper.load_model("base")
12
 
13
+ def chatbot(audio=None):
14
+ if audio is None:
15
+ return "No input detected. Please provide an audio input.", None
16
 
17
+ # Transcribe the audio input using Whisper
18
+ transcription = model.transcribe(audio)
19
+ user_input = transcription.get("text", "")
 
 
 
20
 
21
  if not user_input:
22
  return "Could not understand input.", None
 
39
 
40
  # Create a custom interface
41
  def build_interface():
42
+ with gr.Blocks(css=".gradio-container {background-color: #F5F5F5;}") as demo:
43
  gr.Markdown(
44
  """
45
  <h1 style="text-align: center; color: #000000;">VoiceToVoice Chatbot</h1>
46
  """
47
  )
48
  with gr.Row():
49
+ audio_input = gr.Audio(type="filepath", label="Record Your Voice")
50
+ chatbot_output_text = gr.Textbox(label="Chatbot Response")
51
+ chatbot_output_audio = gr.Audio(label="Audio Response")
 
 
 
52
 
53
+ # Add a clear button
54
+ clear_button = gr.Button("Clear")
55
 
56
+ # Handle the clear button click
57
+ clear_button.click(lambda: None, None, [audio_input, chatbot_output_text, chatbot_output_audio])
58
+
59
+ # Make it real-time
60
+ audio_input.change(fn=chatbot, inputs=audio_input, outputs=[chatbot_output_text, chatbot_output_audio])
61
 
62
  return demo
63