EngrGullu committed on
Commit
26d136d
·
verified ·
1 Parent(s): 8de0267

Create app.py

Files changed (1)
  1. app.py  ADDED  +68 -0
@@ -0,0 +1,68 @@
+ import os
+ import whisper
+ from gtts import gTTS
+ from groq import Groq
+ import gradio as gr
+
+ # Initialize Whisper model
+ model = whisper.load_model("base")
+
+ # Initialize Groq API (set your GROQ_API_KEY in the environment; never hardcode the key)
+ client = Groq(api_key=os.environ.get("GROQ_API_KEY"))
+
+ # Step 1: Transcribe Audio (Speech-to-Text using Whisper)
+ def transcribe_audio(audio_path):
+     result = model.transcribe(audio_path)
+     return result['text']
+
+ # Step 2: Interact with LLM (Groq API)
+ def interact_with_llm(user_input):
+     chat_completion = client.chat.completions.create(
+         messages=[
+             {
+                 "role": "user",
+                 "content": user_input,
+             }
+         ],
+         model="llama3-8b-8192",
+         stream=False,
+     )
+     response = chat_completion.choices[0].message.content
+     return response
+
+ # Step 3: Convert Text to Speech using gTTS
+ def text_to_speech(text):
+     tts = gTTS(text, lang="en")
+     audio_file = "response.mp3"
+     tts.save(audio_file)
+     return audio_file
+
+ # Combined workflow: Transcribe -> Interact with LLM -> Convert to Speech
+ def chatbot(audio):
+     # Step 1: Transcribe Audio to Text
+     transcription = transcribe_audio(audio)
+
+     # Step 2: Get LLM response based on transcription
+     llm_response = interact_with_llm(transcription)
+
+     # Step 3: Convert LLM response to audio (text-to-speech)
+     audio_output = text_to_speech(llm_response)
+
+     return transcription, llm_response, audio_output
+
+ # Gradio Interface setup
+ interface = gr.Interface(
+     fn=chatbot,
+     inputs=gr.Audio(type="filepath", label="Speak into the microphone"),
+     outputs=[
+         "text",  # Transcription output
+         "text",  # LLM response output
+         gr.Audio(type="filepath", label="Response Audio")  # Final audio output
+     ],
+     live=True,
+     title="Real-Time Voice-to-Voice Chatbot",
+     description="Talk to an AI in real-time! Speak into the microphone, get a response, and hear it back.",
+ )
+
+ # Launch Gradio app
+ interface.launch()