Spaces:
Runtime error
Runtime error
| import whisper | |
| import numpy as np | |
| from groq import Groq | |
| import os | |
| from gtts import gTTS | |
| import gradio as gr | |
# Load the Whisper speech-to-text model once at import time ("base" size).
# NOTE(review): loading at module import downloads/initializes the model on
# startup — confirm this is acceptable for the deployment's cold-start time.
model = whisper.load_model("base")
def transcribe_audio(audio_file):
    """Transcribe *audio_file* with the module-level Whisper model.

    Parameters
    ----------
    audio_file : a path (or input Whisper accepts) to the recorded audio.

    Returns
    -------
    str : the transcribed text.
    """
    transcription = model.transcribe(audio_file)
    return transcription["text"]
def generate_response(transcription):
    """Send *transcription* to the Groq chat API and return the reply text.

    NOTE(review): reads GROQ_API_KEY from the environment; if unset, the
    client call fails at runtime — confirm the key is configured in the
    deployment environment.
    """
    groq_client = Groq(api_key=os.environ.get("GROQ_API_KEY"))
    completion = groq_client.chat.completions.create(
        messages=[{"role": "user", "content": transcription}],
        model="llama3-groq-8b-8192-tool-use-preview",
    )
    return completion.choices[0].message.content
def text_to_speech(text):
    """Synthesize *text* to speech with gTTS and return the MP3 file path.

    Fix: the original saved to a fixed "output.mp3" in the current working
    directory, so concurrent requests overwrote each other's audio and the
    app required a writable CWD. A unique temporary file avoids both issues
    while still returning a path string, as callers expect.

    Parameters
    ----------
    text : str — the text to speak.

    Returns
    -------
    str : path to the generated MP3 file.
    """
    import tempfile

    tts = gTTS(text)
    # Create a uniquely named file, close the handle, then let gTTS write it.
    with tempfile.NamedTemporaryFile(suffix=".mp3", delete=False) as fp:
        output_path = fp.name
    tts.save(output_path)
    return output_path
def chatbot_interaction(audio):
    """Run the full voice-chat pipeline for one turn.

    Steps: transcribe the user's audio, get an LLM reply for the text,
    synthesize the reply to speech, and return the output audio path.
    """
    user_text = transcribe_audio(audio)
    print(f"Transcription: {user_text}")
    llm_reply = generate_response(user_text)
    print(f"LLM Response: {llm_reply}")
    return text_to_speech(llm_reply)
def chatbot_ui():
    """Build and launch the Gradio interface for the voice chatbot.

    Fix: `inputs="audio"` uses Gradio's default Audio type ("numpy"), which
    passes a (sample_rate, data) tuple to the callback — but
    transcribe_audio() hands its argument to whisper's transcribe(), which
    expects a file path. Requesting type="filepath" makes the callback
    receive a path, matching what the pipeline needs.
    """
    gr.Interface(
        fn=chatbot_interaction,
        inputs=gr.Audio(type="filepath"),
        outputs="audio",
    ).launch()


# Run the chatbot UI only when executed as a script.
if __name__ == "__main__":
    chatbot_ui()