develops20 commited on
Commit
266eee8
·
verified ·
1 Parent(s): 6953335

Created Sample app.py

Browse files
Files changed (1) hide show
  1. app.py +43 -0
app.py ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os
import tempfile

import gradio as gr
from gtts import gTTS
from transformers import pipeline
5
+
6
+ # Initialize Whisper for speech-to-text
7
+ whisper = pipeline("automatic-speech-recognition", model="openai/whisper-tiny")
8
+
9
+ # Hardcoded knowledge base for Q&A
10
+ knowledge_base = {
11
+ "what cars are available": "We have Toyota Camry, Honda Civic, and Ford Mustang.",
12
+ "price of camry": "The Toyota Camry starts at $25,000."
13
+ }
14
+
15
+ def transcribe(audio):
16
+ return whisper(audio)["text"]
17
+
18
+ def text_to_speech(text):
19
+ tts = gTTS(text, lang="en")
20
+ tts.save("response.mp3")
21
+ return "response.mp3"
22
+
23
+ def answer_question(text):
24
+ for key in knowledge_base:
25
+ if key in text.lower():
26
+ return knowledge_base[key]
27
+ return "Sorry, I can help with car availability and prices. Try again!"
28
+
29
+ def process_audio(audio):
30
+ text = transcribe(audio)
31
+ response = answer_question(text)
32
+ audio_response = text_to_speech(response)
33
+ return response, audio_response
34
+
35
+ # Gradio interface
36
+ with gr.Blocks() as demo:
37
+ gr.Markdown("# AI Support Agent: Car Dealership")
38
+ audio_input = gr.Audio(source="microphone", label="Speak to the Agent")
39
+ text_output = gr.Textbox(label="Agent Response")
40
+ audio_output = gr.Audio(label="Listen to Response")
41
+ gr.Interface(fn=process_audio, inputs=audio_input, outputs=[text_output, audio_output]).launch()
42
+
43
+ demo.launch()