Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
|
@@ -1,70 +1,45 @@
|
|
| 1 |
-
import
|
| 2 |
-
import speech_recognition as sr
|
| 3 |
-
import torch
|
| 4 |
import os
|
|
|
|
|
|
|
| 5 |
from transformers import pipeline
|
| 6 |
from gtts import gTTS
|
| 7 |
-
import time
|
| 8 |
|
| 9 |
-
|
|
|
|
|
|
|
|
|
|
| 10 |
device = "cuda" if torch.cuda.is_available() else "cpu"
|
| 11 |
speech_to_text = pipeline("automatic-speech-recognition", model="openai/whisper-base", device=0 if device == "cuda" else -1)
|
| 12 |
|
| 13 |
-
#
|
| 14 |
-
|
| 15 |
-
|
| 16 |
# Function to Play Audio Prompt
def play_audio(text):
    """Speak `text` aloud: synthesize an MP3 with gTTS, then shell out to play it.

    Args:
        text: English text to synthesize.

    Side effects: writes "prompt.mp3" to the working directory and blocks ~2s.
    """
    tts = gTTS(text=text, lang='en')
    filename = "prompt.mp3"
    tts.save(filename)
    # NOTE(review): this line was garbled in the diff view ("(unknown)" placeholders);
    # reconstructed as interpolating the saved filename — confirm against the original.
    # mpg321 covers Linux; `start` covers Windows (os.name == "nt").
    os.system(f"mpg321 {filename}" if os.name != "nt" else f"start {filename}")  # Works on Linux & Windows
    time.sleep(2)  # Give some time for the speech to play
|
| 23 |
|
| 24 |
-
|
| 25 |
-
def
|
| 26 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 27 |
try:
|
| 28 |
-
text = speech_to_text(
|
| 29 |
-
return
|
| 30 |
except Exception as e:
|
| 31 |
-
return
|
| 32 |
-
|
| 33 |
# Function to Capture Email
def capture_email(audio):
    """Prompt the user for their email, then transcribe the recorded answer.

    Returns a user-facing status string (captured email or error message).
    """
    play_audio("Please provide your email address")
    try:
        transcript = speech_to_text(audio)["text"]
    except Exception as err:
        return f"❌ Error: {str(err)}"
    return f"📧 Email Captured: {transcript}"
|
| 41 |
-
|
| 42 |
# Gradio Interface
# NOTE(review): reconstructed from a garbled diff view — the widget nesting under
# gr.Column() is inferred from reading order; confirm against the original file.
def gradio_interface():
    """Build the AI Dining Assistant UI: voice name capture (step 1) and voice email capture (step 2)."""
    with gr.Blocks() as demo:
        gr.Markdown("<h1 style='text-align: center;'>🍽️ AI Dining Assistant</h1>")

        with gr.Column():
            gr.Image("/mnt/data/image.png", elem_id="header_image", show_label=False)  # Upload the image you provided
            gr.Markdown("<p style='text-align: center;'>Press the mic button to start...</p>")

            gr.Markdown("#### 🎤 Step 1: Tell me your name")
            mic_button = gr.Button("🎙️ Tap to Speak Your Name")
            # Hidden audio widget: the button click feeds its filepath to the handler.
            audio_input_name = gr.Audio(type="filepath", visible=False)
            name_output = gr.Textbox(label="Your Name:")
            email_prompt_output = gr.Textbox(label="Next Step:", interactive=False)

            # capture_name is defined elsewhere in this file (truncated in this view).
            mic_button.click(capture_name, inputs=audio_input_name, outputs=[name_output, email_prompt_output])

            gr.Markdown("#### 🎤 Step 2: Provide your email")
            mic_button_email = gr.Button("🎙️ Tap to Speak Your Email")
            audio_input_email = gr.Audio(type="filepath", visible=False)
            email_output = gr.Textbox(label="Your Email:")

            mic_button_email.click(capture_email, inputs=audio_input_email, outputs=email_output)

    return demo
|
| 67 |
|
| 68 |
-
|
| 69 |
-
|
| 70 |
-
demo.launch(debug=True)
|
|
|
|
| 1 |
import os
import tempfile

import speech_recognition as sr
import torch
from flask import Flask, render_template, request, jsonify
from gtts import gTTS
from transformers import pipeline
|
|
|
|
| 7 |
|
| 8 |
app = Flask(__name__)
recognizer = sr.Recognizer()  # NOTE(review): not referenced by any route visible here — possibly dead

# Load Hugging Face Whisper Model
# Pipeline convention: device=0 selects the first GPU, -1 forces CPU.
device = "cuda" if torch.cuda.is_available() else "cpu"
speech_to_text = pipeline("automatic-speech-recognition", model="openai/whisper-base", device=0 if device == "cuda" else -1)
|
| 14 |
|
| 15 |
# Function to convert text to speech
def generate_audio(text, filename="static/output.mp3"):
    """Synthesize `text` to an MP3 file via gTTS.

    Args:
        text: English text to speak.
        filename: Output path (default "static/output.mp3").
    """
    # gTTS.save raises if the parent directory doesn't exist (e.g. fresh
    # checkout without static/) — create it up front.
    os.makedirs(os.path.dirname(filename) or ".", exist_ok=True)
    tts = gTTS(text=text, lang="en")
    tts.save(filename)
|
|
|
|
|
|
|
| 19 |
|
| 20 |
@app.route("/")
def home():
    """Serve the main page template."""
    return render_template("index.html")
|
| 23 |
+
|
| 24 |
@app.route("/get_prompt")
def get_prompt():
    """Synthesize the welcome prompt and hand its URL back to the client."""
    welcome_file = "static/welcome.mp3"
    generate_audio("Welcome to Biryani Hub. Please tell me your name.", welcome_file)
    return jsonify({"audio_url": "/static/welcome.mp3"})
|
| 28 |
+
|
| 29 |
@app.route("/process_audio", methods=["POST"])
def process_audio():
    """Transcribe an uploaded audio clip with the Whisper pipeline.

    Expects a multipart form field named "audio". Returns {"text": ...} on
    success, {"error": ...} with status 400 (no file) or 500 (transcription
    failure) otherwise.
    """
    if "audio" not in request.files:
        return jsonify({"error": "No audio file"}), 400

    audio_file = request.files["audio"]
    # Original code saved every upload to the fixed path "static/temp.wav",
    # so concurrent requests clobbered each other and the file was never
    # removed. Use a unique temp file and always clean it up.
    fd, audio_path = tempfile.mkstemp(suffix=".wav")
    os.close(fd)
    try:
        audio_file.save(audio_path)
        text = speech_to_text(audio_path)["text"]
        return jsonify({"text": text})
    except Exception as e:
        # Route boundary: surface the failure to the client as JSON.
        return jsonify({"error": str(e)}), 500
    finally:
        os.remove(audio_path)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 43 |
|
| 44 |
# Entry point: bind to all interfaces on port 7860 — presumably the port the
# hosting platform (Hugging Face Spaces) exposes; debug=True is dev-only.
if __name__ == "__main__":
    app.run(host="0.0.0.0", port=7860, debug=True)
|
|
|