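"""Tesco FAQ chatbot.

Answers questions from a FAISS-indexed FAQ CSV when a close match exists,
otherwise falls back to GPT. Accepts text or microphone input, reads answers
aloud via gTTS, and is served through a Gradio interface.
"""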
import openai
import pandas as pd
import faiss
import numpy as np
from sentence_transformers import SentenceTransformer
import gradio as gr
from gtts import gTTS
import tempfile
import os
import speech_recognition as sr
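# Read the OpenAI API key from the environment; set OPENAI_API_KEY before launching.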
openai.api_key = os.getenv("OPENAI_API_KEY")
# Load embedding model
embed_model = SentenceTransformer("all-MiniLM-L6-v2")
# Load FAQ CSV
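# The CSV must provide "Question" and "Answer" columns, one FAQ pair per row, e.g.
# (illustrative row, not from the real file):
#   Question,Answer
#   "What are Tesco's opening hours?","Most stores open 8am to 10pm."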
faq_df = pd.read_csv("fact-base-tesco.csv")
questions = faq_df["Question"].tolist()
answers = faq_df["Answer"].tolist()
# Create FAISS index
faq_embeddings = embed_model.encode(questions, convert_to_numpy=True)
index = faiss.IndexFlatL2(faq_embeddings.shape[1])
index.add(faq_embeddings)
def ask_faq(query):
    # Embed the query and retrieve the nearest FAQ question.
    query_vec = embed_model.encode([query], convert_to_numpy=True)
    D, I = index.search(query_vec, k=1)
    distance = D[0][0]  # squared L2 distance: smaller means a closer match
    if distance < 0.5:  # heuristic "close enough" threshold
        # Close match in the FAQ: return its stored answer.
        response = answers[I[0][0]]
    else:
        # No good FAQ match, so fall back to GPT.
        try:
            completion = openai.ChatCompletion.create(
                model="gpt-3.5-turbo",
                messages=[{"role": "user", "content": query}],
                max_tokens=200,
            )
            response = completion.choices[0].message.content
        except Exception:
            response = "Sorry, I couldn't generate a response right now."
    # Synthesize the answer to speech and return both the text and the audio path.
    tts = gTTS(response)
    tts_fp = tempfile.NamedTemporaryFile(delete=False, suffix=".mp3")
    tts.save(tts_fp.name)
    return response, tts_fp.name
def process_input(text, audio):
    # If a recording was provided, transcribe it and let it override the text box.
    if audio is not None:
        recognizer = sr.Recognizer()
        with sr.AudioFile(audio) as source:
            audio_data = recognizer.record(source)
        try:
            text = recognizer.recognize_google(audio_data)
        except (sr.UnknownValueError, sr.RequestError):
            return "Sorry, could not understand audio", None
    if text:
        return ask_faq(text)
    return "Please provide a question.", None
with gr.Blocks() as demo:
    gr.Markdown("# πŸŽ™οΈ FAQ + GPT Chatbot with Voice")
    gr.Markdown("Ask via text or mic. Falls back to GPT when no FAQ match is found.")
    text_input = gr.Textbox(label="Type your question or click mic below ⬇")
    mic_input = gr.Audio(type="filepath", label="🎀 Speak your question")
    submit_btn = gr.Button("Ask")
    text_output = gr.Textbox(label="Answer")
    audio_output = gr.Audio(label="πŸ”Š Voice Answer", autoplay=True)
    submit_btn.click(fn=process_input, inputs=[text_input, mic_input], outputs=[text_output, audio_output])

demo.launch()
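# To run locally: install the imported dependencies, export OPENAI_API_KEY,
# then run `python app.py` and open the local URL Gradio prints.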