# app.py — retail store FAQ chatbot (sentence-transformers + Gradio)
# Provenance: uploaded by arssite, commit d2f5de3 ("Update app.py", verified).
from sentence_transformers import SentenceTransformer, util
import gradio as gr
import torch
# Load a pre-trained sentence-transformer model.
# all-MiniLM-L6-v2 is a small, fast model that maps sentences to 384-dim vectors.
model = SentenceTransformer('all-MiniLM-L6-v2')
# Define your dataset: each entry pairs a canned user question with the
# canned bot reply that should be served when an incoming question matches it.
conversations = [
{"user": "What are your store hours?", "bot": "Our store is open from 9 AM to 9 PM, Monday to Saturday."},
{"user": "Do you sell laptops?", "bot": "Yes, we offer a range of laptops from brands like Dell, HP, and Lenovo."},
{"user": "What is the price of the iPhone 14?", "bot": "The iPhone 14 starts at $799."},
{"user": "Can I return a product I bought last week?", "bot": "You can return products within 30 days of purchase with a valid receipt."},
{"user": "Do you have any discounts available?", "bot": "Yes, we currently have a 10% discount on selected electronics."},
{"user": "What is your exchange policy?", "bot": "You can exchange items within 14 days of purchase, as long as they are in original condition with a receipt."},
{"user": "How can I track my order?", "bot": "You can track your order by logging into your account and clicking 'Track Order' under 'My Orders'."},
{"user": "Do you offer home delivery?", "bot": "Yes, we offer home delivery for most items. Delivery charges may apply based on your location."},
{"user": "Can I cancel my order?", "bot": "Yes, you can cancel your order within 24 hours of placing it by going to your account and selecting the cancel option."},
{"user": "Do you have any new arrivals in smartphones?", "bot": "Yes, we have the latest models from Apple, Samsung, and OnePlus available in store and online."}
]
# Precompute embeddings for the dataset once at startup so each incoming
# request only has to embed the user's question, not the whole corpus.
# conversation_embeddings is a tensor of shape (len(conversations), 384);
# its row order matches `conversations`, which chatbot_response relies on.
conversation_texts = [conv['user'] for conv in conversations]
conversation_embeddings = model.encode(conversation_texts, convert_to_tensor=True)
def chatbot_response(user_input):
    """Return the canned reply whose stored question best matches *user_input*.

    Embeds the incoming question with the shared sentence-transformer model,
    scores it against the precomputed question embeddings with cosine
    similarity, and returns the 'bot' text of the closest match.

    Parameters
    ----------
    user_input : str
        Free-text question typed by the user.

    Returns
    -------
    str
        The 'bot' reply of the best-matching predefined conversation.
    """
    # Embed the input in the same vector space as the precomputed dataset.
    user_embedding = model.encode(user_input, convert_to_tensor=True)
    # Cosine similarity against every stored question; result has shape
    # (1, len(conversations)), row order matching `conversations`.
    similarities = util.pytorch_cos_sim(user_embedding, conversation_embeddings)
    # torch.argmax returns a 0-dim tensor; convert explicitly to a plain int
    # rather than relying on the tensor's __index__ for list indexing.
    best_match_idx = int(torch.argmax(similarities))
    return conversations[best_match_idx]['bot']
# Assemble the Gradio web UI: one text box in, one text box out, with a few
# clickable example questions drawn from the dataset.
example_questions = [
    ["What are your store hours?"],
    ["Do you sell laptops?"],
    ["What is the price of the iPhone 14?"],
    ["Can I return a product I bought last week?"],
    ["Do you have any discounts available?"],
]

iface = gr.Interface(
    fn=chatbot_response,
    inputs="text",
    outputs="text",
    title="Retail Store Chatbot",
    description="Ask me anything about our retail store! I can provide information about store hours, product availability, return policies, and more.",
    examples=example_questions,
)

# Start the web server and serve the chatbot.
iface.launch()