# barkbites / app.py
import gradio as gr
import random
from huggingface_hub import InferenceClient
from sentence_transformers import SentenceTransformer
import torch
import glob
import re
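
# Client for the hosted Qwen2.5-72B-Instruct chat model on the Hugging Face Inference API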
client = InferenceClient("Qwen/Qwen2.5-72B-Instruct")
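
# Chat handler: pick the knowledge base that best matches the question, retrieve the most
# similar chunks, short-circuit on emergency keywords, and ask the LLM to answer from the context.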
def respond(message, history):
    global brand_chunks, safe_chunks, health_chunks, nutrition_chunks, all_chunks
    lower_msg = message.lower()
    # Route the question to the most relevant knowledge base
    if any(word in lower_msg for word in ["unsafe", "toxic", "harmful", "not safe", "poison"]):
        search_chunks = safe_chunks
        search_embeddings = safe_embeddings
    elif any(word in lower_msg for word in ["nutrition", "diet", "nutrient", "protein", "calories", "feed"]):
        search_chunks = nutrition_chunks
        search_embeddings = nutrition_embeddings
    elif any(word in lower_msg for word in ["brand", "brands", "dog food brand"]):
        search_chunks = brand_chunks
        search_embeddings = brand_embeddings
    elif any(word in lower_msg for word in ["health risk", "disease", "illness"]):
        search_chunks = health_chunks
        search_embeddings = health_embeddings
    else:
        search_chunks = all_chunks
        search_embeddings = all_embeddings
    print("DEBUG: respond() called with:", message)
    top_results = get_top_chunks(message, search_embeddings, search_chunks)
    print("These are top results", top_results)
    urgent_keywords = [
        "puke", "vomit", "throw up", "seizure", "bleeding", "choking",
        "can't breathe", "emergency", "poison", "collapsed", "trauma", "injury"
    ]
    if any(word in lower_msg for word in urgent_keywords):
        return ("This sounds like a possible medical emergency. "
                "Please contact your veterinarian or an emergency animal hospital immediately. "
                "Do not rely solely on online advice.")
    # ✅ Format the retrieved context for the LLM
    if top_results:
        formatted_info = "\n".join(f"- {chunk}" for chunk in top_results)
        system_prompt = (
            "You are a friendly chatbot that gives advice about nutrition for dogs.\n"
            f"Use the provided information from multiple sources:\n{formatted_info}\n"
            "Respond in 3-5 complete sentences and apply common sense based on the user's question. "
            "If the user asks about something you were not trained on, "
            "give a cautious answer and suggest checking with a vet."
        )
    else:
        system_prompt = (
            "You are a friendly chatbot that gives advice about what dogs can eat.\n"
            "If the user asks about a food not in your database, respond cautiously and suggest checking with a vet."
        )
    messages = [{"role": "system", "content": system_prompt}]
    if history:
        messages.extend(history)
    messages.append({"role": "user", "content": message})
    response = client.chat_completion(messages, max_tokens=500, temperature=0.2)
    return response['choices'][0]['message']['content'].strip()
print("hello world")
#chatbot = gr.ChatInterface(respond, type="messages", title = "LLM Chatbox", theme = "gradio/soft")
# declaring chatbot so that user can interact and see their conversation history and send new messages
# ===== LOAD & PROCESS YOUR NEW CONTENT =====
#with open("toxic_foods_for_dogs.txt", "r", encoding="utf-8") as file:
# Read the entire contents of the file and store it in a variable
# toxic_food_text = file.read()
#all_texts = []
#for filepath in glob.glob("data/*.txt"):
# with open(filepath, "r", encoding="utf-8") as file:
# all_texts.append(file.read())
#combined_text = "\n".join(all_texts)
with open("food_brand_options.txt", "r", encoding="utf-8") as f:
brand_options = f.read()
with open("foods_not_safe.txt", "r", encoding="utf-8") as file:
not_safe = file.read()
with open("health_risks.txt", "r", encoding="utf-8") as fi:
health_risks = fi.read()
with open("nutrition.txt", "r", encoding="utf-8") as fil:
nutrition = fil.read()
#def preprocess_text(text):
# cleaned_text = text.strip()
# chunks = cleaned_text.split("\n")
# cleaned_chunks = [chunk.strip() for chunk in chunks if chunk.strip()]
# print(cleaned_chunks)
# print(len(cleaned_chunks))
# return cleaned_chunks
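# Split a document into overlapping word windows so retrieval can match smaller pieces of text.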
def preprocess_text(text, chunk_size=200, overlap=50):
    words = text.strip().split()
    cleaned_chunks = []
    for i in range(0, len(words), chunk_size - overlap):
        chunk_words = words[i:i + chunk_size]
        chunk_text = " ".join(chunk_words).strip()
        if chunk_text:
            cleaned_chunks.append(chunk_text)
    print(f"Total chunks created: {len(cleaned_chunks)}")
    return cleaned_chunks
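
# Split the nutrition file into one chunk per breed so retrieval returns breed-specific guidance.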
def split_by_breed(text):
    breeds = [
        "Beagle", "Bulldog", "Rottweiler", "Siberian Husky",
        "French Bulldog", "Labrador Retriever", "German Shepherd", "Poodle"
    ]
    pattern = r"(?:Breed:\s*)?(" + "|".join(breeds) + r")"
    sections = re.split(pattern, text)
    chunks = []
    for i in range(1, len(sections), 2):
        breed_name = sections[i].strip()
        breed_info = sections[i+1].strip() if i+1 < len(sections) else ""
        if breed_info:
            chunks.append(f"Breed: {breed_name}\n{breed_info}")
    print(f"Total chunks created: {len(chunks)}")
    return chunks
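# Sentence-embedding model used to vectorize both the knowledge-base chunks and incoming queries.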
model = SentenceTransformer('all-MiniLM-L6-v2')

def create_embeddings(text_chunks):
    embeddings = model.encode(text_chunks, convert_to_tensor=True)
    if embeddings.ndim == 1:
        embeddings = embeddings.unsqueeze(0)
    return embeddings
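
# Chunk each source file and embed the chunks once at startup; respond() decides which set to search.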
brand_chunks = preprocess_text(brand_options)
safe_chunks = preprocess_text(not_safe)
health_chunks = preprocess_text(health_risks)
nutrition_chunks = split_by_breed(nutrition)
all_chunks = brand_chunks + safe_chunks + health_chunks + nutrition_chunks
brand_embeddings = create_embeddings(brand_chunks)
safe_embeddings = create_embeddings(safe_chunks)
health_embeddings = create_embeddings(health_chunks)
nutrition_embeddings = create_embeddings(nutrition_chunks)
all_embeddings = create_embeddings(all_chunks)
# Define a function to find the most relevant text chunks for a given query, chunk_embeddings, and text_chunks
def get_top_chunks(query, chunk_embeddings, text_chunks, top_k=7, similarity_threshold=0.4):
    if not text_chunks or chunk_embeddings is None or chunk_embeddings.size(0) == 0:
        return []
    # Convert the query text into a vector embedding
    query_embedding = model.encode(query, convert_to_tensor=True)
    # Normalize the query embedding to unit length for accurate cosine-similarity comparison
    query_embedding_normalized = query_embedding / query_embedding.norm()
    # Normalize the chunk embeddings row-wise (handle a 1-D tensor separately)
    if chunk_embeddings.ndim == 1:
        chunk_embeddings_normalized = chunk_embeddings / chunk_embeddings.norm()
    else:
        chunk_embeddings_normalized = chunk_embeddings / chunk_embeddings.norm(dim=1, keepdim=True)
    # Cosine similarity between the query and every chunk via matrix multiplication
    similarities = torch.matmul(chunk_embeddings_normalized, query_embedding_normalized)
    print(similarities)
    # Indices of the top_k chunks with the highest similarity scores
    top_indices = torch.topk(similarities, k=min(top_k, len(text_chunks))).indices
    candidate_chunks = [(i.item(), similarities[i].item()) for i in top_indices]
    print(top_indices)
    # Keep only candidates above the similarity threshold
    filtered_chunks = [(idx, score) for idx, score in candidate_chunks if score >= similarity_threshold]

    # Re-rank the remaining candidates by simple keyword overlap with the query
    def keyword_score(chunk_text, query_text):
        q_words = set(query_text.lower().split())
        c_words = set(chunk_text.lower().split())
        return len(q_words & c_words)

    reranked = sorted(
        filtered_chunks,
        key=lambda x: keyword_score(text_chunks[x[0]], query),
        reverse=True
    )
    final_chunks = [text_chunks[idx] for idx, _ in reranked]
    return final_chunks
# Create an empty list to store the most relevant chunks
# top_chunks = []
# Loop through the top indices and retrieve the corresponding text chunks
# for i in top_indices:
# relevant_info = brand_chunks[i]
# top_chunks.append(relevant_info)
# Return the list of most relevant chunks
# return top_chunks
# theme
custom_theme = gr.themes.Soft(
    primary_hue="purple",
    secondary_hue="purple",
    neutral_hue="purple",
    spacing_size="lg",
    radius_size="lg",
    text_size="lg",
    font=[gr.themes.GoogleFont("Intel One Mono"), "serif"],
)
about_text = "## About this bot\nOur bot will tell you how to care for your dog's nutrition. Use the chat box on the right to try it out!"
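
# Layout: logo banner on top; dog-size dropdown and mascot image on the left, chat interface on the right.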
with gr.Blocks(theme=custom_theme) as chatbot:
    with gr.Row(scale=1):
        gr.Image(
            value="BarkBites.png",
            show_label=False,
            show_share_button=False,
            show_download_button=False
        )
    with gr.Row(scale=3):
        with gr.Column(scale=1):
            with gr.Row():
                level = gr.Dropdown(
                    choices=["Small", "Medium", "Large"],
                    label="Dog Size",
                    info="What is your dog's size?",
                    interactive=True
                )
            gr.Image(
                value="BarkBot.png",
                show_label=False,
                show_share_button=False,
                show_download_button=False
            )
        with gr.Column(scale=4):
            gr.ChatInterface(
                fn=respond,
                type="messages",
                examples=["What should I feed my pet husky?", "Give me a meal plan for my labrador.", "Help! My dog is puking everywhere!"],
                title="BarkBites",
                theme="gradio/soft",
                description="Are you worried that something isn’t safe to eat for your dog? Or that they aren’t getting enough nutrition? Look no further, BarkBites is here to help!"
            )

chatbot.launch()