from huggingface_hub import InferenceClient
# Step 1 from semantic search (import libraries)
from sentence_transformers import SentenceTransformer
import torch
import gradio as gr
import random
client = InferenceClient("mistralai/Mistral-7B-Instruct-v0.2")
# Alternative model: deepseek-ai/DeepSeek-R1-Distill-Qwen-32B
# Open the recipes.txt file in read mode with UTF-8 encoding - step 2 from semantic search
with open("recipes.txt", "r", encoding="utf-8") as file:
    # Read the entire contents of the file and store it in a variable
    recipes_text = file.read()
# Print the text below
print(recipes_text)
# Step 3
def preprocess_text(text):
    # Strip extra whitespace from the beginning and the end of the text
    cleaned_text = text.strip()
    # Split the cleaned_text on every [END] marker that separates recipes
    chunks = cleaned_text.split("[END]")
    # Create an empty list to store cleaned chunks
    cleaned_chunks = []
    # Clean each chunk and add the non-empty ones to the cleaned_chunks list
    for chunk in chunks:
        clean = chunk.strip()
        if len(clean) > 0:
            cleaned_chunks.append(clean)
    # Print cleaned_chunks
    print(cleaned_chunks)
    # Print the length of cleaned_chunks
    print(len(cleaned_chunks))
    # Return the cleaned_chunks
    return cleaned_chunks
# Call the preprocess_text function and store the result in a cleaned_chunks variable
cleaned_chunks = preprocess_text(recipes_text)
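# Illustrative sanity check (sample text is assumed, not taken from recipes.txt):
# preprocess_text expects recipes separated by "[END]" markers, so an input like
# "Pad Thai: ...[END]Veggie Tacos: ...[END]" would return ["Pad Thai: ...", "Veggie Tacos: ..."],
# with any empty trailing chunk stripped out.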
# Step 4
# Load the pre-trained embedding model that converts text to vectors
model = SentenceTransformer('all-MiniLM-L6-v2')

def create_embeddings(text_chunks):
    # Convert each text chunk into a vector embedding and store as a tensor
    chunk_embeddings = model.encode(text_chunks, convert_to_tensor=True)
    # Print the chunk embeddings
    print(chunk_embeddings)
    # Print the shape of chunk_embeddings
    print(chunk_embeddings.shape)
    # Return the chunk_embeddings
    return chunk_embeddings

# Call the create_embeddings function and store the result in a new chunk_embeddings variable
chunk_embeddings = create_embeddings(cleaned_chunks)
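# Note: all-MiniLM-L6-v2 produces 384-dimensional embeddings, so for N recipe chunks the
# shape printed above should be torch.Size([N, 384]).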
# Step 5
# Define a function that finds the most relevant text chunks for a given query, using the
# chunk_embeddings and their corresponding text_chunks
def get_top_chunks(query, chunk_embeddings, text_chunks):
    # Convert the query text into a vector embedding
    query_embedding = model.encode(query, convert_to_tensor=True)
    # Normalize the query embedding to unit length for accurate similarity comparison
    query_embedding_normalized = query_embedding / query_embedding.norm()
    # Normalize all chunk embeddings to unit length for consistent comparison
    chunk_embeddings_normalized = chunk_embeddings / chunk_embeddings.norm(dim=1, keepdim=True)
    # Calculate cosine similarity between query and all chunks using matrix multiplication
    similarities = torch.matmul(chunk_embeddings_normalized, query_embedding_normalized)
    # Print the similarities
    print(similarities)
    # Find the index of the chunk with the highest similarity score (k=1 keeps only the best match)
    top_indices = torch.topk(similarities, k=1).indices
    # Print the top indices
    print(top_indices)
    # Create an empty list to store the most relevant chunks
    top_chunks = []
    # Loop through the top indices and retrieve the corresponding text chunks
    for i in top_indices:
        top_chunks.append(text_chunks[i])
    print(top_chunks)
    # Return the list of most relevant chunks
    return top_chunks
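# Illustrative usage (assumed sample query, left commented out so it does not run on startup):
# top = get_top_chunks("vegan Thai dinner", chunk_embeddings, cleaned_chunks)
# print(top)  # the single best-matching recipe chunk, since k=1 above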
def respond(message, history, checkboxes, checkboxes2, checkboxes3):
    response = ""
    # Combine the message and the selected checkbox values into one search query
    query = message
    if checkboxes:
        query += " " + " ".join(checkboxes)
    if checkboxes2:
        query += " " + " ".join(checkboxes2)
    if checkboxes3:
        query += " " + " ".join(checkboxes3)
    print("FINAL SEMANTIC SEARCH QUERY:", query)

    # Retrieve the most relevant recipe chunks and join them into one string for the prompt
    best_recipes_chunk = get_top_chunks(query, chunk_embeddings, cleaned_chunks)
    print(best_recipes_chunk)
    str_recipes_chunk = "\n".join([str(chunk) for chunk in best_recipes_chunk])
    print("RECIPES!!!!!!: " + str_recipes_chunk)

    messages = [
        {
            "role": "system",
            "content": (
                "You are a helpful recipe assistant. You are only allowed to use the recipes listed below. "
                f"Available Recipes:\n\n{str_recipes_chunk}\n\n(Use ONLY these to answer) "
                "You must NOT invent or guess any new recipes or ingredients. "
                f"The user wants something of {checkboxes} cuisine, and the meal should be for {checkboxes2} with {checkboxes3} dietary restriction. "
                "Don't format the recipe as it is in the available recipes. Format your response like this: "
                "Here is a recipe that matches your needs: [recipe name]. It is [cuisine] cuisine. You can enjoy it for [time of day]. Its main ingredients are [core ingredients]. It fits a [dietary restriction] diet. This dish is [description]. To prepare it: [steps] "
                "Switch up the format a bit to make sure the responses are varied. Make sure you use proper grammar and don't have random capitals. "
                "If the user’s question doesn’t match any of the recipes exactly or semantically, politely say: "
                "‘Sorry, I couldn’t find a match. Can you rephrase or ask about another dish?’"
            )
        }
    ]
    # Add any previous conversation turns, then the current user message (so it isn't duplicated)
    if history:
        messages.extend(history)
    messages.append({"role": "user", "content": message})
    #shloka is cool
    # temperature and top_p control randomness
    response = client.chat_completion(messages, max_tokens=700, temperature=0.2, top_p=0.3)
    return response['choices'][0]['message']['content'].strip()
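# Illustrative call (assumed sample values, left commented out): gr.ChatInterface passes each
# CheckboxGroup's selections as a list of chosen labels, e.g.
# respond("What can I cook tonight?", [], ["Thai"], ["Dinner"], ["Vegan"])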
def vote(data: gr.LikeData):
    if data.liked:
        print("You upvoted this response: " + data.value["value"])
    else:
        print("You downvoted this response: " + data.value["value"])
chat_theme = gr.themes.Monochrome(
    primary_hue="orange",
    secondary_hue="rose",
    neutral_hue="rose").set(
        background_fill_primary="*primary_50",
        input_background_fill="*neutral_100",
        input_border_color_focus="*neutral_100",
        button_secondary_background_fill="*neutral_50",
        button_secondary_background_fill_hover="*neutral_100")
title = """# 🐑 NutriAssist 🌱"""
with gr.Blocks(theme=chat_theme) as demo:
    # chatbot = gr.Chatbot(label="NutriAssist")
    # chatbot.like(vote, None, None)
    with gr.Row(scale=1):
        gr.Image(
            value="NutriAssistBanner.png",
            show_label=False,
            show_share_button=False,
            show_download_button=False)
    with gr.Row(scale=2):
        with gr.Column(scale=1):
            gr.Markdown(title)
            gr.Image(
                "NutriAssistInstructions.png",
                show_label=False,
                show_share_button=False,
                show_download_button=False)
            gr.Image(
                "NutriAssistTeam.png",
                show_label=False,
                show_share_button=False,
                show_download_button=False)
        with gr.Column(scale=2):
            with gr.Row():
                # msg = gr.Textbox(placeholder="Ask about an item (e.g., banana peel)", label="Your Question")
                checkboxes = gr.CheckboxGroup(
                    choices=["Thai", "Chinese", "Indian", "Mexican", "Italian"],
                    label="Type of cuisine"
                )
            with gr.Row():
                checkboxes2 = gr.CheckboxGroup(
                    choices=["Breakfast", "Lunch", "Dinner", "Snack"],
                    label="Time of day"
                )
            with gr.Row():
                checkboxes3 = gr.CheckboxGroup(
                    choices=["Vegetarian", "Vegan", "Dairy Free", "Gluten Free", "Nut Free"],
                    label="Dietary Restriction"
                )
            gr.ChatInterface(respond, additional_inputs=[checkboxes, checkboxes2, checkboxes3], type="messages")
            # send_btn = gr.Button("Send")
            # history_state = gr.State([])
            # send_btn.click(
            #     fn=respond,
            #     # additional_inputs=[checkboxes, checkboxes2, checkboxes3],
            #     outputs=[chatbot]
            # )

demo.launch()
#chatbot = gr.ChatInterface(respond, type="messages")
#chatbot.launch()