from huggingface_hub import InferenceClient
# Step 1 from the semantic search
from sentence_transformers import SentenceTransformer
import torch
import gradio as gr

# Client used to make requests to the model to generate responses
client = InferenceClient('Qwen/Qwen2.5-72B-Instruct')
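# Note: InferenceClient picks up a saved Hugging Face token (e.g. the HF_TOKEN
# environment variable) automatically; hosted inference for a model this large
# typically requires one.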
# ============================================
# Step 2 from the semantic search
# Open the Joy_Scout_info.txt file in read mode with UTF-8 encoding
with open("Joy_Scout_info.txt", "r", encoding="utf-8") as file:
    # Read the entire contents of the file and store it in a variable
    joyscout_info_text = file.read()

# Print the text that was read in
print(joyscout_info_text)
# =============================================
# Step 3: Split the raw text into cleaned chunks
def preprocess_text(text):
    # Strip extra whitespace from the beginning and the end of the text
    cleaned_text = text.strip()
    # Split the cleaned_text on every newline character (\n)
    chunks = cleaned_text.split("\n")
    # Create an empty list to store cleaned chunks
    cleaned_chunks = []
    # Strip each chunk and keep only the non-empty ones
    for chunk in chunks:
        cleaned_chunk = chunk.strip()
        if cleaned_chunk:
            cleaned_chunks.append(cleaned_chunk)
    # Print cleaned_chunks and its length
    print(cleaned_chunks)
    print(len(cleaned_chunks))
    # Return the cleaned_chunks
    return cleaned_chunks
# Call the preprocess_text function and store the result in a cleaned_chunks variable
cleaned_chunks = preprocess_text(joyscout_info_text)
# Load the pre-trained embedding model that converts text to vectors
model = SentenceTransformer('all-MiniLM-L6-v2')
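# Note: all-MiniLM-L6-v2 maps each chunk to a 384-dimensional vector, so
# chunk_embeddings below will have shape (num_chunks, 384)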
# ============================================
# Step 4: Embed the chunks
def create_embeddings(text_chunks):
    # Convert each text chunk into a vector embedding and store as a tensor
    chunk_embeddings = model.encode(text_chunks, convert_to_tensor=True)
    # Print the chunk embeddings
    print(chunk_embeddings)
    # Print the shape of chunk_embeddings
    print(chunk_embeddings.shape)
    # Return the chunk_embeddings
    return chunk_embeddings
# Call the create_embeddings function and store the result in a new chunk_embeddings variable
chunk_embeddings = create_embeddings(cleaned_chunks)
# =====================================
# Step 5: Retrieve the most relevant chunks for a query
# Define a function to find the most relevant text chunks for a given query, chunk_embeddings, and text_chunks
def get_top_chunks(query, chunk_embeddings, text_chunks):
    # Convert the query text into a vector embedding
    query_embedding = model.encode(query, convert_to_tensor=True)
    # Normalize the query embedding to unit length for accurate similarity comparison
    query_embedding_normalized = query_embedding / query_embedding.norm()
    # Normalize all chunk embeddings to unit length for consistent comparison
    chunk_embeddings_normalized = chunk_embeddings / chunk_embeddings.norm(dim=1, keepdim=True)
    # Calculate cosine similarity between the query and all chunks using matrix multiplication
    similarities = torch.matmul(chunk_embeddings_normalized, query_embedding_normalized)
    # Print the similarities
    print(similarities)
    # Find the indices of the 3 chunks with the highest similarity scores
    top_indices = torch.topk(similarities, k=3).indices
    # Print the top indices
    print(top_indices)
    # Create an empty list to store the most relevant chunks
    top_chunks = []
    # Loop through the top indices and retrieve the corresponding text chunks
    for index in top_indices:
        top_chunks.append(text_chunks[index])
    # Return the list of most relevant chunks
    return top_chunks
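
# Quick sanity check of retrieval (the sample question below is a hypothetical
# placeholder; adjust it to match what Joy_Scout_info.txt actually covers):
print(get_top_chunks("What hobbies can I try away from a screen?", chunk_embeddings, cleaned_chunks))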
# =====================================
# Step 7: Build the chat messages and generate a response
# ======================================
def respond(message, history):
    # Retrieve the chunks most relevant to the user's message
    best_chunks = get_top_chunks(message, chunk_embeddings, cleaned_chunks)
    print(best_chunks)
    # Join the retrieved chunks with newlines so they read as separate lines of context
    str_chunks = "\n".join(best_chunks)
    # System prompt that grounds the model in the retrieved context
    messages = [{'role': 'system', 'content': 'You are a very kind chatbot giving people hobby suggestions to help them spend less time on their electronic devices. You answer their questions based on ' + str_chunks + '.'}]
    # Include the prior conversation turns, if any
    if history:
        messages.extend(history)
    messages.append({'role': 'user', 'content': message})
    # Temperature and top_p control randomness
    response = client.chat_completion(messages, max_tokens=100, temperature=1.7, top_p=0.3)
    return response.choices[0].message.content.strip()
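
# Manual smoke test of respond (hypothetical message; left commented out so it
# doesn't call the model at startup):
# print(respond("I spend too much time on my phone. Any hobby ideas?", []))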
chat_theme = gr.themes.Soft(
    primary_hue="orange",
    secondary_hue="purple",
    neutral_hue="yellow",
    spacing_size="lg",
    radius_size="lg",
    text_size="lg",
    font=[gr.themes.GoogleFont("IBM Plex Sans"), "sans-serif"],
    font_mono=[gr.themes.GoogleFont("IBM Plex Mono"), "monospace"]
).set(
    # Input area
    input_background_fill="*neutral_50",
    input_border_color_focus="*primary_300",
    # Button styling
    button_primary_background_fill="*primary_500",
    button_primary_background_fill_hover="*primary_400"
)
# Build the UI: an image banner above the themed chat interface, launched once
with gr.Blocks(theme=chat_theme) as chatbot:
    gr.Image(
        value="icecream.jpg",
        show_label=False,
        show_share_button=False,
        show_download_button=False)
    gr.ChatInterface(respond, type="messages")

chatbot.launch(ssr_mode=False)