from huggingface_hub import InferenceClient
# Step 1 from semantic search
from sentence_transformers import SentenceTransformer
import torch
import gradio as gr
import random
client = InferenceClient("Qwen/Qwen2.5-72B-Instruct")
# Step 2 from semantic search: read the books file
# Open the books_file.txt file in read mode with UTF-8 encoding
with open("books_file.txt", "r", encoding="utf-8") as file:
    # Read the entire contents of the file and store it in a variable
    books_file_text = file.read()

# Print the text that was read in
print(books_file_text)
# Step 3 from semantic search
def preprocess_text(text):
    # Strip extra whitespace from the beginning and the end of the text
    cleaned_text = text.strip()
    # Split the cleaned_text on every newline character (\n)
    chunks = cleaned_text.split("\n")
    # Create an empty list to store cleaned chunks
    cleaned_chunks = []
    # Strip each chunk and keep it only if it is non-empty
    for chunk in chunks:
        clean_chunk = chunk.strip()
        if len(clean_chunk) > 0:
            cleaned_chunks.append(clean_chunk)
    # Print cleaned_chunks and how many chunks it holds
    print(cleaned_chunks)
    print(len(cleaned_chunks))
    # Return the cleaned_chunks
    return cleaned_chunks
# Call the preprocess_text function and store the result in a cleaned_chunks variable
cleaned_chunks = preprocess_text(books_file_text)
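# Quick sanity check of the chunking behaviour on a tiny hypothetical sample
# (not part of the real books file): blank lines and surrounding whitespace
# should be dropped, leaving one chunk per non-empty line.
assert preprocess_text("  Dune \n\n The Hobbit ") == ["Dune", "The Hobbit"]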
# Step 4 from semantic search
# Load the pre-trained embedding model that converts text to vectors
model = SentenceTransformer('all-MiniLM-L6-v2')

def create_embeddings(text_chunks):
    # Convert each text chunk into a vector embedding and store as a tensor
    chunk_embeddings = model.encode(text_chunks, convert_to_tensor=True)
    # Print the chunk embeddings and their shape
    print(chunk_embeddings)
    print(chunk_embeddings.shape)
    # Return the chunk_embeddings
    return chunk_embeddings

# Call the create_embeddings function and store the result in a chunk_embeddings variable
chunk_embeddings = create_embeddings(cleaned_chunks)
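# Shape check: all-MiniLM-L6-v2 encodes each chunk as a 384-dimensional
# vector, so the tensor printed above should have shape (number of chunks, 384).
assert chunk_embeddings.shape == (len(cleaned_chunks), 384)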
# Step 5 from semantic search
# Find the most relevant text chunks for a given query
def get_top_chunks(query, chunk_embeddings, text_chunks):
    # Convert the query text into a vector embedding
    query_embedding = model.encode(query, convert_to_tensor=True)
    # Normalize the query embedding to unit length for accurate similarity comparison
    query_embedding_normalized = query_embedding / query_embedding.norm()
    # Normalize all chunk embeddings to unit length for consistent comparison
    chunk_embeddings_normalized = chunk_embeddings / chunk_embeddings.norm(dim=1, keepdim=True)
    # Calculate cosine similarity between the query and all chunks via matrix multiplication
    similarities = torch.matmul(chunk_embeddings_normalized, query_embedding_normalized)
    # Print the similarities
    print(similarities)
    # Find the indices of the 3 chunks with the highest similarity scores
    top_indices = torch.topk(similarities, k=3).indices
    # Print the top indices
    print(top_indices)
    # Collect the text chunks that correspond to the top indices
    top_chunks = []
    for i in top_indices:
        top_chunks.append(text_chunks[i])
    # Return the list of most relevant chunks
    return top_chunks
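# Smoke-test the retrieval step with a hypothetical query (the query string
# below is illustrative only): this prints the similarity scores and top
# indices, then the 3 best-matching lines from books_file.txt.
print(get_top_chunks("cozy fantasy like The Hobbit", chunk_embeddings, cleaned_chunks))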
def respond(message, history):
    # Retrieve the chunks most relevant to the user's message
    best_next_read = get_top_chunks(message, chunk_embeddings, cleaned_chunks)
    print(best_next_read)
    str_read_chunks = "\n".join(best_next_read)
    messages = [
        {
            "role": "system",
            "content": (
                "You are a Gen Z and Gen Alpha-friendly chatbot that helps teenagers "
                "find their next best book to read. Speak naturally and casually, like "
                "someone from Gen Z. Only recommend books, never anything else. Use only "
                "the books in our database. NEVER use outside data; ONLY take data from "
                "our database! Match book suggestions to the user's age. If they don’t "
                "share their age, assume they’re Gen Z or Gen Alpha and recommend "
                "accordingly. If the user is not Gen Z or Gen Alpha, you can recommend "
                "any book from the database. If they give you a genre, use it to guide "
                "your recommendation; if they don’t, pick something fun or relevant. If "
                "they mention a book they liked, match its genre to recommend something "
                "similar. If nothing matches all their preferences, suggest the most "
                "similar book from the database. You got this! Remember, you can ONLY "
                "take data from:\n" + str_read_chunks
            ),
        }
    ]
    # Include prior turns so the model keeps conversational context
    if history:
        messages.extend(history)
    # Append the latest user message
    messages.append({"role": "user", "content": message})
    response = client.chat_completion(
        messages, max_tokens=700, temperature=1.3, top_p=0.6
    )
    return response.choices[0].message.content.strip()
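# For reference, a single respond() call sends an OpenAI-style chat payload
# like this hypothetical example (system prompt truncated):
# [{"role": "system", "content": "You are a Gen Z ... take data from: <retrieved chunks>"},
#  {"role": "user", "content": "got any sci-fi recs?"},
#  {"role": "assistant", "content": "say less, check this one out ..."},
#  {"role": "user", "content": "something shorter pls"}]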
# Build a soft pink/rose theme for the chat UI
chat_theme = gr.themes.Soft(
    primary_hue="pink",
    secondary_hue="rose",
    neutral_hue="indigo",
    spacing_size="lg",
    radius_size="lg",
).set(
    input_background_fill="*neutral_50",
    input_border_color_focus="*primary_300",
    button_primary_background_fill="*primary_500",
    button_primary_background_fill_hover="*primary_400",
)
# Wire the respond function into a themed Gradio chat interface
chatbot = gr.ChatInterface(
    respond,
    type="messages",
    theme=chat_theme,
    title="Book Bot from RecoNext",
    description="Hey! I’m your book bot \nI help you find your next favorite book based on your age and taste. Just tell me what you're into!",
)

chatbot.launch()