import gradio as gr
import random
from huggingface_hub import InferenceClient
#STEP 1: (Import Sentence Transformer Library and Torch)
from sentence_transformers import SentenceTransformer
import torch
# ===== LOAD & PROCESS YOUR NEW CONTENT =====
#STEP 2: (Load/process text file)
# Open the tooth_brushin_text.txt file in read mode with UTF-8 encoding
with open("tooth_brushin_text.txt", "r", encoding="utf-8") as file:
    # Read the entire contents of the file and store it in a variable
    tooth_brushin_text = file.read()
# Print the loaded text
print(tooth_brushin_text)
# ===== APPLY THE COMPLETE WORKFLOW =====
#STEP 3: (Split the text into sentence chunks, then clean/strip each chunk)
def preprocess_text(text):
    # Strip extra whitespace from the beginning and the end of the text
    cleaned_text = text.strip()
    # Split the cleaned text at every period
    chunks = cleaned_text.split(".")
    # Create an empty list to store cleaned chunks
    cleaned_chunks = []
    # Clean each chunk and add the non-empty ones to the cleaned_chunks list
    for chunk in chunks:
        stripped_chunk = chunk.strip()
        if len(stripped_chunk) > 0:
            cleaned_chunks.append(stripped_chunk)
    # Print cleaned_chunks
    print(cleaned_chunks)
    num_of_chunks = len(cleaned_chunks)
    # Print the length of cleaned_chunks
    print(f"There are {num_of_chunks} chunks.")
    # Return the cleaned chunks
    return cleaned_chunks
# Call the preprocess_text function and store the result in a cleaned_chunks variable
cleaned_chunks = preprocess_text(tooth_brushin_text)
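# For intuition, a made-up two-sentence input would come back like this:
#   preprocess_text("Brush twice a day. Use fluoride toothpaste.")
#   -> ["Brush twice a day", "Use fluoride toothpaste"]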
#STEP 4: (Convert Chunks into vectors)
# Load the pre-trained embedding model that converts text to vectors
model = SentenceTransformer('all-MiniLM-L6-v2')
def create_embeddings(text_chunks):
    # Convert each text chunk into a vector embedding and store as a tensor
    chunk_embeddings = model.encode(text_chunks, convert_to_tensor=True)
    # Print the chunk embeddings
    print(chunk_embeddings)
    # Print the shape of chunk_embeddings
    print(chunk_embeddings.shape)
    # Return the chunk_embeddings
    return chunk_embeddings
# Call the create_embeddings function and store the result in a new chunk_embeddings variable
chunk_embeddings = create_embeddings(cleaned_chunks)
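# all-MiniLM-L6-v2 maps each chunk to a 384-dimensional vector, so the shape
# printed above is (number_of_chunks, 384): one row per chunk.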
#STEP 5: (Embed the query, find the 3 most similar chunk vectors, and return their text)
# Find the most relevant text chunks for a query, given the chunk embeddings and their texts
def get_top_chunks(query, chunk_embeddings, text_chunks):
    # Convert the query text into a vector embedding
    query_embedding = model.encode(query, convert_to_tensor=True)
    # Normalize the query embedding to unit length for accurate similarity comparison
    query_embedding_normalized = query_embedding / query_embedding.norm()
    # Normalize all chunk embeddings to unit length for consistent comparison
    chunk_embeddings_normalized = chunk_embeddings / chunk_embeddings.norm(dim=1, keepdim=True)
    # Calculate cosine similarity between the query and all chunks using matrix multiplication
    similarities = torch.matmul(chunk_embeddings_normalized, query_embedding_normalized)
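    # On unit-length vectors the dot product equals the cosine of the angle between
    # them, so each score runs from -1 (opposite) to 1 (same direction in vector space)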
    # Print the similarities
    print(similarities)
    # Find the indices of the 3 chunks with the highest similarity scores
    top_indices = torch.topk(similarities, k=3).indices
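    # torch.topk returns a (values, indices) named tuple; .indices keeps just the
    # positions of the 3 highest-scoring chunks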
    # Print the top indices
    print(top_indices)
    # Create an empty list to store the most relevant chunks
    top_chunks = []
    # Loop through the top indices and retrieve the corresponding text chunks
    for index in top_indices:
        relevant_text_chunk = text_chunks[index]
        top_chunks.append(relevant_text_chunk)
    # Return the list of most relevant chunks
    return top_chunks
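
# Quick sanity check with a hypothetical query (uncomment to try):
#   print(get_top_chunks("How long should I brush?", chunk_embeddings, cleaned_chunks))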
#STEP 6: (Send the query and retrieved chunks to a chat model, then launch the interface)
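# InferenceClient routes chat requests to Hugging Face's hosted inference for the model
# named below; depending on the deployment, an HF access token may need to be configured.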
client = InferenceClient("Qwen/Qwen2.5-7B-Instruct-1M")
def respond(message, history):
    # Retrieve the 3 chunks most relevant to the user's message
    top_results = get_top_chunks(message, chunk_embeddings, cleaned_chunks)
    print(top_results)
    # Ground the system prompt in the retrieved chunks
    messages = [{"role": "system", "content": f"You are a friendly chatbot. You give people advice about brushing their teeth. Base your response on the following information: {top_results}"}]
    if history:
        messages.extend(history)
    messages.append({"role": "user", "content": message})
    response = client.chat_completion(messages, max_tokens=100)
    return response.choices[0].message.content.strip()
def echo(message, history):
    return message

def yes_no(message, history):
    responses = ["Yes", "No"]
    return random.choice(responses)
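
# `echo` and `yes_no` are simple stand-in responders (not wired up below); either can
# be passed to gr.ChatInterface instead of `respond` to test the UI without the model:
#   chatbot = gr.ChatInterface(echo, type="messages")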
chatbot = gr.ChatInterface(respond, type="messages")
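# With type="messages", Gradio passes `history` as a list of {"role": ..., "content": ...}
# dicts, which is why respond() can extend `messages` with it directly.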
# Launch the Gradio chat interface
chatbot.launch()