import gradio as gr
import random
from huggingface_hub import InferenceClient

# STEP 1: Import the Sentence Transformers library and Torch
from sentence_transformers import SentenceTransformer
import torch

# ===== LOAD & PROCESS YOUR NEW CONTENT =====
# STEP 2: Load and process the text file

# Open the tooth_brushin_text.txt file in read mode with UTF-8 encoding
with open("tooth_brushin_text.txt", "r", encoding="utf-8") as file:
    # Read the entire contents of the file and store it in a variable
    tooth_brushin_text = file.read()

# Print the text
print(tooth_brushin_text)
# ===== APPLY THE COMPLETE WORKFLOW =====
# STEP 3: Split the text into chunks (by sentence), then clean each chunk
def preprocess_text(text):
    # Strip extra whitespace from the beginning and the end of the text
    cleaned_text = text.strip()
    # Split the cleaned text at every period
    chunks = cleaned_text.split(".")
    # Create an empty list to store cleaned chunks
    cleaned_chunks = []
    # Clean each chunk and keep it only if it is non-empty
    for chunk in chunks:
        stripped_chunk = chunk.strip()
        if len(stripped_chunk) > 0:
            cleaned_chunks.append(stripped_chunk)
    # Print cleaned_chunks
    print(cleaned_chunks)
    num_of_chunks = len(cleaned_chunks)
    # Print the number of chunks
    print(f"There are {num_of_chunks} chunks.")
    # Return the cleaned chunks
    return cleaned_chunks

# Call the preprocess_text function and store the result in a cleaned_chunks variable
cleaned_chunks = preprocess_text(tooth_brushin_text)
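
# Hedged aside: splitting on "." is deliberately simple, but it also breaks on
# abbreviations like "Dr." and ignores "?" and "!". The function below is a
# minimal regex-based sketch of an alternative splitter; it is illustrative
# only and is not called anywhere in this app.
import re

def preprocess_text_regex(text):
    # Split on ".", "?", or "!" followed by whitespace, then drop empty chunks
    sentences = re.split(r"(?<=[.!?])\s+", text.strip())
    return [s.strip() for s in sentences if s.strip()]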

# STEP 4: Convert the chunks into vectors
# Load the pre-trained embedding model that converts text to vectors
model = SentenceTransformer('all-MiniLM-L6-v2')

def create_embeddings(text_chunks):
    # Convert each text chunk into a vector embedding and store as a tensor
    chunk_embeddings = model.encode(text_chunks, convert_to_tensor=True)
    # Print the chunk embeddings
    print(chunk_embeddings)
    # Print the shape of chunk_embeddings
    print(chunk_embeddings.shape)
    # Return the chunk embeddings
    return chunk_embeddings

# Call the create_embeddings function and store the result in a chunk_embeddings variable
chunk_embeddings = create_embeddings(cleaned_chunks)
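
# Hedged sanity check: sentence-transformers also ships a cosine-similarity
# helper, so the manual normalization done in STEP 5 below can be cross-checked
# against util.cos_sim. The sample query string is an illustrative assumption.
from sentence_transformers import util

demo_scores = util.cos_sim(
    model.encode("How often should I brush?", convert_to_tensor=True),
    chunk_embeddings,
)
print(demo_scores.shape)  # one row of similarity scores, one column per chunk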

# STEP 5: Embed the query, find the 3 most relevant chunks, and return them as text
# Define a function that finds the most relevant text chunks for a given query
def get_top_chunks(query, chunk_embeddings, text_chunks):
    # Convert the query text into a vector embedding
    query_embedding = model.encode(query, convert_to_tensor=True)
    # Normalize the query embedding to unit length for accurate similarity comparison
    query_embedding_normalized = query_embedding / query_embedding.norm()
    # Normalize all chunk embeddings to unit length for consistent comparison
    chunk_embeddings_normalized = chunk_embeddings / chunk_embeddings.norm(dim=1, keepdim=True)
    # Calculate cosine similarity between the query and all chunks via matrix multiplication
    similarities = torch.matmul(chunk_embeddings_normalized, query_embedding_normalized)
    # Print the similarities
    print(similarities)
    # Find the indices of the 3 chunks with the highest similarity scores
    top_indices = torch.topk(similarities, k=3).indices
    # Print the top indices
    print(top_indices)
    # Create an empty list to store the most relevant chunks
    top_chunks = []
    # Loop through the top indices and retrieve the corresponding text chunks
    for index in top_indices:
        relevant_text_chunk = text_chunks[index]
        top_chunks.append(relevant_text_chunk)
    # Return the list of most relevant chunks
    return top_chunks
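
# Quick usage sketch (the query string is an illustrative assumption, not from
# the source text): retrieve the three chunks most similar to a sample question.
print(get_top_chunks("How long should I brush my teeth?", chunk_embeddings, cleaned_chunks))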

# STEP 6: Wire the retrieval step into a chat model and build the Gradio app
client = InferenceClient("Qwen/Qwen2.5-7B-Instruct-1M")

def respond(message, history):
    # Retrieve the chunks most relevant to the user's message
    top_results = get_top_chunks(message, chunk_embeddings, cleaned_chunks)
    print(top_results)
    # Ground the system prompt in the retrieved chunks
    messages = [{"role": "system", "content": f"You are a friendly chatbot. You give people advice about brushing their teeth. Base your response on the following information: {top_results}"}]
    # Include any prior conversation turns, then the new user message
    if history:
        messages.extend(history)
    messages.append({"role": "user", "content": message})
    # Ask the hosted model for a short completion and return its text
    response = client.chat_completion(messages, max_tokens=100)
    return response["choices"][0]["message"]["content"].strip()

# Alternate demo handlers (not used by the interface below)
def echo(message, history):
    return message

def yes_no(message, history):
    responses = ["Yes", "No"]
    return random.choice(responses)
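
# Hedged usage note: to try one of the simpler handlers instead, pass it to the
# interface below, e.g. gr.ChatInterface(echo, type="messages").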

# Build the chat UI around the retrieval-augmented respond function
chatbot = gr.ChatInterface(respond, type="messages")
chatbot.launch()
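
# Hedged note: on Hugging Face Spaces, launch() serves the app directly; when
# running locally, launch(share=True) can expose a temporary public link.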