import gradio as gr
from huggingface_hub import InferenceClient
from sentence_transformers import SentenceTransformer
import torch
# Semantic Search

# STEP 1
# !pip install -q sentence-transformers

# STEP 2
# Open the water_cycle.txt file in read mode with UTF-8 encoding
with open("water_cycle.txt", "r", encoding="utf-8") as file:
    # Read the entire contents of the file and store it in a variable
    water_cycle_text = file.read()

# Print the text
print(water_cycle_text)
# STEP 3
def preprocess_text(text):
    # Strip extra whitespace from the beginning and the end of the text
    cleaned_text = text.strip()
    # Split the cleaned_text at every newline character (\n)
    chunks = cleaned_text.split("\n")
    # Create an empty list to store cleaned chunks
    cleaned_chunks = []
    # Clean each chunk and keep it only if it is non-empty
    for chunk in chunks:
        stripped_chunk = chunk.strip()
        if len(stripped_chunk) > 0:  # was >= 0, which also kept empty chunks
            cleaned_chunks.append(stripped_chunk)
    # Print cleaned_chunks
    print(cleaned_chunks)
    # Print the length of cleaned_chunks
    print(len(cleaned_chunks))
    # Return the cleaned_chunks
    return cleaned_chunks
# Call the preprocess_text function and store the result in a cleaned_chunks variable
cleaned_chunks = preprocess_text(water_cycle_text)
# STEP 4
# Load the pre-trained embedding model that converts text to vectors
model = SentenceTransformer('all-MiniLM-L6-v2')

def create_embeddings(text_chunks):
    # Convert each text chunk into a vector embedding and store the result as a tensor
    chunk_embeddings = model.encode(text_chunks, convert_to_tensor=True)
    # Print the chunk embeddings
    print(chunk_embeddings)
    # Print the shape of chunk_embeddings (.shape is an attribute, not a method, so no parentheses)
    print(chunk_embeddings.shape)
    # Return the chunk_embeddings
    return chunk_embeddings
# Call the create_embeddings function and store the result in a new chunk_embeddings variable
chunk_embeddings = create_embeddings(cleaned_chunks)
# STEP 5
# Find the most relevant text chunks for a given query
def get_top_chunks(query, chunk_embeddings, text_chunks):
    # Convert the query text into a vector embedding
    query_embedding = model.encode(query, convert_to_tensor=True)
    # Normalize the query embedding to unit length for accurate similarity comparison
    query_embedding_normalized = query_embedding / query_embedding.norm()
    # Normalize all chunk embeddings to unit length for consistent comparison
    chunk_embeddings_normalized = chunk_embeddings / chunk_embeddings.norm(dim=1, keepdim=True)
    # Cosine similarity between the query and every chunk via matrix multiplication
    similarities = torch.matmul(chunk_embeddings_normalized, query_embedding_normalized)
    # Print the similarities
    print(similarities)
    # Find the indices of the 3 chunks with the highest similarity scores
    top_indices = torch.topk(similarities, k=3).indices
    # Print the top indices
    print(top_indices)
    # Create an empty list to store the most relevant chunks
    top_chunks = []
    # Loop through the top indices and retrieve the corresponding text chunks
    for index in top_indices:
        chunk = text_chunks[index]
        top_chunks.append(chunk)
    # Return the list of most relevant chunks
    return top_chunks
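
# Illustrative usage (the sample question and variable names here are made up
# for this sketch, not part of the original steps): retrieve the three
# water-cycle chunks most similar to a query.
sample_query = "How does water return to the atmosphere?"
sample_top_chunks = get_top_chunks(sample_query, chunk_embeddings, cleaned_chunks)
print(sample_top_chunks)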
# STEP 6: Practice
# ===== LOAD & PROCESS YOUR NEW CONTENT =====
with open("em_spectrum.txt", "r", encoding="utf-8") as file:
    # Read the entire contents of the file and store it in a variable
    em_spectrum_text = file.read()

# Print the text
print(em_spectrum_text)
# ===== APPLY THE COMPLETE WORKFLOW =====
# Reuse the same preprocessing and embedding functions on the new text
em_cleaned_chunks = preprocess_text(em_spectrum_text)
em_chunk_embeddings = create_embeddings(em_cleaned_chunks)
test_question = "What type of EM radiation has the most energy?"
print("test question:", test_question)
em_top_results = get_top_chunks(test_question, em_chunk_embeddings, em_cleaned_chunks)
print(em_top_results)
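
# Illustrative check (formatting is our choice, not part of the original
# steps): print each retrieved chunk with its rank so the top match is easy
# to eyeball.
for rank, chunk in enumerate(em_top_results, start=1):
    print(f"{rank}. {chunk}")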
# ===== EXPERIMENT & VERIFY =====
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")

def respond(message, history):
    # Start with a system message, then add prior turns and the new user message
    messages = [
        {
            "role": "system",
            "content": "You are a friendly chatbot! :)",
        }
    ]
    if history:
        messages.extend(history)
    messages.append(
        {"role": "user", "content": message}
    )
    # Debug prints (moved inside respond: messages and history only exist here)
    print(messages)
    print(history)
    # Ask the model for a short completion and return just the reply text
    response = client.chat_completion(messages, max_tokens=100)
    print(response)
    return response['choices'][0]['message']['content'].strip()

chatbot = gr.ChatInterface(respond, type="messages")
chatbot.launch()
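
# One possible extension (a sketch, not part of the original steps): feed the
# retrieved chunks into the system prompt so the chatbot answers from the
# document. respond_with_context is a hypothetical name for illustration.
def respond_with_context(message, history):
    # Retrieve the chunks most relevant to the user's question
    top_chunks = get_top_chunks(message, em_chunk_embeddings, em_cleaned_chunks)
    context = "\n".join(top_chunks)
    messages = [
        {
            "role": "system",
            "content": "Answer using only this context:\n" + context,
        }
    ]
    if history:
        messages.extend(history)
    messages.append({"role": "user", "content": message})
    response = client.chat_completion(messages, max_tokens=100)
    return response['choices'][0]['message']['content'].strip()
# To try it, pass respond_with_context instead of respond to gr.ChatInterface above.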