#import libraries here
import gradio as gr
import random
from huggingface_hub import InferenceClient
#STEP 1: Import Sentence Transformer Library And Torch
from sentence_transformers import SentenceTransformer
import torch
with open("poverty_and_education.txt", "r", encoding="utf-8") as file:
# Read the entire contents of the file and store it in a variable
poverty_and_education = file.read()
with open("academic_tips_text.txt", "r", encoding="utf-8") as file:
# Read the entire contents of the file and store it in a variable
acadenic_tips_text = file.read()
with open("time_management.txt", "r", encoding="utf-8") as file:
# Read the entire contents of the file and store it in a variable
acadenic_tips_text = file.read()
with open("Extracurricular_ideas.txt", "r", encoding="utf-8") as file:
# Read the entire contents of the file and store it in a variable
acadenic_tips_text = file.read()
with open("financial_aid.txt", "r", encoding="utf-8") as file:
# Read the entire contents of the file and store it in a variable
acadenic_tips_text = file.read()
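# Note: only poverty_and_education is embedded further below. To search across all
# five documents, you could, for example, concatenate them before preprocessing
# (combined_text is an illustrative name, not part of the original pipeline):
# combined_text = "\n".join([poverty_and_education, academic_tips_text,
#                            time_management_text, extracurricular_ideas_text,
#                            financial_aid_text])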
# Print the text below
print(poverty_and_education)
# ===== APPLY THE COMPLETE WORKFLOW =====
### STEP 3
def preprocess_text(text):
# Strip extra whitespace from the beginning and the end of the text
cleaned_text = text.strip()
# Split the cleaned_text by every newline character (\n)
chunks = cleaned_text.split("\n")
# Create an empty list to store cleaned chunks
cleaned_chunks = []
# Write your for-in loop below to clean each chunk and add it to the cleaned_chunks list
for chunk in chunks:
stripped_chunk = chunk.strip()
if len(stripped_chunk) > 0:
cleaned_chunks.append(stripped_chunk)
# Print cleaned_chunks
print(cleaned_chunks)
# Print the length of cleaned_chunks
num_of_chunks = len(cleaned_chunks)
print(num_of_chunks)
print(f"There are {num_of_chunks} amount of chunks")
# Return the cleaned_chunks
return cleaned_chunks
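# A quick illustrative check of preprocess_text (made-up sample input, safe to delete):
# it strips whitespace from each line and drops blank lines.
# preprocess_text("  First tip \n\n Second tip\n") -> ['First tip', 'Second tip']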
# Load the pre-trained embedding model that converts text to vectors
model = SentenceTransformer('all-MiniLM-L6-v2')
### STEP 4
def create_embeddings(text_chunks):
# Convert each text chunk into a vector embedding and store as a tensor
    chunk_embeddings = model.encode(text_chunks, convert_to_tensor=True)
# Print the chunk embeddings
print(chunk_embeddings)
# Print the shape of chunk_embeddings
print(chunk_embeddings.shape)
# Return the chunk_embeddings
return chunk_embeddings
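# Note: all-MiniLM-L6-v2 produces 384-dimensional embeddings, so for N chunks the
# printed shape above should be torch.Size([N, 384]).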
###STEP 5
# Define a function to find the most relevant text chunks for a given query, chunk_embeddings, and text_chunks
def get_top_chunks(query, chunk_embeddings, text_chunks):
# Convert the query text into a vector embedding
    query_embedding = model.encode(query, convert_to_tensor=True)
# Normalize the query embedding to unit length for accurate similarity comparison
query_embedding_normalized = query_embedding / query_embedding.norm()
# Normalize all chunk embeddings to unit length for consistent comparison
chunk_embeddings_normalized = chunk_embeddings / chunk_embeddings.norm(dim=1, keepdim=True)
# Calculate cosine similarity between query and all chunks using matrix multiplication
    similarities = torch.matmul(chunk_embeddings_normalized, query_embedding_normalized)
# Print the similarities
print(similarities)
# Find the indices of the 3 chunks with highest similarity scores
top_indices = torch.topk(similarities, k=3).indices
# Print the top indices
print(top_indices)
# Create an empty list to store the most relevant chunks
top_chunks = []
# Loop through the top indices and retrieve the corresponding text chunks
for i in top_indices:
        relevant_info = text_chunks[i]  # use the text_chunks parameter, not a global
top_chunks.append(relevant_info)
# Return the list of most relevant chunks
return top_chunks
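# Why the matmul works: both sides are normalized to unit length, so each row's dot
# product with the query equals their cosine similarity. As an illustrative
# cross-check (not part of the pipeline), PyTorch's built-in
# torch.nn.functional.cosine_similarity(chunk_embeddings, query_embedding.unsqueeze(0), dim=1)
# would yield the same scores without the manual normalization.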
cleaned_chunks = preprocess_text(poverty_and_education)
chunk_embeddings = create_embeddings(cleaned_chunks)
# Inference client for the AI model being used
client = InferenceClient("Qwen/Qwen2.5-7B-Instruct-1M")
# Define how the AI responds: retrieve relevant context, add chat history, then stream a reply
def respond(message, history):
    # Find the most relevant chunks for the user's question
    information = get_top_chunks(message, chunk_embeddings, cleaned_chunks)
    messages = [{"role": "system", "content": f"You are a friendly chatbot that gives advice to disadvantaged students about their education based on their question. When you give advice, keep in mind the following information: {information}"}]
    if history:
        messages.extend(history)  # keep adding history
    messages.append({"role": "user", "content": message})
    # Stream the completion, capping how much the LLM may generate (100 tokens, not words)
    stream = client.chat_completion(messages, stream=True, max_tokens=100)
    response = ""
    for chunk in stream:
        token = chunk.choices[0].delta.content
        if token:  # the final streamed chunk may carry no content
            response += token
            yield response  # yield the partial text so the chat UI updates live
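# Because respond is a generator, gr.ChatInterface re-renders the assistant's
# message on every yield, which produces the live "typing" effect in the UI.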
### STEP 6
top_results = get_top_chunks("How does poverty affect one's education?", chunk_embeddings, cleaned_chunks)
print(top_results)
# Defining the chatbot UI so the user can interact, see their conversation history, and watch new messages appear, using Gradio's built-in chat component
# ChatInterface requires at least one parameter (a function)
chatbot = gr.ChatInterface(respond, type="messages", title="Accessible Intelligence Hub", theme="Taithrah/Minimal")
#launching chatbot
chatbot.launch()
#You may run into errors when you're trying different models. To see the error messages, set debug to True in launch()