#import libraries here
import gradio as gr
import random
from huggingface_hub import InferenceClient
#STEP 1: Import Sentence Transformer Library And Torch
from sentence_transformers import SentenceTransformer
import torch
import datetime  # used below to timestamp saved chat-history filenames
with open("poverty_and_education.txt", "r", encoding="utf-8") as file:
# Read the entire contents of the file and store it in a variable
poverty_and_education = file.read()
with open("academic_tips_text.txt", "r", encoding="utf-8") as file:
# Read the entire contents of the file and store it in a variable
academic_tips_text = file.read()
with open("time_management.txt", "r", encoding="utf-8") as file:
# Read the entire contents of the file and store it in a variable
time_management = file.read()
with open("Extracurricular_ideas.txt", "r", encoding="utf-8") as file:
# Read the entire contents of the file and store it in a variable
extracurricular_ideas = file.read()
with open("financial_aid.txt", "r", encoding="utf-8") as file:
# Read the entire contents of the file and store it in a variable
financial_aid = file.read()
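# (Optional sketch, not used below) The same files could also be loaded in one loop,
# assuming the filenames above:
# texts = {}
# for fname in ["poverty_and_education.txt", "academic_tips_text.txt",
#               "time_management.txt", "Extracurricular_ideas.txt", "financial_aid.txt"]:
#     with open(fname, "r", encoding="utf-8") as f:
#         texts[fname] = f.read()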
# Debug print to confirm the academic tips file loaded correctly
print(academic_tips_text)
# ===== APPLY THE COMPLETE WORKFLOW =====
### STEP 3
def preprocess_text(text):
# Strip extra whitespace from the beginning and the end of the text
cleaned_text = text.strip()
# Split the cleaned_text by every newline character (\n)
chunks = cleaned_text.split("\n")
# Create an empty list to store cleaned chunks
cleaned_chunks = []
    # Clean each chunk and keep only the non-empty ones
for chunk in chunks:
stripped_chunk = chunk.strip()
if len(stripped_chunk) > 0:
cleaned_chunks.append(stripped_chunk)
# Print cleaned_chunks
print(cleaned_chunks)
# Print the length of cleaned_chunks
num_of_chunks = len(cleaned_chunks)
print(num_of_chunks)
print(f"There are {num_of_chunks} amount of chunks")
# Return the cleaned_chunks
return cleaned_chunks
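# For example (illustrative input, not from the data files):
# preprocess_text("  Tip one\n\n  Tip two  \n") returns ["Tip one", "Tip two"]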
# Load the pre-trained embedding model that converts text to vectors
model = SentenceTransformer('all-MiniLM-L6-v2')
### STEP 4
def create_embeddings(text_chunks):
# Convert each text chunk into a vector embedding and store as a tensor
    chunk_embeddings = model.encode(text_chunks, convert_to_tensor=True)
# Print the chunk embeddings
print(chunk_embeddings)
# Print the shape of chunk_embeddings
print(chunk_embeddings.shape)
# Return the chunk_embeddings
return chunk_embeddings
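# Note: all-MiniLM-L6-v2 produces 384-dimensional vectors, so for N chunks
# chunk_embeddings.shape is (N, 384).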
# create_embeddings is called below, once each topic's chunks have been prepared
### STEP 5
# Define a function to find the most relevant text chunks for a given query, chunk_embeddings, and text_chunks
def get_top_chunks(query, chunk_embeddings, text_chunks):
# Convert the query text into a vector embedding
    query_embedding = model.encode(query, convert_to_tensor=True)
# Normalize the query embedding to unit length for accurate similarity comparison
query_embedding_normalized = query_embedding / query_embedding.norm()
# Normalize all chunk embeddings to unit length for consistent comparison
chunk_embeddings_normalized = chunk_embeddings / chunk_embeddings.norm(dim=1, keepdim=True)
# Calculate cosine similarity between query and all chunks using matrix multiplication
    similarities = torch.matmul(chunk_embeddings_normalized, query_embedding_normalized)
# Print the similarities
print(similarities)
# Find the indices of the 3 chunks with highest similarity scores
top_indices = torch.topk(similarities, k=3).indices
# Print the top indices
print(top_indices)
# Create an empty list to store the most relevant chunks
top_chunks = []
# Loop through the top indices and retrieve the corresponding text chunks
for i in top_indices:
relevant_info = text_chunks[i]
top_chunks.append(relevant_info)
# Return the list of most relevant chunks
return top_chunks
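# Equivalent check (optional sketch): the normalize-then-matmul inside get_top_chunks
# computes cosine similarity, so the same scores could be obtained with:
# similarities = torch.nn.functional.cosine_similarity(
#     chunk_embeddings, query_embedding.unsqueeze(0), dim=1)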
# Preprocess and embed each topic's corpus
cleaned_chunks = preprocess_text(academic_tips_text)
cleaned_chunks2 = preprocess_text(extracurricular_ideas)
cleaned_chunks3 = preprocess_text(time_management)
cleaned_chunks4 = preprocess_text(financial_aid)
chunk_embeddings = create_embeddings(cleaned_chunks)
chunk_embeddings2 = create_embeddings(cleaned_chunks2)
chunk_embeddings3 = create_embeddings(cleaned_chunks3)
chunk_embeddings4 = create_embeddings(cleaned_chunks4)
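# (Optional sketch, not used below) The four corpora could instead live in one dict
# keyed by topic name, which would replace the if/elif chain in respond():
# topic_data = {
#     "Academia": (cleaned_chunks, chunk_embeddings),
#     "Extracurriculars": (cleaned_chunks2, chunk_embeddings2),
#     "Time Management": (cleaned_chunks3, chunk_embeddings3),
#     "Financial Aid": (cleaned_chunks4, chunk_embeddings4),
# }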
# Inference API client for the hosted chat model
client = InferenceClient("openai/gpt-oss-20b")
# respond() builds the system prompt from the selected topic and mode
def respond(message, history, chatbot_topic, chatbot_mode):
    # Retrieve the chunks most relevant to the selected topic
    # (topic and mode values are passed in from the UI selectors below)
    topic_chunks = []
    if chatbot_topic == "Academia":
        topic_chunks = get_top_chunks(message, chunk_embeddings, cleaned_chunks)
        print(topic_chunks)
    elif chatbot_topic == "Extracurriculars":
        topic_chunks = get_top_chunks(message, chunk_embeddings2, cleaned_chunks2)
        print(topic_chunks)
    elif chatbot_topic == "Time Management":
        topic_chunks = get_top_chunks(message, chunk_embeddings3, cleaned_chunks3)
        print(topic_chunks)
    elif chatbot_topic == "Financial Aid":
        topic_chunks = get_top_chunks(message, chunk_embeddings4, cleaned_chunks4)
        print(topic_chunks)
if chatbot_mode=="Peer Mode":
messages = [{"role": "assistant", "content": f"You are a casual, sometimes funny chatbot who acts like a peer of the person who is asking the question. You relate to their situation and give them relevant advice. You only answer in complete sentences with correct grammar, punctuation, and complete ideas. You respond clearly in under three complete bullet points under 250 characters. When you give advice, keep in mind the following information {topic_chunks}"}]
if chatbot_mode=="Guidance Counselor Mode":
messages = [{"role": "assistant", "content": f"You act as a helpful guidance counselor with an educated understanding of high school life and college admissions. You guide the student to consider their academic potential, while maintaining the passion and balance they need. You only answer in complete sentences with correct grammar, punctuation, and complete ideas. When you give advice, keep in mind the following information {topic_chunks}"}]
if chatbot_mode=="Parent Mode":
messages = [{"role": "assistant", "content": f"You are a guiding, nurturing, and protective parent who wants their student to reach their fullest potential while learning to grow up with the proper physical, emotional, and social development. You want to build your student into a responsible adult, but also want them to pursue success in their life and establish a good future. You only answer in complete sentences with correct grammar, punctuation, and complete ideas. When you give advice, keep in mind the following information {information}"}]
else:
messages = [{"role": "assistant", "content": f"You are a friendly, helpful chatbot that gives academic advice to disadvantaged students about their education based on their question. You only answer in complete sentences with correct grammar, punctuation, and complete ideas. When you give advice, keep in mind the following information {topic_chunks}"}]
    if history:
        messages.extend(history)  # keep the running conversation context
    messages.append({"role": "user", "content": message})
    response = client.chat_completion(messages, temperature=0.2)  # low temperature keeps replies focused and consistent
    return response['choices'][0]['message']['content'].strip()  # extract the assistant's reply text for display
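# For example, once wired to the UI below, a call looks like (illustrative values):
# respond("How do I balance homework and a job?", [], "Time Management", "Peer Mode")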
### STEP 6
# Sanity-check retrieval with a sample query against the academic-tips chunks
top_results = get_top_chunks("How does poverty affect one's education?", chunk_embeddings, cleaned_chunks)
print(top_results)
# The chat UI (conversation history, message box) is built inside the gr.Blocks
# layout below so it can share the topic and mode selectors;
# ChatInterface requires at least one parameter (a function).
def save_chat_history(history, username):
if not username:
username = "anonymous"
timestamp = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
filename = f"chat_history_{username}_{timestamp}.txt"
with open(filename, "w", encoding="utf-8") as f:
f.write(f"Chat History for {username} - {timestamp}\n\n")
for exchange in history:
if isinstance(exchange, tuple) and len(exchange) == 2:
user_msg, bot_msg = exchange
f.write(f"User: {user_msg}\n")
f.write(f"Bot: {bot_msg}\n\n")
elif isinstance(exchange, dict):
# Handle dictionary format if needed
role = exchange.get("role", "unknown")
content = exchange.get("content", "")
f.write(f"{role.capitalize()}: {content}\n\n")
return filename
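# For example, save_chat_history(history, "sam") writes a file named like
# "chat_history_sam_2025-01-01_12-00-00.txt" and returns that filename.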
with gr.Blocks(
theme=gr.themes.Soft(
primary_hue="purple",
secondary_hue="fuchsia",
neutral_hue="gray",
text_size="lg"
).set(
background_fill_primary='*neutral_200',
        background_fill_secondary='*neutral_100',
background_fill_secondary_dark='secondary_500',
border_color_accent='*secondary_400',
border_color_accent_dark='*secondary_800',
color_accent='*secondary_600',
color_accent_soft='*secondary_200',
color_accent_soft_dark='*secondary_800',
button_primary_background_fill='*secondary_400',
button_primary_background_fill_dark='*secondary_600',
button_primary_text_color='white',
button_primary_border_color='*secondary_700',
button_primary_border_color_dark='*secondary_900'
)
) as demo:
    with gr.Row(scale=1):
        # Single-choice Radio so the string comparisons in respond() match exactly one topic
        chatbot_topic = gr.Radio(["Academia", "Extracurriculars", "Time Management", "Financial Aid"], label="What would you like advice about?")
    with gr.Row(scale=1):
        chatbot_mode = gr.Radio(["Guidance Counselor Mode", "Peer Mode", "Parent Mode"], label="How would you like the chatbot to respond?")
    chat_interface = gr.ChatInterface(
        fn=respond,
        title="Accessible Intelligence Hub",
        description="Ask about your education",
        type="messages",
        additional_inputs=[chatbot_topic, chatbot_mode],  # feeds the selector values into respond()
    )
username_input = gr.Textbox(label="Username")
save_button = gr.Button("Save Chat History")
download_button = gr.File(label="Download Chat History", visible=False)
    save_button.click(
        fn=save_chat_history,
        inputs=[chat_interface.chatbot, username_input],  # read the conversation from the chat UI
        outputs=download_button
    ).then(
        fn=lambda: gr.update(visible=True),
        outputs=download_button
    )
save_button = gr.Button("💾 Save Chat History",
variant="primary",
size="sm")
download_button = gr.File(interactive=True,
visible=True,
elem_classes=["download-btn"])
# Launch the chatbot
demo.launch()
#You may run into errors when you're trying different models. To see the error messages, set debug to True in launch()
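# For example, while testing a new model one might launch with:
# demo.launch(debug=True)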