# Import libraries here
import gradio as gr
import random
import datetime
from huggingface_hub import InferenceClient
# STEP 1: Import the Sentence Transformers library and torch
from sentence_transformers import SentenceTransformer
import torch
# Read the entire contents of each knowledge file and store it in its own variable
with open("poverty_and_education.txt", "r", encoding="utf-8") as file:
    poverty_and_education = file.read()
with open("academic_tips_text.txt", "r", encoding="utf-8") as file:
    academic_tips_text = file.read()
with open("time_management.txt", "r", encoding="utf-8") as file:
    time_management = file.read()
with open("Extracurricular_ideas.txt", "r", encoding="utf-8") as file:
    extracurricular_ideas = file.read()
with open("financial_aid.txt", "r", encoding="utf-8") as file:
    financial_aid = file.read()
# Print one file's text to confirm it loaded
print(academic_tips_text)
# ===== APPLY THE COMPLETE WORKFLOW =====
### STEP 3
def preprocess_text(text):
    # Strip extra whitespace from the beginning and end of the text
    cleaned_text = text.strip()
    # Split the cleaned text on every newline character (\n)
    chunks = cleaned_text.split("\n")
    # Create an empty list to store cleaned chunks
    cleaned_chunks = []
    # Clean each chunk and keep only the non-empty ones
    for chunk in chunks:
        stripped_chunk = chunk.strip()
        if len(stripped_chunk) > 0:
            cleaned_chunks.append(stripped_chunk)
    # Print the cleaned chunks and how many there are
    print(cleaned_chunks)
    num_of_chunks = len(cleaned_chunks)
    print(f"There are {num_of_chunks} chunks")
    # Return the cleaned_chunks
    return cleaned_chunks
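# A quick sanity check of preprocess_text on made-up text (illustrative only):
# preprocess_text("Tip one.\n\n  Tip two.  \nTip three.")
# returns ['Tip one.', 'Tip two.', 'Tip three.']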
# Load the pre-trained embedding model that converts text to vectors
model = SentenceTransformer('all-MiniLM-L6-v2')
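# all-MiniLM-L6-v2 maps each text to a 384-dimensional vector, so encoding a
# list of N chunks below yields a tensor of shape (N, 384)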
### STEP 4
def create_embeddings(text_chunks):
    # Convert each text chunk into a vector embedding and store as a tensor
    chunk_embeddings = model.encode(text_chunks, convert_to_tensor=True)
    # Print the chunk embeddings and their shape
    print(chunk_embeddings)
    print(chunk_embeddings.shape)
    # Return the chunk_embeddings
    return chunk_embeddings
### STEP 5
# Find the most relevant text chunks for a given query
def get_top_chunks(query, chunk_embeddings, text_chunks):
    # Convert the query text into a vector embedding
    query_embedding = model.encode(query, convert_to_tensor=True)
    # Normalize the query embedding to unit length for accurate similarity comparison
    query_embedding_normalized = query_embedding / query_embedding.norm()
    # Normalize all chunk embeddings to unit length for consistent comparison
    chunk_embeddings_normalized = chunk_embeddings / chunk_embeddings.norm(dim=1, keepdim=True)
    # Calculate cosine similarity between the query and all chunks with a matrix multiplication
    similarities = torch.matmul(chunk_embeddings_normalized, query_embedding_normalized)
    print(similarities)
    # Find the indices of the 3 chunks with the highest similarity scores
    top_indices = torch.topk(similarities, k=3).indices
    print(top_indices)
    # Collect the corresponding text chunks
    top_chunks = []
    for i in top_indices:
        top_chunks.append(text_chunks[i])
    # Return the list of most relevant chunks
    return top_chunks
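# Because both sides were normalized to unit length above, the matrix product is
# exactly cosine similarity: cos(q, c) = (q . c) / (||q|| * ||c||), one score in
# [-1, 1] per chunk. With made-up scores tensor([0.71, 0.12, 0.05]), the first
# chunk would rank as by far the most relevant.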
# Preprocess each knowledge file, then embed its chunks
cleaned_chunks = preprocess_text(academic_tips_text)
cleaned_chunks2 = preprocess_text(extracurricular_ideas)
cleaned_chunks3 = preprocess_text(time_management)
cleaned_chunks4 = preprocess_text(financial_aid)
chunk_embeddings = create_embeddings(cleaned_chunks)
chunk_embeddings2 = create_embeddings(cleaned_chunks2)
chunk_embeddings3 = create_embeddings(cleaned_chunks3)
chunk_embeddings4 = create_embeddings(cleaned_chunks4)
# The hosted LLM used to generate responses
client = InferenceClient("openai/gpt-oss-20b")
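# chat_completion expects an OpenAI-style list of {"role": ..., "content": ...}
# dicts, which is exactly what respond builds below. This assumes gpt-oss-20b is
# reachable through the Hugging Face Inference API; any other chat model ID
# would work the same way.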
# Define how the AI responds: retrieve chunks for the chosen topic, then build a
# persona prompt for the chosen mode. The topic and mode values arrive through
# ChatInterface's additional_inputs (wired up in the UI section below).
def respond(message, history, chatbot_topic, chatbot_mode):
    # Retrieve the most relevant chunks for the selected topic
    topic_chunks = []
    if chatbot_topic == "Academia":
        topic_chunks = get_top_chunks(message, chunk_embeddings, cleaned_chunks)
    elif chatbot_topic == "Extracurriculars":
        topic_chunks = get_top_chunks(message, chunk_embeddings2, cleaned_chunks2)
    elif chatbot_topic == "Time Management":
        topic_chunks = get_top_chunks(message, chunk_embeddings3, cleaned_chunks3)
    elif chatbot_topic == "Financial Aid":
        topic_chunks = get_top_chunks(message, chunk_embeddings4, cleaned_chunks4)
    print(topic_chunks)
    # Build the system prompt for the selected persona (if/elif so a later
    # branch cannot overwrite an earlier match)
    if chatbot_mode == "Peer Mode":
        messages = [{"role": "system", "content": f"You are a casual, sometimes funny chatbot who acts like a peer of the person who is asking the question. You relate to their situation and give them relevant advice. You only answer in complete sentences with correct grammar, punctuation, and complete ideas. You respond clearly in under three complete bullet points, under 250 characters. When you give advice, keep in mind the following information: {topic_chunks}"}]
    elif chatbot_mode == "Guidance Counselor Mode":
        messages = [{"role": "system", "content": f"You act as a helpful guidance counselor with an educated understanding of high school life and college admissions. You guide the student to consider their academic potential, while maintaining the passion and balance they need. You only answer in complete sentences with correct grammar, punctuation, and complete ideas. When you give advice, keep in mind the following information: {topic_chunks}"}]
    elif chatbot_mode == "Parent Mode":
        messages = [{"role": "system", "content": f"You are a guiding, nurturing, and protective parent who wants their student to reach their fullest potential while learning to grow up with the proper physical, emotional, and social development. You want to build your student into a responsible adult, but also want them to pursue success in their life and establish a good future. You only answer in complete sentences with correct grammar, punctuation, and complete ideas. When you give advice, keep in mind the following information: {topic_chunks}"}]
    else:
        messages = [{"role": "system", "content": f"You are a friendly, helpful chatbot that gives academic advice to disadvantaged students about their education based on their question. You only answer in complete sentences with correct grammar, punctuation, and complete ideas. When you give advice, keep in mind the following information: {topic_chunks}"}]
    # Keep adding the conversation history so the model has context
    if history:
        messages.extend(history)
    # Add the user's newest message
    messages.append({"role": "user", "content": message})
    # temperature=0.2 keeps responses focused and consistent (it controls
    # randomness, not response length)
    response = client.chat_completion(messages, temperature=0.2)
    # Extract the generated text from the response in a readable format
    return response['choices'][0]['message']['content'].strip()
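# A quick manual test of respond outside the UI (hypothetical inputs; the topic
# and mode strings must match the Radio choices defined further below):
# print(respond("How do I prepare for finals?", [], "Academia", "Peer Mode"))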
### STEP 6
# Sanity-check retrieval with a sample question against the academic-tips chunks
top_results = get_top_chunks("How does poverty affect one's education?", chunk_embeddings, cleaned_chunks)
print(top_results)
# Save the conversation to a text file the user can download
def save_chat_history(history, username):
    if not username:
        username = "anonymous"
    timestamp = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
    filename = f"chat_history_{username}_{timestamp}.txt"
    with open(filename, "w", encoding="utf-8") as f:
        f.write(f"Chat History for {username} - {timestamp}\n\n")
        for exchange in history:
            if isinstance(exchange, tuple) and len(exchange) == 2:
                # Handle the (user, bot) tuple format
                user_msg, bot_msg = exchange
                f.write(f"User: {user_msg}\n")
                f.write(f"Bot: {bot_msg}\n\n")
            elif isinstance(exchange, dict):
                # Handle the "messages" dictionary format
                role = exchange.get("role", "unknown")
                content = exchange.get("content", "")
                f.write(f"{role.capitalize()}: {content}\n\n")
    return filename
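# The saved file looks roughly like this (illustrative contents only):
#   Chat History for maria - 2025-01-15_14-30-00
#
#   User: How do I apply for financial aid?
#   Assistant: ...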
# Build the full UI: theme, topic/mode selectors, chat window, and save controls
with gr.Blocks(
    theme=gr.themes.Soft(
        primary_hue="purple",
        secondary_hue="fuchsia",
        neutral_hue="gray",
        text_size="lg"
    ).set(
        background_fill_primary='*neutral_200',
        background_fill_secondary='*neutral_100',
        background_fill_secondary_dark='*secondary_500',
        border_color_accent='*secondary_400',
        border_color_accent_dark='*secondary_800',
        color_accent='*secondary_600',
        color_accent_soft='*secondary_200',
        color_accent_soft_dark='*secondary_800',
        button_primary_background_fill='*secondary_400',
        button_primary_background_fill_dark='*secondary_600',
        button_primary_text_color='white',
        button_primary_border_color='*secondary_700',
        button_primary_border_color_dark='*secondary_900'
    )
) as demo:
    with gr.Row():
        # Radio enforces a single selection, matching the equality checks in respond
        chatbot_topic = gr.Radio(["Academia", "Extracurriculars", "Time Management", "Financial Aid"], label="What would you like advice about?")
    with gr.Row():
        chatbot_mode = gr.Radio(["Guidance Counselor Mode", "Peer Mode", "Parent Mode"], label="How would you like the chatbot to respond?")
    # ChatInterface gives the user a UI to interact, see their conversation
    # history, and see new messages; it requires at least one parameter (a
    # function). The topic and mode selectors are passed in as additional_inputs.
    chatbot = gr.ChatInterface(
        fn=respond,
        additional_inputs=[chatbot_topic, chatbot_mode],
        title="Accessible Intelligence Hub",
        description="Ask about your education",
        type="messages",
    )
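    # Gradio appends the current values of additional_inputs after (message,
    # history) when it calls respond. Because chatbot_topic and chatbot_mode are
    # already rendered in the Rows above, ChatInterface reuses them rather than
    # re-rendering them (behavior as of recent Gradio 4.x releases).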
    # Optional CSS for the download button (kept disabled):
    # demo.css = """
    # .download-btn {
    #     min-width: 200px !important;
    # }
    # .download-btn .gr-button {
    #     background: var(--button-primary-background-fill) !important;
    #     color: var(--button-primary-text-color) !important;
    # }
    # """
    # Controls for saving the conversation and downloading the resulting file
    username_input = gr.Textbox(label="Username")
    save_button = gr.Button("💾 Save Chat History", variant="primary", size="sm")
    download_button = gr.File(label="Download Chat History", visible=False, elem_classes=["download-btn"])
    # chatbot.chatbot is the Chatbot component inside the ChatInterface; its
    # value is the conversation history that save_chat_history writes out
    save_button.click(
        fn=save_chat_history,
        inputs=[chatbot.chatbot, username_input],
        outputs=download_button
    ).then(
        fn=lambda: gr.update(visible=True),
        outputs=download_button
    )
#launching chatbot
demo.launch()
#You may run into errors when you're trying different models. To see the error messages, set debug to True in launch()