Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
|
@@ -1,33 +1,21 @@
|
|
| 1 |
-
#import libraries here
|
| 2 |
import gradio as gr
|
| 3 |
import random
|
| 4 |
from huggingface_hub import InferenceClient
|
|
|
|
| 5 |
#STEP 1: Import Sentence Transformer Library And Torch
|
| 6 |
from sentence_transformers import SentenceTransformer
|
| 7 |
import torch
|
| 8 |
|
| 9 |
-
|
| 10 |
-
# Read the entire contents of the file and store it in a variable
|
| 11 |
-
charities_text = file.read()
|
| 12 |
-
|
| 13 |
-
with open("financial_advice.txt", "r", encoding="utf-8") as file:
|
| 14 |
-
# Read the entire contents of the file and store it in a variable
|
| 15 |
-
financial_advice_text = file.read()
|
| 16 |
|
| 17 |
-
|
| 18 |
-
|
| 19 |
-
|
| 20 |
|
| 21 |
-
|
| 22 |
-
|
| 23 |
-
|
| 24 |
-
|
| 25 |
-
#with open("financial_aid.txt", "r", encoding="utf-8") as file:
|
| 26 |
-
# Read the entire contents of the file and store it in a variable
|
| 27 |
-
# financial_aid = file.read()
|
| 28 |
|
| 29 |
-
# Print the text below
|
| 30 |
-
print(charities_text)
|
| 31 |
|
| 32 |
# ===== APPLY THE COMPLETE WORKFLOW =====
|
| 33 |
|
|
@@ -49,16 +37,18 @@ def preprocess_text(text):
|
|
| 49 |
cleaned_chunks.append(stripped_chunk)
|
| 50 |
|
| 51 |
# Print cleaned_chunks
|
| 52 |
-
print(cleaned_chunks)
|
| 53 |
|
| 54 |
# Print the length of cleaned_chunks
|
| 55 |
num_of_chunks = len(cleaned_chunks)
|
| 56 |
-
|
| 57 |
|
| 58 |
-
|
| 59 |
# Return the cleaned_chunks
|
| 60 |
return cleaned_chunks
|
| 61 |
|
|
|
|
|
|
|
| 62 |
|
| 63 |
# Load the pre-trained embedding model that converts text to vectors
|
| 64 |
model = SentenceTransformer('all-MiniLM-L6-v2')
|
|
@@ -77,6 +67,8 @@ def create_embeddings(text_chunks):
|
|
| 77 |
# Return the chunk_embeddings
|
| 78 |
return chunk_embeddings
|
| 79 |
|
|
|
|
|
|
|
| 80 |
# Call the create_embeddings function and store the result in a new chunk_embeddings variable
|
| 81 |
#chunk_embeddings = create_embeddings(cleaned_chunks) # Complete this line
|
| 82 |
|
|
@@ -96,13 +88,13 @@ def get_top_chunks(query, chunk_embeddings, text_chunks):
|
|
| 96 |
similarities = torch.matmul(chunk_embeddings_normalized, query_embedding_normalized) # Complete this line
|
| 97 |
|
| 98 |
# Print the similarities
|
| 99 |
-
print(similarities)
|
| 100 |
|
| 101 |
# Find the indices of the 3 chunks with highest similarity scores
|
| 102 |
top_indices = torch.topk(similarities, k=3).indices
|
| 103 |
|
| 104 |
# Print the top indices
|
| 105 |
-
print(top_indices)
|
| 106 |
|
| 107 |
# Create an empty list to store the most relevant chunks
|
| 108 |
top_chunks = []
|
|
@@ -115,18 +107,6 @@ def get_top_chunks(query, chunk_embeddings, text_chunks):
|
|
| 115 |
# Return the list of most relevant chunks
|
| 116 |
return top_chunks
|
| 117 |
|
| 118 |
-
# Print the top results
|
| 119 |
-
#print(top_results)
|
| 120 |
-
cleaned_chunks = preprocess_text(charities_text)
|
| 121 |
-
cleaned_chunks2= preprocess_text(financial_advice_text)
|
| 122 |
-
#cleaned_chunks3= preprocess_text(time_management)
|
| 123 |
-
#cleaned_chunks4= preprocess_text(financial_aid)
|
| 124 |
-
|
| 125 |
-
|
| 126 |
-
chunk_embeddings = create_embeddings(cleaned_chunks)
|
| 127 |
-
chunk_embeddings2 = create_embeddings(cleaned_chunks2)
|
| 128 |
-
#chunk_embeddings3 = create_embeddings(cleaned_chunks3)
|
| 129 |
-
#chunk_embeddings4 = create_embeddings(cleaned_chunks4)
|
| 130 |
|
| 131 |
|
| 132 |
#AI API being used
|
|
@@ -134,40 +114,27 @@ client= InferenceClient("openai/gpt-oss-20b")
|
|
| 134 |
#defining role of AI and user
|
| 135 |
|
| 136 |
information=""
|
|
|
|
| 137 |
def respond(message,history):
|
| 138 |
topic_chunks=[]
|
| 139 |
if chatbot_topic=="Helping Charities":
|
| 140 |
|
| 141 |
-
topic_chunks=get_top_chunks(message,
|
| 142 |
-
print(topic_chunks)
|
| 143 |
|
| 144 |
elif chatbot_topic=="Financial Aid":
|
| 145 |
-
topic_chunks=get_top_chunks(message,
|
| 146 |
-
print(topic_chunks)
|
| 147 |
-
|
| 148 |
|
| 149 |
-
#elif chatbot_topic=="Time Management":
|
| 150 |
-
# topic_chunks=get_top_chunks(message, chunk_embeddings3, cleaned_chunks3)
|
| 151 |
-
# print(topic_chunks)
|
| 152 |
|
| 153 |
-
|
| 154 |
-
#elif chatbot_topic=="Financial Aid":
|
| 155 |
-
# topic_chunks=get_top_chunks(message, chunk_embeddings4, cleaned_chunks4)
|
| 156 |
-
# print(topic_chunks)
|
| 157 |
-
|
| 158 |
#return information
|
| 159 |
#return topic_chunks
|
| 160 |
chatbot_mode=="Advice Mode"
|
| 161 |
messages = [{"role": "assistant", "content": f"You are a helpful and insightful chatbot who acts like a financial advisor of a university student who wants to learn to manage their personal finances. You analyse their situation and give relevant advice and insights. You only answer in complete sentences with correct grammar, punctuation, and complete ideas. You respond clearly in under five complete bullet points under 500 characters. When you give advice, keep in mind the following information {topic_chunks}"}]
|
| 162 |
|
| 163 |
-
|
| 164 |
-
|
| 165 |
-
|
| 166 |
-
# if chatbot_mode=="Parent Mode":
|
| 167 |
-
# messages = [{"role": "assistant", "content": f"You are a guiding, nurturing, and protective parent who wants their student to reach their fullest potential while learning to grow up with the proper physical, emotional, and social development. You want to build your student into a responsible adult, but also want them to pursue success in their life and establish a good future. You only answer in complete sentences with correct grammar, punctuation, and complete ideas. When you give advice, keep in mind the following information {information}"}]
|
| 168 |
-
|
| 169 |
-
#else:
|
| 170 |
-
# messages = [{"role": "assistant", "content": f"You are a friendly, helpful chatbot that gives academic advice to disadvantaged students about their education based on their question. You only answer in complete sentences with correct grammar, punctuation, and complete ideas. When you give advice, keep in mind the following information {topic_chunks}"}]
|
| 171 |
|
| 172 |
if history:
|
| 173 |
messages.extend(history) #keep adding history
|
|
@@ -251,45 +218,7 @@ with gr.Blocks(
|
|
| 251 |
title="Finance Management Hub",
|
| 252 |
description="Ask about your personal finance",
|
| 253 |
type="messages",
|
| 254 |
-
|
| 255 |
-
|
| 256 |
)
|
| 257 |
-
#with gr.Row(scale=1):
|
| 258 |
-
#chatbot_topic=gr.CheckboxGroup(["Academia", "Extracurriculars", "Time Management", "Financial Aid"], label="What would you like advice about?")
|
| 259 |
-
#with gr.Row(scale=1):
|
| 260 |
-
#chatbot_mode=gr.CheckboxGroup(["Guidance Counselor Mode", "Peer Mode", "Parent Mode"], label="How would you like the chatbot to respond?")
|
| 261 |
-
|
| 262 |
-
#demo.css = """
|
| 263 |
-
#.download-btn {
|
| 264 |
-
# min-width: 200px !important;
|
| 265 |
-
#}
|
| 266 |
-
#.download-btn .gr-button {
|
| 267 |
-
# background: var(--button-primary-background-fill) !important;
|
| 268 |
-
# color: var(--button-primary-text-color) !important;
|
| 269 |
-
#}
|
| 270 |
-
#"""
|
| 271 |
-
|
| 272 |
-
|
| 273 |
-
#with gr.Blocks() as demo:
|
| 274 |
-
#chatbot = gr.Chatbot()
|
| 275 |
-
#username_input = gr.Textbox(label="Username")
|
| 276 |
-
#save_button = gr.Button("Save Chat History")
|
| 277 |
-
#download_button = gr.File(label="Download Chat History", visible=False)
|
| 278 |
-
|
| 279 |
-
#save_button.click(
|
| 280 |
-
# fn=save_chat_history,
|
| 281 |
-
# inputs=[chatbot, username_input],
|
| 282 |
-
# outputs=download_button
|
| 283 |
-
#).then(
|
| 284 |
-
# fn=lambda: gr.update(visible=True),
|
| 285 |
-
# outputs=download_button
|
| 286 |
-
# )
|
| 287 |
-
#save_button = gr.Button("💾 Save Chat History",
|
| 288 |
-
# variant="primary",
|
| 289 |
-
# size="sm")
|
| 290 |
-
#download_button = gr.File(interactive=True,
|
| 291 |
-
# visible=True,
|
| 292 |
-
# elem_classes=["download-btn"])
|
| 293 |
|
| 294 |
#launching chatbot
|
| 295 |
demo.launch()
|
|
|
|
|
|
|
| 1 |
import gradio as gr
|
| 2 |
import random
|
| 3 |
from huggingface_hub import InferenceClient
|
| 4 |
+
|
| 5 |
#STEP 1: Import Sentence Transformer Library And Torch
|
| 6 |
from sentence_transformers import SentenceTransformer
|
| 7 |
import torch
|
| 8 |
|
| 9 |
+
# LOAD FILES
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 10 |
|
| 11 |
+
def load_files(path):
    """Return the entire text content of the file at *path*, decoded as UTF-8.

    Parameters
    ----------
    path : str
        Path to a UTF-8 encoded text file.

    Returns
    -------
    str
        The full file contents.
    """
    with open(path, "r", encoding="utf-8") as f:
        # BUG FIX: the original returned `f.read` (the bound method object)
        # instead of calling it, so callers received a method, not the text —
        # every downstream `preprocess_text(...)` call would then fail.
        return f.read()
|
| 14 |
|
| 15 |
+
|
| 16 |
+
charities_text = load_files("charities.txt")
|
| 17 |
+
financial_advice_text = load_files("financial_advice.txt")
|
|
|
|
|
|
|
|
|
|
|
|
|
| 18 |
|
|
|
|
|
|
|
| 19 |
|
| 20 |
# ===== APPLY THE COMPLETE WORKFLOW =====
|
| 21 |
|
|
|
|
| 37 |
cleaned_chunks.append(stripped_chunk)
|
| 38 |
|
| 39 |
# Print cleaned_chunks
|
| 40 |
+
#print(cleaned_chunks)
|
| 41 |
|
| 42 |
# Print the length of cleaned_chunks
|
| 43 |
num_of_chunks = len(cleaned_chunks)
|
| 44 |
+
# print(num_of_chunks)
|
| 45 |
|
| 46 |
+
# print(f"There are {num_of_chunks} amount of chunks")
|
| 47 |
# Return the cleaned_chunks
|
| 48 |
return cleaned_chunks
|
| 49 |
|
| 50 |
+
cleaned_charities = preprocess_text(charities_text)
|
| 51 |
+
cleaned_finance = preprocess_text(financial_advice_text)
|
| 52 |
|
| 53 |
# Load the pre-trained embedding model that converts text to vectors
|
| 54 |
model = SentenceTransformer('all-MiniLM-L6-v2')
|
|
|
|
| 67 |
# Return the chunk_embeddings
|
| 68 |
return chunk_embeddings
|
| 69 |
|
| 70 |
# Pre-compute the vector embeddings for each knowledge base once at startup,
# so respond() only has to run similarity search per user message.
charity_embeddings = create_embeddings(cleaned_charities)
# BUG FIX: this was bound to `finance_embeddings2`, but respond() looks up
# `finance_embeddings` for the "Financial Aid" topic — the stale name raised
# a NameError the first time that branch executed.
finance_embeddings = create_embeddings(cleaned_finance)
|
| 72 |
# Call the create_embeddings function and store the result in a new chunk_embeddings variable
|
| 73 |
#chunk_embeddings = create_embeddings(cleaned_chunks) # Complete this line
|
| 74 |
|
|
|
|
| 88 |
similarities = torch.matmul(chunk_embeddings_normalized, query_embedding_normalized) # Complete this line
|
| 89 |
|
| 90 |
# Print the similarities
|
| 91 |
+
#print(similarities)
|
| 92 |
|
| 93 |
# Find the indices of the 3 chunks with highest similarity scores
|
| 94 |
top_indices = torch.topk(similarities, k=3).indices
|
| 95 |
|
| 96 |
# Print the top indices
|
| 97 |
+
#print(top_indices)
|
| 98 |
|
| 99 |
# Create an empty list to store the most relevant chunks
|
| 100 |
top_chunks = []
|
|
|
|
| 107 |
# Return the list of most relevant chunks
|
| 108 |
return top_chunks
|
| 109 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 110 |
|
| 111 |
|
| 112 |
#AI API being used
|
|
|
|
| 114 |
#defining role of AI and user
|
| 115 |
|
| 116 |
information=""
|
| 117 |
+
|
| 118 |
def respond(message,history):
|
| 119 |
topic_chunks=[]
|
| 120 |
if chatbot_topic=="Helping Charities":
|
| 121 |
|
| 122 |
+
topic_chunks=get_top_chunks(message, charity_embeddings, cleaned_charities)
|
| 123 |
+
#print(topic_chunks)
|
| 124 |
|
| 125 |
elif chatbot_topic=="Financial Aid":
|
| 126 |
+
topic_chunks=get_top_chunks(message, finance_embeddings, cleaned_finance)
|
| 127 |
+
#print(topic_chunks)
|
|
|
|
| 128 |
|
|
|
|
|
|
|
|
|
|
| 129 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 130 |
#return information
|
| 131 |
#return topic_chunks
|
| 132 |
chatbot_mode=="Advice Mode"
|
| 133 |
messages = [{"role": "assistant", "content": f"You are a helpful and insightful chatbot who acts like a financial advisor of a university student who wants to learn to manage their personal finances. You analyse their situation and give relevant advice and insights. You only answer in complete sentences with correct grammar, punctuation, and complete ideas. You respond clearly in under five complete bullet points under 500 characters. When you give advice, keep in mind the following information {topic_chunks}"}]
|
| 134 |
|
| 135 |
+
if chatbot_mode == "Advice Mode":
|
| 136 |
+
role_message = ( "You are a helpful and insightful chatbot who acts like a financial " "advisor of a university student. Respond in under five bullet points, " f"under 500 characters, using this context: {topic_chunks}" )
|
| 137 |
+
else: role_message = f"You are a helpful chatbot. Use this context: {topic_chunks}"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 138 |
|
| 139 |
if history:
|
| 140 |
messages.extend(history) #keep adding history
|
|
|
|
| 218 |
title="Finance Management Hub",
|
| 219 |
description="Ask about your personal finance",
|
| 220 |
type="messages",
|
|
|
|
|
|
|
| 221 |
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 222 |
|
| 223 |
#launching chatbot
|
| 224 |
demo.launch()
|