import datetime

import gradio as gr
import pandas as pd
import torch
from huggingface_hub import InferenceClient
from sentence_transformers import SentenceTransformer


# LOAD FILES
def load_files(path):
    with open(path, "r", encoding="utf-8") as f:
        return f.read()


charities_text = load_files("charities.txt")
financial_advice_text = load_files("financial_advice.txt")


### Preprocess the source documents into clean text chunks
def preprocess_text(text):
    # Strip extra whitespace from the beginning and the end of the text
    cleaned_text = text.strip()
    # Split the cleaned text on every newline character (\n)
    chunks = cleaned_text.split("\n")
    # Keep only the non-empty chunks, each stripped of surrounding whitespace
    cleaned_chunks = []
    for chunk in chunks:
        stripped_chunk = chunk.strip()
        if len(stripped_chunk) > 0:
            cleaned_chunks.append(stripped_chunk)
    # Number of chunks, handy for debugging
    num_of_chunks = len(cleaned_chunks)
    # print(num_of_chunks)
    return cleaned_chunks


cleaned_charities = preprocess_text(charities_text)
cleaned_finance = preprocess_text(financial_advice_text)

# Load the pre-trained embedding model that converts text to vectors
model = SentenceTransformer("all-MiniLM-L6-v2")


### STEP 4
def create_embeddings(text_chunks):
    # Convert each text chunk into a vector embedding and store as a tensor
    chunk_embeddings = model.encode(text_chunks, convert_to_tensor=True)
    # Print the embeddings and their shape (one row per chunk)
    print(chunk_embeddings)
    print(chunk_embeddings.shape)
    return chunk_embeddings


charity_embeddings = create_embeddings(cleaned_charities)
finance_embeddings = create_embeddings(cleaned_finance)


### STEP 5
# Find the most relevant text chunks for a given query
def get_top_chunks(query, chunk_embeddings, text_chunks):
    # Convert the query text into a vector embedding
    query_embedding = model.encode(query, convert_to_tensor=True)
    # Normalize the query embedding to unit length for accurate similarity comparison
    query_embedding_normalized = query_embedding / query_embedding.norm()
    # Normalize all chunk embeddings to unit length for consistent comparison
    chunk_embeddings_normalized = chunk_embeddings / chunk_embeddings.norm(dim=1, keepdim=True)
    # Cosine similarity between the query and every chunk via matrix multiplication
    similarities = torch.matmul(chunk_embeddings_normalized, query_embedding_normalized)
    # Indices of the 3 chunks with the highest similarity scores
    top_indices = torch.topk(similarities, k=3).indices
    # Retrieve the corresponding text chunks
    top_chunks = []
    for i in top_indices:
        top_chunks.append(text_chunks[i])
    return top_chunks
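
# A quick sanity check of the retriever, left commented out so it does not run
# at import time (a hedged sketch: the query string below is illustrative, not
# part of the original app):
#
#   for chunk in get_top_chunks("How can students save money?", finance_embeddings, cleaned_finance):
#       print(chunk)
#
# Because both the query and chunk vectors are normalized to unit length above,
# the matrix multiplication in get_top_chunks computes exactly the cosine
# similarity cos(q, c) = (q . c) / (||q|| * ||c||) for every chunk at once.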
# CSV files
columns = ["TransactionID", "UserID", "Date", "Description", "Amount", "Type", "Extra1", "Extra2"]
spendings = pd.read_csv("september_transactions_detailed.csv", names=columns)
spendings["Amount"] = pd.to_numeric(spendings["Amount"], errors="coerce").fillna(0)


def get_advice(user_id):
    user_data = spendings[spendings["UserID"] == user_id]
    if user_data.empty:
        return "No spending data found for this user."
    # Only consider expenses
    expenses = user_data[user_data["Type"].str.lower() == "expense"]
    total_spent = expenses["Amount"].sum()
    category_spent = expenses.groupby("Description")["Amount"].sum().to_dict()
    advice = []
    for cat, amt in category_spent.items():
        # Flag any category that takes up more than 30% of total spending
        if amt > total_spent * 0.3:
            advice.append(f"You spend a lot on {cat}. Consider budgeting here.")
    advice_text = " | ".join(advice) if advice else "Your spending looks balanced across categories."
    summary_text = f"Total spent: ${total_spent:.2f}. Category breakdown: {category_spent}. Advice: {advice_text}"
    return summary_text


# AI API being used
client = InferenceClient("openai/gpt-oss-20b")


# Defining the role of the AI and the user
def respond(message, history, chatbot_topic_values, user_id=1):
    topic_chunks = []
    if chatbot_topic_values and "Helping Charities" in chatbot_topic_values:
        topic_chunks = get_top_chunks(message, charity_embeddings, cleaned_charities)
    elif chatbot_topic_values and "Financial Aid" in chatbot_topic_values:
        topic_chunks = get_top_chunks(message, finance_embeddings, cleaned_finance)

    csv_advice = get_advice(user_id)
    role_message = (
        "You are a helpful and insightful chatbot who acts like a financial "
        "advisor for university students. DO NOT ask the user for additional input. "
        "You should only output your answers as text or bullet points, not tables or grids. "
        "Do not output any markdown; keep responses short and concise, around 500 characters maximum. "
        f"Use the following spending data from the CSV file to provide advice: {csv_advice}. "
        f"Also consider this context: {topic_chunks}"
    )

    # Earlier mode-based prompts, kept for reference:
    # if chatbot_mode_values and "General Advice" in chatbot_mode_values:
    #     role_message = (
    #         "You are a helpful and insightful chatbot who acts like a financial "
    #         "advisor of a university student. Respond in under five bullet points, "
    #         f"under 500 characters, using this context: {topic_chunks}"
    #     )
    # elif chatbot_mode_values and "Personal Advice" in chatbot_mode_values:
    #     role_message = (
    #         "You are a helpful and insightful chatbot who acts like a financial advisor. "
    #         "DO NOT ask the user for additional numbers or input. "
    #         f"Use the following spending data from the CSV file to provide advice: {csv_advice}"
    #     )
    # else:
    #     role_message = f"You are a helpful chatbot. Use this context: {topic_chunks}"

    messages = [{"role": "system", "content": role_message}]
    if history:
        messages.extend(history)
    messages.append({"role": "user", "content": message})

    response = client.chat_completion(messages, temperature=0.2)
    return response.choices[0].message.content.strip()
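
# A minimal smoke test of respond() outside the Gradio UI, left commented out
# (a sketch: it assumes the CSV contains rows for UserID 1 and that a Hugging
# Face token with inference access is configured in the environment):
#
#   reply = respond("How should I budget this month?", [], ["Financial Aid"], user_id=1)
#   print(reply)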
### STEP 6
# Call the preprocess_text function and store the result in a cleaned_chunks variable
cleaned_chunks = preprocess_text(financial_advice_text)
top_results = get_top_chunks("What financial advice can you give me?", finance_embeddings, cleaned_finance)

# A standalone ChatInterface gives the user a UI to interact, see their conversation
# history, and see new messages using the built-in gr feature.
# ChatInterface requires at least one parameter (a function).
chatbot = gr.ChatInterface(
    respond,
    type="messages",
    title="Finance Management Hub",
    theme="Taithrah/Minimal",
)


def save_chat_history(history, username):
    if not username:
        username = "anonymous"
    timestamp = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
    filename = f"chat_history_{username}_{timestamp}.txt"
    with open(filename, "w", encoding="utf-8") as f:
        f.write(f"Chat History for {username} - {timestamp}\n\n")
        for exchange in history:
            if isinstance(exchange, tuple) and len(exchange) == 2:
                user_msg, bot_msg = exchange
                f.write(f"User: {user_msg}\n")
                f.write(f"Bot: {bot_msg}\n\n")
            elif isinstance(exchange, dict):
                # Handle the messages-style dictionary format
                role = exchange.get("role", "unknown")
                content = exchange.get("content", "")
                f.write(f"{role.capitalize()}: {content}\n\n")
    return filename


with gr.Blocks(
    theme=gr.themes.Soft(
        primary_hue="blue",
        secondary_hue="fuchsia",
        neutral_hue="gray",
        text_size="lg",
    ).set(
        background_fill_primary="*neutral_200",
        background_fill_secondary="*neutral_100",
        background_fill_secondary_dark="*secondary_500",
        border_color_accent="*secondary_400",
        border_color_accent_dark="*secondary_800",
        color_accent="*secondary_600",
        color_accent_soft="*secondary_200",
        color_accent_soft_dark="*secondary_800",
        button_primary_background_fill="*secondary_400",
        button_primary_background_fill_dark="*secondary_600",
        button_primary_text_color="white",
        button_primary_border_color="*secondary_700",
        button_primary_border_color_dark="*secondary_900",
    )
) as demo:
    with gr.Row(scale=1):
        chatbot_topic = gr.CheckboxGroup(
            ["Helping Charities", "Financial Aid"],
            label="What would you like advice about?",
        )
    gr.ChatInterface(
        fn=lambda msg, hist, topic_vals: respond(msg, hist, topic_vals),
        title="Finance Management Hub",
        description="Ask about your personal finance",
        type="messages",
        additional_inputs=[chatbot_topic],
    )

# Launching the chatbot
demo.launch()
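
# save_chat_history() is defined above but never wired into the UI. A minimal
# sketch of one way to hook it up inside the gr.Blocks context (hypothetical:
# the Textbox, Button, File, and State components below are not part of the
# original app, and the chat history would need to be tracked in the gr.State):
#
#   with demo:
#       username_box = gr.Textbox(label="Username")
#       save_btn = gr.Button("Save chat history")
#       saved_file = gr.File(label="Saved transcript")
#       history_state = gr.State([])
#       save_btn.click(save_chat_history, inputs=[history_state, username_box], outputs=saved_file)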