anniesaxena commited on
Commit
a418c45
·
verified ·
1 Parent(s): 7da6426

Made Chatbot (has buttons/images)

Browse files
Files changed (1) hide show
  1. app.py +60 -21
app.py CHANGED
@@ -1,46 +1,85 @@
1
  import gradio as gr
2
- from huggingface_hub import InferenceClient
 
 
 
3
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4
  client = InferenceClient("google/gemma-2-2b-it")
5
 
 
6
  def respond(message, history):
7
- messages = [{"role": "system", "content": "I am a mean chatbot."}]
 
 
8
 
9
- # add all previous messages to the messages list
 
10
  if history:
11
  for user_msg, assistant_msg in history:
12
  messages.append({"role": "user", "content": user_msg})
13
  messages.append({"role": "assistant", "content": assistant_msg})
14
 
 
15
  # add the current user's message to the messages list
16
  messages.append({"role": "user", "content": message})
17
 
 
18
  # makes the chat completion API call,
19
  # sending the messages and other parameters to the model
20
  # implements streaming, where one word/token appears at a time
21
-
22
  response = ""
23
 
 
 
24
  for message in client.chat_completion(
25
  messages,
26
  max_tokens=500,
27
  temperature=.1,
28
  stream=True):
 
 
 
 
29
 
30
- token = message.choices[0].delta.content
31
- response += token
32
- yield response
33
-
34
- chatbot = gr.ChatInterface(respond, examples = ["Learn About Stocks", "Help Me Budget"], title = "ChaChingas", description = "This is a financial literacy bot!")
35
-
36
- with gr.Blocks() as chatbot:
37
- gr.Image(
38
- value = "/content/Banner.png",
39
- show_label = False,
40
- show_share_button= False,
41
- show_download_button= False )
42
- gr.ChatInterface(respond, type = "messages")
43
-
44
-
45
- chatbot.launch()
46
-
 
1
  import gradio as gr
2
+ from huggingface_hub import InferenceClient #imports huggingface models
3
+ from sentence_transformers import SentenceTransformer
4
+ import torch
5
+ import numpy as np
6
 
7
+
8
# --- Knowledge-base setup (runs once at import) -----------------------------
# Read the plain-text knowledge file bundled with the Space.
knowledge_path = "/content/Knowledge.txt"
with open(knowledge_path, "r", encoding="utf-8") as kb_file:
    knowledge_text = kb_file.read()

# Split into paragraph-level chunks (blank-line separated), dropping empties.
chunks = []
for paragraph in knowledge_text.split("\n\n"):
    paragraph = paragraph.strip()
    if paragraph:
        chunks.append(paragraph)

# Small, fast sentence-embedding model used for retrieval.
embedder = SentenceTransformer('all-MiniLM-L6-v2')

# Embed every chunk up front (as a tensor) so each query needs only one
# encode call plus a matrix product.
chunk_embeddings = embedder.encode(chunks, convert_to_tensor=True)
23
def get_relevant_context(query, top_k=3):
    """
    Return a context string built from the knowledge-base chunks most
    similar to *query*.

    Parameters
    ----------
    query : str
        The user's message to retrieve supporting passages for.
    top_k : int, optional
        Maximum number of chunks to include (default 3). Clamped to the
        number of available chunks, since ``torch.topk`` raises when
        ``k`` exceeds the input size.

    Returns
    -------
    str
        The selected chunks joined by blank lines, most similar first.
    """
    # Embed and L2-normalize the query so the dot product below is a
    # cosine similarity.
    query_embedding = embedder.encode(query, convert_to_tensor=True)
    query_embedding = query_embedding / query_embedding.norm()

    # Normalize each chunk embedding along the embedding dimension.
    norm_chunk_embeddings = chunk_embeddings / chunk_embeddings.norm(dim=1, keepdim=True)

    # Cosine similarity between the query and every chunk.
    similarities = torch.matmul(norm_chunk_embeddings, query_embedding)

    # Clamp k so a large top_k (or a tiny knowledge base) cannot crash topk.
    k = min(top_k, len(chunks))
    top_k_indices = torch.topk(similarities, k=k).indices.cpu().numpy()

    # Concatenate the winning chunks into a single context string.
    return "\n\n".join(chunks[i] for i in top_k_indices)
44
+
45
+
46
# Shared Hugging Face Inference API client for the Gemma 2 2B instruct model.
client = InferenceClient("google/gemma-2-2b-it")
47
 
48
+
49
def respond(message, history):
    """
    Stream a model reply for *message*, given the prior chat *history*.

    Parameters
    ----------
    message : str
        The user's latest message.
    history : list[tuple[str, str]] | None
        Prior (user, assistant) pairs as supplied by gr.ChatInterface.

    Yields
    ------
    str
        The partial response so far, growing one streamed token at a time.
    """
    # Fixed persona/scope instructions. (The original literal had broken
    # quoting — nested double quotes and a stray curly quote — which was a
    # syntax error; reconstructed here as one clean string.)
    system_prompt = (
        "You are ChaChingas, an AI financial advisor for students and "
        "low-income families. Only answer questions about budgeting, saving, "
        "debt, or personal finance. If a user asks about unrelated topics "
        "like recipes, sports, or entertainment, politely say: 'I'm here to "
        "help with money and budgeting—ask me anything about that!' Speak "
        "clearly, keep answers short, and use simple language. When asked "
        "about budgeting, explain the 50/30/20 rule: 50% for needs, 30% for "
        "wants, 20% for savings or debt. Be supportive, practical, and easy "
        "to understand. Avoid giving investment, tax, or legal advice, and "
        "never ask for or handle sensitive personal financial information."
    )

    # Retrieve knowledge-base passages relevant to this question and surface
    # them in the system message. (The original computed this context but
    # never used it, so retrieval had no effect on answers.)
    context = get_relevant_context(message, top_k=3)
    messages = [{
        "role": "system",
        "content": f"{system_prompt}\n\nUse this background information when relevant:\n{context}",
    }]

    # Replay the prior conversation so the model sees the full dialogue.
    if history:
        for user_msg, assistant_msg in history:
            messages.append({"role": "user", "content": user_msg})
            messages.append({"role": "assistant", "content": assistant_msg})

    # The current user turn goes last.
    messages.append({"role": "user", "content": message})

    # Stream the chat completion, yielding the growing response so the UI
    # shows one token at a time. Loop variable renamed from `message` to
    # avoid shadowing the function parameter.
    response = ""
    for chunk in client.chat_completion(
        messages,
        max_tokens=500,
        temperature=.1,
        stream=True,
    ):
        token = chunk.choices[0].delta.content
        if token:  # final/stop chunks can carry None content — skip them
            response += token
        yield response
83
 
84
# Build the Gradio chat UI (title, description, clickable example prompts)
# around the streaming respond() generator, then start the web server.
chatbot = gr.ChatInterface(respond, examples = ["Learn About Stocks", "Help me Budget"], title = "ChaChingus", description = "This is a financial literacy chatbot")
chatbot.launch()