File size: 4,527 Bytes
235b1ba
6ba9a57
 
 
 
f20e203
 
29f41ae
bca5dd6
0dfa422
 
f20e203
e91337f
 
f9f3be8
e91337f
 
 
f9f3be8
e91337f
 
 
 
ef74b61
41bc4c0
5365751
fef5776
235b1ba
5365751
 
 
 
 
 
 
 
 
 
 
7a49b3f
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5365751
d988bde
dad409f
c63d6bc
df40ed1
522c4c0
a7bd1f8
d988bde
 
a45d244
d988bde
a45d244
a7bd1f8
 
d988bde
f4f569e
d988bde
0d31859
d988bde
 
 
23ea263
d6f6191
baf17f8
4813d63
baf17f8
51d373a
 
784be43
 
 
 
51d373a
 
 
d460421
a0541ae
c0e3a66
d6f6191
4813d63
8734997
b50b32a
64308a0
762a536
6b3d127
 
762a536
1b710a2
d460421
 
 
 
8061387
 
3be9cdc
91bbdd0
c411742
 
 
 
 
1ea5603
 
 
 
6ec3458
 
d988bde
 
235b1ba
0bc90b3
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
import gradio as gr
from huggingface_hub import InferenceClient
from sentence_transformers import SentenceTransformer
import torch
import numpy as np

# Soft Gradio theme in pink tones for the BloomBot UI.
theme = gr.themes.Soft(
    primary_hue="rose",
    secondary_hue="zinc",
    neutral_hue="pink",
)

# Extra CSS that overrides the primary background fill in both color modes.
custom_css = """
:root { /* This applies to the light mode */
    --background-fill-primary: *primary_100 !important; /* Light pink */
}

.dark { /* This applies to the dark mode */
    --background-fill-primary: #FFB6C1 !important; /* Hot pink */
}
"""


# Load the plain-text knowledge base that backs retrieval.
with open("knowledge.txt", "r", encoding="utf-8") as f:
    knowledge_base = f.read()

print("Knowledge base loaded.")

# One retrieval chunk per line; drop blank lines and surrounding whitespace.
cleaned_text = knowledge_base.strip()
chunks = cleaned_text.split("\n")
cleaned_chunks = [line.strip() for line in chunks if line.strip()]
print(cleaned_chunks)

# Embed every chunk once at startup so each query only embeds the question.
model = SentenceTransformer('all-MiniLM-L6-v2')
chunk_embeddings = model.encode(cleaned_chunks, convert_to_tensor=True)
print(chunk_embeddings)

def get_top_chunks(query, k=3):
  """Return the *k* knowledge-base chunks most similar to *query*.

  Embeds the query with the module-level SentenceTransformer, ranks it
  against the precomputed ``chunk_embeddings`` by cosine similarity, and
  returns the matching chunk texts, most similar first.

  Args:
    query: Free-text user question to embed and match.
    k: Number of chunks to return (default 3, capped at the corpus size).

  Returns:
    list[str]: The top-k chunk strings from ``cleaned_chunks``.
  """
  query_embedding = model.encode(query, convert_to_tensor=True)

  # Normalize both sides so the matmul below is cosine similarity.
  query_embedding_normalized = query_embedding / query_embedding.norm()
  chunk_embeddings_normalized = chunk_embeddings / chunk_embeddings.norm(dim=1, keepdim=True)

  similarities = torch.matmul(chunk_embeddings_normalized, query_embedding_normalized)

  # Never request more results than there are chunks (topk raises otherwise).
  k = min(k, similarities.shape[0])
  top_indices = torch.topk(similarities, k=k).indices

  # BUG FIX: indices refer to `cleaned_chunks` (the list that was embedded),
  # not the raw `chunks` list — indexing `chunks` returned the wrong text
  # whenever blank/whitespace lines had been filtered out.
  return [cleaned_chunks[int(i)] for i in top_indices]

# Hosted HF inference client for the chat model (Gemma 3 27B instruct).
client = InferenceClient("google/gemma-3-27b-it")

def respond(message, history):
    """Stream a BloomBot reply for *message* given the chat *history*.

    Retrieves the most relevant knowledge-base chunks, builds the prompt
    (system persona + history + user turn), and yields the accumulated
    response after each streamed token so the UI updates incrementally.

    Args:
        message: The user's latest chat message.
        history: Prior turns in Gradio "messages" format (list of role/content
            dicts), or falsy when the conversation is new.

    Yields:
        str: The response text accumulated so far.
    """
    info = get_top_chunks(message)
    messages = [{"role": "system" , "content": f"Your name is BloomBot and you're a supportive and helpful chatbot catered towards women of all ages. You're friendly and caring. You give clear appropiate explainations with {info} and keep your explainations to 10 sentences maximum. You should make sure of the users age so you can give the most appropiate answer."
                }]
    if history:
        messages.extend(history)

    messages.append({"role": "user", "content": message})

    response = ""

    # BUG FIXES: the loop variable previously shadowed the `message`
    # parameter; `delta.content` can be None on some stream events (would
    # raise TypeError on +=); and `yield` sat outside the loop, so the UI
    # only ever saw the final text instead of a live stream.
    for event in client.chat_completion(
        messages,
        max_tokens=500,
        stream=True,
        top_p=.2,
    ):
        token = event.choices[0].delta.content
        if token:
            response += token
            yield response
    
def display_image():
    """Return the path of the banner image shown above the chat UI."""
    banner_path = "Screenshot 2025-06-12 at 10.53.59โ€ฏAM.png"
    return banner_path

def show_info(topic):
    """Return the hotline phone number for *topic*, or a fallback prompt.

    BUG FIX: numbers were stored as bare ints (e.g. 18009949662) and so
    rendered as an unreadable digit run in the textbox; they are now
    pre-formatted 1-XXX-XXX-XXXX strings (same digits, same order).

    Args:
        topic: One of the dropdown choices, or anything else for the fallback.

    Returns:
        str: A formatted hotline number, or the selection prompt when the
        topic is unknown (e.g. no selection yet).
    """
    responses = {
        "General Health": "1-800-994-9662",
        "Maternal Mental Health": "1-833-852-6262",
        "Domestic Violence": "1-800-799-7233",
        "Postpartum Support": "1-800-994-4773",
    }
    return responses.get(topic, "Select a topic to see more info.")


# BUG FIX: `custom_css` was defined at the top of the file but never passed
# to gr.Blocks, so the background-color overrides had no effect.
with gr.Blocks(theme=theme, css=custom_css) as chatbot:
    # Banner image shown above the chat interface.
    gr.Image(display_image())
    gr.ChatInterface(
        respond,
        type="messages",
        title="Hi, I'm BloomBot! ๐ŸŒธ",
        textbox=gr.Textbox(placeholder="Share Your Age and Ask Me Anything!"),
        description="This tool is here to listen and provide information on female health topics, and all discussions will be kept confidential. โค๏ธโ€๐Ÿฉน",
        examples=["What are the common symptoms of menopause?",
                  "What are some vitamins that are good for teenage girls?",
                  "What should I know about puberty?",
                  "Where can I find my nearest OBGYN?"],
    )
    title_hotline = "# Select To Get Hotline Number"

    with gr.Tabs():
        with gr.TabItem("Resources"):
            gr.Markdown("### Resources")
            # BUG FIX: both buttons were assigned to the same `open_google`
            # name; give each its own variable so neither is clobbered.
            period_tracker_btn = gr.Button(value="๐Ÿ—“๏ธ Period Tracker", link="https://drive.google.com/file/d/1_KNELAUDLLidwAT3fs2JBuO1yPgMGoDv/view")
            moms_group_btn = gr.Button(value="๐Ÿ‘ฉ๐Ÿปโ€๐Ÿผ New Moms Support Group", link="https://www.instagram.com/firsttimemomsacademy/")

    # NOTE(review): this TabItem sits outside the gr.Tabs() context above —
    # presumably it was meant to be a sibling of "Resources"; confirm the
    # intended layout before moving it.
    with gr.TabItem("Call a Hotline"):
        gr.Markdown(title_hotline)
        dropdown = gr.Dropdown(
            choices=["General Health", "Maternal Mental Health", "Domestic Violence", "Postpartum Support"],
            label="Choose Your Hotline",
        )
        output = gr.Textbox(label="Hotline Info", interactive=False)

    # Update the hotline textbox whenever a topic is selected.
    dropdown.change(fn=show_info, inputs=dropdown, outputs=output)

chatbot.launch(debug=True)