import gradio as gr
from huggingface_hub import InferenceClient
from sentence_transformers import SentenceTransformer
import torch
import numpy as np
# Pink-on-pink Gradio theme used by the BloomBot UI below.
theme = gr.themes.Soft(
    primary_hue="pink",
    secondary_hue="pink",
    neutral_hue="fuchsia",
).set(
    link_text_color="*secondary_700",
    background_fill_primary="primary_600",
)

# Extra CSS overriding the page background colour in light and dark mode.
custom_css = """
:root { /* This applies to the light mode */
--background-fill-primary: #FFB6C1 !important; /* Light pink */
}
.dark { /* This applies to the dark mode */
--background-fill-primary: #FF69B4 !important; /* Hot pink */
}
"""
# Load the plain-text knowledge base used for retrieval at startup.
with open("knowledge.txt", "r", encoding="utf-8") as f:
    knowledge_base = f.read()
print("Knowledge base loaded.")

# One retrieval chunk per line of the file; drop blank lines and
# surrounding whitespace.
cleaned_text = knowledge_base.strip()
chunks = cleaned_text.split("\n")
cleaned_chunks = [line.strip() for line in chunks if line.strip()]
print(cleaned_chunks)

# Embed every chunk once up front so queries only need a single encode.
model = SentenceTransformer('all-MiniLM-L6-v2')
chunk_embeddings = model.encode(cleaned_chunks, convert_to_tensor=True)
print(chunk_embeddings)
def get_top_chunks(query):
    """Return the knowledge-base chunks most similar to *query*.

    Embeds the query, cosine-scores it against the precomputed
    ``chunk_embeddings``, and returns up to the top 3 matching strings.
    """
    query_embedding = model.encode(query, convert_to_tensor=True)
    # Normalize both sides so the dot product below is cosine similarity.
    query_embedding_normalized = query_embedding / query_embedding.norm()
    chunk_embeddings_normalized = chunk_embeddings / chunk_embeddings.norm(dim=1, keepdim=True)
    similarities = torch.matmul(chunk_embeddings_normalized, query_embedding_normalized)
    # Guard against a knowledge base with fewer than 3 chunks — topk with
    # k > len would raise.
    k = min(3, len(cleaned_chunks))
    top_indices = torch.topk(similarities, k=k).indices
    # BUG FIX: index into cleaned_chunks (the list the embeddings were built
    # from), not the raw `chunks` list — `chunks` still contains empty lines,
    # so its indices do not line up with the similarity scores.
    return [cleaned_chunks[i] for i in top_indices]
# Hosted Hugging Face inference client for the Gemma 3 27B instruct model.
client = InferenceClient("google/gemma-3-27b-it")
def respond(message, history):
    """Stream a chat reply for *message*, grounded in retrieved chunks.

    Builds a system prompt containing the top matching knowledge chunks,
    appends the prior *history* and the new user turn, then yields the
    growing response string as tokens stream back from the model.
    """
    info = get_top_chunks(message)
    messages = [{"role": "system" , "content": f"Your name is BloomBot and you're a supportive and helpful chatbot catered towards teens ages 10-18. You give clear kid-appropiate explainations with {info} and keep your explainations to 10 sentences maximum."
    }]
    if history:
        messages.extend(history)
    messages.append({"role": "user", "content": message})
    response = ""
    # BUG FIX: loop variable renamed from `message`, which shadowed the
    # user-message parameter of this function.
    for event in client.chat_completion(
        messages,
        max_tokens=500,
        stream=True,
        top_p=0.2,
    ):
        token = event.choices[0].delta.content
        # BUG FIX: streamed deltas can carry content=None (e.g. the final
        # chunk); concatenating None would raise a TypeError.
        if token:
            response += token
        yield response
def display_image():
    """Return the file path of the banner image shown above the chat UI."""
    # NOTE(review): path is a hard-coded screenshot filename; the file must
    # sit next to this script for the image to render.
    return "Screenshot 2025-06-12 at 10.53.59 AM.png"
# Assemble and launch the BloomBot UI: banner image, chat interface, and
# resource/hotline tabs.
# BUG FIX: `custom_css` was defined at the top of the file but never passed
# to gr.Blocks, so the background overrides were silently ignored.
with gr.Blocks(theme=theme, css=custom_css) as chatbot:
    gr.Image(display_image())
    gr.ChatInterface(
        respond,
        type="messages",
        title="Hi, I'm BloomBot! 🌸",
        textbox=gr.Textbox(placeholder="Share Your Age and Ask Me Anything!"),
        description="This tool is here to listen and provide information on female health topics, and all discussions will be kept confidential. ❤️🩹",
        examples=["What are the common symptoms of menopause?",
                  "What are some vitamins that are good for teenage girls?",
                  "What should I know about puberty?",
                  "Where can I find my nearest OBGYN?"],
    )
    title_hotline = "# Select your city to find the nearest hotline"
    hotline_text = """### Placeholder"""
    with gr.Tabs():
        with gr.TabItem("Resources"):
            gr.Markdown("### Resources")
            # BUG FIX: the HTML string contained stray leading `#` characters
            # (leftover comment markers) that rendered as literal text, and the
            # style attribute ran into onmouseover with no separating space.
            gr.HTML("""
            <a href="https://drive.google.com/file/d/1_KNELAUDLLidwAT3fs2JBuO1yPgMGoDv/view" target="_blank">
            <button style="
            background-color: #ff69b4; /* hot pink */
            color: white;
            font-family: monospace;
            font-weight: bold;
            font-size: 16px;
            border: none;
            border-radius: 6px;
            padding: 12px 24px;
            cursor: pointer;
            transition: background-color 0.3s ease;
            " onmouseover="this.style.backgroundColor='#ff85c1'"
            onmouseout="this.style.backgroundColor='#ff69b4'">
            📄 Period Tracker
            </button>
            </a>
            """)
        with gr.TabItem("Call a Hotline"):
            gr.Markdown(title_hotline)
            gr.Markdown(hotline_text)

chatbot.launch(debug=True)