"""BloomBot: a Gradio chatbot for women's health questions.

Retrieves the most relevant lines from a local knowledge base
(``knowledge.txt``) using sentence-transformer embeddings, then streams
grounded answers from a hosted Gemma model via the HF Inference API.
"""

import gradio as gr
import numpy as np
import torch
from huggingface_hub import InferenceClient
from sentence_transformers import SentenceTransformer

theme = gr.themes.Soft(
    primary_hue="rose",
    secondary_hue="zinc",
    neutral_hue="pink",
)

# NOTE: raw CSS needs real color values — Gradio's `*primary_100`
# shorthand only works inside theme constructors, not in custom CSS.
custom_css = """
:root { /* light mode */
    --background-fill-primary: #FCE7F3 !important; /* light pink */
}
.dark { /* dark mode */
    --background-fill-primary: #FFB6C1 !important; /* light pink */
}
"""

# --- Knowledge base (loaded once at startup) -------------------------------
with open("knowledge.txt", "r", encoding="utf-8") as f:
    knowledge_base = f.read()
print("Knowledge base loaded.")

# One chunk per non-empty line, surrounding whitespace stripped.
cleaned_chunks = [
    line.strip() for line in knowledge_base.strip().split("\n") if line.strip()
]

# Embed every chunk up front so each query only has to embed the question.
model = SentenceTransformer("all-MiniLM-L6-v2")
chunk_embeddings = model.encode(cleaned_chunks, convert_to_tensor=True)


def get_top_chunks(query, k=3):
    """Return up to *k* knowledge-base chunks most similar to *query*.

    Ranks chunks by cosine similarity between the normalized query
    embedding and the precomputed chunk embeddings.
    """
    query_embedding = model.encode(query, convert_to_tensor=True)
    query_norm = query_embedding / query_embedding.norm()
    chunk_norms = chunk_embeddings / chunk_embeddings.norm(dim=1, keepdim=True)
    similarities = torch.matmul(chunk_norms, query_norm)
    # Guard against a knowledge base smaller than k (topk would raise).
    top_indices = torch.topk(similarities, k=min(k, len(cleaned_chunks))).indices
    # BUG FIX: index into cleaned_chunks — the list that was embedded —
    # not the raw, blank-line-containing `chunks` list whose indices
    # no longer line up after filtering.
    return [cleaned_chunks[i] for i in top_indices]


client = InferenceClient("google/gemma-3-27b-it")


def respond(message, history):
    """Stream a chat reply grounded in the retrieved knowledge chunks.

    Yields the progressively growing response string, as expected by
    gr.ChatInterface with streaming generators.
    """
    info = get_top_chunks(message)
    messages = [
        {
            "role": "system",
            "content": (
                "Your name is BloomBot and you're a supportive and helpful "
                "chatbot catered towards women of all ages. You're friendly "
                f"and caring. You give clear appropriate explanations with {info} "
                "and keep your explanations to 10 sentences maximum. You "
                "should make sure of the user's age so you can give the most "
                "appropriate answer."
            ),
        }
    ]
    if history:
        messages.extend(history)
    messages.append({"role": "user", "content": message})

    response = ""
    # Use a distinct loop variable so the `message` parameter isn't shadowed.
    for event in client.chat_completion(
        messages,
        max_tokens=500,
        stream=True,
        top_p=0.2,
    ):
        token = event.choices[0].delta.content
        if token:  # some stream deltas carry no content (None)
            response += token
        yield response


def display_image():
    """Path of the banner image shown above the chat interface."""
    return "Screenshot 2025-06-12 at 10.53.59 AM.png"


def show_info(topic):
    """Map a hotline topic to its phone number as a readable string."""
    responses = {
        "General Health": "1-800-994-9662",
        "Maternal Mental Health": "1-833-852-6262",
        "Domestic Violence": "1-800-799-7233",
        "Postpartum Support": "1-800-994-4773",
    }
    return responses.get(topic, "Select a topic to see more info.")


# --- UI --------------------------------------------------------------------
# BUG FIX: custom_css was defined but never passed to gr.Blocks, so the
# background overrides had no effect.
with gr.Blocks(theme=theme, css=custom_css) as chatbot:
    gr.Image(display_image())
    gr.ChatInterface(
        respond,
        type="messages",
        title="Hi, I'm BloomBot! 🌸",
        textbox=gr.Textbox(placeholder="Share Your Age and Ask Me Anything!"),
        description=(
            "This tool is here to listen and provide information on female "
            "health topics, and all discussions will be kept confidential. ❤️‍🩹"
        ),
        examples=[
            "What are the common symptoms of menopause?",
            "What are some vitamins that are good for teenage girls?",
            "What should I know about puberty?",
            "Where can I find my nearest OBGYN?",
        ],
    )

    title_hotline = "# Select To Get Hotline Number"
    with gr.Tabs():
        with gr.TabItem("Resources"):
            gr.Markdown("### Resources")
            # Distinct names: these were both called `open_google` before.
            period_tracker_btn = gr.Button(
                value="🗓️ Period Tracker",
                link="https://drive.google.com/file/d/1_KNELAUDLLidwAT3fs2JBuO1yPgMGoDv/view",
            )
            moms_group_btn = gr.Button(
                value="👩🏻‍🍼 New Moms Support Group",
                link="https://www.instagram.com/firsttimemomsacademy/",
            )
        with gr.TabItem("Call a Hotline"):
            gr.Markdown(title_hotline)
            dropdown = gr.Dropdown(
                choices=[
                    "General Health",
                    "Maternal Mental Health",
                    "Domestic Violence",
                    "Postpartum Support",
                ],
                label="Choose Your Hotline",
            )
            output = gr.Textbox(label="Hotline Info", interactive=False)
            dropdown.change(fn=show_info, inputs=dropdown, outputs=output)

if __name__ == "__main__":
    chatbot.launch(debug=True)