Spaces:
Sleeping
Sleeping
design
Browse files
app.py
CHANGED
|
@@ -1,53 +1,28 @@
|
|
| 1 |
import gradio as gr
|
| 2 |
from huggingface_hub import InferenceClient
|
| 3 |
-
import os
|
| 4 |
-
#importing new libraries
|
| 5 |
from sentence_transformers import SentenceTransformer
|
| 6 |
import torch
|
| 7 |
import numpy as np
|
| 8 |
|
| 9 |
-
#
|
| 10 |
-
|
| 11 |
-
# Load and process the knowledge base text file
|
| 12 |
with open("knowledge.txt", "r", encoding="utf-8") as f:
|
| 13 |
knowledge_text = f.read()
|
| 14 |
-
|
| 15 |
-
# Split the text into chunks (for example, by paragraphs)
|
| 16 |
chunks = [chunk.strip() for chunk in knowledge_text.split("\n\n") if chunk.strip()]
|
| 17 |
-
|
| 18 |
-
# Load an embedding model (this one is light and fast)
|
| 19 |
embedder = SentenceTransformer('all-MiniLM-L6-v2')
|
| 20 |
-
|
| 21 |
-
# Precompute embeddings for all chunks (as a tensor for fast similarity search)
|
| 22 |
chunk_embeddings = embedder.encode(chunks, convert_to_tensor=True)
|
| 23 |
|
| 24 |
def get_relevant_context(query, top_k=3):
|
| 25 |
-
"""
|
| 26 |
-
Compute the embedding for the query, compare it against all chunk embeddings,
|
| 27 |
-
and return the top_k most similar chunks concatenated into a context string.
|
| 28 |
-
"""
|
| 29 |
-
|
| 30 |
-
# Compute and normalize the query embedding
|
| 31 |
query_embedding = embedder.encode(query, convert_to_tensor=True)
|
| 32 |
query_embedding = query_embedding / query_embedding.norm()
|
| 33 |
-
|
| 34 |
-
# Normalize chunk embeddings along the embedding dimension
|
| 35 |
norm_chunk_embeddings = chunk_embeddings / chunk_embeddings.norm(dim=1, keepdim=True)
|
| 36 |
-
|
| 37 |
-
# Compute cosine similarity between the query and each chunk
|
| 38 |
similarities = torch.matmul(norm_chunk_embeddings, query_embedding)
|
| 39 |
-
|
| 40 |
-
# Get the indices of the top_k most similar chunks
|
| 41 |
top_k_indices = torch.topk(similarities, k=top_k).indices.cpu().numpy()
|
| 42 |
-
|
| 43 |
-
# Concatenate the top chunks into a single context string
|
| 44 |
context = "\n\n".join([chunks[i] for i in top_k_indices])
|
| 45 |
return context
|
| 46 |
|
| 47 |
-
## END OF NEW CODE
|
| 48 |
-
|
| 49 |
client = InferenceClient("google/gemma-2-2b-it")
|
| 50 |
-
|
|
|
|
| 51 |
Cycle-Aware Wellness AI Coach (Strict Enforcement Version)
|
| 52 |
==========================================================
|
| 53 |
Mission:
|
|
@@ -84,31 +59,43 @@ Core Support Areas (Scope You *Do* Cover):
|
|
| 84 |
- Offer encouragement when users feel overwhelmed, bloated, or inconsistent.
|
| 85 |
Final Boundary Rule:
|
| 86 |
--------------------
|
| 87 |
-
🔒 Strictly decline all unrelated questions. Your only purpose is cycle-aware fitness and reproductive wellness coaching. Do not give general medical, tech, legal, or life advice.
|
| 88 |
"""
|
|
|
|
| 89 |
def respond(message, history):
|
| 90 |
messages = [{"role": "system", "content": cycle_ai_prompt}]
|
| 91 |
-
#new line of code
|
| 92 |
if history:
|
| 93 |
for user_msg, assistant_msg in history:
|
| 94 |
messages.append({"role": "user", "content": user_msg})
|
| 95 |
messages.append({"role": "assistant", "content": assistant_msg})
|
| 96 |
messages.append({"role": "user", "content": message})
|
| 97 |
-
# Not using stream
|
| 98 |
response = client.chat_completion(
|
| 99 |
messages,
|
| 100 |
max_tokens=500,
|
| 101 |
temperature=0.1
|
| 102 |
)
|
| 103 |
return response['choices'][0]['message']['content'].strip()
|
|
|
|
| 104 |
def custom_chat_ui():
|
| 105 |
-
with gr.Blocks(
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 106 |
gr.Image(
|
| 107 |
-
value="pitbull.jpg",
|
| 108 |
show_label=False,
|
| 109 |
show_share_button=False,
|
| 110 |
-
show_download_button=False
|
|
|
|
|
|
|
| 111 |
)
|
| 112 |
-
gr.ChatInterface(respond)
|
| 113 |
return demo
|
| 114 |
-
|
|
|
|
|
|
| 1 |
import gradio as gr
|
| 2 |
from huggingface_hub import InferenceClient
|
|
|
|
|
|
|
| 3 |
from sentence_transformers import SentenceTransformer
|
| 4 |
import torch
|
| 5 |
import numpy as np
|
| 6 |
|
| 7 |
+
# ---------------------------------------------------------------------------
# Knowledge base: read the raw text, split it into paragraph chunks, and
# precompute one embedding per chunk for fast similarity search later.
# ---------------------------------------------------------------------------
with open("knowledge.txt", "r", encoding="utf-8") as f:
    knowledge_text = f.read()

# Paragraphs are blank-line separated; drop empty entries after stripping.
chunks = []
for raw_paragraph in knowledge_text.split("\n\n"):
    paragraph = raw_paragraph.strip()
    if paragraph:
        chunks.append(paragraph)

# Small, fast sentence-embedding model.
embedder = SentenceTransformer('all-MiniLM-L6-v2')

# One embedding per chunk, kept as a tensor in memory for similarity lookups.
chunk_embeddings = embedder.encode(chunks, convert_to_tensor=True)
|
| 13 |
|
| 14 |
def get_relevant_context(query, top_k=3):
    """Return the knowledge-base text most relevant to *query*.

    Embeds the query, scores it against the precomputed chunk embeddings
    by cosine similarity, and concatenates the best-matching chunks.

    Args:
        query: Free-text user question.
        top_k: Maximum number of chunks to include (default 3).

    Returns:
        The top-scoring chunks joined by blank lines, as a single string.
    """
    # Encode and L2-normalize the query so the dot product below is a
    # true cosine similarity.
    query_embedding = embedder.encode(query, convert_to_tensor=True)
    query_embedding = query_embedding / query_embedding.norm()

    # Normalize each chunk embedding along the embedding dimension.
    norm_chunk_embeddings = chunk_embeddings / chunk_embeddings.norm(dim=1, keepdim=True)

    # Cosine similarity of the query against every chunk.
    similarities = torch.matmul(norm_chunk_embeddings, query_embedding)

    # FIX: torch.topk raises RuntimeError when k exceeds the number of
    # elements (e.g. a short knowledge file with fewer than top_k
    # paragraphs) — clamp k to the available chunk count.
    k = min(top_k, len(chunks))
    top_k_indices = torch.topk(similarities, k=k).indices.cpu().numpy()

    # Concatenate the winning chunks into one context string.
    return "\n\n".join(chunks[i] for i in top_k_indices)
|
| 22 |
|
|
|
|
|
|
|
| 23 |
# Hosted Hugging Face inference client for the chat model; generation runs
# remotely, so no local model weights are needed.
client = InferenceClient("google/gemma-2-2b-it")
|
| 24 |
+
|
| 25 |
+
cycle_ai_prompt = """
|
| 26 |
Cycle-Aware Wellness AI Coach (Strict Enforcement Version)
|
| 27 |
==========================================================
|
| 28 |
Mission:
|
|
|
|
| 59 |
- Offer encouragement when users feel overwhelmed, bloated, or inconsistent.
|
| 60 |
Final Boundary Rule:
|
| 61 |
--------------------
|
| 62 |
+
🔒 Strictly decline all unrelated questions. Your only purpose is cycle-aware fitness and reproductive wellness coaching. Do not give general medical, tech, legal, or life advice. You can still give advice relating to helping symptoms like home remedies. No actual medical advice, but recipes are fine.
|
| 63 |
"""
|
| 64 |
+
|
| 65 |
def respond(message, history):
    """Generate one chat reply for the Gradio interface.

    Builds the message list (system prompt, prior turns, current message),
    attaches knowledge-base context to the current turn, and requests a
    single non-streamed completion from the hosted model.

    Args:
        message: The user's latest message.
        history: Prior turns as (user_msg, assistant_msg) pairs, or
            None/empty on the first turn.
            NOTE(review): this is Gradio's legacy tuple history format —
            confirm the installed Gradio version still passes tuples.

    Returns:
        The model's reply text, stripped of surrounding whitespace.
    """
    messages = [{"role": "system", "content": cycle_ai_prompt}]
    if history:
        for user_msg, assistant_msg in history:
            messages.append({"role": "user", "content": user_msg})
            messages.append({"role": "assistant", "content": assistant_msg})

    # FIX: get_relevant_context (and the embeddings precomputed at module
    # load) was never called, so the knowledge base never reached the model.
    # Ground the current turn with the retrieved context.
    context = get_relevant_context(message)
    messages.append({
        "role": "user",
        "content": f"Context:\n{context}\n\nQuestion: {message}",
    })

    # Single (non-streaming) completion; low temperature keeps answers focused.
    response = client.chat_completion(
        messages,
        max_tokens=500,
        temperature=0.1
    )
    return response['choices'][0]['message']['content'].strip()
|
| 78 |
+
|
| 79 |
def custom_chat_ui():
    """Assemble the Gradio UI: a themed Blocks page with a header image
    area and the chat interface wired to respond().

    Returns:
        The gr.Blocks demo object (caller is responsible for .launch()).
    """
    with gr.Blocks(
        theme=gr.themes.Soft(
            primary_hue="pink",
            secondary_hue="orange",
            neutral_hue="yellow",
            spacing_size="lg",
            radius_size="lg",  # "xl" is not a valid radius size for this theme
            font=[gr.themes.GoogleFont("Quicksand"), "sans-serif"],
            font_mono=[gr.themes.GoogleFont("IBM Plex Mono"), "monospace"]
        )
    ) as demo:
        # NOTE(review): no `value=` is given, so this renders an empty image
        # placeholder — confirm whether the header image was meant to be kept.
        gr.Image(
            show_label=False,
            show_share_button=False,
            show_download_button=False,
            container=False,
            height=200
        )
        gr.ChatInterface(respond, title="🌸 Cycle-Aware Wellness Coach 🌞")
    return demo
|
| 100 |
+
|
| 101 |
+
# Build the UI and start the app; launch() blocks while serving requests.
custom_chat_ui().launch()
|