import os
import pickle
import numpy as np
import gradio as gr
from dataclasses import dataclass, field
from sentence_transformers import SentenceTransformer
from pydantic_ai import Agent, RunContext
from pydantic_ai.models.google import GoogleModel
from pydantic_ai.providers.google import GoogleProvider
from typing import List, Dict
# --- CONFIGURATION ---
CACHE_PATH = "vector_store_cache.pkl"
MODEL_NAME = "gemini-2.5-flash-lite"
ACCESS_PASSWORD = "secret-mitrp-password"
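# GEMINI_API_KEY is read from the environment below; on a Hugging Face Space it would
# typically be configured as a repository secret rather than hardcoded here.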
# ==========================================
# PART 1: BACKEND LOGIC (RAG & AGENT)
# ==========================================
@dataclass
class VectorStore:
    chunks: List[Dict] = field(default_factory=list)  # each: {text, page_start, page_end, chunk_id}
    embeddings: np.ndarray = field(default_factory=lambda: np.array([]))

    def search(self, query: str, model: SentenceTransformer, top_k: int = 5) -> List[Dict]:
        if len(self.chunks) == 0:
            return []
        # Cosine similarity: normalize the query and every chunk embedding, then take dot products.
        query_embedding = model.encode([query])[0]
        query_norm = query_embedding / (np.linalg.norm(query_embedding) + 1e-9)
        norms = np.linalg.norm(self.embeddings, axis=1, keepdims=True) + 1e-9
        normalized = self.embeddings / norms
        similarities = normalized @ query_norm
        top_indices = np.argsort(similarities)[-top_k:][::-1]
        return [
            {
                "text": self.chunks[i]["text"],
                "score": float(similarities[i]),
                "pages": f"{self.chunks[i].get('page_start', '?')}–{self.chunks[i].get('page_end', '?')}",
            }
            for i in top_indices
        ]

def load_vector_store() -> VectorStore:
"""Load pre-built index from cache. Raises if missing."""
if not os.path.exists(CACHE_PATH):
raise FileNotFoundError(
f"Cache file '{CACHE_PATH}' not found. "
"Run `uv run build_index.py` to generate it, then commit it to your repo."
)
print(f"β³ Loading vector store from {CACHE_PATH}...")
with open(CACHE_PATH, "rb") as f:
data = pickle.load(f)
chunks = data["chunks"]
embeddings = data["embeddings"]
print(f"β
Loaded {len(chunks)} chunks.")
return VectorStore(chunks=chunks, embeddings=embeddings)
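
# NOTE: the cache layout described here is an assumption based on the keys read above;
# build_index.py (not part of this file) is expected to write a pickle of the form
#   {
#       "chunks": [{"text": str, "page_start": int, "page_end": int, "chunk_id": int}, ...],
#       "embeddings": np.ndarray of shape (num_chunks, embedding_dim),
#   }
# where embeddings[i] is the sentence-transformer embedding of chunks[i]["text"].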
# Initialize embedding model and vector store at startup
print("β³ Loading embedding model...")
embed_model = SentenceTransformer("sentence-transformers/all-MiniLM-L6-v2")
global_vector_store = load_vector_store()
# Initialize Pydantic AI Agent
api_key = os.getenv("GEMINI_API_KEY")
agent = None
if api_key:
    provider = GoogleProvider(api_key=api_key)
    model = GoogleModel(MODEL_NAME, provider=provider)
    agent = Agent(
        model,
        deps_type=VectorStore,
        system_prompt=(
            "You are an expert on MITRP Policies. "
            "Always call `search_policy` to retrieve relevant excerpts before answering. "
            "Cite the page numbers provided in each excerpt. "
            "If the retrieved text does not contain the answer, say so explicitly."
        ),
    )

    @agent.tool
    def search_policy(ctx: RunContext[VectorStore], query: str) -> str:
        """Search the MITRP policy document for relevant excerpts."""
        results = ctx.deps.search(query, embed_model, top_k=5)
        if not results:
            return "No relevant policy sections found."
        return "\n\n".join(
            f"--- Excerpt (p. {r['pages']}, relevance {r['score']:.2f}) ---\n{r['text']}"
            for r in results
        )
else:
    print("⚠️ GEMINI_API_KEY not set – agent will not function.")
# ==========================================
# PART 2: FRONTEND LOGIC (UI & AUTH)
# ==========================================
async def chat_logic(message, history):
    if not agent:
        return "⚠️ Error: GEMINI_API_KEY is not configured."
    try:
        result = await agent.run(message, deps=global_vector_store)
        # Newer pydantic_ai versions expose the reply as `.output`; older ones used `.data`.
        return getattr(result, "output", getattr(result, "data", str(result)))
    except Exception as e:
        return f"Error: {str(e)}"
def login_logic(password):
    if password == ACCESS_PASSWORD:
        return gr.update(visible=False), gr.update(visible=True), ""
    return (
        gr.update(visible=True),
        gr.update(visible=False),
        "<p style='color:red'>❌ Incorrect Password</p>",
    )
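
# Note: a successful login only toggles which column is visible in the UI; the chat
# handler itself remains reachable through Gradio's API, so this is lightweight
# gating rather than real authentication.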
# --- GRADIO BLOCKS LAYOUT ---
custom_css = "footer {visibility: hidden}"
with gr.Blocks(title="MITRP Policy Assistant", theme="soft", css=custom_css) as app:
    # --- SCREEN 1: LOGIN ---
    with gr.Column(visible=True) as login_col:
        gr.Markdown("## 🔒 MITRP Policy Bot\nPlease enter the access password to continue.")
        with gr.Row():
            pass_input = gr.Textbox(
                label="Password",
                type="password",
                placeholder="Enter password...",
                show_label=False,
                scale=4,
            )
            login_btn = gr.Button("Login", variant="primary", scale=1)
        error_msg = gr.Markdown("")

    # --- SCREEN 2: CHAT ---
    with gr.Column(visible=False) as chat_col:
        gr.Markdown("## 🏛️ MITRP Policy Assistant")
        chat_interface = gr.ChatInterface(
            fn=chat_logic,
            examples=[
                "How many papers should I write per year?",
                "What is the vacation policy?",
                "How do I connect to the GPU machines?",
            ],
        )

    # --- EVENT LISTENERS ---
    login_btn.click(
        fn=login_logic,
        inputs=[pass_input],
        outputs=[login_col, chat_col, error_msg],
    )
    pass_input.submit(
        fn=login_logic,
        inputs=[pass_input],
        outputs=[login_col, chat_col, error_msg],
    )
if __name__ == "__main__":
    # theme and css are Blocks constructor arguments, not launch() arguments.
    app.launch()