Spaces:
Sleeping
Sleeping
File size: 3,361 Bytes
import os
import asyncio
import streamlit as st
from dotenv import load_dotenv
import google.generativeai as genai
from pydantic import BaseModel
# ------------------------------
# 1. Load environment variables
# ------------------------------
# Pull key=value pairs from a local .env file into os.environ (no-op if absent).
load_dotenv()
GEMINI_API_KEY = os.getenv("GEMINI_API_KEY")
if not GEMINI_API_KEY:
    # Abort rendering early: every Gemini call below requires a configured key.
    # NOTE(review): "β" looks like a mojibake'd emoji (likely ❌) — confirm against the original file.
    st.error("β GEMINI_API_KEY is not set in Secrets.")
    st.stop()
# ------------------------------
# 2. Configure Gemini client
# ------------------------------
genai.configure(api_key=GEMINI_API_KEY)
# Single shared model instance reused by the guardrail, triage, and tutor agents.
model = genai.GenerativeModel("gemini-1.5-flash")
# ------------------------------
# 3. Guardrail output schema
# ------------------------------
class RequirementOutput(BaseModel):
    """Structured verdict from the guardrail check on a user query."""

    # True when the query is allowed (per the guardrail prompt: math, history,
    # or learning topics); False for unsafe, spam, or irrelevant queries.
    is_requirement: bool
    # Raw model-produced explanation for the verdict.
    reasoning: str
# ------------------------------
# 4. Guardrail check
# ------------------------------
async def requirement_guardrail(query: str) -> RequirementOutput:
    """Ask Gemini whether *query* should be answered at all.

    Args:
        query: Raw user question from the UI.

    Returns:
        RequirementOutput with the allow/block verdict and the model's
        free-text reasoning.
    """
    guardrail_prompt = f"""
Decide if the user query should be answered.
Query: {query}
Rules:
- If query is about math, history, or learning β is_requirement=True.
- If query is unsafe, spam, or irrelevant β is_requirement=False.
Always explain your reasoning.
"""
    # FIX: generate_content is a blocking network call; running it directly in
    # an async function would stall the event loop. Offload to a worker thread.
    resp = await asyncio.to_thread(model.generate_content, guardrail_prompt)
    text = resp.text.strip()
    # Heuristic parse: any occurrence of "true" in the reply counts as allowed.
    # NOTE(review): fragile — a reply like "this is not true" also matches;
    # consider requesting structured JSON output instead.
    if "true" in text.lower():
        return RequirementOutput(is_requirement=True, reasoning=text)
    else:
        return RequirementOutput(is_requirement=False, reasoning=text)
# ------------------------------
# 5. Math & History agents
# ------------------------------
async def math_tutor(query: str) -> str:
    """Answer a math question with a step-by-step Gemini explanation.

    Args:
        query: The user's math question.

    Returns:
        The model's reply text.
    """
    # FIX: offload the blocking SDK call so the event loop is not stalled.
    resp = await asyncio.to_thread(
        model.generate_content, f"Explain step by step: {query}"
    )
    return resp.text
async def history_tutor(query: str) -> str:
    """Answer a history question with a clear Gemini explanation.

    Args:
        query: The user's history question.

    Returns:
        The model's reply text.
    """
    # FIX: offload the blocking SDK call so the event loop is not stalled.
    resp = await asyncio.to_thread(
        model.generate_content, f"Explain clearly: {query}"
    )
    return resp.text
# ------------------------------
# 6. Triage agent
# ------------------------------
async def triage_agent(query: str) -> str:
    """Route *query* to the math or history tutor based on a Gemini decision.

    Args:
        query: The user's question.

    Returns:
        The chosen tutor's answer, or a fallback message when the routing
        reply contains neither "math" nor "history".
    """
    triage_prompt = f"""
Decide which agent should answer.
Query: {query}
Options: Math Tutor or History Tutor.
Reply with exactly one: "math" or "history".
"""
    # FIX: generate_content blocks; run it in a worker thread so other
    # coroutines on the loop can make progress.
    resp = await asyncio.to_thread(model.generate_content, triage_prompt)
    decision = resp.text.strip().lower()
    # Substring dispatch: "math" is checked first, so a reply mentioning both
    # words routes to the math tutor.
    if "math" in decision:
        return await math_tutor(query)
    elif "history" in decision:
        return await history_tutor(query)
    else:
        return "π€ Sorry, I could not decide which tutor should handle this question."
# ------------------------------
# 7. Streamlit UI
# ------------------------------
# ------------------------------
# 7. Streamlit UI
# ------------------------------
# NOTE(review): "π€" / "β" below look like mojibake'd emoji (🤖 / ✅ / ❌) —
# confirm against the original file before shipping.
st.set_page_config(page_title="Multiple Agents App", page_icon="π€", layout="centered")
st.title("π€ Multiple Agents Tutor (Math + History)")
st.markdown("This app is powered by **Google Gemini** and uses multiple AI agents with guardrails.")
user_query = st.text_input("Enter your question:", placeholder="e.g., What is 10 divided by 2?")
submit = st.button("Get Answer")
if submit and user_query.strip():
    async def run_query() -> str:
        # Guardrail first: refuse queries the policy check rejects.
        guardrail = await requirement_guardrail(user_query)
        if not guardrail.is_requirement:
            return f"β Blocked by guardrail: {guardrail.reasoning}"
        return await triage_agent(user_query)

    with st.spinner("Thinking... π€"):
        # Streamlit scripts run synchronously, so drive the async pipeline
        # with a fresh event loop per button press.
        answer = asyncio.run(run_query())
    # FIX: this string literal was split across two source lines (a syntax
    # error as extracted); rejoined onto one line.
    st.success("β Answer:")
    st.write(answer)