# feat: added custom prompts (author: younus00, commit b026ea4)
from src.rag import RAG
from src.mcp_client import JournalMCPClient
from llama_index.core.agent import ReActAgent
from llama_index.llms.google_genai import GoogleGenAI
from llama_index.core.workflow import Context
from llama_index.core.agent.workflow import AgentStream, ToolCallResult
import gradio as gr
import asyncio
import yaml
# Module-level singletons: populated by initialize_agent() and read by run_agent().
agent = None  # ReActAgent instance; stays None until initialize_agent() succeeds
ctx = None  # llama_index workflow Context bound to the agent (conversation state)
async def initialize_agent(google_api_key, folder_path="./dummy_data", model="gemini-2.5-flash-lite"):
    """Build the ReAct agent and its Context, storing both in module globals.

    Indexes the journal folder for RAG, collects MCP tools, creates the
    agent, and installs the custom ReAct header prompt from prompts.yaml.

    Args:
        google_api_key: API key for the Google GenAI backend.
        folder_path: Folder of journal files to index for retrieval.
        model: Gemini model identifier to use for the LLM.
    """
    global agent, ctx

    # Single LLM instance backs both the RAG index and the agent itself.
    llm = GoogleGenAI(model=model, api_key=google_api_key)

    # Index the journal folder and expose the index as agent tools.
    rag = RAG(llm=llm)
    rag.load_and_index_folder(folder_path)

    # Tools served by the journal MCP server (async discovery).
    mcp_client = JournalMCPClient()
    mcp_tools = await mcp_client.get_tools()

    combined_tools = list(rag.get_tools()) + list(mcp_tools)
    agent = ReActAgent(
        tools=combined_tools,
        llm=llm,
        verbose=True,
    )

    # Swap in the custom ReAct system header loaded from prompts.yaml.
    with open("prompts.yaml", "r") as f:
        prompts_dict = yaml.safe_load(f)
    agent.update_prompts({"react_header": prompts_dict["react_header"]})

    # Fresh conversation context tied to this agent instance.
    ctx = Context(agent)
async def run_agent(query_text, chat_history):
    """Run one agent turn and return the final answer as a string.

    Streams intermediate events to stdout for debugging. `chat_history`
    is required by gr.ChatInterface's fn signature but is not consumed
    here — conversation state lives in the module-level Context.
    """
    global agent, ctx

    stream = agent.run(query_text, ctx=ctx)

    # Echo tool calls and token deltas to the console as they arrive.
    async for ev in stream.stream_events():
        if isinstance(ev, ToolCallResult):
            print(f"\nCall {ev.tool_name} with {ev.tool_kwargs}\nReturned: {ev.tool_output}")
        if isinstance(ev, AgentStream):
            print(f"{ev.delta}", end="", flush=True)

    # Await the handler to obtain the agent's final response.
    final_response = await stream
    return str(final_response)
async def main():
    """Build and launch the three-screen Gradio app.

    Screen flow: API-key entry (0) -> loading (1) -> chat (2). The submit
    handler is an async generator that yields dicts of component updates
    keyed by component, so each yield only needs to mention the components
    it changes.
    """
    with gr.Blocks(title="AI Reflection Agent") as demo:
        # State to track which screen is visible (0=API key, 1=loading, 2=chat)
        screen_state = gr.State(value=0)
        chat_history_state = gr.State(value=[])

        # Screen 0: API Key Input
        with gr.Column(visible=True) as col_screen0:
            gr.Markdown("# 🤖 AI Reflection Agent")
            gr.Markdown("Enter your Google API key to get started.")
            with gr.Group():
                api_input = gr.Textbox(
                    label="Google API Key",
                    type="password",
                    placeholder="Enter your Google API key...",
                    interactive=True,
                )
                submit_btn = gr.Button("Initialize Agent", variant="primary", scale=1)
                error_msg_0 = gr.Textbox(
                    label="Status",
                    interactive=False,
                    value="",
                )

        # Screen 1: Loading
        with gr.Column(visible=False) as col_screen1:
            gr.Markdown("# ⏳ Initializing Agent")
            gr.Markdown("Please wait while we set up your agent. This may take a minute...")
            with gr.Group():
                status_text = gr.Textbox(
                    value="Starting initialization...",
                    interactive=False,
                    label="Status",
                )

        # Screen 2: Chat Interface
        with gr.Column(visible=False) as col_screen2:
            gr.ChatInterface(
                fn=run_agent,
                # height expects an int (pixels) or a CSS string like "600px";
                # the bare string "600" is not valid CSS.
                chatbot=gr.Chatbot(height=600),
                textbox=gr.Textbox(placeholder="Ask me a question...", container=False, scale=7),
                title="AI Reflection Agent",
                description="Ask questions about your journal entries.",
                examples=["What are my hobbies?", "Who are my friends that I play Rocket League with?"],
                cache_examples=False,
            )

        # Handler to transition from Screen 0 to Screen 1 and on to 2 after init.
        async def on_submit(api_key):
            # Show loading screen immediately.
            yield {
                col_screen0: gr.update(visible=False),
                col_screen1: gr.update(visible=True),
                col_screen2: gr.update(visible=False),
                screen_state: 1,
                status_text: gr.update(value="Initializing..."),
            }
            try:
                await initialize_agent(api_key)
            except Exception as e:
                # Bad key / init failure: go back to the key screen and show
                # the error there instead of leaving the loading screen stuck
                # (error_msg_0 was previously wired as an output but never set).
                yield {
                    col_screen0: gr.update(visible=True),
                    col_screen1: gr.update(visible=False),
                    col_screen2: gr.update(visible=False),
                    screen_state: 0,
                    error_msg_0: gr.update(value=f"Initialization failed: {e}"),
                }
                return
            yield {
                status_text: gr.update(value="Agent ready! Transitioning to chat..."),
            }
            # Brief pause so the user sees the success message.
            await asyncio.sleep(1)
            # Show chat screen.
            yield {
                col_screen0: gr.update(visible=False),
                col_screen1: gr.update(visible=False),
                col_screen2: gr.update(visible=True),
                screen_state: 2,
                chat_history_state: [],
            }

        # Wire up the API key submission.
        submit_btn.click(
            on_submit,
            inputs=api_input,
            outputs=[col_screen0, col_screen1, col_screen2, screen_state, status_text, error_msg_0, chat_history_state],
        )

    demo.launch(share=False)
# Script entry point: drive the async UI builder on a fresh event loop.
if __name__ == "__main__":
    asyncio.run(main())