rag_agent / ui /gradio_components.py
Cheh Kit Hong
fixed gr message
6b84d68
raw
history blame
6.64 kB
import gradio as gr
from langchain_core.messages import HumanMessage, AIMessage
from core.rag_agent import RAGAgent
import traceback
# Initialize components
# Module-level singleton; stays None until initialize_agent() builds it on
# the first chat request (lazy init keeps import/startup cheap).
rag_agent = None
def initialize_agent():
    """Return the module-wide RAGAgent, constructing it on first use."""
    global rag_agent
    # Fast path: the singleton already exists.
    if rag_agent is not None:
        return rag_agent
    rag_agent = RAGAgent()
    return rag_agent
def chat_with_agent(message, history):
    """Handle one chat turn with the RAG agent.

    Args:
        message: The user's new message text.
        history: Prior conversation as a list of
            ``{"role": ..., "content": ...}`` dicts (Gradio "messages"
            format). May be ``None`` or empty on the first turn.

    Returns:
        The updated history list in the same dict format. On failure the
        assistant entry carries an error string instead of an answer.
    """
    # Gradio may hand us None for a fresh chat; normalize so the
    # ``history + [...]`` concatenations below cannot raise TypeError.
    history = history or []
    # Ignore blank submissions; keep the transcript unchanged.
    if not message.strip():
        return history
    try:
        agent = initialize_agent()
        # Convert Gradio's dict history into LangChain message objects.
        messages = []
        for msg_dict in history:
            if msg_dict["role"] == "user":
                messages.append(HumanMessage(content=msg_dict["content"]))
            elif msg_dict["role"] == "assistant":
                messages.append(AIMessage(content=msg_dict["content"]))
        # Append the new user turn before invoking the graph.
        messages.append(HumanMessage(content=message))
        # Invoke the agent graph with the accumulated conversation state.
        result = agent.agent_graph.invoke(
            {"messages": messages},
            config=agent.get_config(),
        )
        # The last AIMessage in the result is the agent's answer.
        ai_messages = [
            m for m in result.get("messages", []) if isinstance(m, AIMessage)
        ]
        if not ai_messages:
            return _with_turn(
                history, message,
                "⚠️ No response generated. Please try again.",
            )
        response = ai_messages[-1].content
        # Surface which retrieval route produced the answer (optional metadata).
        rag_method = result.get("rag_method", "UNKNOWN")
        return _with_turn(
            history, message, f"{response}\n\n*[Source: {rag_method}]*"
        )
    except Exception as e:
        # UI boundary: report the failure in-chat rather than crashing Gradio.
        print(f"Chat error: {e}")
        traceback.print_exc()
        return _with_turn(history, message, f"❌ Error: {str(e)}")


def _with_turn(history, user_message, assistant_message):
    """Return a new history list with one user/assistant exchange appended."""
    return history + [
        {"role": "user", "content": user_message},
        {"role": "assistant", "content": assistant_message},
    ]
def reset_conversation():
    """Reset the agent's conversation thread and blank the chat window."""
    global rag_agent
    if rag_agent:
        # Only an already-initialized agent has a thread to reset.
        rag_agent.reset_thread()
    # Returning an empty list clears the Chatbot component.
    return []
def create_gradio_ui():
    """Build and return the complete Gradio Blocks interface.

    Layout: a chat column (Chatbot + textbox + send/reset buttons) next to a
    sidebar with status and example queries, plus a usage guide at the bottom.

    Returns:
        The assembled ``gr.Blocks`` app, ready for ``.launch()``.
    """
    with gr.Blocks(title="RAG Agent with Agentic Memory") as demo:
        gr.Markdown("""
        # 🤖 RAG Agent with Agentic Memory
        Chat with an intelligent agent that uses:
        - 📚 **Local Knowledge Base** (ChromaDB) - Research papers on DeepAnalyze, AgentMem, SAM3, etc.
        - 🔍 **Web Search** (Tavily) - Real-time information and current events
        - 📖 **Wikipedia** - General knowledge
        - 🎓 **ArXiv** - Academic papers
        """)
        with gr.Row():
            with gr.Column(scale=4):
                gr.Markdown("### 💬 Chat Interface")
                # type="messages" makes the Chatbot exchange openai-style
                # {"role": ..., "content": ...} dicts — the exact format
                # chat_with_agent produces and consumes. Without it Gradio
                # defaults to the deprecated tuple format and the dict
                # histories break.
                chatbot = gr.Chatbot(
                    label="Conversation",
                    height=500,
                    show_label=False,
                    type="messages",
                )
                with gr.Row():
                    msg = gr.Textbox(
                        label="Your Message",
                        placeholder="Ask me anything about your documents or general knowledge...",
                        scale=5,
                        show_label=False
                    )
                    submit_btn = gr.Button("Send 📤", variant="primary", scale=1)
                with gr.Row():
                    clear_btn = gr.Button("🔄 Reset Conversation", variant="secondary")
            with gr.Column(scale=1):
                gr.Markdown("### 📊 Agent Status")
                # Static placeholder panel for now; kept as a named component
                # so a future callback can update it.
                status_box = gr.Markdown("*Ready*")
                gr.Markdown("### 💡 Example Queries")
                gr.Markdown("""
                **Local Documents (RAG):**
                - What is DeepAnalyze?
                - Explain SAM 3 architecture
                - What is AgentMem?
                **Web Search:**
                - Latest AI news in 2025
                - Current events in technology
                **General:**
                - What is 15 × 7?
                - Explain machine learning
                """)

        # Event handlers
        def submit_message(message, history):
            """Send one message through the agent and clear the textbox."""
            if not message.strip():
                return history, ""
            # Get response
            new_history = chat_with_agent(message, history)
            return new_history, ""

        # Wire up events: Enter in the textbox and the Send button share
        # the same handler and outputs.
        msg.submit(
            fn=submit_message,
            inputs=[msg, chatbot],
            outputs=[chatbot, msg]
        )
        submit_btn.click(
            fn=submit_message,
            inputs=[msg, chatbot],
            outputs=[chatbot, msg]
        )
        clear_btn.click(
            fn=reset_conversation,
            outputs=[chatbot]
        )

        gr.Markdown("""
        ---
        ### 🔧 How it works:
        1. **Type your question** in the text box
        2. The agent will:
        - 🧠 Analyze your query to determine the best source
        - 🔍 Search relevant sources (Local docs, Web, Wikipedia)
        - 📝 Generate a comprehensive answer
        - 💾 Remember conversation context for follow-up questions
        3. Use **Reset Conversation** to start a new thread
        ---
        *Powered by LangGraph + LangChain + ChromaDB + Anthropic Claude*
        """)
    return demo
if __name__ == "__main__":
    # Build the UI and serve it on localhost only (no public share link).
    demo = create_gradio_ui()
    print("🚀 Starting Gradio interface...")
    print("📍 Running on: http://127.0.0.1:7860")
    demo.launch(
        share=False,
        server_name="127.0.0.1",
        server_port=7860,
        show_error=True  # surface handler exceptions in the browser UI
    )