|
|
import gradio as gr |
|
|
from gradio import ChatMessage |
|
|
import asyncio |
|
|
import json |
|
|
import hashlib |
|
|
from datetime import datetime |
|
|
from agent import LlamaIndexReportAgent |
|
|
from tools.simple_tools import get_workflow_state |
|
|
from llama_index.core.agent.workflow import ( |
|
|
AgentInput, |
|
|
AgentOutput, |
|
|
ToolCall, |
|
|
ToolCallResult, |
|
|
AgentStream, |
|
|
) |
|
|
from llama_index.core.workflow import Context |
|
|
|
|
|
|
|
|
# Module-level cache for the singleton agent workflow (created on first use).
agent_workflow = None


def get_agent_workflow():
    """Return the process-wide ``LlamaIndexReportAgent``, creating it lazily.

    The agent is expensive to construct, so it is instantiated exactly once
    and cached in the module-level ``agent_workflow`` variable; subsequent
    calls return the cached instance.
    """
    global agent_workflow
    if agent_workflow is not None:
        return agent_workflow
    agent_workflow = LlamaIndexReportAgent()
    return agent_workflow
|
|
|
|
|
async def chat_with_agent(message, history):
    """
    Async chat function that runs the agent workflow and streams each step.

    Yields 4-tuples matching the Gradio outputs wired below:
    ``(chat history, final-report textbox value/update, report-metadata
    JSON value/update, input textbox update)``.  Intermediate yields keep
    the input textbox disabled; the final yield re-enables it.

    Args:
        message: The user's request text.
        history: Existing list of ``ChatMessage`` objects, or ``None``/empty
            on the first turn.
    """
    # Echo the user's message immediately, then clear and lock the input
    # box for the duration of the run.
    history = history or []
    history.append(ChatMessage(role="user", content=message))

    yield history, None, None, gr.update(value="", interactive=False)

    final_report_content = None    # plain-text report for the report textbox
    structured_report_data = None  # dict rendered in the metadata JSON panel
    displayed_tool_calls = set()   # dedup keys so a repeated ToolCall renders once

    try:
        workflow = get_agent_workflow()

        # Fresh per-run workflow context, seeded with the shared state that
        # the agents' tools read and mutate during the run.
        ctx = Context(workflow.agent_workflow)
        await ctx.set("state", {
            "research_notes": {},
            "report_content": "Not written yet.",
            "review": "Review required.",
        })

        handler = workflow.agent_workflow.run(user_msg=message, ctx=ctx)

        current_agent = None  # name of the agent currently producing events

        # Mirror each streamed workflow event into the chat transcript.
        async for event in handler.stream_events():
            print(f"DEBUG: Event type: {type(event).__name__}")

            # Announce agent hand-offs whenever the active agent changes.
            if hasattr(event, "current_agent_name") and event.current_agent_name != current_agent:
                current_agent = event.current_agent_name
                history.append(ChatMessage(
                    role="assistant",
                    content=f"**🤖 Agent: {current_agent}**",
                    metadata={"title": f"Agent: {current_agent}"}
                ))
                yield history, final_report_content, structured_report_data, gr.update(interactive=False)

            if isinstance(event, ToolCall):
                # Dedup key: agent + tool + short hash of the (sorted) kwargs,
                # so the same logical call is only displayed once even if the
                # stream emits it multiple times.
                tool_call_kwargs_str = json.dumps(getattr(event, 'tool_kwargs', {}), sort_keys=True)
                tool_call_key = f"{current_agent}:{event.tool_name}:{hashlib.md5(tool_call_kwargs_str.encode()).hexdigest()[:8]}"
                print(f"DEBUG: ToolCall detected - Agent: {current_agent}, Tool: {event.tool_name}, Args: {getattr(event, 'tool_kwargs', {})}")

                if tool_call_key not in displayed_tool_calls:
                    # Truncate long argument payloads to a 100-char preview.
                    args_preview = str(getattr(event, 'tool_kwargs', {}))[:100] + "..." if len(str(getattr(event, 'tool_kwargs', {}))) > 100 else str(getattr(event, 'tool_kwargs', {}))
                    history.append(ChatMessage(
                        role="assistant",
                        content=f"**🔨 Calling Tool:** `{event.tool_name}`\n**Arguments:** {args_preview}",
                        metadata={"title": f"{current_agent} - Tool Call"}
                    ))
                    displayed_tool_calls.add(tool_call_key)
                    yield history, final_report_content, structured_report_data, gr.update(interactive=False)

            elif isinstance(event, ToolCallResult):
                print(f"DEBUG: ToolCallResult - Tool: {getattr(event, 'tool_name', 'unknown')}, Output: {getattr(event, 'tool_output', 'no output')}")

                tool_output = getattr(event, 'tool_output', 'No output')
                tool_name = getattr(event, 'tool_name', 'unknown')
                # Truncate long tool outputs to a 200-char preview.
                output_preview = str(tool_output)[:200] + "..." if len(str(tool_output)) > 200 else str(tool_output)

                history.append(ChatMessage(
                    role="assistant",
                    content=f"**🔧 Tool Result ({tool_name}):**\n{output_preview}",
                    metadata={"title": f"{current_agent} - Tool Result"}
                ))
                yield history, final_report_content, structured_report_data, gr.update(interactive=False)

            # Only surface agent outputs that actually carry text content.
            elif isinstance(event, AgentOutput) and event.response.content:
                print(f"DEBUG: AgentOutput from {current_agent}: {event.response.content}")

                history.append(ChatMessage(
                    role="assistant",
                    content=f"**📤 Thought:** {event.response.content}",
                    metadata={"title": f"{current_agent} - Output"}
                ))
                yield history, final_report_content, structured_report_data, gr.update(interactive=False)

        # Stream exhausted: pull the final shared state that the tools wrote.
        print("DEBUG: Workflow completed, extracting final state...")
        final_state = get_workflow_state()
        print(f"DEBUG: Final state keys: {final_state.keys() if final_state else 'None'}")

        if final_state:
            print(f"DEBUG: Final state content: {json.dumps(final_state, indent=2, default=str)}")

            research_notes = final_state.get("research_notes", {})
            print(f"DEBUG: Research notes found: {len(research_notes)} items")
            for title, content in research_notes.items():
                print(f"DEBUG: Research note '{title}': {content[:100]}..." if len(content) > 100 else f"DEBUG: Research note '{title}': {content}")

            # Preferred path: a fully structured report produced by the agents.
            if final_state.get("structured_report"):
                structured_report_data = final_state["structured_report"]
                final_report_content = structured_report_data.get("content", "*Report content not found in structured report.*")
                print(f"DEBUG: Found structured report with content length: {len(final_report_content) if final_report_content else 0}")
            else:
                # Fallback 1: raw report text stored directly in the state
                # (ignore the "Not written yet." placeholder seeded above).
                final_report_content = final_state.get("report_content", None)
                if final_report_content and final_report_content != "Not written yet.":
                    print(f"DEBUG: Found report_content directly in state with length: {len(final_report_content)}")

                    # Synthesize metadata so the JSON panel still has content.
                    structured_report_data = {
                        "title": "Generated Report",
                        "content": final_report_content,
                        "word_count": len(final_report_content.split()),
                        "generated_at": datetime.now().isoformat(),
                        "research_notes_count": len(final_state.get("research_notes", {}))
                    }
                else:
                    print("DEBUG: No valid report content found in final state")
                    print(f"DEBUG: report_content value: '{final_report_content}'")

                    # Fallback 2: no report was written at all — surface the
                    # research notes so the run is not a total loss.
                    if research_notes:
                        final_report_content = f"**Research completed but report not written.**\n\n**Research Notes:**\n\n"
                        for title, content in research_notes.items():
                            final_report_content += f"### {title}\n{content}\n\n"
                        structured_report_data = {
                            "title": "Research Notes (Report Incomplete)",
                            "content": final_report_content,
                            "word_count": len(final_report_content.split()),
                            "generated_at": datetime.now().isoformat(),
                            "research_notes_count": len(research_notes),
                            "status": "incomplete"
                        }
                        print(f"DEBUG: Created fallback report from research notes")
                    else:
                        # Nothing usable at all.
                        final_report_content = None
                        structured_report_data = None
        else:
            print("DEBUG: No final state retrieved")
            final_report_content = None
            structured_report_data = None

        history.append(ChatMessage(
            role="assistant",
            content="✅ **Workflow completed!**",
            metadata={"title": "Workflow Complete"}
        ))

        # Build the final component updates: show the report panels when we
        # have content, otherwise show a placeholder message and hide the JSON.
        if final_report_content:
            final_report_update = gr.update(value=final_report_content, visible=True)
            json_report_update = gr.update(value=structured_report_data, visible=True) if structured_report_data else gr.update(visible=False)
        else:
            final_report_update = gr.update(value="*No final report was generated. Check the workflow execution above.*", visible=True)
            json_report_update = gr.update(visible=False)

        # Final yield re-enables the input box for the next request.
        yield history, final_report_update, json_report_update, gr.update(interactive=True, placeholder="Enter your next request...")

    except Exception as e:
        # Best-effort error reporting: log the traceback server-side and show
        # the error in the chat, then unlock the input so the user can retry.
        print(f"ERROR in chat_with_agent: {e}")
        import traceback
        traceback.print_exc()
        history.append(ChatMessage(role="assistant", content=f"❌ **Error:** {str(e)}", metadata={"title": "Error"}))
        yield history, gr.update(visible=False), gr.update(visible=False), gr.update(interactive=True)
|
|
|
|
|
def like_feedback(evt: gr.LikeData):
    """Log a like/dislike event raised on a chat message to stdout."""
    feedback_line = f"User feedback - Index: {evt.index}, Liked: {evt.liked}, Value: {evt.value}"
    print(feedback_line)
|
|
|
|
|
def format_structured_report_display(structured_report_data):
    """Build a ``gr.JSON`` component for the structured report metadata.

    Returns a visible component holding the data, or a hidden one when
    *structured_report_data* is falsy (no report available).
    """
    if structured_report_data:
        return gr.JSON(value=structured_report_data, visible=True)
    return gr.JSON(visible=False)
|
|
|
|
|
|
|
|
# ---------------------------------------------------------------------------
# Gradio UI definition. `demo` is the top-level Blocks app launched below.
# ---------------------------------------------------------------------------
with gr.Blocks(title="LlamaIndex Report Generation Agent", theme=gr.themes.Soft()) as demo:
    # Intro / header copy.
    gr.Markdown("""
    # 🤖 LlamaIndex Report Generation Agent

    A multi-agent workflow built with LlamaIndex that uses teacher-student methodology to generate comprehensive reports. The system employs three specialized agents that collaborate step by step:
    - **ResearchAgent**: Searches the web and records research notes
    - **WriteAgent**: Creates structured reports based on research findings
    - **ReviewAgent**: Reviews reports and provides iterative feedback for improvement

    Enter any topic below to see the LlamaIndex agents collaborate using teacher-student methodology!
    """)

    # Main transcript: chat_with_agent streams agent/tool events into this.
    chatbot = gr.Chatbot(
        label="Agent Workflow",
        type="messages",
        height=600,
        show_copy_button=True,
        placeholder="Ask me to write a report on any topic...",
        render_markdown=True
    )
    # NOTE(review): like_feedback is defined above but never bound here via
    # chatbot.like(...) — confirm whether the binding lives elsewhere or is missing.

    # Input row: request text + submit button.
    with gr.Row():
        textbox = gr.Textbox(
            placeholder="Enter your request...",
            container=False,
            scale=7
        )
        submit_btn = gr.Button("Submit", variant="primary", scale=1)

    # Output row: final report text (left) and structured metadata JSON (right).
    # Both start hidden; chat_with_agent's final yield reveals them.
    with gr.Row():
        with gr.Column(scale=2):
            final_report_output = gr.Textbox(
                label="📄 Final Report",
                interactive=False,
                lines=20,
                show_copy_button=True,
                visible=False
            )
        with gr.Column(scale=1):
            structured_report_json = gr.JSON(label="📊 Report Metadata", visible=False)

    # Clickable example prompts that populate the textbox.
    gr.Examples(
        examples=[
            "Write a report on the history of artificial intelligence",
            "Create a report about renewable energy technologies",
            "Write a report on the impact of social media on society",
        ],
        inputs=textbox,
    )

    # Footer explaining the agent collaboration loop.
    gr.Markdown("""
    ### How the LlamaIndex Teacher-Student Agent Works:
    1. **ResearchAgent** searches for information and takes comprehensive notes
    2. **WriteAgent** creates a structured report based on the research findings
    3. **ReviewAgent** reviews the report and provides constructive feedback
    4. The process iterates until the report meets quality standards

    Watch the real-time collaboration between LlamaIndex agents as they employ teacher-student methodology!
    """)

    # Wire both the button click and Enter-in-textbox to the same streaming
    # handler; its 4-tuple yields map onto these four output components.
    submit_btn.click(
        chat_with_agent,
        inputs=[textbox, chatbot],
        outputs=[chatbot, final_report_output, structured_report_json, textbox],
        queue=True
    )

    textbox.submit(
        chat_with_agent,
        inputs=[textbox, chatbot],
        outputs=[chatbot, final_report_output, structured_report_json, textbox],
        queue=True
    )
|
|
|
|
|
if __name__ == "__main__":
    # Start the Gradio server when the module is run as a script.
    demo.launch()
|
|
|