|
|
""" |
|
|
CX AI Agent - Autonomous MCP Demo |
|
|
|
|
|
This is the PROPER MCP implementation where: |
|
|
- AI (Claude 3.5 Sonnet) autonomously calls MCP tools |
|
|
- NO hardcoded workflow |
|
|
- AI decides which tools to use and when |
|
|
- Full Model Context Protocol demonstration |
|
|
|
|
|
Perfect for MCP hackathon! |
|
|
""" |
|
|
|
|
|
import os |
|
|
import gradio as gr |
|
|
import asyncio |
|
|
from pathlib import Path |
|
|
from dotenv import load_dotenv |
|
|
|
|
|
|
|
|
# Load environment variables (e.g. ANTHROPIC_API_KEY) from a local .env file.
load_dotenv()

# Force the in-memory MCP transport so the demo runs without spawning
# external MCP server processes.
# NOTE(review): set before the mcp imports below — presumably the registry
# reads this at import/initialization time; confirm against mcp.registry.
os.environ["USE_IN_MEMORY_MCP"] = "true"

from mcp.registry import get_mcp_registry
from mcp.agents.autonomous_agent import AutonomousMCPAgent

# Shared registry of MCP tool servers, reused across all agent runs.
mcp_registry = get_mcp_registry()
|
|
|
|
|
|
|
|
def _format_agent_event(event: dict) -> str:
    """Render one agent progress event as display text.

    Pure helper: maps a typed event dict from AutonomousMCPAgent.run()
    to the text appended to the streaming log. Unknown event types
    render as an empty string (the log is still re-yielded by the caller).
    """
    event_type = event.get("type")
    message = event.get("message", "")
    banner = "=" * 60

    if event_type == "agent_start":
        return (
            f"\n{banner}\n"
            f"{message}\n"
            f"Model: {event.get('model')}\n"
            f"{banner}\n\n"
        )

    if event_type == "iteration_start":
        return f"\n{message}\n"

    if event_type == "tool_call":
        tool_input = event.get("input", {})
        return f"\n{message}\n Input: {tool_input}\n"

    if event_type == "tool_result":
        text = f"{message}\n"
        result = event.get("result", {})
        # Summarize common result shapes; other dicts just show the message.
        if isinstance(result, dict):
            if "count" in result:
                text += f" β Returned {result['count']} items\n"
            elif "status" in result:
                text += f" β Status: {result['status']}\n"
        return text

    if event_type == "tool_error":
        return f"\n{message}\n Error: {event.get('error')}\n"

    if event_type == "agent_complete":
        final_response = event.get("final_response", "")
        return (
            f"\n{banner}\n"
            f"{message}\n"
            f"Iterations: {event.get('iterations', 0)}\n"
            f"{banner}\n\n"
            f"**Final Response:**\n\n{final_response}\n"
        )

    if event_type == "agent_error":
        return f"\n{message}\nError: {event.get('error')}\n"

    if event_type == "agent_max_iterations":
        return f"\n{message}\n"

    return ""


async def run_autonomous_agent(task: str, api_key: str):
    """
    Run the autonomous AI agent with MCP tool calling.

    Validates inputs, constructs the agent, then streams a cumulative,
    human-readable progress log as the agent emits events.

    Args:
        task: The task for the AI to complete autonomously.
        api_key: Anthropic API key for Claude.

    Yields:
        str: The full progress log so far, re-yielded after every event
            so a streaming UI textbox always shows complete history.
    """
    # Fail fast with a user-visible message instead of raising into the UI.
    if not api_key:
        yield "β Error: Please provide an Anthropic API key"
        return

    if not task:
        yield "β Error: Please provide a task description"
        return

    try:
        agent = AutonomousMCPAgent(mcp_registry=mcp_registry, api_key=api_key)
    except Exception as e:
        yield f"β Error initializing agent: {str(e)}"
        return

    output_text = ""

    try:
        # max_iterations caps the autonomous tool-calling loop.
        async for event in agent.run(task, max_iterations=15):
            output_text += _format_agent_event(event)
            yield output_text
    except Exception as e:
        output_text += f"\n\nβ Agent execution failed: {str(e)}\n"
        yield output_text
|
|
|
|
|
|
|
|
def create_demo():
    """Create Gradio demo interface.

    Builds a two-column Blocks layout: inputs (API key, task, example
    picker, run button) on the left, a streaming progress textbox on the
    right. Returns the unlaunched gr.Blocks app.
    """

    with gr.Blocks(title="CX AI Agent - Autonomous MCP Demo", theme=gr.themes.Soft()) as demo:
        # Intro copy shown above the controls.
        # NOTE(review): emoji in these strings appear mojibake-garbled in the
        # source (e.g. "β", "π€") — likely an encoding issue; confirm originals.
        gr.Markdown("""
        # π€ CX AI Agent - Autonomous MCP Demo

        This demo shows **true AI-driven MCP usage** where Claude 3.5 Sonnet:
        - β Autonomously decides which MCP tools to call
        - β Uses Model Context Protocol servers (Search, Store, Email, Calendar)
        - β NO hardcoded workflow - AI makes all decisions
        - β Proper MCP protocol implementation

        ## Available MCP Tools:
        - π **Search**: Web search, news search
        - πΎ **Store**: Save/retrieve prospects, companies, contacts, facts
        - π§ **Email**: Send emails, track threads
        - π **Calendar**: Suggest meeting times, generate invites

        ## Example Tasks:
        - "Research Shopify and determine if they're a good B2B prospect"
        - "Find 3 e-commerce companies and save them as prospects"
        - "Create a personalized outreach campaign for Stripe"
        - "Find recent news about AI startups and save as facts"
        """)

        with gr.Row():
            with gr.Column():
                # Left column: credentials and task entry.
                api_key_input = gr.Textbox(
                    label="Anthropic API Key",
                    type="password",
                    placeholder="sk-ant-...",
                    info="Required for Claude 3.5 Sonnet (get one at console.anthropic.com)"
                )

                task_input = gr.Textbox(
                    label="Task for AI Agent",
                    placeholder="Research Shopify and create a prospect profile with facts",
                    lines=3,
                    info="Describe what you want the AI to do autonomously"
                )

                # Pre-canned tasks so visitors can try the agent in one click.
                example_tasks = gr.Dropdown(
                    label="Example Tasks (click to use)",
                    choices=[
                        "Research Shopify and determine if they're a good B2B SaaS prospect",
                        "Find recent news about Stripe and save as facts in the database",
                        "Create a prospect profile for Notion including company info and facts",
                        "Search for B2B SaaS companies in the e-commerce space and save top 3 prospects",
                        "Research Figma's recent product launches and save relevant facts",
                    ],
                    interactive=True
                )

                # Copies the selected dropdown value into the task textbox.
                def use_example(example):
                    return example

                example_tasks.change(fn=use_example, inputs=[example_tasks], outputs=[task_input])

                run_btn = gr.Button("π Run Autonomous Agent", variant="primary", size="lg")

            with gr.Column():
                # Right column: streaming log of agent events + final answer.
                output = gr.Textbox(
                    label="Agent Progress & Results",
                    lines=25,
                    max_lines=50,
                    show_copy_button=True
                )

        # run_autonomous_agent is an async generator, so the output textbox
        # updates incrementally as the agent yields progress.
        run_btn.click(
            fn=run_autonomous_agent,
            inputs=[task_input, api_key_input],
            outputs=[output]
        )

        # Footer documentation shown below the controls.
        gr.Markdown("""
        ## π― How It Works

        1. **You provide a task** - Tell the AI what you want to accomplish
        2. **AI analyzes the task** - Claude understands what needs to be done
        3. **AI decides which tools to use** - Autonomously chooses MCP tools
        4. **AI executes tools** - Calls MCP servers (search, store, email, calendar)
        5. **AI continues until complete** - Keeps working until task is done

        ## π True MCP Implementation

        This is **NOT** a hardcoded workflow! The AI:
        - β Decides which tools to call based on context
        - β Adapts to new information
        - β Can call tools in any order
        - β Reasons about what information it needs
        - β Stores data for later use

        ## π‘ Tips

        - Be specific about what you want
        - The AI can search, save data, and reason about prospects
        - Try multi-step tasks to see autonomous decision-making
        - Check the progress log to see which tools the AI chooses

        ---

        **Powered by:** Claude 3.5 Sonnet + Model Context Protocol (MCP)
        """)

    return demo
|
|
|
|
|
|
|
|
if __name__ == "__main__":
    # Build the UI and serve it on all interfaces at the default Gradio port.
    app = create_demo()
    launch_options = {
        "server_name": "0.0.0.0",
        "server_port": 7860,
        "show_error": True,
    }
    app.launch(**launch_options)
|
|
|