Upload 8 files
Browse files- Dockerfile +36 -0
- pyproject.toml +65 -0
- src/__init__.py +1 -0
- src/gradio_app/__init__.py +1 -0
- src/gradio_app/app.py +454 -0
- src/mcp_agent/__init__.py +1 -0
- src/mcp_agent/agent.py +193 -0
- uv.lock +0 -0
Dockerfile
ADDED
|
@@ -0,0 +1,36 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Use Python 3.13.2 slim image for efficiency
FROM python:3.13.2-slim

# Install system dependencies (git for VCS-based deps, curl for health checks)
# and clean the apt cache in the same layer to keep the image small.
RUN apt-get update && apt-get install -y --no-install-recommends \
    git \
    curl \
    && rm -rf /var/lib/apt/lists/*

# Install uv by copying the static binary from the official image
COPY --from=ghcr.io/astral-sh/uv:latest /uv /bin/uv

# Set working directory
WORKDIR /app

# Set environment variables: unbuffered stdout/stderr for container logs,
# and skip writing .pyc files.
ENV PYTHONUNBUFFERED=1
ENV PYTHONDONTWRITEBYTECODE=1

# Copy project configuration files first to leverage Docker layer caching
COPY pyproject.toml uv.lock ./

# Install Python dependencies using uv (frozen lockfile, no dev extras)
RUN uv sync --frozen --no-dev

# Copy application files
COPY src/ ./src/

# Create directories that might be needed
RUN mkdir -p notebooks

# Expose the port that Gradio will run on
EXPOSE 7860

# Run the app as a module so the `src` package is importable.
# BUG FIX: running the file by path ("python src/gradio_app/app.py") puts
# /app/src/gradio_app — not /app — on sys.path, so the app's
# `from src.mcp_agent.agent import MCP_Agent` import would fail.
CMD ["uv", "run", "python", "-m", "src.gradio_app.app"]
|
pyproject.toml
ADDED
|
@@ -0,0 +1,65 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
[project]
name = "mcp-agent"
version = "0.1.0"
description = "MCP Agent for AI-powered task automation"
# hide for hugging face
# readme = "README.md"
requires-python = ">=3.13.2"
license = {text = "MIT"}
authors = [
    {name = "Tristan Padiou", email = "Padioutristan@gmail.com"},
]
classifiers = [
    "Development Status :: 3 - Alpha",
    "Intended Audience :: Developers",
    "License :: OSI Approved :: MIT License",
    "Operating System :: OS Independent",
    "Programming Language :: Python :: 3",
    "Programming Language :: Python :: 3.13",
]
dependencies = [
    "pydantic-ai==0.3.5",
    "gradio==5.35.0",
]

[project.optional-dependencies]
dev = [
    "pytest>=7.0.0",
    "black>=23.0.0",
    "flake8>=5.0.0",
    "mypy>=1.0.0",
    "pre-commit>=2.20.0",
]

[project.urls]
Homepage = "https://github.com/tristanpadiou/MCP-Agent"
Repository = "https://github.com/tristanpadiou/MCP-Agent"
# BUG FIX: URL was missing the "/" before "issues" ("MCP-Agentissues").
Issues = "https://github.com/tristanpadiou/MCP-Agent/issues"

# uv reads its own dev-dependency list; kept in sync with
# [project.optional-dependencies].dev above.
[tool.uv]
dev-dependencies = [
    "pytest>=7.0.0",
    "black>=23.0.0",
    "flake8>=5.0.0",
    "mypy>=1.0.0",
    "pre-commit>=2.20.0",
]


[tool.black]
line-length = 100
target-version = ['py313']
include = '\.pyi?$'

[tool.mypy]
python_version = "3.13"
warn_return_any = true
warn_unused_configs = true
disallow_untyped_defs = true

[tool.pytest.ini_options]
testpaths = ["tests"]
python_files = ["test_*.py", "*_test.py"]
python_classes = ["Test*"]
python_functions = ["test_*"]
|
src/__init__.py
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
# Empty file to make the directory a package.
|
src/gradio_app/__init__.py
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
# Empty file to make the directory a package.
|
src/gradio_app/app.py
ADDED
|
@@ -0,0 +1,454 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import gradio as gr
|
| 2 |
+
import asyncio
|
| 3 |
+
import json
|
| 4 |
+
import atexit
|
| 5 |
+
import signal
|
| 6 |
+
import sys
|
| 7 |
+
from src.mcp_agent.agent import MCP_Agent
|
| 8 |
+
|
| 9 |
+
class GradioMCPApp:
    """State holder for the Gradio UI.

    Keeps the MCP agent instance, the chat transcript, and a private asyncio
    event loop used to drive the agent's coroutines from Gradio's synchronous
    callbacks.
    """

    def __init__(self):
        self.agent = None        # MCP_Agent instance, set by initialize_agent()
        self.chat_history = []   # list of [user_message, agent_reply] pairs
        self._loop = None        # lazily created asyncio event loop
        self.server_count = 1    # number of server config groups shown in the UI

    def get_or_create_loop(self):
        """Get existing event loop or create a new one."""
        if self._loop is None or self._loop.is_closed():
            self._loop = asyncio.new_event_loop()
            asyncio.set_event_loop(self._loop)
        return self._loop

    @staticmethod
    def _parse_headers(headers):
        """Decode the headers textbox value into a dict, or None when blank.

        The UI hint shows single-quoted pseudo-JSON ({'Authorization': ...}),
        so a second, quote-normalized parse is attempted before giving up.

        Raises:
            ValueError: if the text cannot be parsed, so initialize_agent
                reports a clear error instead of passing garbage downstream.
        """
        if not headers or not headers.strip():
            return None
        text = headers.strip()
        try:
            return json.loads(text)
        except json.JSONDecodeError:
            try:
                return json.loads(text.replace("'", '"'))
            except json.JSONDecodeError:
                raise ValueError(f"Headers must be valid JSON, got: {text!r}")

    async def initialize_agent(self, openai_api_key, *server_configs):
        """Create and connect an MCP_Agent from the form values.

        Args:
            openai_api_key: OpenAI key forwarded to the agent.
            *server_configs: flat sequence
                [url1, name1, type1, headers1, url2, name2, type2, headers2, ...].

        Returns:
            (success: bool, status_message: str)
        """
        try:
            # Clean up existing agent first
            if self.agent:
                await self.disconnect_agent()

            # Build MCP servers configuration from form fields.
            mcp_servers = []
            for i in range(0, len(server_configs), 4):
                # Ignore trailing values that do not form a complete group of 4.
                if i + 3 < len(server_configs):
                    server_url, server_name, server_type, headers = server_configs[i:i + 4]

                    if server_url and server_url.strip():
                        mcp_servers.append({
                            'url': server_url.strip(),
                            'name': server_name.strip() if server_name and server_name.strip() else f'server_{i//4 + 1}',
                            'type': server_type if server_type else 'http',
                            # BUG FIX: the raw textbox string used to be forwarded
                            # verbatim; the MCP server clients expect a headers
                            # mapping, so parse it here (this is what the
                            # module-level `import json` was for).
                            'headers': self._parse_headers(headers),
                        })

            # Initialize agent
            api_keys = {'openai_api_key': openai_api_key}
            self.agent = MCP_Agent(api_keys=api_keys, mpc_server_urls=mcp_servers)

            # Connect to MCP servers
            await self.agent.connect()

            server_count = len(mcp_servers)
            if server_count == 0:
                return True, "Agent initialized successfully (no MCP servers configured)!"
            else:
                return True, f"Agent initialized successfully with {server_count} MCP server(s)!"

        except Exception as e:
            return False, f"Error initializing agent: {str(e)}"

    async def chat_with_agent(self, message):
        """Send one user message to the agent.

        Returns:
            (chat_history, error_message) — error_message is "" on success.
        """
        if not self.agent:
            return self.chat_history, "Please initialize the agent first by providing your OpenAI API key and clicking 'Initialize Agent'."

        if not message or not message.strip():
            return self.chat_history, "Please provide a message."

        try:
            # Get response from agent
            response = await self.agent.chat(message.strip())

            # Update chat history
            self.chat_history.append([message.strip(), str(response)])

            return self.chat_history, ""

        except Exception as e:
            error_msg = f"Error during chat: {str(e)}"
            self.chat_history.append([message, error_msg])
            return self.chat_history, error_msg

    async def reset_agent(self):
        """Reset the agent's conversation history and the UI transcript."""
        if self.agent:
            self.agent.reset()
            self.chat_history = []
            return [], "Agent conversation history reset successfully!"
        else:
            return [], "No agent to reset. Please initialize the agent first."

    async def disconnect_agent(self):
        """Disconnect from MCP servers and drop the agent reference."""
        if self.agent:
            try:
                await self.agent.disconnect()
            except Exception as e:
                print(f"Error during disconnect: {e}")
            finally:
                self.agent = None
                self.chat_history = []
        return [], "Agent disconnected successfully!"

    async def cleanup(self):
        """Release the agent and close the private loop (shutdown path)."""
        if self.agent:
            await self.disconnect_agent()
        # BUG FIX: this coroutine is driven by run_until_complete() on the very
        # loop being closed; closing a running loop raises RuntimeError, so only
        # close it when it is not currently running.
        if self._loop and not self._loop.is_closed() and not self._loop.is_running():
            self._loop.close()
|
| 114 |
+
|
| 115 |
+
# Create the app instance
|
| 116 |
+
app_instance = GradioMCPApp()
|
| 117 |
+
|
| 118 |
+
def run_async_safely(coro, *args):
    """Drive *coro(*args)* to completion on the app's shared event loop.

    On failure, prints the error and returns a (None, message) pair so that
    callers which unpack two values keep working.
    """
    loop = app_instance.get_or_create_loop()
    try:
        outcome = loop.run_until_complete(coro(*args))
    except Exception as e:
        print(f"Error in async operation: {e}")
        return None, f"Error: {str(e)}"
    else:
        return outcome
|
| 126 |
+
|
| 127 |
+
# Define async wrapper functions for Gradio
|
| 128 |
+
def initialize_agent_wrapper(openai_api_key, *server_configs):
    """Bridge the async initialize_agent call into Gradio's sync callback API.

    Shows the chat interface on success and the placeholder on failure
    (a None result from the async runner counts as failure).
    """
    success, message = run_async_safely(
        app_instance.initialize_agent, openai_api_key, *server_configs
    )
    ok = bool(success)  # None (infrastructure error) behaves like False
    return gr.update(visible=ok), gr.update(visible=not ok), message
|
| 136 |
+
|
| 137 |
+
def chat_wrapper(message):
    """Run one chat turn; returns (history, error_message, "") — the trailing
    empty string clears the input textbox."""
    chat_history, error_msg = run_async_safely(app_instance.chat_with_agent, message)
    history = [] if chat_history is None else chat_history
    return history, error_msg, ""
|
| 142 |
+
|
| 143 |
+
def reset_wrapper():
    """Clear the agent's conversation history via the async runner."""
    chat_history, message = run_async_safely(app_instance.reset_agent)
    history = [] if chat_history is None else chat_history
    return history, message
|
| 148 |
+
|
| 149 |
+
def disconnect_wrapper():
    """Disconnect the agent and swap the chat UI back to the placeholder."""
    chat_history, message = run_async_safely(app_instance.disconnect_agent)
    history = [] if chat_history is None else chat_history
    # Hide the chat interface, show the welcome placeholder.
    return history, gr.update(visible=False), gr.update(visible=True), message
|
| 154 |
+
|
| 155 |
+
# Cleanup function for graceful shutdown
|
| 156 |
+
def cleanup_on_exit():
    """Best-effort resource cleanup used by atexit and the SIGINT handler."""
    try:
        loop = app_instance.get_or_create_loop()
    except Exception as e:
        print(f"Error during cleanup: {e}")
        return
    if loop.is_closed():
        return
    try:
        loop.run_until_complete(app_instance.cleanup())
    except Exception as e:
        print(f"Error during cleanup: {e}")
|
| 164 |
+
|
| 165 |
+
# Register cleanup function
|
| 166 |
+
atexit.register(cleanup_on_exit)
|
| 167 |
+
|
| 168 |
+
# Handle SIGINT (Ctrl+C) gracefully
|
| 169 |
+
def signal_handler(signum, frame):
    """Handle SIGINT: run the shared cleanup path, then exit cleanly."""
    print("\nReceived interrupt signal. Cleaning up...")
    cleanup_on_exit()
    sys.exit(0)
|
| 173 |
+
|
| 174 |
+
signal.signal(signal.SIGINT, signal_handler)
|
| 175 |
+
|
| 176 |
+
# Server management functions
|
| 177 |
+
def add_server(current_count):
    """Reveal the next server configuration group (capped at 3 servers).

    Returns the new count plus visibility/interactivity updates for the two
    optional server groups and the add/remove buttons.
    """
    new_count = min(current_count + 1, 3)  # Max 3 servers
    show_second = new_count >= 2
    show_third = new_count >= 3
    return (
        new_count,
        gr.update(visible=show_second),        # server2_group
        gr.update(visible=show_third),         # server3_group
        gr.update(interactive=new_count < 3),  # add_server_btn
        gr.update(interactive=new_count > 1),  # remove_server_btn
    )
|
| 187 |
+
|
| 188 |
+
def remove_server(current_count):
    """Hide the last visible server configuration group (min 1 server).

    Mirrors add_server: returns the new count plus UI updates for the two
    optional server groups and the add/remove buttons.
    """
    new_count = max(current_count - 1, 1)  # Min 1 server
    show_second = new_count >= 2
    show_third = new_count >= 3
    return (
        new_count,
        gr.update(visible=show_second),        # server2_group
        gr.update(visible=show_third),         # server3_group
        gr.update(interactive=new_count < 3),  # add_server_btn
        gr.update(interactive=new_count > 1),  # remove_server_btn
    )
|
| 198 |
+
|
| 199 |
+
|
| 200 |
+
|
| 201 |
+
# Create the Gradio interface.
# Layout: a header row, a sidebar with API-key + MCP-server configuration,
# and a main column holding the chatbot (hidden until the agent is
# initialized). Event wiring follows the component definitions.
with gr.Blocks(title="MCP Agent Chat", theme=gr.themes.Soft()) as demo:
    with gr.Row():

        gr.Markdown("# MCP Agent Chat Interface")
        gr.HTML("") # Balance spacing



    with gr.Sidebar():
        # Sidebar for configuration
        sidebar_column = gr.Column(scale=1, min_width=350)
        with sidebar_column:
            gr.Markdown("## 🔧 Configuration")

            openai_key = gr.Textbox(
                label="OpenAI API Key",
                type="password",
                placeholder="sk-...",
                info="Your OpenAI API key for the language model"
            )

            gr.Markdown("### MCP Servers Setup")
            gr.Markdown("Configure your MCP server connections (leave all URLs empty to run without MCP servers)")

            # Container for dynamic server configurations
            servers_container = gr.Column()

            # Initial server configuration
            with servers_container:
                # Server 1 (always present)
                with gr.Tab("Url based servers"):
                    with gr.Group():
                        gr.Markdown("#### Server 1")
                        server1_url = gr.Textbox(
                            label="Server URL",
                            placeholder="http://localhost:8000",
                            info="The URL of your MCP server"
                        )
                        server1_name = gr.Textbox(
                            label="Server Name",
                            placeholder="server_1",
                            info="A friendly name for your MCP server"
                        )
                        server1_type = gr.Dropdown(
                            label="Server Type",
                            choices=["http", "SSE"],
                            value="http",
                            info="The type of MCP server connection"
                        )
                        server1_headers = gr.Textbox(
                            label="Headers (Optional)",
                            type="password",
                            placeholder="Leave empty if not required",
                            info="Headers for the MCP server (if required), example: {'Authorization': 'Bearer 1234567890'}"
                        )

                    # Server 2 (optional) — hidden until add_server reveals it
                    server2_group = gr.Group(visible=False)
                    with server2_group:
                        gr.Markdown("#### Server 2")
                        server2_url = gr.Textbox(
                            label="Server URL",
                            placeholder="http://localhost:8001",
                            info="The URL of your MCP server"
                        )
                        server2_name = gr.Textbox(
                            label="Server Name",
                            placeholder="server_2",
                            info="A friendly name for your MCP server"
                        )
                        server2_type = gr.Dropdown(
                            label="Server Type",
                            choices=["http", "SSE"],
                            value="http",
                            info="The type of MCP server connection"
                        )
                        server2_headers = gr.Textbox(
                            label="Headers (Optional)",
                            type="password",
                            placeholder="Leave empty if not required",
                            info="Headers for the MCP server (if required), example: {'Authorization': 'Bearer 1234567890'}"
                        )

                    # Server 3 (optional) — hidden until add_server reveals it
                    server3_group = gr.Group(visible=False)
                    with server3_group:
                        gr.Markdown("#### Server 3")
                        server3_url = gr.Textbox(
                            label="Server URL",
                            placeholder="http://localhost:8002",
                            info="The URL of your MCP server"
                        )
                        server3_name = gr.Textbox(
                            label="Server Name",
                            placeholder="server_3",
                            info="A friendly name for your MCP server"
                        )
                        server3_type = gr.Dropdown(
                            label="Server Type",
                            choices=["http", "SSE"],
                            value="http",
                            info="The type of MCP server connection"
                        )
                        server3_headers = gr.Textbox(
                            label="Headers (Optional)",
                            type="password",
                            placeholder="Leave empty if not required",
                            info="Headers for the MCP server (if required), example: {'Authorization': 'Bearer 1234567890'}"
                        )
                with gr.Tab("Stdio based servers"):
                    with gr.Group():
                        gr.Markdown("#### Server 1")
                        server1_command = gr.Textbox(
                            label="Command",
                            placeholder="npx",
                            info="The command to use to run the MCP server, docker, npm, python, etc."
                        )
                        server1_args = gr.Textbox(
                            label="Arguments",
                            placeholder="['-y', '@modelcontextprotocol/server-memory']",
                            info="The arguments to use to run the MCP server"
                        )
                        # NOTE(review): this rebinds `server1_name`, shadowing the
                        # URL-tab textbox of the same name defined above — both
                        # `server1_name` references in init_btn's inputs below will
                        # point at this stdio textbox. Likely needs a distinct name
                        # (e.g. server1_stdio_name) — confirm intended behavior.
                        server1_name = gr.Textbox(
                            label="Server Name",
                            placeholder="server_1",
                            info="A friendly name for your MCP server"
                        )
            # Server management buttons
            with gr.Row():
                add_server_btn = gr.Button("+ Add Server", variant="secondary", size="sm")
                remove_server_btn = gr.Button("- Remove Server", variant="secondary", size="sm", interactive=False)

            # Track current server count
            server_count_state = gr.State(1)

            init_btn = gr.Button("Initialize Agent", variant="primary", size="lg")
            init_status = gr.Textbox(label="Status", interactive=False, max_lines=3)

    # Main chat area
    chat_column = gr.Column(scale=2)
    with chat_column:
        with gr.Row():
            gr.Markdown("## 💬 Chat with your MCP Agent")
            config_status = gr.Markdown("", visible=False) # Status when sidebar is collapsed

        # Hidden until the agent is successfully initialized.
        chat_interface = gr.Column(visible=False)
        with chat_interface:
            chatbot = gr.Chatbot(
                label="Conversation",
                height=500,
                show_copy_button=True,
                avatar_images=("👤", "🤖")
            )

            with gr.Row():
                msg = gr.Textbox(
                    label="Message",
                    placeholder="Type your message here...",
                    scale=4,
                    lines=2
                )
                send_btn = gr.Button("Send", variant="primary", scale=1)

            error_display = gr.Textbox(
                label="Error Messages",
                visible=False,
                interactive=False
            )

            with gr.Row():
                reset_btn = gr.Button("Reset Conversation", variant="secondary")
                disconnect_btn = gr.Button("Disconnect Agent", variant="secondary")

        # Placeholder when agent is not initialized
        placeholder = gr.Markdown(
            "### 👋 Welcome!\n\nPlease configure and initialize your MCP Agent using the sidebar to start chatting.",
            visible=True
        )


    # Server management event handlers
    add_server_btn.click(
        fn=add_server,
        inputs=[server_count_state],
        outputs=[server_count_state, server2_group, server3_group, add_server_btn, remove_server_btn]
    )

    remove_server_btn.click(
        fn=remove_server,
        inputs=[server_count_state],
        outputs=[server_count_state, server2_group, server3_group, add_server_btn, remove_server_btn]
    )

    # Event handlers
    # NOTE(review): initialize_agent consumes these inputs in strides of 4
    # (url, name, type, headers); the trailing stdio triple
    # (server1_command, server1_args, server1_name) does not fit that shape and
    # is silently dropped by its `i + 3 < len(...)` guard — confirm whether
    # stdio servers were meant to be wired through here.
    init_btn.click(
        fn=initialize_agent_wrapper,
        inputs=[
            openai_key,
            server1_url, server1_name, server1_type, server1_headers,
            server2_url, server2_name, server2_type, server2_headers,
            server3_url, server3_name, server3_type, server3_headers,
            server1_command, server1_args, server1_name
        ],
        outputs=[chat_interface, placeholder, init_status]
    )

    # Chat functionality
    def handle_chat(message):
        """Guard empty input, then delegate to chat_wrapper."""
        if not message or not message.strip():
            return app_instance.chat_history, "Please provide a message.", ""
        return chat_wrapper(message)

    send_btn.click(
        fn=handle_chat,
        inputs=[msg],
        outputs=[chatbot, error_display, msg]
    ).then(
        # Only show the error textbox when there is an error message.
        lambda error: gr.update(visible=bool(error)),
        inputs=[error_display],
        outputs=[error_display]
    )

    msg.submit(
        fn=handle_chat,
        inputs=[msg],
        outputs=[chatbot, error_display, msg]
    ).then(
        lambda error: gr.update(visible=bool(error)),
        inputs=[error_display],
        outputs=[error_display]
    )

    reset_btn.click(
        fn=reset_wrapper,
        outputs=[chatbot, error_display]
    ).then(
        lambda error: gr.update(visible=bool(error)),
        inputs=[error_display],
        outputs=[error_display]
    )

    disconnect_btn.click(
        fn=disconnect_wrapper,
        outputs=[chatbot, chat_interface, placeholder, init_status]
    )
|
| 447 |
+
|
| 448 |
+
if __name__ == "__main__":
    # BUG FIX: bind to all interfaces so the app is reachable from outside the
    # container — the Dockerfile EXPOSEs 7860, and binding 127.0.0.1 makes the
    # server unreachable through Docker's port mapping.
    demo.launch(
        server_name="0.0.0.0",
        server_port=7860,
        share=False,
        show_error=True
    )
|
src/mcp_agent/__init__.py
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
# Empty file to make the directory a package.
|
src/mcp_agent/agent.py
ADDED
|
@@ -0,0 +1,193 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from pydantic_ai import Agent, RunContext, format_as_xml
|
| 4 |
+
|
| 5 |
+
from pydantic_ai.models.openai import OpenAIModel
|
| 6 |
+
from pydantic_ai.providers.openai import OpenAIProvider
|
| 7 |
+
from pydantic_ai.mcp import MCPServerStreamableHTTP, MCPServerSSE, MCPServerStdio
|
| 8 |
+
from dataclasses import dataclass
|
| 9 |
+
from datetime import datetime
|
| 10 |
+
from pydantic import Field
|
| 11 |
+
import json
|
| 12 |
+
from pydantic_ai.messages import (
|
| 13 |
+
ModelMessage,
|
| 14 |
+
FinalResultEvent,
|
| 15 |
+
FunctionToolCallEvent,
|
| 16 |
+
FunctionToolResultEvent,
|
| 17 |
+
PartDeltaEvent,
|
| 18 |
+
PartStartEvent,
|
| 19 |
+
TextPartDelta,
|
| 20 |
+
ToolCallPartDelta,
|
| 21 |
+
)
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
@dataclass
class Api_keys:
    # Mapping of provider key name -> API key string,
    # e.g. {'openai_api_key': 'sk-...'} (the only key MCP_Agent reads).
    api_keys: dict
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
@dataclass
class Message_state:
    # Full pydantic-ai message history carried across MCP_Agent.chat() calls
    # and cleared by MCP_Agent.reset().
    messages: list[ModelMessage]
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
class MCP_Agent:
|
| 39 |
+
def __init__(self, api_keys:dict, mpc_server_urls:list = [], mpc_stdio_commands:list = []):
|
| 40 |
+
"""
|
| 41 |
+
Args:
|
| 42 |
+
|
| 43 |
+
api_keys (dict): The API keys to use as a dictionary
|
| 44 |
+
mpc_server_urls (list): The list of dicts containing the url and the name
|
| 45 |
+
of the mpc server and the type of connection, and the bearer token if necessary
|
| 46 |
+
example:
|
| 47 |
+
[
|
| 48 |
+
{
|
| 49 |
+
'url': 'http://localhost:8000',
|
| 50 |
+
'name': 'mcp_server_1',
|
| 51 |
+
'type': 'http','SSE'
|
| 52 |
+
'headers': {'Authorization': 'Bearer', '1234567890'} #optional or None
|
| 53 |
+
}
|
| 54 |
+
]
|
| 55 |
+
mpc_stdio_commands (list): The list of commands to use with the stdio mpc server
|
| 56 |
+
example:
|
| 57 |
+
[
|
| 58 |
+
{
|
| 59 |
+
'name': 'memory',
|
| 60 |
+
'command': 'npx', 'docker', 'npm', 'python'
|
| 61 |
+
'args': ['-y', '@modelcontextprotocol/server-memory']
|
| 62 |
+
}
|
| 63 |
+
]
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
"""
|
| 67 |
+
|
| 68 |
+
self.api_keys=Api_keys(api_keys=api_keys)
|
| 69 |
+
|
| 70 |
+
self.mpc_server_urls = mpc_server_urls
|
| 71 |
+
self.mpc_stdio_commands = mpc_stdio_commands
|
| 72 |
+
# tools
|
| 73 |
+
self.llms={'mcp_llm':OpenAIModel('gpt-4.1-mini',provider=OpenAIProvider(api_key=self.api_keys.api_keys['openai_api_key']))}
|
| 74 |
+
|
| 75 |
+
|
| 76 |
+
#mpc servers
|
| 77 |
+
self.mpc_servers=[]
|
| 78 |
+
for mpc_server_url in self.mpc_server_urls:
|
| 79 |
+
if mpc_server_url['type'] == 'http':
|
| 80 |
+
if mpc_server_url['headers'] is not None:
|
| 81 |
+
self.mpc_servers.append(MCPServerStreamableHTTP(url=mpc_server_url['url'], headers=mpc_server_url['headers']))
|
| 82 |
+
else:
|
| 83 |
+
self.mpc_servers.append(MCPServerStreamableHTTP(mpc_server_url['url']))
|
| 84 |
+
elif mpc_server_url['type'] == 'SSE':
|
| 85 |
+
if mpc_server_url['headers'] is not None:
|
| 86 |
+
self.mpc_servers.append(MCPServerSSE(url=mpc_server_url['url'], headers=mpc_server_url['headers']))
|
| 87 |
+
else:
|
| 88 |
+
self.mpc_servers.append(MCPServerSSE(mpc_server_url['url']))
|
| 89 |
+
for mpc_stdio_command in self.mpc_stdio_commands:
|
| 90 |
+
self.mpc_servers.append(MCPServerStdio(command=mpc_stdio_command['command'], args=mpc_stdio_command['args']))
|
| 91 |
+
|
| 92 |
+
self._mcp_context_manager = None
|
| 93 |
+
self._is_connected = False
|
| 94 |
+
#agent
|
| 95 |
+
|
| 96 |
+
self.agent=Agent(self.llms['mcp_llm'],tools=[], mcp_servers=self.mpc_servers, instructions="you are a helpful assistant that can help with a wide range of tasks,\
|
| 97 |
+
you have the current time and the user query, you can use the tools provided to you if necessary to help the user with their queries, ask how you can help the user, sometimes the user will ask you not to use the tools, in this case you should not use the tools")
|
| 98 |
+
self.memory=Message_state(messages=[])
|
| 99 |
+
|
| 100 |
+
|
| 101 |
+
async def connect(self):
|
| 102 |
+
"""Establish persistent connection to MCP server"""
|
| 103 |
+
if not self._is_connected:
|
| 104 |
+
self._mcp_context_manager = self.agent.run_mcp_servers()
|
| 105 |
+
await self._mcp_context_manager.__aenter__()
|
| 106 |
+
self._is_connected = True
|
| 107 |
+
return "Connected to MCP server"
|
| 108 |
+
|
| 109 |
+
async def disconnect(self):
|
| 110 |
+
"""Close the MCP server connection"""
|
| 111 |
+
if self._is_connected and self._mcp_context_manager:
|
| 112 |
+
await self._mcp_context_manager.__aexit__(None, None, None)
|
| 113 |
+
self._is_connected = False
|
| 114 |
+
self._mcp_context_manager = None
|
| 115 |
+
return "Disconnected from MCP server"
|
| 116 |
+
async def chat(self, query:any):
|
| 117 |
+
"""
|
| 118 |
+
# Chat Function Documentation
|
| 119 |
+
|
| 120 |
+
This function enables interaction with the user through various types of input.
|
| 121 |
+
|
| 122 |
+
## Parameters
|
| 123 |
+
|
| 124 |
+
- `query`: The input to process. Can be one of the following types:
|
| 125 |
+
- String: Direct text input passed to the agent
|
| 126 |
+
- Binary content: Special format for media files (see below)
|
| 127 |
+
|
| 128 |
+
## Binary Content Types
|
| 129 |
+
|
| 130 |
+
The function supports different types of media through `BinaryContent` objects:
|
| 131 |
+
|
| 132 |
+
### Audio
|
| 133 |
+
```python
|
| 134 |
+
agent.chat([
|
| 135 |
+
'optional string message',
|
| 136 |
+
BinaryContent(data=audio, media_type='audio/wav')
|
| 137 |
+
])
|
| 138 |
+
```
|
| 139 |
+
|
| 140 |
+
### PDF Files
|
| 141 |
+
```python
|
| 142 |
+
agent.chat([
|
| 143 |
+
'optional string message',
|
| 144 |
+
BinaryContent(data=pdf_path.read_bytes(), media_type='application/pdf')
|
| 145 |
+
])
|
| 146 |
+
```
|
| 147 |
+
|
| 148 |
+
### Images
|
| 149 |
+
```python
|
| 150 |
+
agent.chat([
|
| 151 |
+
'optional string message',
|
| 152 |
+
BinaryContent(data=image_response.content, media_type='image/png')
|
| 153 |
+
])
|
| 154 |
+
```
|
| 155 |
+
|
| 156 |
+
## Returns
|
| 157 |
+
|
| 158 |
+
- `Agent_output`: as a pydantic object, the ui_version and voice_version are the two fields of the object
|
| 159 |
+
|
| 160 |
+
## Extra Notes
|
| 161 |
+
The message_history of Agent can be accessed using the following code:
|
| 162 |
+
```python
|
| 163 |
+
|
| 164 |
+
agent.memory.messages
|
| 165 |
+
```
|
| 166 |
+
"""
|
| 167 |
+
if not self._is_connected:
|
| 168 |
+
await self.connect()
|
| 169 |
+
|
| 170 |
+
result=await self.agent.run(query, message_history=self.memory.messages)
|
| 171 |
+
self.memory.messages=result.all_messages()
|
| 172 |
+
return result.output
|
| 173 |
+
|
| 174 |
+
|
| 175 |
+
|
| 176 |
+
def reset(self):
|
| 177 |
+
"""
|
| 178 |
+
Resets the Agent to its initial state.
|
| 179 |
+
|
| 180 |
+
Returns:
|
| 181 |
+
str: A confirmation message indicating that the agent has been reset.
|
| 182 |
+
"""
|
| 183 |
+
self.memory.messages=[]
|
| 184 |
+
return f'Agent has been reset'
|
| 185 |
+
|
| 186 |
+
async def __aenter__(self):
|
| 187 |
+
"""Async context manager entry"""
|
| 188 |
+
await self.connect()
|
| 189 |
+
return self
|
| 190 |
+
|
| 191 |
+
async def __aexit__(self, exc_type, exc_val, exc_tb):
|
| 192 |
+
"""Async context manager exit"""
|
| 193 |
+
await self.disconnect()
|
uv.lock
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|