akseljoonas HF Staff commited on
Commit
ecacd30
·
1 Parent(s): 0127ea2

a working mcp integration

Browse files
agent/README.md CHANGED
@@ -19,6 +19,5 @@ context_manager/
19
  └── manager.py # Message history management
20
 
21
  config.py # Config with model_name + tools
22
- tools/ # Tool implementations (todo)
23
  utils/ # Logging, etc
24
  ```
 
19
  └── manager.py # Message history management
20
 
21
  config.py # Config with model_name + tools
 
22
  utils/ # Logging, etc
23
  ```
agent/config.py CHANGED
@@ -1,14 +1,18 @@
 
 
 
 
 
 
 
 
 
 
1
  from litellm import Tool
2
  from pydantic import BaseModel
3
 
4
-
5
- class MCPServerConfig(BaseModel):
6
- """Configuration for an MCP server"""
7
-
8
- name: str
9
- command: str
10
- args: list[str] = []
11
- env: dict[str, str] | None = None
12
 
13
 
14
  class Config(BaseModel):
@@ -17,10 +21,58 @@ class Config(BaseModel):
17
  model_name: str
18
  tools: list[Tool] = []
19
  system_prompt_path: str = ""
20
- mcp_servers: list[MCPServerConfig] = []
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
21
 
22
 
23
  def load_config(config_path: str = "config.json") -> Config:
24
- """Load configuration from file"""
 
 
 
 
 
 
 
 
25
  with open(config_path, "r") as f:
26
- return Config.model_validate_json(f.read())
 
 
 
 
1
+ import json
2
+ import os
3
+ import re
4
+ from typing import Any, Union
5
+
6
+ from dotenv import load_dotenv
7
+ from fastmcp.mcp_config import (
8
+ RemoteMCPServer,
9
+ StdioMCPServer,
10
+ )
11
  from litellm import Tool
12
  from pydantic import BaseModel
13
 
14
+ # These two are the canonical server config types for MCP servers.
15
+ MCPServerConfig = Union[StdioMCPServer, RemoteMCPServer]
 
 
 
 
 
 
16
 
17
 
18
  class Config(BaseModel):
 
21
  model_name: str
22
  tools: list[Tool] = []
23
  system_prompt_path: str = ""
24
+ mcpServers: dict[str, MCPServerConfig] = {}
25
+
26
+
27
def substitute_env_vars(obj: Any) -> Any:
    """
    Recursively substitute environment variables in any data structure.

    Supports ${VAR_NAME} syntax for required variables and
    ${VAR_NAME:-default} for optional ones with a fallback value.

    Raises:
        ValueError: if a required variable (no default) is not set.
    """
    # Recurse into containers first; only strings are actually expanded.
    if isinstance(obj, dict):
        return {key: substitute_env_vars(val) for key, val in obj.items()}
    if isinstance(obj, list):
        return [substitute_env_vars(item) for item in obj]
    if not isinstance(obj, str):
        return obj

    def _expand(match) -> str:
        # group(1): variable name, group(2): the '-' marking a default,
        # group(3): the default value text (may be empty).
        name = match.group(1)
        dash = match.group(2)
        default = match.group(3) if dash is not None else None

        value = os.environ.get(name)
        if value is not None:
            return value
        if dash is not None:
            return default or ""
        raise ValueError(
            f"Environment variable '{name}' is not set. "
            f"Add it to your .env file."
        )

    return re.sub(r"\$\{([^}:]+)(?::(-)?([^}]*))?\}", _expand, obj)
62
 
63
 
64
def load_config(config_path: str = "config.json") -> Config:
    """
    Load configuration with environment variable substitution.

    Use ${VAR_NAME} in your JSON for any secret.
    Automatically loads from .env file.
    """
    # Pull secrets from a local .env file into the process environment
    # before substitution so ${VAR} references can resolve.
    load_dotenv()

    with open(config_path, "r") as f:
        raw_config = json.load(f)

    resolved = substitute_env_vars(raw_config)
    return Config.model_validate(resolved)
agent/config_mcp_example.json CHANGED
@@ -2,12 +2,20 @@
2
  "model_name": "anthropic/claude-sonnet-4-5-20250929",
3
  "tools": [],
4
  "system_prompt_path": "",
5
- "mcp_servers": [
6
- {
7
- "name": "weather",
8
- "command": "python",
9
- "args": ["path/to/your/weather_server.py"],
10
- "env": null
 
 
 
 
 
 
 
 
11
  }
12
- ]
13
- }
 
2
  "model_name": "anthropic/claude-sonnet-4-5-20250929",
3
  "tools": [],
4
  "system_prompt_path": "",
5
+ "mcpServers": {
6
+ "hf-mcp-server": {
7
+ "transport": "http",
8
+ "url": "https://huggingface.co/mcp?login",
9
+ "headers": {
10
+ "Authorization": "Bearer ${HF_TOKEN}"
11
+ }
12
+ },
13
+ "playwright": {
14
+ "transport": "stdio",
15
+ "command": "npx",
16
+ "args": [
17
+ "@playwright/mcp@latest"
18
+ ]
19
  }
20
+ }
21
+ }
agent/core/__init__.py CHANGED
@@ -3,12 +3,9 @@ Core agent implementation
3
  Contains the main agent logic, decision-making, and orchestration
4
  """
5
 
6
- from agent.core.mcp_client import McpClient, McpConnectionManager
7
  from agent.core.tools import ToolRouter, ToolSpec, create_builtin_tools
8
 
9
  __all__ = [
10
- "McpClient",
11
- "McpConnectionManager",
12
  "ToolRouter",
13
  "ToolSpec",
14
  "create_builtin_tools",
 
3
  Contains the main agent logic, decision-making, and orchestration
4
  """
5
 
 
6
  from agent.core.tools import ToolRouter, ToolSpec, create_builtin_tools
7
 
8
  __all__ = [
 
 
9
  "ToolRouter",
10
  "ToolSpec",
11
  "create_builtin_tools",
agent/core/agent_loop.py CHANGED
@@ -9,6 +9,7 @@ from litellm import ChatCompletionMessageToolCall, Message, ModelResponse, acomp
9
 
10
  from agent.config import Config
11
  from agent.core.session import Event, OpType, Session
 
12
 
13
  ToolCall = ChatCompletionMessageToolCall
14
 
@@ -54,13 +55,18 @@ class Handlers:
54
  assistant_msg = Message(role="assistant", content=content)
55
  session.context_manager.add_message(assistant_msg)
56
  await session.send_event(
57
- Event(event_type="assistant_message", data={"content": content})
 
 
 
58
  )
59
  break
60
 
61
  # Add assistant message with tool calls to history
62
  # LiteLLM will format this correctly for the provider
63
- assistant_msg = Message(role="assistant", content=content, tool_calls=tool_calls)
 
 
64
  session.context_manager.add_message(assistant_msg)
65
 
66
  if content:
@@ -80,7 +86,7 @@ class Handlers:
80
  )
81
  )
82
 
83
- output, success = await session.tool_router.execute_tool(
84
  tool_name, tool_args
85
  )
86
 
@@ -98,8 +104,7 @@ class Handlers:
98
  event_type="tool_output",
99
  data={
100
  "tool": tool_name,
101
- "output": output[:200]
102
- + ("..." if len(output) > 200 else ""),
103
  "success": success,
104
  },
105
  )
@@ -108,8 +113,13 @@ class Handlers:
108
  iteration += 1
109
 
110
  except Exception as e:
 
 
111
  await session.send_event(
112
- Event(event_type="error", data={"error": str(e)})
 
 
 
113
  )
114
  break
115
 
@@ -195,58 +205,26 @@ async def process_submission(session: Session, submission) -> bool:
195
  async def submission_loop(
196
  submission_queue: asyncio.Queue,
197
  event_queue: asyncio.Queue,
198
- tool_router=None,
199
  config: Config | None = None,
 
200
  ) -> None:
201
  """
202
  Main agent loop - processes submissions and dispatches to handlers.
203
  This is the core of the agent (like submission_loop in codex.rs:1259-1340)
204
  """
205
- # Import here to avoid circular imports
206
- from agent.core.mcp_client import McpConnectionManager
207
- from agent.core.tools import ToolRouter, create_builtin_tools
208
-
209
- # Initialize MCP and tools
210
- if tool_router is None:
211
- mcp_manager = McpConnectionManager()
212
-
213
- # Add MCP servers from config
214
- if config and config.mcp_servers:
215
- print("🔌 Initializing MCP connections...")
216
- for server_config in config.mcp_servers:
217
- try:
218
- await mcp_manager.add_server(
219
- server_name=server_config.name,
220
- command=server_config.command,
221
- args=server_config.args,
222
- env=server_config.env,
223
- )
224
- except Exception as e:
225
- print(
226
- f"⚠️ Failed to connect to MCP server {server_config.name}: {e}"
227
- )
228
-
229
- # Create tool router
230
- tool_router = ToolRouter(mcp_manager)
231
-
232
- # Register built-in tools
233
- for tool in create_builtin_tools():
234
- tool_router.register_tool(tool)
235
-
236
- # Register MCP tools
237
- tool_router.register_mcp_tools()
238
-
239
- print(f"📦 Registered {len(tool_router.tools)} tools:")
240
- for tool_name in tool_router.tools.keys():
241
- print(f" - {tool_name}")
242
 
243
  # Create session and assign tool router
244
  session = Session(event_queue, config=config)
245
  session.tool_router = tool_router
246
  print("🤖 Agent loop started")
247
 
248
- try:
249
- # Main processing loop
 
 
 
 
 
250
  while session.is_running:
251
  submission = await submission_queue.get()
252
 
@@ -261,9 +239,5 @@ async def submission_loop(
261
  await session.send_event(
262
  Event(event_type="error", data={"error": str(e)})
263
  )
264
- finally:
265
- # Cleanup MCP connections
266
- if hasattr(tool_router, "mcp_manager") and tool_router.mcp_manager:
267
- await tool_router.mcp_manager.shutdown_all()
268
 
269
  print("🛑 Agent loop exited")
 
9
 
10
  from agent.config import Config
11
  from agent.core.session import Event, OpType, Session
12
+ from agent.core.tools import ToolRouter
13
 
14
  ToolCall = ChatCompletionMessageToolCall
15
 
 
55
  assistant_msg = Message(role="assistant", content=content)
56
  session.context_manager.add_message(assistant_msg)
57
  await session.send_event(
58
+ Event(
59
+ event_type="assistant_message",
60
+ data={"content": content},
61
+ )
62
  )
63
  break
64
 
65
  # Add assistant message with tool calls to history
66
  # LiteLLM will format this correctly for the provider
67
+ assistant_msg = Message(
68
+ role="assistant", content=content, tool_calls=tool_calls
69
+ )
70
  session.context_manager.add_message(assistant_msg)
71
 
72
  if content:
 
86
  )
87
  )
88
 
89
+ output, success = await session.tool_router.call_tool(
90
  tool_name, tool_args
91
  )
92
 
 
104
  event_type="tool_output",
105
  data={
106
  "tool": tool_name,
107
+ "output": output,
 
108
  "success": success,
109
  },
110
  )
 
113
  iteration += 1
114
 
115
  except Exception as e:
116
+ import traceback
117
+
118
  await session.send_event(
119
+ Event(
120
+ event_type="error",
121
+ data={"error": str(e) + "\n" + traceback.format_exc()},
122
+ )
123
  )
124
  break
125
 
 
205
  async def submission_loop(
206
  submission_queue: asyncio.Queue,
207
  event_queue: asyncio.Queue,
 
208
  config: Config | None = None,
209
+ tool_router: ToolRouter | None = None,
210
  ) -> None:
211
  """
212
  Main agent loop - processes submissions and dispatches to handlers.
213
  This is the core of the agent (like submission_loop in codex.rs:1259-1340)
214
  """
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
215
 
216
  # Create session and assign tool router
217
  session = Session(event_queue, config=config)
218
  session.tool_router = tool_router
219
  print("🤖 Agent loop started")
220
 
221
+ # Main processing loop
222
+ async with tool_router:
223
+ # Emit ready event after initialization
224
+ await session.send_event(
225
+ Event(event_type="ready", data={"message": "Agent initialized"})
226
+ )
227
+
228
  while session.is_running:
229
  submission = await submission_queue.get()
230
 
 
239
  await session.send_event(
240
  Event(event_type="error", data={"error": str(e)})
241
  )
 
 
 
 
242
 
243
  print("🛑 Agent loop exited")
agent/core/mcp_client.py DELETED
@@ -1,164 +0,0 @@
1
- """
2
- MCP (Model Context Protocol) client integration for the agent
3
- Based on the official MCP SDK implementation
4
- """
5
-
6
- import os
7
- from contextlib import AsyncExitStack
8
- from typing import Any, Optional
9
-
10
- from mcp import ClientSession, StdioServerParameters, types
11
- from mcp.client.stdio import stdio_client
12
-
13
-
14
- class McpClient:
15
- """
16
- Client for connecting to MCP servers using the official MCP SDK.
17
- Based on codex-rs/core/src/mcp_connection_manager.rs
18
- """
19
-
20
- def __init__(
21
- self,
22
- server_name: str,
23
- command: str,
24
- args: list[str] | None = None,
25
- env: dict[str, str] | None = None,
26
- ):
27
- self.server_name = server_name
28
- self.command = command
29
- self.args = args or []
30
- self.env = env or {}
31
- self.session: Optional[ClientSession] = None
32
- self.tools: dict[str, dict[str, Any]] = {}
33
- self.exit_stack = AsyncExitStack()
34
-
35
- async def start(self) -> None:
36
- """Start the MCP server connection using official SDK"""
37
- # Merge environment variables
38
- full_env = {**dict(os.environ), **self.env} if self.env else None
39
-
40
- # Create server parameters
41
- server_params = StdioServerParameters(
42
- command=self.command,
43
- args=self.args,
44
- env=full_env,
45
- )
46
-
47
- # Connect using stdio_client
48
- read, write = await self.exit_stack.enter_async_context(
49
- stdio_client(server_params)
50
- )
51
-
52
- # Create session
53
- self.session = await self.exit_stack.enter_async_context(
54
- ClientSession(read, write)
55
- )
56
-
57
- # Initialize
58
- await self.session.initialize()
59
-
60
- # List available tools
61
- tools_result = await self.session.list_tools()
62
- for tool in tools_result.tools:
63
- qualified_name = f"mcp__{self.server_name}__{tool.name}"
64
- self.tools[qualified_name] = {
65
- "name": tool.name,
66
- "description": tool.description or "",
67
- "inputSchema": tool.inputSchema,
68
- }
69
-
70
- async def call_tool(
71
- self, tool_name: str, arguments: dict[str, Any]
72
- ) -> tuple[str, bool]:
73
- """Execute a tool on the MCP server"""
74
- if not self.session:
75
- return "Client not connected", False
76
-
77
- # Strip the mcp__servername__ prefix to get the actual tool name
78
- actual_tool_name = tool_name.split("__")[-1]
79
-
80
- try:
81
- result = await self.session.call_tool(actual_tool_name, arguments)
82
-
83
- # Extract text from content
84
- text_parts = []
85
- for content in result.content:
86
- if isinstance(content, types.TextContent):
87
- text_parts.append(content.text)
88
- elif isinstance(content, types.ImageContent):
89
- text_parts.append(f"[Image: {content.mimeType}]")
90
- elif isinstance(content, types.EmbeddedResource):
91
- text_parts.append(f"[Resource: {content.resource}]")
92
-
93
- output = "\n".join(text_parts) if text_parts else str(result.content)
94
- success = not result.isError
95
-
96
- return output, success
97
- except Exception as e:
98
- return f"Tool call failed: {str(e)}", False
99
-
100
- def get_tools(self) -> dict[str, dict[str, Any]]:
101
- """Get all available tools from this server"""
102
- return self.tools.copy()
103
-
104
- async def shutdown(self) -> None:
105
- """Shutdown the MCP server connection"""
106
- await self.exit_stack.aclose()
107
-
108
-
109
- class McpConnectionManager:
110
- """
111
- Manages multiple MCP server connections.
112
- Based on codex-rs/core/src/mcp_connection_manager.rs
113
- """
114
-
115
- def __init__(self):
116
- self.clients: dict[str, McpClient] = {}
117
-
118
- async def add_server(
119
- self,
120
- server_name: str,
121
- command: str,
122
- args: list[str] | None = None,
123
- env: dict[str, str] | None = None,
124
- ) -> bool:
125
- """Add and start an MCP server"""
126
- try:
127
- client = McpClient(server_name, command, args, env)
128
- await client.start()
129
- self.clients[server_name] = client
130
- print(
131
- f"✅ MCP server '{server_name}' connected with {len(client.tools)} tools"
132
- )
133
- return True
134
- except Exception as e:
135
- print(f"❌ Failed to start MCP server '{server_name}': {e}")
136
- return False
137
-
138
- def list_all_tools(self) -> dict[str, dict[str, Any]]:
139
- """Aggregate tools from all connected servers"""
140
- all_tools = {}
141
- for client in self.clients.values():
142
- all_tools.update(client.get_tools())
143
- return all_tools
144
-
145
- async def call_tool(
146
- self, tool_name: str, arguments: dict[str, Any]
147
- ) -> tuple[str, bool]:
148
- """Route tool call to the appropriate MCP server"""
149
- # Extract server name from qualified tool name: mcp__servername__toolname
150
- if tool_name.startswith("mcp__"):
151
- parts = tool_name.split("__")
152
- if len(parts) >= 3:
153
- server_name = parts[1]
154
- if server_name in self.clients:
155
- return await self.clients[server_name].call_tool(
156
- tool_name, arguments
157
- )
158
-
159
- return "Unknown MCP tool", False
160
-
161
- async def shutdown_all(self) -> None:
162
- """Shutdown all MCP servers"""
163
- for client in self.clients.values():
164
- await client.shutdown()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
agent/core/tools.py CHANGED
@@ -4,10 +4,67 @@ Provides ToolSpec and ToolRouter for managing both built-in and MCP tools
4
  """
5
 
6
  import subprocess
 
7
  from dataclasses import dataclass
8
  from typing import Any, Awaitable, Callable, Optional
9
 
10
- from agent.core.mcp_client import McpConnectionManager
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
11
 
12
 
13
  @dataclass
@@ -26,30 +83,34 @@ class ToolRouter:
26
  Based on codex-rs/core/src/tools/router.rs
27
  """
28
 
29
- def __init__(self, mcp_manager: Optional[McpConnectionManager] = None):
30
  self.tools: dict[str, ToolSpec] = {}
31
- self.mcp_manager = mcp_manager
32
-
33
- def register_tool(self, spec: ToolSpec) -> None:
34
- """Register a tool with its handler"""
35
- self.tools[spec.name] = spec
36
-
37
- def register_mcp_tools(self) -> None:
38
- """Register all MCP tools from the connection manager"""
39
- if not self.mcp_manager:
40
- return
41
-
42
- mcp_tools = self.mcp_manager.list_all_tools()
43
- for tool_name, tool_def in mcp_tools.items():
44
- spec = ToolSpec(
45
- name=tool_name,
46
- description=tool_def.get("description", ""),
47
- parameters=tool_def.get(
48
- "inputSchema", {"type": "object", "properties": {}}
49
- ),
50
- handler=None, # MCP tools use the manager
 
 
 
 
 
51
  )
52
- self.tools[tool_name] = spec
53
 
54
  def get_tool_specs_for_llm(self) -> list[dict[str, Any]]:
55
  """Get tool specifications in OpenAI format"""
@@ -67,24 +128,42 @@ class ToolRouter:
67
  )
68
  return specs
69
 
70
- async def execute_tool(
71
- self, tool_name: str, arguments: dict[str, Any]
72
- ) -> tuple[str, bool]:
73
- """Execute a tool by name"""
74
- if tool_name not in self.tools:
75
- return f"Unknown tool: {tool_name}", False
 
 
76
 
77
- tool = self.tools[tool_name]
 
 
 
78
 
79
- # MCP tool
80
- if tool_name.startswith("mcp__") and self.mcp_manager:
81
- return await self.mcp_manager.call_tool(tool_name, arguments)
 
 
82
 
83
- # Built-in tool with handler
84
- if tool.handler:
 
 
 
 
85
  return await tool.handler(arguments)
86
 
87
- return "Tool has no handler", False
 
 
 
 
 
 
 
88
 
89
 
90
  # ============================================================================
 
4
  """
5
 
6
  import subprocess
7
+ import warnings
8
  from dataclasses import dataclass
9
  from typing import Any, Awaitable, Callable, Optional
10
 
11
+ from fastmcp import Client
12
+ from mcp.types import EmbeddedResource, ImageContent, TextContent
13
+
14
+ from agent.config import MCPServerConfig
15
+
16
+ # Suppress aiohttp deprecation warning
17
+ warnings.filterwarnings(
18
+ "ignore", category=DeprecationWarning, module="aiohttp.connector"
19
+ )
20
+
21
+
22
def convert_mcp_content_to_string(content: list) -> str:
    """
    Convert MCP content blocks to a string format compatible with LLM messages.

    Based on FastMCP documentation, content can be:
    - TextContent: has .text field
    - ImageContent: has .data and .mimeType fields
    - EmbeddedResource: has .resource field with .text or .blob

    Args:
        content: List of MCP content blocks

    Returns:
        String representation of the content suitable for LLM consumption
    """
    if not content:
        return ""

    rendered = []
    for block in content:
        if isinstance(block, TextContent):
            # Plain text blocks pass through as-is.
            rendered.append(block.text)
        elif isinstance(block, ImageContent):
            # TODO: Handle images
            # For now, describe the image by its MIME type only.
            rendered.append(f"[Image: {block.mimeType}]")
        elif isinstance(block, EmbeddedResource):
            # TODO: Handle embedded resources
            # Prefer embedded text; otherwise describe binary/unknown data.
            res = block.resource
            if getattr(res, "text", None):
                rendered.append(res.text)
            elif getattr(res, "blob", None):
                rendered.append(
                    f"[Binary data: {res.mimeType if hasattr(res, 'mimeType') else 'unknown'}]"
                )
            else:
                rendered.append(
                    f"[Resource: {res.uri if hasattr(res, 'uri') else 'unknown'}]"
                )
        else:
            # Fallback: try to convert to string
            rendered.append(str(block))

    return "\n".join(rendered)
 
69
 
70
  @dataclass
 
83
  Based on codex-rs/core/src/tools/router.rs
84
  """
85
 
86
+ def __init__(self, mcp_servers: dict[str, MCPServerConfig]):
87
  self.tools: dict[str, ToolSpec] = {}
88
+ self.mcp_servers: dict[str, dict[str, Any]] = {}
89
+
90
+ for tool in create_builtin_tools():
91
+ self.register_tool(tool)
92
+
93
+ if mcp_servers:
94
+ mcp_servers_payload = {}
95
+ for name, server in mcp_servers.items():
96
+ mcp_servers_payload[name] = server.model_dump()
97
+ self.mcp_client = Client({"mcpServers": mcp_servers_payload})
98
+ self._mcp_initialized = False
99
+
100
+ def register_tool(self, tool: ToolSpec) -> None:
101
+ self.tools[tool.name] = tool
102
+
103
+ async def register_mcp_tools(self) -> None:
104
+ tools = await self.mcp_client.list_tools()
105
+ for tool in tools:
106
+ self.register_tool(
107
+ ToolSpec(
108
+ name=tool.name,
109
+ description=tool.description,
110
+ parameters=tool.inputSchema,
111
+ handler=None,
112
+ )
113
  )
 
114
 
115
  def get_tool_specs_for_llm(self) -> list[dict[str, Any]]:
116
  """Get tool specifications in OpenAI format"""
 
128
  )
129
  return specs
130
 
131
+ async def __aenter__(self) -> "ToolRouter":
132
+ if self.mcp_client is not None:
133
+ await self.mcp_client.__aenter__()
134
+ await self.mcp_client.initialize()
135
+ await self.register_mcp_tools()
136
+ self._mcp_initialized = True
137
+ print(f"MCP initialized: {self._mcp_initialized}")
138
+ return self
139
 
140
+ async def __aexit__(self, exc_type, exc, tb) -> None:
141
+ if self.mcp_client is not None:
142
+ await self.mcp_client.__aexit__(exc_type, exc, tb)
143
+ self._mcp_initialized = False
144
 
145
+ async def call_tool(
146
+ self, tool_name: str, arguments: dict[str, Any]
147
+ ) -> tuple[str, bool]:
148
+ """
149
+ Call a tool and return (output_string, success_bool).
150
 
151
+ For MCP tools, converts the CallToolResult content blocks to a string.
152
+ For built-in tools, calls their handler directly.
153
+ """
154
+ # Check if this is a built-in tool with a handler
155
+ tool = self.tools.get(tool_name)
156
+ if tool and tool.handler:
157
  return await tool.handler(arguments)
158
 
159
+ # Otherwise, use MCP client
160
+ if self._mcp_initialized:
161
+ result = await self.mcp_client.call_tool(tool_name, arguments)
162
+ # Convert MCP content blocks to string
163
+ output = convert_mcp_content_to_string(result.content)
164
+ return output, not result.is_error
165
+
166
+ return "MCP client not initialized", False
167
 
168
 
169
  # ============================================================================
agent/main.py CHANGED
@@ -4,11 +4,13 @@ Interactive CLI chat with the agent
4
 
5
  import asyncio
6
  from dataclasses import dataclass
 
7
  from typing import Any, Optional
8
 
9
- from agent.config import Config
10
  from agent.core.agent_loop import submission_loop
11
  from agent.core.session import OpType
 
12
 
13
 
14
  @dataclass
@@ -28,7 +30,9 @@ class Submission:
28
 
29
 
30
  async def event_listener(
31
- event_queue: asyncio.Queue, turn_complete_event: asyncio.Event
 
 
32
  ) -> None:
33
  """Background task that listens for events and displays them"""
34
  while True:
@@ -36,7 +40,10 @@ async def event_listener(
36
  event = await event_queue.get()
37
 
38
  # Display event
39
- if event.event_type == "assistant_message":
 
 
 
40
  content = event.data.get("content", "") if event.data else ""
41
  if content:
42
  print(f"\n🤖 Assistant: {content}")
@@ -54,7 +61,11 @@ async def event_listener(
54
  print("✅ Turn complete\n")
55
  turn_complete_event.set()
56
  elif event.event_type == "error":
57
- error = event.data.get("error", "Unknown error") if event.data else "Unknown error"
 
 
 
 
58
  print(f"❌ Error: {error}")
59
  turn_complete_event.set()
60
  elif event.event_type == "shutdown":
@@ -78,7 +89,6 @@ async def get_user_input() -> str:
78
 
79
  async def main():
80
  """Interactive chat with the agent"""
81
-
82
  print("=" * 60)
83
  print("🤖 Interactive Agent Chat")
84
  print("=" * 60)
@@ -88,30 +98,36 @@ async def main():
88
  submission_queue = asyncio.Queue()
89
  event_queue = asyncio.Queue()
90
 
91
- # Event to signal turn completion
92
  turn_complete_event = asyncio.Event()
93
- turn_complete_event.set() # Ready for first input
 
94
 
95
  # Start agent loop in background
 
 
 
 
 
 
 
96
  agent_task = asyncio.create_task(
97
  submission_loop(
98
  submission_queue,
99
  event_queue,
100
- config=Config(
101
- model_name="anthropic/claude-sonnet-4-5-20250929",
102
- tools=[],
103
- system_prompt_path="",
104
- ),
105
  )
106
  )
107
 
108
  # Start event listener in background
109
  listener_task = asyncio.create_task(
110
- event_listener(event_queue, turn_complete_event)
111
  )
112
 
113
  # Wait for agent to initialize
114
- await asyncio.sleep(0.2)
 
115
 
116
  submission_id = 0
117
 
 
4
 
5
  import asyncio
6
  from dataclasses import dataclass
7
+ from pathlib import Path
8
  from typing import Any, Optional
9
 
10
+ from agent.config import load_config
11
  from agent.core.agent_loop import submission_loop
12
  from agent.core.session import OpType
13
+ from agent.core.tools import ToolRouter
14
 
15
 
16
  @dataclass
 
30
 
31
 
32
  async def event_listener(
33
+ event_queue: asyncio.Queue,
34
+ turn_complete_event: asyncio.Event,
35
+ ready_event: asyncio.Event,
36
  ) -> None:
37
  """Background task that listens for events and displays them"""
38
  while True:
 
40
  event = await event_queue.get()
41
 
42
  # Display event
43
+ if event.event_type == "ready":
44
+ print("✅ Agent ready")
45
+ ready_event.set()
46
+ elif event.event_type == "assistant_message":
47
  content = event.data.get("content", "") if event.data else ""
48
  if content:
49
  print(f"\n🤖 Assistant: {content}")
 
61
  print("✅ Turn complete\n")
62
  turn_complete_event.set()
63
  elif event.event_type == "error":
64
+ error = (
65
+ event.data.get("error", "Unknown error")
66
+ if event.data
67
+ else "Unknown error"
68
+ )
69
  print(f"❌ Error: {error}")
70
  turn_complete_event.set()
71
  elif event.event_type == "shutdown":
 
89
 
90
  async def main():
91
  """Interactive chat with the agent"""
 
92
  print("=" * 60)
93
  print("🤖 Interactive Agent Chat")
94
  print("=" * 60)
 
98
  submission_queue = asyncio.Queue()
99
  event_queue = asyncio.Queue()
100
 
101
+ # Events to signal agent state
102
  turn_complete_event = asyncio.Event()
103
+ turn_complete_event.set()
104
+ ready_event = asyncio.Event()
105
 
106
  # Start agent loop in background
107
+ config_path = Path(__file__).parent / "config_mcp_example.json"
108
+ config = load_config(config_path)
109
+
110
+ # Create tool router
111
+ print(f"Config: {config.mcpServers}")
112
+ tool_router = ToolRouter(config.mcpServers)
113
+
114
  agent_task = asyncio.create_task(
115
  submission_loop(
116
  submission_queue,
117
  event_queue,
118
+ config=config,
119
+ tool_router=tool_router,
 
 
 
120
  )
121
  )
122
 
123
  # Start event listener in background
124
  listener_task = asyncio.create_task(
125
+ event_listener(event_queue, turn_complete_event, ready_event)
126
  )
127
 
128
  # Wait for agent to initialize
129
+ print("⏳ Initializing agent...")
130
+ await ready_event.wait()
131
 
132
  submission_id = 0
133
 
agent/tools/__init__.py DELETED
@@ -1,8 +0,0 @@
1
- """
2
- Agent tools and actions
3
- Tools are the actions the agent can take to interact with the environment
4
- """
5
-
6
- from agent.tools.base import BaseTool, ToolRegistry
7
-
8
- __all__ = ["BaseTool", "ToolRegistry"]
 
 
 
 
 
 
 
 
 
pyproject.toml CHANGED
@@ -14,5 +14,5 @@ dependencies = [
14
  "python-dotenv>=1.2.1",
15
  "datasets>=4.3.0",
16
  "huggingface-hub>=1.0.1",
17
- "mcp>=1.21.0",
18
  ]
 
14
  "python-dotenv>=1.2.1",
15
  "datasets>=4.3.0",
16
  "huggingface-hub>=1.0.1",
17
+ "fastmcp>=2.4.0",
18
  ]
uv.lock CHANGED
The diff for this file is too large to render. See raw diff