wolf1997 committed on
Commit
8274937
·
verified ·
1 Parent(s): 1e1f936

Upload 5 files

Browse files
Files changed (5) hide show
  1. .dockerignore +48 -0
  2. Dockerfile +34 -0
  3. agent.py +183 -0
  4. app.py +435 -0
  5. requirements.txt +4 -0
.dockerignore ADDED
@@ -0,0 +1,48 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Git files
2
+ .git
3
+ .gitignore
4
+
5
+ # Python cache files
6
+ __pycache__
7
+ *.pyc
8
+ *.pyo
9
+ *.pyd
10
+ .Python
11
+ env
12
+ pip-log.txt
13
+ pip-delete-this-directory.txt
14
+ .tox
15
+ .coverage
16
+ .coverage.*
17
+ .cache
18
+ nosetests.xml
19
+ coverage.xml
20
+ *.cover
21
+ *.log
22
+ .DS_Store
23
+
24
+ # Virtual environments
25
+ venv/
26
+ env/
27
+ ENV/
28
+ .venv/
29
+
30
+ # IDE files
31
+ .vscode/
32
+ .idea/
33
+ *.swp
34
+ *.swo
35
+ *~
36
+
37
+ # Documentation
38
+ README.md
39
+ *.md
40
+
41
+ # Jupyter notebooks (unless specifically needed)
42
+ notebooks/
43
+ *.ipynb
44
+
45
+ # Other unnecessary files
46
+ .pytest_cache/
47
+ .mypy_cache/
48
+ .gradio/
Dockerfile ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# Use Python 3.13.2 slim image for efficiency
FROM python:3.13.2-slim

# Set working directory
WORKDIR /app

# Set environment variables:
# - PYTHONUNBUFFERED: stream stdout/stderr immediately (container-friendly logs)
# - PYTHONDONTWRITEBYTECODE: skip .pyc files inside the image
ENV PYTHONUNBUFFERED=1
ENV PYTHONDONTWRITEBYTECODE=1

# Install system dependencies if needed
# NOTE(review): requirements.txt contains only plain PyPI pins, so git looks
# unused here — confirm nothing installs from a git URL before removing it.
RUN apt-get update && apt-get install -y \
    git \
    && rm -rf /var/lib/apt/lists/*

# Copy requirements first to leverage Docker layer caching
COPY requirements.txt .

# Install Python dependencies
RUN pip install --no-cache-dir --upgrade pip && \
    pip install --no-cache-dir -r requirements.txt

# Copy application files
COPY agent.py .
COPY app.py .

# Create directories that might be needed
RUN mkdir -p notebooks

# Expose the port that Gradio will run on (app.py launches on 7860)
EXPOSE 7860

# Set the command to run the application
CMD ["python", "app.py"]
agent.py ADDED
@@ -0,0 +1,183 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ from pydantic_ai import Agent, RunContext, format_as_xml
4
+
5
+ from pydantic_ai.models.openai import OpenAIModel
6
+ from pydantic_ai.providers.openai import OpenAIProvider
7
+ from pydantic_ai.mcp import MCPServerStreamableHTTP, MCPServerSSE
8
+ from dataclasses import dataclass
9
+ from datetime import datetime
10
+ from pydantic import Field
11
+ import json
12
+ from pydantic_ai.messages import (
13
+ ModelMessage,
14
+ FinalResultEvent,
15
+ FunctionToolCallEvent,
16
+ FunctionToolResultEvent,
17
+ PartDeltaEvent,
18
+ PartStartEvent,
19
+ TextPartDelta,
20
+ ToolCallPartDelta,
21
+ )
22
+
23
+
24
+
25
+
@dataclass
class Api_keys:
    # Provider API keys, e.g. {'openai_api_key': 'sk-...'} — MCP_Agent reads
    # the 'openai_api_key' entry.
    api_keys: dict
29
+
30
+
@dataclass
class Message_state:
    # Accumulated pydantic-ai conversation history; replayed as
    # message_history on every agent run.
    messages: list[ModelMessage]
34
+
35
+
36
+
37
+
38
class MCP_Agent:
    """A pydantic-ai Agent wired to zero or more MCP servers, with in-memory
    conversation history and explicit connect/disconnect lifecycle control."""

    def __init__(self, api_keys: dict, mpc_server_urls: list | None = None):
        """
        Args:
            api_keys (dict): API keys; must contain 'openai_api_key'.
            mpc_server_urls (list | None): list of dicts describing each MCP
                server: its url, name, connection type and an optional bearer
                token. Example:
                [
                    {
                        'url': 'http://localhost:8000',
                        'name': 'mcp_server_1',
                        'type': 'http',  # or 'SSE'
                        'bearer_token': '1234567890'  # optional or None
                    }
                ]
        """
        self.api_keys = Api_keys(api_keys=api_keys)
        # None sentinel instead of the original mutable [] default, which
        # would have been shared across all instances; copy defensively.
        self.mpc_server_urls = list(mpc_server_urls) if mpc_server_urls else []

        # The LLM backing the agent. (An unused GEMINI_MODEL local was removed.)
        self.llms = {
            'mcp_llm': OpenAIModel(
                'gpt-4.1-mini',
                provider=OpenAIProvider(
                    api_key=self.api_keys.api_keys['openai_api_key']
                ),
            )
        }

        # Build one MCP client per configured server; configs with an unknown
        # 'type' are silently skipped (same dispatch as the original loop).
        self.mpc_servers = [
            server
            for server in (self._build_mcp_server(cfg) for cfg in self.mpc_server_urls)
            if server is not None
        ]

        self._mcp_context_manager = None
        self._is_connected = False

        # Agent with the MCP servers attached as tool providers.
        self.agent = Agent(
            self.llms['mcp_llm'],
            tools=[],
            mcp_servers=self.mpc_servers,
            instructions=(
                "you are a helpful assistant that can help with a wide range of tasks, "
                "you have the current time and the user query, you can use the tools "
                "provided to you if necessary to help the user with their queries, ask "
                "how you can help the user, sometimes the user will ask you not to use "
                "the tools, in this case you should not use the tools"
            ),
        )
        self.memory = Message_state(messages=[])

    @staticmethod
    def _build_mcp_server(cfg: dict):
        """Create the MCP client for one server config dict; None if the
        'type' is unrecognized.

        Uses dict.get for 'bearer_token' so the key really is optional, as
        documented (the original indexed it directly and raised KeyError when
        the key was absent).
        """
        token = cfg.get('bearer_token')
        if cfg['type'] == 'http':
            if token is not None:
                return MCPServerStreamableHTTP(cfg['url'], bearer_token=token)
            return MCPServerStreamableHTTP(cfg['url'])
        if cfg['type'] == 'SSE':
            if token is not None:
                return MCPServerSSE(cfg['url'], bearer_token=token)
            return MCPServerSSE(cfg['url'])
        return None

    async def connect(self):
        """Establish a persistent connection to the MCP servers (idempotent)."""
        if not self._is_connected:
            self._mcp_context_manager = self.agent.run_mcp_servers()
            await self._mcp_context_manager.__aenter__()
            self._is_connected = True
        return "Connected to MCP server"

    async def disconnect(self):
        """Close the MCP server connection (no-op when not connected)."""
        if self._is_connected and self._mcp_context_manager:
            await self._mcp_context_manager.__aexit__(None, None, None)
            self._is_connected = False
            self._mcp_context_manager = None
        return "Disconnected from MCP server"

    async def chat(self, query):
        """Run one conversational turn against the agent.

        Args:
            query: A plain string, or a list mixing strings with pydantic-ai
                BinaryContent items (audio, PDF, image), e.g.::

                    agent.chat([
                        'optional string message',
                        BinaryContent(data=image_bytes, media_type='image/png'),
                    ])

                (The original annotated this as ``any``, which is the builtin
                function, not a type — annotation dropped.)

        Returns:
            The agent run output. Conversation history accumulates on
            ``self.memory.messages`` and is replayed on every call.
        """
        # Lazily connect so callers may skip an explicit connect().
        if not self._is_connected:
            await self.connect()

        result = await self.agent.run(query, message_history=self.memory.messages)
        self.memory.messages = result.all_messages()
        return result.output

    def reset(self):
        """Reset the agent's conversation history.

        Returns:
            str: A confirmation message indicating that the agent was reset.
        """
        self.memory.messages = []
        return 'Agent has been reset'  # was an f-string with no placeholders

    async def __aenter__(self):
        """Async context manager entry: connect and return self."""
        await self.connect()
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        """Async context manager exit: always disconnect."""
        await self.disconnect()
app.py ADDED
@@ -0,0 +1,435 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import asyncio
3
+ import json
4
+ import atexit
5
+ import signal
6
+ import sys
7
+ from agent import MCP_Agent
8
+
9
class GradioMCPApp:
    """Holds the Gradio app's state: the MCP agent, the chat history shown in
    the Chatbot widget, and the persistent asyncio loop that drives the
    agent's coroutines from synchronous Gradio callbacks."""

    def __init__(self):
        self.agent = None        # active MCP_Agent, None until initialized
        self.chat_history = []   # [user, assistant] pairs for gr.Chatbot
        self._loop = None        # dedicated event loop reused across callbacks
        self.server_count = 1    # number of visible server config groups

    def get_or_create_loop(self):
        """Return the app's event loop, creating a fresh one if missing/closed.

        A single persistent loop is required so MCP connections opened during
        initialization remain usable in later chat callbacks.
        """
        if self._loop is None or self._loop.is_closed():
            self._loop = asyncio.new_event_loop()
            asyncio.set_event_loop(self._loop)
        return self._loop

    async def initialize_agent(self, openai_api_key, *server_configs):
        """Initialize the MCP Agent from the sidebar form values.

        Args:
            openai_api_key: OpenAI key forwarded to the agent's model.
            *server_configs: Flat sequence of per-server fields:
                [url1, name1, type1, token1, url2, name2, type2, token2, ...].

        Returns:
            (success: bool, status_message: str)
        """
        try:
            # Tear down any existing agent before building a new one.
            if self.agent:
                await self.disconnect_agent()

            mcp_servers = []
            for i in range(0, len(server_configs), 4):
                if i + 3 < len(server_configs):
                    server_url, server_name, server_type, bearer_token = \
                        server_configs[i:i + 4]

                    # Rows whose URL was left blank (hidden/empty groups) are skipped.
                    if server_url and server_url.strip():
                        mcp_servers.append({
                            'url': server_url.strip(),
                            'name': server_name.strip()
                                    if server_name and server_name.strip()
                                    else f'server_{i // 4 + 1}',
                            'type': server_type if server_type else 'http',
                            'bearer_token': bearer_token.strip()
                                            if bearer_token and bearer_token.strip()
                                            else None,
                        })

            api_keys = {'openai_api_key': openai_api_key}
            self.agent = MCP_Agent(api_keys=api_keys, mpc_server_urls=mcp_servers)

            # Open the persistent MCP connections up front.
            await self.agent.connect()

            server_count = len(mcp_servers)
            if server_count == 0:
                return True, "Agent initialized successfully (no MCP servers configured)!"
            return True, f"Agent initialized successfully with {server_count} MCP server(s)!"

        except Exception as e:
            return False, f"Error initializing agent: {str(e)}"

    async def chat_with_agent(self, message):
        """Handle one text-chat turn; returns (chat_history, error_message)."""
        if not self.agent:
            return self.chat_history, "Please initialize the agent first by providing your OpenAI API key and clicking 'Initialize Agent'."

        if not message or not message.strip():
            return self.chat_history, "Please provide a message."

        text = message.strip()
        try:
            response = await self.agent.chat(text)
            self.chat_history.append([text, str(response)])
            return self.chat_history, ""
        except Exception as e:
            error_msg = f"Error during chat: {str(e)}"
            # FIX: record the stripped text on the error path too, so history
            # entries match the success path (original appended the raw input).
            self.chat_history.append([text, error_msg])
            return self.chat_history, error_msg

    async def reset_agent(self):
        """Clear the agent's conversation history (connection stays open)."""
        if self.agent:
            self.agent.reset()
            self.chat_history = []
            return [], "Agent conversation history reset successfully!"
        return [], "No agent to reset. Please initialize the agent first."

    async def disconnect_agent(self):
        """Disconnect from MCP servers and drop the agent reference."""
        if self.agent:
            try:
                await self.agent.disconnect()
            except Exception as e:
                print(f"Error during disconnect: {e}")
            finally:
                self.agent = None
                self.chat_history = []
        return [], "Agent disconnected successfully!"

    async def cleanup(self):
        """Release the agent connection and close the private event loop."""
        if self.agent:
            await self.disconnect_agent()
        # FIX: cleanup() is usually driven via loop.run_until_complete(...),
        # so the loop is still running here — closing it then raises
        # RuntimeError. Only close when the loop is not running.
        if self._loop and not self._loop.is_closed() and not self._loop.is_running():
            self._loop.close()
114
+
115
# The single module-level app instance shared by every Gradio callback.
app_instance = GradioMCPApp()

def run_async_safely(coro, *args):
    """Safely run async function with proper error handling.

    Drives *coro* on the app's persistent event loop so MCP connections
    opened in one callback stay usable in later ones. On failure returns
    (None, error_message); callers treat a None first element as failure.
    """
    loop = app_instance.get_or_create_loop()
    try:
        return loop.run_until_complete(coro(*args))
    except Exception as e:
        print(f"Error in async operation: {e}")
        return None, f"Error: {str(e)}"
126
+
127
# Synchronous wrapper functions: Gradio calls these, and they drive the
# app's async methods through run_async_safely.

def initialize_agent_wrapper(openai_api_key, *server_configs):
    """Initialize the agent and toggle chat-UI / placeholder visibility."""
    success, message = run_async_safely(
        app_instance.initialize_agent,
        openai_api_key, *server_configs
    )
    if success is None:
        # run_async_safely itself failed — keep the welcome placeholder visible.
        return gr.update(visible=False), gr.update(visible=True), message
    return gr.update(visible=success), gr.update(visible=not success), message

def chat_wrapper(message):
    """Send one chat turn; returns (history, error_text, cleared_input)."""
    chat_history, error_msg = run_async_safely(app_instance.chat_with_agent, message)
    if chat_history is None:
        return [], error_msg, ""
    return chat_history, error_msg, ""  # Clear input

def reset_wrapper():
    """Reset the conversation; returns (history, status_message)."""
    chat_history, message = run_async_safely(app_instance.reset_agent)
    if chat_history is None:
        return [], message
    return chat_history, message

def disconnect_wrapper():
    """Disconnect the agent and restore the welcome placeholder."""
    chat_history, message = run_async_safely(app_instance.disconnect_agent)
    if chat_history is None:
        return [], gr.update(visible=False), gr.update(visible=True), message
    return chat_history, gr.update(visible=False), gr.update(visible=True), message
154
+
155
# Cleanup function for graceful shutdown.
def cleanup_on_exit():
    """Disconnect the agent and release loop resources at process exit."""
    try:
        loop = app_instance.get_or_create_loop()
        if not loop.is_closed():
            loop.run_until_complete(app_instance.cleanup())
    except Exception as e:
        print(f"Error during cleanup: {e}")

# Run the cleanup on normal interpreter exit.
atexit.register(cleanup_on_exit)

# Handle SIGINT (Ctrl+C) gracefully: clean up, then exit.
def signal_handler(signum, frame):
    """SIGINT handler — close MCP connections before terminating."""
    print("\nReceived interrupt signal. Cleaning up...")
    cleanup_on_exit()
    sys.exit(0)

signal.signal(signal.SIGINT, signal_handler)
175
+
176
# Server management functions: toggle visibility of the optional server
# configuration groups and enable/disable the add/remove buttons.
_MAX_SERVERS = 3  # UI has exactly three server groups
_MIN_SERVERS = 1  # server 1 is always visible

def _server_visibility_updates(count):
    """Build the shared update tuple for a given visible-server count.

    Extracted because add_server and remove_server previously duplicated
    this five-element tuple verbatim.
    """
    return (
        count,
        gr.update(visible=count >= 2),                 # server2_group
        gr.update(visible=count >= 3),                 # server3_group
        gr.update(interactive=count < _MAX_SERVERS),   # add_server_btn
        gr.update(interactive=count > _MIN_SERVERS),   # remove_server_btn
    )

def add_server(current_count):
    """Show the next server configuration (capped at 3 servers)."""
    return _server_visibility_updates(min(current_count + 1, _MAX_SERVERS))

def remove_server(current_count):
    """Hide the last server configuration (at least 1 server remains)."""
    return _server_visibility_updates(max(current_count - 1, _MIN_SERVERS))
198
+
199
+
200
+
201
# Create the Gradio interface. Layout: a sidebar with API-key and MCP-server
# configuration, and a main column with the chat UI (hidden until the agent
# is initialized).
with gr.Blocks(title="MCP Agent Chat", theme=gr.themes.Soft()) as demo:
    with gr.Row():
        gr.Markdown("# MCP Agent Chat Interface")
        gr.HTML("")  # Balance spacing

    with gr.Sidebar():
        # Sidebar for configuration
        sidebar_column = gr.Column(scale=1, min_width=350)
        with sidebar_column:
            gr.Markdown("## 🔧 Configuration")

            openai_key = gr.Textbox(
                label="OpenAI API Key",
                type="password",
                placeholder="sk-...",
                info="Your OpenAI API key for the language model"
            )

            gr.Markdown("### MCP Servers Setup")
            gr.Markdown("Configure your MCP server connections (leave all URLs empty to run without MCP servers)")

            # Container for the server configuration groups.
            servers_container = gr.Column()

            with servers_container:
                # Server 1 (always present)
                with gr.Group():
                    gr.Markdown("#### Server 1")
                    server1_url = gr.Textbox(
                        label="Server URL",
                        placeholder="http://localhost:8000",
                        info="The URL of your MCP server"
                    )
                    server1_name = gr.Textbox(
                        label="Server Name",
                        placeholder="server_1",
                        info="A friendly name for your MCP server"
                    )
                    server1_type = gr.Dropdown(
                        label="Server Type",
                        choices=["http", "SSE"],
                        value="http",
                        info="The type of MCP server connection"
                    )
                    server1_token = gr.Textbox(
                        label="Bearer Token (Optional)",
                        type="password",
                        placeholder="Leave empty if not required",
                        info="Authentication token for the MCP server (if required)"
                    )

                # Server 2 (optional — revealed by the "+ Add Server" button)
                server2_group = gr.Group(visible=False)
                with server2_group:
                    gr.Markdown("#### Server 2")
                    server2_url = gr.Textbox(
                        label="Server URL",
                        placeholder="http://localhost:8001",
                        info="The URL of your MCP server"
                    )
                    server2_name = gr.Textbox(
                        label="Server Name",
                        placeholder="server_2",
                        info="A friendly name for your MCP server"
                    )
                    server2_type = gr.Dropdown(
                        label="Server Type",
                        choices=["http", "SSE"],
                        value="http",
                        info="The type of MCP server connection"
                    )
                    server2_token = gr.Textbox(
                        label="Bearer Token (Optional)",
                        type="password",
                        placeholder="Leave empty if not required",
                        info="Authentication token for the MCP server (if required)"
                    )

                # Server 3 (optional)
                server3_group = gr.Group(visible=False)
                with server3_group:
                    gr.Markdown("#### Server 3")
                    server3_url = gr.Textbox(
                        label="Server URL",
                        placeholder="http://localhost:8002",
                        info="The URL of your MCP server"
                    )
                    server3_name = gr.Textbox(
                        label="Server Name",
                        placeholder="server_3",
                        info="A friendly name for your MCP server"
                    )
                    server3_type = gr.Dropdown(
                        label="Server Type",
                        choices=["http", "SSE"],
                        value="http",
                        info="The type of MCP server connection"
                    )
                    server3_token = gr.Textbox(
                        label="Bearer Token (Optional)",
                        type="password",
                        placeholder="Leave empty if not required",
                        info="Authentication token for the MCP server (if required)"
                    )

            # Server management buttons (wired to add_server/remove_server below)
            with gr.Row():
                add_server_btn = gr.Button("+ Add Server", variant="secondary", size="sm")
                remove_server_btn = gr.Button("- Remove Server", variant="secondary", size="sm", interactive=False)

            # Tracks how many server groups are currently visible (1..3).
            server_count_state = gr.State(1)

            init_btn = gr.Button("Initialize Agent", variant="primary", size="lg")
            init_status = gr.Textbox(label="Status", interactive=False, max_lines=3)

    # Main chat area
    chat_column = gr.Column(scale=2)
    with chat_column:
        with gr.Row():
            gr.Markdown("## 💬 Chat with your MCP Agent")
            config_status = gr.Markdown("", visible=False)  # Status when sidebar is collapsed

        # Hidden until initialize_agent_wrapper flips visibility on success.
        chat_interface = gr.Column(visible=False)
        with chat_interface:
            # NOTE(review): avatar_images is given emoji strings; Gradio
            # documents this parameter as image file paths/URLs — confirm
            # these render as intended.
            chatbot = gr.Chatbot(
                label="Conversation",
                height=500,
                show_copy_button=True,
                avatar_images=("👤", "🤖")
            )

            with gr.Row():
                msg = gr.Textbox(
                    label="Message",
                    placeholder="Type your message here...",
                    scale=4,
                    lines=2
                )
                send_btn = gr.Button("Send", variant="primary", scale=1)

            error_display = gr.Textbox(
                label="Error Messages",
                visible=False,
                interactive=False
            )

            with gr.Row():
                reset_btn = gr.Button("Reset Conversation", variant="secondary")
                disconnect_btn = gr.Button("Disconnect Agent", variant="secondary")

        # Placeholder shown while the agent is not initialized.
        placeholder = gr.Markdown(
            "### 👋 Welcome!\n\nPlease configure and initialize your MCP Agent using the sidebar to start chatting.",
            visible=True
        )

    # Server management event handlers
    add_server_btn.click(
        fn=add_server,
        inputs=[server_count_state],
        outputs=[server_count_state, server2_group, server3_group, add_server_btn, remove_server_btn]
    )

    remove_server_btn.click(
        fn=remove_server,
        inputs=[server_count_state],
        outputs=[server_count_state, server2_group, server3_group, add_server_btn, remove_server_btn]
    )

    # Initialization: forwards all 12 server fields as the flat *server_configs
    # sequence that GradioMCPApp.initialize_agent expects.
    init_btn.click(
        fn=initialize_agent_wrapper,
        inputs=[
            openai_key,
            server1_url, server1_name, server1_type, server1_token,
            server2_url, server2_name, server2_type, server2_token,
            server3_url, server3_name, server3_type, server3_token
        ],
        outputs=[chat_interface, placeholder, init_status]
    )

    # Chat functionality: guard empty input locally, then delegate.
    def handle_chat(message):
        if not message or not message.strip():
            return app_instance.chat_history, "Please provide a message.", ""
        return chat_wrapper(message)

    # Both the Send button and pressing Enter submit; the follow-up .then()
    # shows the error box only when there is error text.
    send_btn.click(
        fn=handle_chat,
        inputs=[msg],
        outputs=[chatbot, error_display, msg]
    ).then(
        lambda error: gr.update(visible=bool(error)),
        inputs=[error_display],
        outputs=[error_display]
    )

    msg.submit(
        fn=handle_chat,
        inputs=[msg],
        outputs=[chatbot, error_display, msg]
    ).then(
        lambda error: gr.update(visible=bool(error)),
        inputs=[error_display],
        outputs=[error_display]
    )

    reset_btn.click(
        fn=reset_wrapper,
        outputs=[chatbot, error_display]
    ).then(
        lambda error: gr.update(visible=bool(error)),
        inputs=[error_display],
        outputs=[error_display]
    )

    disconnect_btn.click(
        fn=disconnect_wrapper,
        outputs=[chatbot, chat_interface, placeholder, init_status]
    )
428
+
429
if __name__ == "__main__":
    # Bind to all interfaces on port 7860 — matches the Dockerfile's EXPOSE.
    demo.launch(
        server_name="0.0.0.0",
        server_port=7860,
        share=False,
        show_error=True
    )
requirements.txt ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+
2
+ pydantic-ai==0.3.5
3
+
4
+ gradio==5.35.0