ChrisSacrumCor committed on
Commit
337e000
·
verified ·
1 Parent(s): aa52493

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +357 -0
app.py ADDED
@@ -0,0 +1,357 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import json
import os
from datetime import datetime
from typing import Any, Dict, List, Optional

import gradio as gr
import openai
import requests
8
+
9
class MCPClient:
    """Routes natural-language requests to remote MCP servers.

    Uses OpenAI for intent analysis (picking a server) and for turning raw
    MCP results into a natural-language reply. Holds the static server
    registry and a rolling conversation history shared by all calls on
    this instance.
    """

    def __init__(self, openai_api_key: str):
        """Initialize the MCP Client with OpenAI integration.

        Args:
            openai_api_key: API key used to construct the OpenAI client.
        """
        self.openai_client = openai.OpenAI(api_key=openai_api_key)

        # MCP Server configurations: key -> display name, HF Space URL, summary.
        self.mcp_servers = {
            "terraform": {
                "name": "MCP Terraform",
                "space_url": "https://chrissacrumcor-mcp-terraform.hf.space",
                "description": "Terraform infrastructure management"
            },
            "linux": {
                "name": "MCP Linux",
                "space_url": "https://chrissacrumcor-mcp-linux.hf.space",
                "description": "Linux system operations"
            },
            "cisco": {
                "name": "MCP Cisco",
                "space_url": "https://chrissacrumcor-mcp-cisco.hf.space",
                "description": "Cisco network management"
            }
        }

        # List of {"role": ..., "content": ...} dicts; grows across requests.
        self.conversation_history = []

    def call_mcp_server(self, server_key: str, endpoint: str, payload: Dict[str, Any]) -> Dict[str, Any]:
        """POST *payload* to *endpoint* on the MCP server named by *server_key*.

        Returns:
            ``{"success": True, "data": <parsed JSON>}`` on HTTP 200,
            otherwise ``{"success": False, "error": <message>}``.
            Never raises: network and parsing failures are folded into the
            error dict so callers can handle them uniformly.
        """
        try:
            server_config = self.mcp_servers[server_key]
            url = f"{server_config['space_url']}/{endpoint}"

            response = requests.post(
                url,
                json=payload,
                headers={"Content-Type": "application/json"},
                timeout=30
            )

            if response.status_code == 200:
                return {"success": True, "data": response.json()}
            return {
                "success": False,
                "error": f"HTTP {response.status_code}: {response.text}"
            }

        except requests.exceptions.RequestException as e:
            return {"success": False, "error": f"Request failed: {str(e)}"}
        except Exception as e:
            return {"success": False, "error": f"Unexpected error: {str(e)}"}

    def get_available_tools(self, server_key: str) -> List[Dict[str, Any]]:
        """Return the tool list advertised by an MCP server.

        Returns an empty list on any failure (server down, bad response).
        """
        result = self.call_mcp_server(server_key, "tools/list", {})
        if result["success"]:
            return result["data"].get("tools", [])
        return []

    def execute_tool(self, server_key: str, tool_name: str, arguments: Dict[str, Any]) -> Dict[str, Any]:
        """Execute *tool_name* with *arguments* on the given MCP server."""
        payload = {
            "name": tool_name,
            "arguments": arguments
        }
        return self.call_mcp_server(server_key, "tools/call", payload)

    def analyze_user_request(self, user_input: str) -> Dict[str, Any]:
        """Use OpenAI to pick the MCP server best suited to *user_input*.

        Returns the model's JSON analysis as a dict, or a dict with an
        ``"error"`` key and ``"recommended_server": None`` on failure.
        """
        system_prompt = f"""
You are an intelligent assistant that helps users interact with MCP (Model Context Protocol) servers.

Available MCP servers:
1. Terraform: {self.mcp_servers['terraform']['description']}
2. Linux: {self.mcp_servers['linux']['description']}
3. Cisco: {self.mcp_servers['cisco']['description']}

Analyze the user's request and determine:
1. Which MCP server(s) would be most appropriate
2. What specific tools or operations might be needed
3. Any parameters or arguments that should be passed

Respond in JSON format with:
{{
    "recommended_server": "server_key",
    "reasoning": "explanation of why this server was chosen",
    "suggested_action": "what action to take",
    "parameters": {{"key": "value"}}
}}
"""

        try:
            response = self.openai_client.chat.completions.create(
                model="gpt-4",
                messages=[
                    {"role": "system", "content": system_prompt},
                    {"role": "user", "content": user_input}
                ],
                temperature=0.3
            )

            content = response.choices[0].message.content.strip()
            # Models frequently wrap JSON in markdown fences ("```json ... ```");
            # strip them so json.loads does not fail on otherwise-valid output.
            if content.startswith("```"):
                content = content.strip("`").strip()
                if content.lower().startswith("json"):
                    content = content[4:].strip()
            return json.loads(content)

        except Exception as e:
            return {
                "error": f"Failed to analyze request: {str(e)}",
                "recommended_server": None
            }

    def generate_response(self, user_input: str, mcp_results: Optional[Dict[str, Any]] = None) -> str:
        """Generate a natural-language reply, optionally grounded in MCP results.

        Args:
            user_input: The user's latest message.
            mcp_results: Optional MCP server data to include as context.

        Returns:
            The assistant's reply, or an error string if the API call fails.
        """
        context = ""
        if mcp_results:
            context = f"\nMCP Server Results: {json.dumps(mcp_results, indent=2)}"

        messages = [
            {
                "role": "system",
                "content": f"""
You are a helpful assistant that works with MCP servers for infrastructure management.
Provide clear, helpful responses based on the user's request and any MCP server results.

Available MCP Servers:
- Terraform: Infrastructure as Code management
- Linux: System administration and operations
- Cisco: Network device management

{context}
"""
            }
        ]

        # Add conversation history (last 6 messages) to keep the prompt small.
        messages.extend(self.conversation_history[-6:])
        messages.append({"role": "user", "content": user_input})

        try:
            response = self.openai_client.chat.completions.create(
                model="gpt-4",
                messages=messages,
                temperature=0.7,
                max_tokens=1000
            )

            return response.choices[0].message.content

        except Exception as e:
            return f"Error generating response: {str(e)}"

    def process_request(self, user_input: str, selected_server: Optional[str] = None) -> tuple[str, str]:
        """Process a user request end-to-end.

        Picks a server (via OpenAI when *selected_server* is None or
        "Auto-detect"), lists its tools, and generates a reply.

        Returns:
            A ``(response, status)`` tuple of display strings.
        """
        timestamp = datetime.now().strftime("%H:%M:%S")

        # Record the user turn before any processing so errors keep context.
        self.conversation_history.append({"role": "user", "content": user_input})

        try:
            # Analyze request if no server specified.
            if not selected_server or selected_server == "Auto-detect":
                analysis = self.analyze_user_request(user_input)
                if "error" in analysis:
                    response = f"❌ Analysis Error: {analysis['error']}"
                    self.conversation_history.append({"role": "assistant", "content": response})
                    return response, f"[{timestamp}] Analysis failed"

                # The model may return a missing or unknown server key; fall
                # back to "terraform" instead of raising KeyError below.
                selected_server = analysis.get("recommended_server") or "terraform"
                if selected_server not in self.mcp_servers:
                    selected_server = "terraform"
                reasoning = analysis.get("reasoning", "No reasoning provided")
                status = f"[{timestamp}] Selected: {self.mcp_servers[selected_server]['name']} - {reasoning}"
            else:
                status = f"[{timestamp}] Using: {self.mcp_servers[selected_server]['name']}"

            # Get available tools for the selected server.
            tools = self.get_available_tools(selected_server)

            if not tools:
                response = f"⚠️ No tools available from {self.mcp_servers[selected_server]['name']} server"
                self.conversation_history.append({"role": "assistant", "content": response})
                return response, status + " - No tools available"

            # For now, just show available tools and generate a helpful response.
            # In a full implementation, you'd use OpenAI to select and execute
            # appropriate tools.
            tools_info = "\n".join([f"- {tool.get('name', 'Unknown')}: {tool.get('description', 'No description')}"
                                    for tool in tools[:5]])  # Show first 5 tools

            server_info = {
                "server": self.mcp_servers[selected_server]['name'],
                "available_tools": len(tools),
                "sample_tools": tools_info
            }

            response = self.generate_response(user_input, server_info)
            self.conversation_history.append({"role": "assistant", "content": response})

            return response, status + f" - {len(tools)} tools available"

        except Exception as e:
            error_response = f"❌ Error processing request: {str(e)}"
            self.conversation_history.append({"role": "assistant", "content": error_response})
            return error_response, f"[{timestamp}] Error occurred"
208
+
209
def create_gradio_interface():
    """Build and return the Gradio Blocks app wrapping an MCPClient.

    The MCPClient is created lazily, once the user supplies an OpenAI API
    key via the UI.
    """

    # Set by initialize_client once an API key is provided.
    mcp_client = None

    def initialize_client(api_key):
        """Create the shared MCPClient from the supplied API key."""
        nonlocal mcp_client
        if not api_key:
            return "❌ Please provide your OpenAI API key", ""

        try:
            mcp_client = MCPClient(api_key)
            return "✅ OpenAI client initialized successfully!", ""
        except Exception as e:
            return f"❌ Failed to initialize: {str(e)}", ""

    def process_message(message, server_choice, history):
        """Run *message* through the client and append the exchange to history."""
        if not mcp_client:
            # Bug fix: show the user's message on the left and the warning as
            # the assistant reply, instead of rendering the warning as a user
            # turn with an empty response.
            history.append([message, "Please initialize the OpenAI client first."])
            return history, ""

        if not message.strip():
            return history, ""

        try:
            # Status string is currently unused in the UI.
            response, _status = mcp_client.process_request(message, server_choice)
            history.append([message, response])
            return history, ""
        except Exception as e:
            error_msg = f"❌ Error: {str(e)}"
            history.append([message, error_msg])
            return history, ""

    def get_server_status():
        """Render a markdown health summary for every configured MCP server."""
        status_info = "## MCP Servers Status\n\n"
        servers = mcp_client.mcp_servers if mcp_client else {}
        for config in servers.values():
            try:
                # Simple health check against the Space's /health endpoint.
                response = requests.get(f"{config['space_url']}/health", timeout=5)
                status = "🟢 Online" if response.status_code == 200 else "🟡 Issues"
            except requests.exceptions.RequestException:
                status = "🔴 Offline"

            status_info += f"**{config['name']}**: {status}\n"
            status_info += f"- URL: {config['space_url']}\n"
            status_info += f"- Description: {config['description']}\n\n"

        return status_info

    # Create Gradio interface.
    with gr.Blocks(title="MCP Client with OpenAI", theme=gr.themes.Soft()) as app:
        gr.Markdown("# 🤖 MCP Client with OpenAI Integration")
        gr.Markdown("Connect to your Terraform, Linux, and Cisco MCP servers with AI-powered assistance")

        with gr.Row():
            with gr.Column(scale=3):
                # API Key input.
                api_key = gr.Textbox(
                    label="OpenAI API Key",
                    type="password",
                    placeholder="Enter your OpenAI API key..."
                )
                init_btn = gr.Button("Initialize Client", variant="primary")
                init_status = gr.Textbox(label="Status", interactive=False)

                # Server selection.
                server_choice = gr.Dropdown(
                    choices=["Auto-detect", "terraform", "linux", "cisco"],
                    value="Auto-detect",
                    label="Select MCP Server"
                )

                # Chat interface.
                chatbot = gr.Chatbot(
                    label="Conversation",
                    height=400,
                    show_label=True
                )

                msg = gr.Textbox(
                    label="Message",
                    placeholder="Ask about infrastructure, Linux systems, or Cisco networks...",
                    lines=2
                )

                with gr.Row():
                    send_btn = gr.Button("Send", variant="primary")
                    clear_btn = gr.Button("Clear Chat")

            with gr.Column(scale=1):
                gr.Markdown("## 📊 Server Status")
                server_status = gr.Markdown("Initialize client to see server status")
                refresh_btn = gr.Button("Refresh Status")

                gr.Markdown("## 💡 Example Queries")
                gr.Markdown("""
                **Terraform:**
                - "Show me the current infrastructure state"
                - "Plan a new deployment"

                **Linux:**
                - "Check system resources"
                - "List running processes"

                **Cisco:**
                - "Show interface status"
                - "Check network configuration"
                """)

        # Event handlers.
        init_btn.click(
            initialize_client,
            inputs=[api_key],
            outputs=[init_status, server_status]
        )

        send_btn.click(
            process_message,
            inputs=[msg, server_choice, chatbot],
            outputs=[chatbot, msg]
        )

        msg.submit(
            process_message,
            inputs=[msg, server_choice, chatbot],
            outputs=[chatbot, msg]
        )

        clear_btn.click(
            lambda: [],
            outputs=[chatbot]
        )

        refresh_btn.click(
            get_server_status,
            outputs=[server_status]
        )

    return app
348
+
349
if __name__ == "__main__":
    # Build the Gradio UI and serve it publicly on port 7860.
    create_gradio_interface().launch(
        server_name="0.0.0.0",
        server_port=7860,
        share=True,
        debug=True,
    )