serichard1 committed on
Commit
ed99863
·
1 Parent(s): f91b5b2

fix async error2

Browse files
Files changed (3) hide show
  1. app.py +171 -588
  2. gradio_mcp_server.py +36 -1
  3. requirements.txt +0 -2
app.py CHANGED
@@ -1,159 +1,51 @@
1
  import asyncio
2
  import os
3
  import json
4
- from typing import List, Dict, Any, Union, Optional
5
  from contextlib import AsyncExitStack
6
- import mimetypes
7
- import tempfile
8
- import threading
9
- from concurrent.futures import ThreadPoolExecutor
10
 
11
  import gradio as gr
12
  from gradio.components.chatbot import ChatMessage
13
  from mcp import ClientSession, StdioServerParameters
14
  from mcp.client.stdio import stdio_client
15
  from anthropic import Anthropic
16
- from openai import OpenAI
17
- from mistralai.client import MistralClient
18
  from dotenv import load_dotenv
19
 
20
  load_dotenv()
21
 
 
 
 
22
  class MCPClientWrapper:
23
  def __init__(self):
24
- self.session: Optional[ClientSession] = None
25
- self.exit_stack: Optional[AsyncExitStack] = None
 
26
  self.tools = []
27
  self.connected = False
28
- self._connection_lock = threading.Lock()
29
-
30
- # Initialize all LLM clients
31
- self.anthropic_client = None
32
- self.openai_client = None
33
- self.mistral_client = None
34
- self.llama_client = None
35
-
36
- # Current selected provider and model
37
- self.current_provider = "claude"
38
- self.current_model = "claude-3-5-sonnet-20241022"
39
-
40
- self._initialize_clients()
41
 
42
- def _initialize_clients(self):
43
- """Initialize available LLM clients based on environment variables."""
44
- try:
45
- if os.getenv("ANTHROPIC_API_KEY"):
46
- self.anthropic_client = Anthropic()
47
- except Exception as e:
48
- print(f"⚠️ Failed to initialize Anthropic client: {e}")
49
 
50
- try:
51
- if os.getenv("OPENAI_API_KEY"):
52
- self.openai_client = OpenAI()
53
- except Exception as e:
54
- print(f"⚠️ Failed to initialize OpenAI client: {e}")
55
 
56
- try:
57
- if os.getenv("MISTRAL_API_KEY"):
58
- self.mistral_client = MistralClient(api_key=os.getenv("MISTRAL_API_KEY"))
59
- except Exception as e:
60
- print(f"⚠️ Failed to initialize Mistral client: {e}")
 
 
61
 
62
  try:
63
- if os.getenv("LLAMAINDEX_API_KEY"):
64
- self.llama_client = OpenAI(
65
- api_key=os.getenv("LLAMAINDEX_API_KEY"),
66
- base_url="https://api.llamaindex.ai/v1"
67
- )
68
- except Exception as e:
69
- print(f"⚠️ Failed to initialize Llama client: {e}")
70
-
71
- def get_available_providers(self):
72
- """Get list of available LLM providers."""
73
- providers = {}
74
- if self.anthropic_client:
75
- providers["claude"] = {
76
- "name": "Claude (Anthropic)",
77
- "models": [
78
- "claude-3-5-sonnet-20241022",
79
- "claude-3-5-haiku-20241022",
80
- "claude-3-opus-20240229"
81
- ]
82
- }
83
- if self.openai_client:
84
- providers["openai"] = {
85
- "name": "OpenAI",
86
- "models": [
87
- "gpt-4o",
88
- "gpt-4o-mini",
89
- "gpt-4-turbo",
90
- "gpt-3.5-turbo"
91
- ]
92
- }
93
- if self.mistral_client:
94
- providers["mistral"] = {
95
- "name": "Mistral AI",
96
- "models": [
97
- "mistral-large-latest",
98
- "mistral-medium-latest",
99
- "mistral-small-latest",
100
- "open-mixtral-8x7b"
101
- ]
102
- }
103
- if self.llama_client:
104
- providers["llama"] = {
105
- "name": "Llama",
106
- "models": [
107
- "llama-3.1-70b-instruct",
108
- "llama-3.1-8b-instruct",
109
- "llama-2-70b-chat"
110
- ]
111
- }
112
- return providers
113
-
114
- def update_provider(self, provider: str, model: str):
115
- """Update the current provider and model."""
116
- self.current_provider = provider
117
- self.current_model = model
118
- return f"✅ Switched to {provider}: {model}"
119
-
120
- async def _cleanup_connection(self):
121
- """Safely cleanup existing connection."""
122
- if self.exit_stack:
123
- try:
124
- await self.exit_stack.aclose()
125
- except Exception as e:
126
- print(f"Warning: Error during cleanup: {e}")
127
- finally:
128
- self.exit_stack = None
129
- self.session = None
130
- self.connected = False
131
-
132
- async def _establish_connection(self) -> str:
133
- """Establish MCP connection in proper async context."""
134
- try:
135
- # Clean up any existing connection
136
- await self._cleanup_connection()
137
-
138
- self.exit_stack = AsyncExitStack()
139
-
140
- server_path = "gradio_mcp_server.py"
141
- server_params = StdioServerParameters(
142
- command="python",
143
- args=[server_path],
144
- env={"PYTHONIOENCODING": "utf-8", "PYTHONUNBUFFERED": "1"}
145
- )
146
-
147
- # Enter the async context managers
148
- stdio_transport = await self.exit_stack.enter_async_context(
149
- stdio_client(server_params)
150
- )
151
- stdio, write = stdio_transport
152
-
153
- self.session = await self.exit_stack.enter_async_context(
154
- ClientSession(stdio, write)
155
- )
156
 
 
157
  await self.session.initialize()
158
 
159
  response = await self.session.list_tools()
@@ -166,203 +58,21 @@ class MCPClientWrapper:
166
  self.connected = True
167
  tool_names = [tool["name"] for tool in self.tools]
168
  return f"✅ Connected to MCP Weather Server. Available tools: {', '.join(tool_names)}"
169
-
170
  except Exception as e:
171
  self.connected = False
172
- await self._cleanup_connection()
173
  return f"❌ Failed to connect to MCP server: {str(e)}"
174
 
175
- def connect(self) -> str:
176
- """Thread-safe connection method for Gradio."""
177
- with self._connection_lock:
178
- try:
179
- # Create new event loop for this operation
180
- try:
181
- loop = asyncio.get_event_loop()
182
- except RuntimeError:
183
- loop = asyncio.new_event_loop()
184
- asyncio.set_event_loop(loop)
185
-
186
- if loop.is_running():
187
- # If loop is already running, we need to run in a thread
188
- import concurrent.futures
189
- with concurrent.futures.ThreadPoolExecutor() as executor:
190
- future = executor.submit(self._run_connection_in_new_loop)
191
- return future.result()
192
- else:
193
- return loop.run_until_complete(self._establish_connection())
194
- except Exception as e:
195
- return f"❌ Connection error: {str(e)}"
196
-
197
- def _run_connection_in_new_loop(self) -> str:
198
- """Run connection in a new event loop (for thread safety)."""
199
- loop = asyncio.new_event_loop()
200
- asyncio.set_event_loop(loop)
201
- try:
202
- return loop.run_until_complete(self._establish_connection())
203
- finally:
204
- loop.close()
205
-
206
- def read_uploaded_file(self, file_path: str) -> str:
207
- """Read and process uploaded file content."""
208
- if not file_path or not os.path.exists(file_path):
209
- return ""
210
-
211
- try:
212
- file_size = os.path.getsize(file_path)
213
- file_name = os.path.basename(file_path)
214
- mime_type, _ = mimetypes.guess_type(file_path)
215
-
216
- if file_size > 10 * 1024 * 1024:
217
- return f"\n\n📄 **File Upload Error**: {file_name} is too large (>10MB). Please upload a smaller file."
218
-
219
- encodings_to_try = ['utf-8', 'utf-16', 'latin-1', 'cp1252']
220
-
221
- for encoding in encodings_to_try:
222
- try:
223
- with open(file_path, 'r', encoding=encoding) as f:
224
- content = f.read()
225
-
226
- max_chars = 50000
227
- if len(content) > max_chars:
228
- content = content[:max_chars] + f"\n\n[Content truncated - showing first {max_chars} characters of {len(content)} total]"
229
-
230
- file_info = f"\n\n📄 **Uploaded File**: {file_name}"
231
- if mime_type:
232
- file_info += f" ({mime_type})"
233
- file_info += f" - {file_size:,} bytes\n\n```\n{content}\n```"
234
-
235
- return file_info
236
-
237
- except UnicodeDecodeError:
238
- continue
239
-
240
- return f"\n\n📄 **File Upload**: {file_name} appears to be a binary file and cannot be displayed as text."
241
-
242
- except Exception as e:
243
- return f"\n\n📄 **File Upload Error**: Could not read {file_name}: {str(e)}"
244
-
245
- def _convert_tools_for_provider(self, provider: str):
246
- """Convert MCP tools format to provider-specific format."""
247
- if provider == "claude":
248
- return self.tools
249
- elif provider in ["openai", "llama"]:
250
- openai_tools = []
251
- for tool in self.tools:
252
- openai_tools.append({
253
- "type": "function",
254
- "function": {
255
- "name": tool["name"],
256
- "description": tool["description"],
257
- "parameters": tool["input_schema"]
258
- }
259
- })
260
- return openai_tools
261
- elif provider == "mistral":
262
- mistral_tools = []
263
- for tool in self.tools:
264
- mistral_tools.append({
265
- "type": "function",
266
- "function": {
267
- "name": tool["name"],
268
- "description": tool["description"],
269
- "parameters": tool["input_schema"]
270
- }
271
- })
272
- return mistral_tools
273
- else:
274
- return []
275
-
276
- async def _call_llm(self, messages: List[Dict], provider: str, model: str):
277
- """Call the appropriate LLM based on provider."""
278
- try:
279
- if provider == "claude" and self.anthropic_client:
280
- return self.anthropic_client.messages.create(
281
- model=model,
282
- max_tokens=1500,
283
- messages=messages,
284
- tools=self._convert_tools_for_provider(provider)
285
- )
286
- elif provider == "openai" and self.openai_client:
287
- return self.openai_client.chat.completions.create(
288
- model=model,
289
- max_tokens=1500,
290
- messages=messages,
291
- tools=self._convert_tools_for_provider(provider)
292
- )
293
- elif provider == "llama" and self.llama_client:
294
- return self.llama_client.chat.completions.create(
295
- model=model,
296
- max_tokens=1500,
297
- messages=messages,
298
- tools=self._convert_tools_for_provider(provider)
299
- )
300
- elif provider == "mistral" and self.mistral_client:
301
- return self.mistral_client.chat(
302
- model=model,
303
- max_tokens=1500,
304
- messages=messages,
305
- tools=self._convert_tools_for_provider(provider)
306
- )
307
- else:
308
- raise Exception(f"Provider {provider} not available or not initialized")
309
- except Exception as e:
310
- raise Exception(f"Error calling {provider}: {str(e)}")
311
-
312
- def process_message(self, message: str, history: List[Union[Dict[str, Any], ChatMessage]], uploaded_file) -> tuple:
313
- """Process message in thread-safe manner."""
314
  if not self.session or not self.connected:
315
  return history + [
316
  {"role": "user", "content": message},
317
  {"role": "assistant", "content": "❌ MCP weather server is not connected. Please check the connection status above."}
318
- ], gr.Textbox(value=""), gr.File(value=None)
319
-
320
- # Process uploaded file if present
321
- file_content = ""
322
- if uploaded_file:
323
- file_content = self.read_uploaded_file(uploaded_file.name if hasattr(uploaded_file, 'name') else uploaded_file)
324
 
325
- # Combine message with file content
326
- full_message = message + file_content
327
-
328
- try:
329
- # Run async processing in new event loop
330
- new_messages = self._run_async_processing(full_message, history)
331
- return history + [{"role": "user", "content": full_message}] + new_messages, gr.Textbox(value=""), gr.File(value=None)
332
- except Exception as e:
333
- return history + [
334
- {"role": "user", "content": full_message},
335
- {"role": "assistant", "content": f"❌ Error processing message: {str(e)}"}
336
- ], gr.Textbox(value=""), gr.File(value=None)
337
-
338
- def _run_async_processing(self, message: str, history: List[Union[Dict[str, Any], ChatMessage]]):
339
- """Run async message processing in new event loop."""
340
- try:
341
- loop = asyncio.get_event_loop()
342
- except RuntimeError:
343
- loop = asyncio.new_event_loop()
344
- asyncio.set_event_loop(loop)
345
-
346
- if loop.is_running():
347
- # Run in thread if event loop is already running
348
- import concurrent.futures
349
- with concurrent.futures.ThreadPoolExecutor() as executor:
350
- future = executor.submit(self._process_in_new_loop, message, history)
351
- return future.result()
352
- else:
353
- return loop.run_until_complete(self._process_query(message, history))
354
-
355
- def _process_in_new_loop(self, message: str, history: List[Union[Dict[str, Any], ChatMessage]]):
356
- """Process query in a completely new event loop."""
357
- loop = asyncio.new_event_loop()
358
- asyncio.set_event_loop(loop)
359
- try:
360
- return loop.run_until_complete(self._process_query(message, history))
361
- finally:
362
- loop.close()
363
 
364
  async def _process_query(self, message: str, history: List[Union[Dict[str, Any], ChatMessage]]):
365
- """Process the actual query with LLM and tools."""
366
  claude_messages = []
367
  for msg in history:
368
  if isinstance(msg, ChatMessage):
@@ -375,23 +85,13 @@ class MCPClientWrapper:
375
 
376
  claude_messages.append({"role": "user", "content": message})
377
 
378
- try:
379
- response = await self._call_llm(claude_messages, self.current_provider, self.current_model)
380
- except Exception as e:
381
- return [{"role": "assistant", "content": f"❌ Error with {self.current_provider}: {str(e)}"}]
 
 
382
 
383
- # Handle different response formats based on provider
384
- if self.current_provider == "claude":
385
- return await self._process_claude_response(response, claude_messages)
386
- elif self.current_provider in ["openai", "llama"]:
387
- return await self._process_openai_response(response, claude_messages)
388
- elif self.current_provider == "mistral":
389
- return await self._process_mistral_response(response, claude_messages)
390
-
391
- return []
392
-
393
- async def _process_claude_response(self, response, claude_messages):
394
- """Process Claude API response."""
395
  result_messages = []
396
 
397
  for content in response.content:
@@ -416,210 +116,158 @@ class MCPClientWrapper:
416
  }
417
  })
418
 
419
- result = await self.session.call_tool(tool_name, tool_args)
420
- result_content = result.content
421
- if isinstance(result_content, list):
422
- result_content = "\n".join(str(item) for item in result_content)
423
-
424
- formatted_response = self._format_weather_response(result_content, tool_name)
425
- result_messages.append(formatted_response)
426
-
427
- claude_messages.append({"role": "user", "content": f"Tool result for {tool_name}: {result_content}"})
428
- next_response = await self._call_llm(claude_messages, self.current_provider, self.current_model)
429
-
430
- if hasattr(next_response, 'content') and next_response.content and next_response.content[0].type == 'text':
431
- result_messages.append({
432
- "role": "assistant",
433
- "content": next_response.content[0].text
434
- })
435
-
436
- return result_messages
437
-
438
- async def _process_openai_response(self, response, claude_messages):
439
- """Process OpenAI/Llama API response."""
440
- result_messages = []
441
-
442
- message = response.choices[0].message
443
-
444
- if message.content:
445
- result_messages.append({
446
- "role": "assistant",
447
- "content": message.content
448
- })
449
-
450
- if message.tool_calls:
451
- for tool_call in message.tool_calls:
452
- tool_name = tool_call.function.name
453
- tool_args = json.loads(tool_call.function.arguments)
454
-
455
  result_messages.append({
456
  "role": "assistant",
457
- "content": f"🔧 I'll use the **{tool_name}** tool to fetch the weather data you requested."
 
 
 
 
 
458
  })
459
 
460
  result = await self.session.call_tool(tool_name, tool_args)
461
- result_content = result.content
462
- if isinstance(result_content, list):
463
- result_content = "\n".join(str(item) for item in result_content)
464
 
465
- formatted_response = self._format_weather_response(result_content, tool_name)
466
- result_messages.append(formatted_response)
467
-
468
- return result_messages
469
-
470
- async def _process_mistral_response(self, response, claude_messages):
471
- """Process Mistral API response."""
472
- result_messages = []
473
-
474
- message = response.choices[0].message
475
-
476
- if message.content:
477
- result_messages.append({
478
- "role": "assistant",
479
- "content": message.content
480
- })
481
-
482
- if hasattr(message, 'tool_calls') and message.tool_calls:
483
- for tool_call in message.tool_calls:
484
- tool_name = tool_call.function.name
485
- tool_args = json.loads(tool_call.function.arguments)
486
 
487
- result_messages.append({
488
- "role": "assistant",
489
- "content": f"🔧 I'll use the **{tool_name}** tool to fetch the weather data you requested."
490
- })
491
-
492
- result = await self.session.call_tool(tool_name, tool_args)
493
  result_content = result.content
494
  if isinstance(result_content, list):
495
  result_content = "\n".join(str(item) for item in result_content)
496
 
497
- formatted_response = self._format_weather_response(result_content, tool_name)
498
- result_messages.append(formatted_response)
499
-
500
- return result_messages
501
-
502
- def _format_weather_response(self, result_content: str, tool_name: str):
503
- """Format weather data response."""
504
- try:
505
- result_json = json.loads(result_content)
506
-
507
- if isinstance(result_json, dict):
508
- if result_json.get("type") == "success":
509
- station_code = result_json.get("station_code", "Unknown")
510
- weather_data = result_json.get("data", {})
511
-
512
- formatted_response = f"## 🌤️ Weather Data for Station: {station_code}\n\n"
513
 
514
- if isinstance(weather_data, dict):
515
- if "reports" in weather_data:
516
- reports = weather_data["reports"]
517
- if isinstance(reports, list) and len(reports) > 0:
518
- formatted_response += f"**Found {len(reports)} weather reports**\n\n"
519
- for i, report in enumerate(reports[:3]):
520
- if isinstance(report, dict):
521
- timestamp = report.get("timestamp", "Unknown time")
522
- temperature = report.get("temperature", "N/A")
523
- humidity = report.get("humidity", "N/A")
524
- formatted_response += f"**Report {i+1}** ({timestamp}):\n"
525
- formatted_response += f"- Temperature: {temperature}\n"
526
- formatted_response += f"- Humidity: {humidity}\n\n"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
527
 
528
- if len(reports) > 3:
529
- formatted_response += f"... and {len(reports) - 3} more reports\n\n"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
530
 
531
- formatted_response += "**Raw Data:**\n```json\n" + json.dumps(weather_data, indent=2) + "\n```"
532
-
533
- return {
534
  "role": "assistant",
535
- "content": formatted_response,
536
  "metadata": {
537
- "title": f"Weather Data Retrieved",
538
  "status": "done",
539
- "id": f"success_result_{tool_name}"
540
  }
541
- }
542
 
543
- elif result_json.get("type") == "error":
544
- error_msg = result_json.get("message", "Unknown error occurred")
545
- station_code = result_json.get("station_code", "Unknown")
546
-
547
- error_response = f"## ❌ Error Fetching Weather Data\n\n"
548
- error_response += f"**Station:** {station_code}\n"
549
- error_response += f"**Error:** {error_msg}\n\n"
550
-
551
- return {
 
552
  "role": "assistant",
553
- "content": error_response,
554
- "metadata": {
555
- "title": "Weather API Error",
556
- "status": "error",
557
- "id": f"error_result_{tool_name}"
558
- }
559
- }
560
-
561
- except json.JSONDecodeError:
562
- pass
563
-
564
- return {
565
- "role": "assistant",
566
- "content": "```\n" + result_content + "\n```",
567
- "metadata": {
568
- "title": "Raw Tool Response",
569
- "status": "done",
570
- "id": f"raw_result_{tool_name}"
571
- }
572
- }
573
 
574
- # Initialize client
575
  client = MCPClientWrapper()
576
 
577
  def gradio_interface():
578
  with gr.Blocks(title="MCP LEXICON", theme=gr.themes.Soft()) as demo:
579
- gr.Markdown("# 🌤️ LEXICON CHATBOT - Multi-LLM Weather Assistant")
580
  gr.Markdown(
581
- "Ask me about weather data from any weather station! I support multiple AI providers "
582
- "and can process uploaded files for additional context. Choose your preferred AI model below."
 
583
  )
584
 
585
- # LLM Provider Selection
586
- with gr.Row():
587
- with gr.Column(scale=2):
588
- available_providers = client.get_available_providers()
589
- if not available_providers:
590
- gr.Markdown("⚠️ **No LLM providers available**. Please check your API keys in environment variables.")
591
- provider_dropdown = gr.Dropdown(choices=[], value=None, label="🤖 AI Provider", interactive=False)
592
- model_dropdown = gr.Dropdown(choices=[], value=None, label="🎯 Model", interactive=False)
593
- else:
594
- provider_choices = [(info["name"], key) for key, info in available_providers.items()]
595
- default_provider = list(available_providers.keys())[0]
596
-
597
- provider_dropdown = gr.Dropdown(
598
- choices=provider_choices,
599
- value=default_provider,
600
- label="🤖 AI Provider",
601
- interactive=True
602
- )
603
-
604
- model_dropdown = gr.Dropdown(
605
- choices=available_providers[default_provider]["models"],
606
- value=available_providers[default_provider]["models"][0],
607
- label="🎯 Model",
608
- interactive=True
609
- )
610
-
611
- with gr.Column(scale=1):
612
- current_model_display = gr.Textbox(
613
- label="🔄 Current Selection",
614
- value=f"{client.current_provider}: {client.current_model}",
615
- interactive=False
616
- )
617
-
618
- # Connection status
619
  status = gr.Textbox(
620
  label="🔌 Connection Status",
621
  interactive=False,
622
- value="🔄 Ready to connect..."
623
  )
624
 
625
  # Main chat interface
@@ -628,29 +276,18 @@ def gradio_interface():
628
  height=600,
629
  type="messages",
630
  show_copy_button=True,
631
- avatar_images=("👤", "🤖")
632
- )
633
-
634
- # File upload component
635
- file_upload = gr.File(
636
- label="📎 Upload File (optional)",
637
- file_count="single",
638
- file_types=[
639
- ".txt", ".md", ".py", ".js", ".html", ".css", ".json", ".csv",
640
- ".xml", ".yml", ".yaml", ".ini", ".cfg", ".log", ".sql"
641
- ],
642
- height=100
643
  )
644
 
645
  # Input row
646
  with gr.Row(equal_height=True):
647
  msg = gr.Textbox(
648
  label="💬 Ask about weather data",
649
- placeholder="e.g., 'Get weather data for station NYC001' or upload a file with additional context",
650
  scale=4
651
  )
652
  with gr.Column(scale=1):
653
- send_btn = gr.Button("📤 Send", size="lg", variant="primary")
654
  clear_btn = gr.Button("🗑️ Clear Chat", size="lg")
655
  reconnect_btn = gr.Button("🔄 Reconnect", size="lg")
656
 
@@ -661,89 +298,35 @@ def gradio_interface():
661
  "What weather stations are available?",
662
  "Get weather data for station ABC123",
663
  "Show me the latest hourly reports for station NYC001",
664
- "Analyze the uploaded data and compare it with weather patterns",
665
- "Explain the weather trends from the uploaded CSV file"
666
  ],
667
  inputs=msg,
668
  label="💡 Example Queries"
669
  )
670
 
671
- # Provider/Model update functions
672
- def update_models(provider):
673
- available_providers = client.get_available_providers()
674
- if provider in available_providers:
675
- models = available_providers[provider]["models"]
676
- return gr.Dropdown(choices=models, value=models[0])
677
- return gr.Dropdown(choices=[], value=None)
678
-
679
- def update_current_selection(provider, model):
680
- if provider and model:
681
- status_msg = client.update_provider(provider, model)
682
- return f"{provider}: {model}", status_msg
683
- return current_model_display.value, "❌ Please select both provider and model"
684
-
685
- # Auto-connect function
686
  def auto_connect():
687
  return client.connect()
688
 
689
- def clear_all():
690
- return [], gr.File(value=None)
691
-
692
  # Event handlers
693
  demo.load(auto_connect, outputs=status)
694
-
695
- # Provider/Model selection
696
- provider_dropdown.change(update_models, provider_dropdown, model_dropdown)
697
- model_dropdown.change(
698
- update_current_selection,
699
- [provider_dropdown, model_dropdown],
700
- [current_model_display, status]
701
- )
702
-
703
- # Send message on button click or enter key
704
- send_btn.click(
705
- client.process_message,
706
- [msg, chatbot, file_upload],
707
- [chatbot, msg, file_upload]
708
- )
709
- msg.submit(
710
- client.process_message,
711
- [msg, chatbot, file_upload],
712
- [chatbot, msg, file_upload]
713
- )
714
-
715
- # Clear chat and file
716
- clear_btn.click(clear_all, None, [chatbot, file_upload])
717
-
718
- # Reconnect
719
  reconnect_btn.click(auto_connect, outputs=status)
720
 
721
  return demo
722
 
723
  if __name__ == "__main__":
724
- # Check for API keys
725
- api_keys = {
726
- "ANTHROPIC_API_KEY": os.getenv("ANTHROPIC_API_KEY"),
727
- "OPENAI_API_KEY": os.getenv("OPENAI_API_KEY"),
728
- "MISTRAL_API_KEY": os.getenv("MISTRAL_API_KEY"),
729
- "LLAMAINDEX_API_KEY": os.getenv("LLAMAINDEX_API_KEY")
730
- }
731
-
732
- available_keys = [key for key, value in api_keys.items() if value]
733
-
734
- if not available_keys:
735
- print("⚠️ Warning: No API keys found in environment.")
736
- print("Please set at least one of the following in your .env file:")
737
- for key in api_keys.keys():
738
- print(f" {key}=your_api_key_here")
739
- else:
740
- print("🔑 Available API keys:", ", ".join(available_keys))
741
 
742
- print("🚀 Starting Multi-LLM MCP Weather Client...")
743
- print("🔡 Will auto-connect to gradio_mcp_server.py")
744
- print("🌐 Weather API endpoint: https://lexicon.osfarm.org/weather/stations")
745
- print("📎 File upload enabled - supports text, code, and data files")
746
- print("🤖 Multi-LLM support: Claude, OpenAI, Mistral, Llama")
747
 
748
  interface = gradio_interface()
749
  interface.launch(debug=True, share=True)
 
1
  import asyncio
2
  import os
3
  import json
4
+ from typing import List, Dict, Any, Union
5
  from contextlib import AsyncExitStack
 
 
 
 
6
 
7
  import gradio as gr
8
  from gradio.components.chatbot import ChatMessage
9
  from mcp import ClientSession, StdioServerParameters
10
  from mcp.client.stdio import stdio_client
11
  from anthropic import Anthropic
 
 
12
  from dotenv import load_dotenv
13
 
14
  load_dotenv()
15
 
16
+ loop = asyncio.new_event_loop()
17
+ asyncio.set_event_loop(loop)
18
+
19
  class MCPClientWrapper:
20
  def __init__(self):
21
+ self.session = None
22
+ self.exit_stack = None
23
+ self.anthropic = Anthropic()
24
  self.tools = []
25
  self.connected = False
 
 
 
 
 
 
 
 
 
 
 
 
 
26
 
27
+ def connect(self) -> str:
28
+ return loop.run_until_complete(self._connect())
29
+
30
+ async def _connect(self) -> str:
31
+ if self.exit_stack:
32
+ await self.exit_stack.aclose()
 
33
 
34
+ self.exit_stack = AsyncExitStack()
 
 
 
 
35
 
36
+ server_path = "gradio_mcp_server.py"
37
+
38
+ server_params = StdioServerParameters(
39
+ command="python",
40
+ args=[server_path],
41
+ env={"PYTHONIOENCODING": "utf-8", "PYTHONUNBUFFERED": "1"}
42
+ )
43
 
44
  try:
45
+ stdio_transport = await self.exit_stack.enter_async_context(stdio_client(server_params))
46
+ self.stdio, self.write = stdio_transport
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
47
 
48
+ self.session = await self.exit_stack.enter_async_context(ClientSession(self.stdio, self.write))
49
  await self.session.initialize()
50
 
51
  response = await self.session.list_tools()
 
58
  self.connected = True
59
  tool_names = [tool["name"] for tool in self.tools]
60
  return f"✅ Connected to MCP Weather Server. Available tools: {', '.join(tool_names)}"
 
61
  except Exception as e:
62
  self.connected = False
 
63
  return f"❌ Failed to connect to MCP server: {str(e)}"
64
 
65
+ def process_message(self, message: str, history: List[Union[Dict[str, Any], ChatMessage]]) -> tuple:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
66
  if not self.session or not self.connected:
67
  return history + [
68
  {"role": "user", "content": message},
69
  {"role": "assistant", "content": "❌ MCP weather server is not connected. Please check the connection status above."}
70
+ ], gr.Textbox(value="")
 
 
 
 
 
71
 
72
+ new_messages = loop.run_until_complete(self._process_query(message, history))
73
+ return history + [{"role": "user", "content": message}] + new_messages, gr.Textbox(value="")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
74
 
75
  async def _process_query(self, message: str, history: List[Union[Dict[str, Any], ChatMessage]]):
 
76
  claude_messages = []
77
  for msg in history:
78
  if isinstance(msg, ChatMessage):
 
85
 
86
  claude_messages.append({"role": "user", "content": message})
87
 
88
+ response = self.anthropic.messages.create(
89
+ model="claude-3-5-sonnet-20241022",
90
+ max_tokens=1500,
91
+ messages=claude_messages,
92
+ tools=self.tools
93
+ )
94
 
 
 
 
 
 
 
 
 
 
 
 
 
95
  result_messages = []
96
 
97
  for content in response.content:
 
116
  }
117
  })
118
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
119
  result_messages.append({
120
  "role": "assistant",
121
+ "content": "```json\n" + json.dumps(tool_args, indent=2, ensure_ascii=True) + "\n```",
122
+ "metadata": {
123
+ "parent_id": f"tool_call_{tool_name}",
124
+ "id": f"params_{tool_name}",
125
+ "title": "Tool Parameters"
126
+ }
127
  })
128
 
129
  result = await self.session.call_tool(tool_name, tool_args)
 
 
 
130
 
131
+ if result_messages and "metadata" in result_messages[-2]:
132
+ result_messages[-2]["metadata"]["status"] = "done"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
133
 
 
 
 
 
 
 
134
  result_content = result.content
135
  if isinstance(result_content, list):
136
  result_content = "\n".join(str(item) for item in result_content)
137
 
138
+ # Parse and format the weather data response
139
+ try:
140
+ result_json = json.loads(result_content)
 
 
 
 
 
 
 
 
 
 
 
 
 
141
 
142
+ if isinstance(result_json, dict):
143
+ if result_json.get("type") == "success":
144
+ # Format successful weather data response
145
+ station_code = result_json.get("station_code", "Unknown")
146
+ weather_data = result_json.get("data", {})
147
+
148
+ # Create a nicely formatted response
149
+ formatted_response = f"## 🌤️ Weather Data for Station: {station_code}\n\n"
150
+
151
+ if isinstance(weather_data, dict):
152
+ # Show key weather information if available
153
+ if "reports" in weather_data:
154
+ reports = weather_data["reports"]
155
+ if isinstance(reports, list) and len(reports) > 0:
156
+ formatted_response += f"**Found {len(reports)} weather reports**\n\n"
157
+ # Show first few reports as example
158
+ for i, report in enumerate(reports[:3]):
159
+ if isinstance(report, dict):
160
+ timestamp = report.get("timestamp", "Unknown time")
161
+ temperature = report.get("temperature", "N/A")
162
+ humidity = report.get("humidity", "N/A")
163
+ formatted_response += f"**Report {i+1}** ({timestamp}):\n"
164
+ formatted_response += f"- Temperature: {temperature}\n"
165
+ formatted_response += f"- Humidity: {humidity}\n\n"
166
+
167
+ if len(reports) > 3:
168
+ formatted_response += f"... and {len(reports) - 3} more reports\n\n"
169
 
170
+ formatted_response += "**Raw Data:**\n```json\n" + json.dumps(weather_data, indent=2) + "\n```"
171
+ else:
172
+ formatted_response += "**Raw Data:**\n```json\n" + json.dumps(weather_data, indent=2) + "\n```"
173
+
174
+ result_messages.append({
175
+ "role": "assistant",
176
+ "content": formatted_response,
177
+ "metadata": {
178
+ "title": f"Weather Data Retrieved",
179
+ "status": "done",
180
+ "id": f"success_result_{tool_name}"
181
+ }
182
+ })
183
+
184
+ elif result_json.get("type") == "error":
185
+ # Format error response
186
+ error_msg = result_json.get("message", "Unknown error occurred")
187
+ station_code = result_json.get("station_code", "Unknown")
188
+
189
+ error_response = f"## ❌ Error Fetching Weather Data\n\n"
190
+ error_response += f"**Station:** {station_code}\n"
191
+ error_response += f"**Error:** {error_msg}\n\n"
192
+ error_response += "**Suggestions:**\n"
193
+ error_response += "- Check if the station code is correct\n"
194
+ error_response += "- Ensure the weather API service is running on localhost:8888\n"
195
+ error_response += "- Try a different station code\n"
196
+
197
+ result_messages.append({
198
+ "role": "assistant",
199
+ "content": error_response,
200
+ "metadata": {
201
+ "title": "Weather API Error",
202
+ "status": "error",
203
+ "id": f"error_result_{tool_name}"
204
+ }
205
+ })
206
+ else:
207
+ # Unknown response format
208
+ result_messages.append({
209
+ "role": "assistant",
210
+ "content": "```json\n" + result_content + "\n```",
211
+ "metadata": {
212
+ "title": "Raw Tool Response",
213
+ "status": "done",
214
+ "id": f"raw_result_{tool_name}"
215
+ }
216
+ })
217
+ else:
218
+ result_messages.append({
219
+ "role": "assistant",
220
+ "content": "```\n" + result_content + "\n```",
221
+ "metadata": {
222
+ "title": "Raw Tool Response",
223
+ "status": "done",
224
+ "id": f"raw_result_{tool_name}"
225
+ }
226
+ })
227
 
228
+ except json.JSONDecodeError:
229
+ result_messages.append({
 
230
  "role": "assistant",
231
+ "content": "```\n" + result_content + "\n```",
232
  "metadata": {
233
+ "title": "Raw Tool Response",
234
  "status": "done",
235
+ "id": f"raw_result_{tool_name}"
236
  }
237
+ })
238
 
239
+ # Let Claude analyze and respond to the weather data
240
+ claude_messages.append({"role": "user", "content": f"Tool result for {tool_name}: {result_content}"})
241
+ next_response = self.anthropic.messages.create(
242
+ model="claude-3-5-sonnet-20241022",
243
+ max_tokens=1500,
244
+ messages=claude_messages,
245
+ )
246
+
247
+ if next_response.content and next_response.content[0].type == 'text':
248
+ result_messages.append({
249
  "role": "assistant",
250
+ "content": next_response.content[0].text
251
+ })
252
+
253
+ return result_messages
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
254
 
 
255
# Single module-level client instance shared by every Gradio callback
# (connection state, tool list, and LLM clients live on this object).
client = MCPClientWrapper()
256
 
257
  def gradio_interface():
258
  with gr.Blocks(title="MCP LEXICON", theme=gr.themes.Soft()) as demo:
259
+ gr.Markdown("# 🌤️ LEXICON CHATBOT - ask me anything")
260
  gr.Markdown(
261
+ "Ask me about weather data from any weather station! I can fetch hourly reports, "
262
+ "help you explore weather patterns, and answer questions about specific stations. "
263
+ "Just ask naturally - for example: *'Get weather data for station ABC123'* or *'What stations are available?'*"
264
  )
265
 
266
+ # Connection status (auto-updates on load)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
267
  status = gr.Textbox(
268
  label="🔌 Connection Status",
269
  interactive=False,
270
+ value="🔄 Connecting to weather server..."
271
  )
272
 
273
  # Main chat interface
 
276
  height=600,
277
  type="messages",
278
  show_copy_button=True,
279
+ avatar_images=("👤", "🤖"),
280
+ bubble_full_width=False
 
 
 
 
 
 
 
 
 
 
281
  )
282
 
283
  # Input row
284
  with gr.Row(equal_height=True):
285
  msg = gr.Textbox(
286
  label="💬 Ask about weather data",
287
+ placeholder="e.g., 'Get weather data for station NYC001' or 'Show me available weather stations' or 'What's the latest data from station LAX123?'",
288
  scale=4
289
  )
290
  with gr.Column(scale=1):
 
291
  clear_btn = gr.Button("🗑️ Clear Chat", size="lg")
292
  reconnect_btn = gr.Button("🔄 Reconnect", size="lg")
293
 
 
298
  "What weather stations are available?",
299
  "Get weather data for station ABC123",
300
  "Show me the latest hourly reports for station NYC001",
301
+ "Get weather data for station LAX789 from page 2",
302
+ "Fetch weather data for station CHI456 between 2024-01-01 and 2024-01-31"
303
  ],
304
  inputs=msg,
305
  label="💡 Example Queries"
306
  )
307
 
308
+ # Auto-connect when the interface loads
 
 
 
 
 
 
 
 
 
 
 
 
 
 
309
  def auto_connect():
310
  return client.connect()
311
 
 
 
 
312
  # Event handlers
313
  demo.load(auto_connect, outputs=status)
314
+ msg.submit(client.process_message, [msg, chatbot], [chatbot, msg])
315
+ clear_btn.click(lambda: [], None, chatbot)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
316
  reconnect_btn.click(auto_connect, outputs=status)
317
 
318
  return demo
319
 
320
# Script entry point: warn about a missing API key, then build and
# launch the Gradio UI.
if __name__ == "__main__":
    # Missing key is deliberately only a warning: the UI still starts so
    # the user can see it, but Claude calls will fail until the key is set.
    if not os.getenv("ANTHROPIC_API_KEY"):
        print("⚠️ Warning: ANTHROPIC_API_KEY not found in environment.")
        print("Please set it in your .env file or environment variables.")
        print("Example .env file content:")
        print("ANTHROPIC_API_KEY=your_api_key_here")

    print("🚀 Starting MCP Weather Client...")
    print("📡 Will auto-connect to gradio_mcp_server.py")
    print("🌐 Weather API endpoint: http://localhost:8888/weather/stations")

    # NOTE(review): share=True publishes a public Gradio link and
    # debug=True enables verbose errors — confirm both are intended
    # for this deployment.
    interface = gradio_interface()
    interface.launch(debug=True, share=True)
gradio_mcp_server.py CHANGED
@@ -76,4 +76,39 @@ async def get_weather_data(station_code: str, page: int = 1, start: str = None,
76
  "type": "error",
77
  "station_code": station_code,
78
  "message": f"Unexpected error fetching weather data: {str(e)}"
79
- })
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
76
  "type": "error",
77
  "station_code": station_code,
78
  "message": f"Unexpected error fetching weather data: {str(e)}"
79
+ })
80
+
81
@mcp.tool()
async def list_available_stations() -> str:
    """Get a list of available weather stations from the API.

    The underlying HTTP call uses the synchronous ``requests`` library;
    it is run in a worker thread via ``asyncio.to_thread`` so this
    ``async`` MCP tool does not block the event loop while waiting on
    the network (up to the 30 s timeout).

    Returns:
        JSON string: ``{"type": "success", "data": ..., "message": ...}``
        on success, or ``{"type": "error", "message": ...}`` on failure.
        Errors are reported in the payload rather than raised, so the
        MCP client always receives a well-formed tool result.
    """
    import asyncio  # local import: keeps this fix self-contained

    base_url = "http://localhost:8888/weather/stations"

    try:
        # requests.get is blocking — offload it so other coroutines
        # (e.g. concurrent tool calls) keep running.
        response = await asyncio.to_thread(requests.get, base_url, timeout=30)
        response.raise_for_status()

        stations_data = response.json()

        return json.dumps({
            "type": "success",
            "data": stations_data,
            "message": "Successfully retrieved list of available weather stations"
        }, indent=2)

    except requests.exceptions.ConnectionError:
        return json.dumps({
            "type": "error",
            "message": "Could not connect to weather API at localhost:8888. Please ensure the weather service is running."
        })
    except requests.exceptions.Timeout:
        # Distinguish a slow/unresponsive API from a refused connection.
        return json.dumps({
            "type": "error",
            "message": "Weather API request timed out after 30 seconds."
        })
    except Exception as e:
        return json.dumps({
            "type": "error",
            "message": f"Error fetching station list: {str(e)}"
        })
112
+
113
# Entry point: serve the registered MCP tools over stdio, so a client
# process (app.py spawns this script via StdioServerParameters) can
# talk to it through its stdin/stdout pipes.
if __name__ == "__main__":
    mcp.run(transport='stdio')
requirements.txt CHANGED
@@ -1,5 +1,3 @@
1
  gradio[mcp]
2
  anthropic
3
  mcp
4
- openai
5
- mistralai
 
1
  gradio[mcp]
2
  anthropic
3
  mcp