JC321 committed on
Commit
9159817
·
verified ·
1 Parent(s): 9fc18d6

Upload 4 files

Browse files
Files changed (1) hide show
  1. app.py +222 -53
app.py CHANGED
@@ -8,11 +8,13 @@ from huggingface_hub import InferenceClient
8
  MCP_SERVICES = {
9
  "financial": {
10
  "name": "SEC Financial Reports",
11
- "url": "https://jc321-easyreportdatemcp.hf.space/mcp"
 
12
  },
13
  "market": {
14
  "name": "Market & Stock Data (Finnhub)",
15
- "url": "https://jc321-marketandstockmcp.hf.space/mcp"
 
16
  }
17
  }
18
 
@@ -110,14 +112,14 @@ MCP_TOOLS = [
110
  }
111
  ]
112
 
113
- # 工具路由工具名 -> 服务 URL
114
  TOOL_ROUTING = {
115
- "advanced_search_company": MCP_SERVICES["financial"]["url"],
116
- "get_latest_financial_data": MCP_SERVICES["financial"]["url"],
117
- "extract_financial_metrics": MCP_SERVICES["financial"]["url"],
118
- "get_quote": MCP_SERVICES["market"]["url"],
119
- "get_market_news": MCP_SERVICES["market"]["url"],
120
- "get_company_news": MCP_SERVICES["market"]["url"],
121
  }
122
 
123
  # ========== 初始化 LLM 客户端 ==========
@@ -145,45 +147,179 @@ You have access to TWO data sources:
145
 
146
  Automatically use the right tools and provide clear, data-driven insights."""
147
 
148
- # ========== 核心函数调用 MCP 工具 ==========
149
  def call_mcp_tool(tool_name, arguments):
150
- """调用 MCP 工具(统一使用标准 MCP JSON-RPC 协议"""
151
- mcp_url = TOOL_ROUTING.get(tool_name)
152
- if not mcp_url:
153
  return {"error": f"Unknown tool: {tool_name}"}
154
 
 
 
 
 
 
 
155
  try:
156
- response = requests.post(
157
- mcp_url,
158
- json={
159
- "jsonrpc": "2.0",
160
- "method": "tools/call",
161
- "params": {"name": tool_name, "arguments": arguments},
162
- "id": 1
163
- },
164
- headers={"Content-Type": "application/json"},
165
- timeout=60
166
- )
167
-
168
- if response.status_code == 200:
169
- data = response.json()
170
- # 解包 JSON-RPC 响应,只返回 result 字段给模型
171
- if isinstance(data, dict) and "result" in data:
172
- return data["result"]
173
- return data
174
  else:
175
- return {"error": f"HTTP {response.status_code}", "detail": response.text[:200]}
 
176
  except Exception as e:
177
- return {"error": str(e)}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
178
 
179
- # ========== 核心函数AI 助手 ==========
180
  def chatbot_response(message, history):
181
- """AI 助手主函数"""
182
  try:
183
  # 构建消息历史
184
  messages = [{"role": "system", "content": SYSTEM_PROMPT}]
185
 
186
- # 添加对话历史最近5轮
187
  if history:
188
  for item in history[-5:]:
189
  if isinstance(item, dict):
@@ -195,19 +331,22 @@ def chatbot_response(message, history):
195
 
196
  messages.append({"role": "user", "content": message})
197
 
198
- # LLM 调用循环(支持多轮工具调用
199
  tool_calls_log = []
 
200
  max_iterations = 5
201
 
 
202
  for iteration in range(max_iterations):
203
- # 调用 LLM
204
  response = client.chat_completion(
205
  messages=messages,
206
  model="Qwen/Qwen2.5-72B-Instruct:novita",
207
  tools=MCP_TOOLS,
208
  max_tokens=3000,
209
  temperature=0.7,
210
- tool_choice="auto"
 
211
  )
212
 
213
  choice = response.choices[0]
@@ -236,29 +375,59 @@ def chatbot_response(message, history):
236
 
237
  continue # 继续下一轮
238
  else:
239
- # 无工具调用,返回最终答案
240
- response_text = choice.message.content
241
  break
242
 
243
- # 构建最终响应
244
- final_response = ""
 
 
245
 
246
- # 显示模型信息
247
- final_response += f"<div style='padding: 8px; background: #e3f2fd; border-left: 3px solid #2196f3; margin-bottom: 10px; font-size: 0.9em;'>🤖 <strong>Model:</strong> Qwen/Qwen2.5-72B-Instruct:novita</div>\n\n"
248
-
249
- # 显示工具调用日志
250
  if tool_calls_log:
251
- final_response += "**🛠️ MCP Tools Used:**\n\n"
 
 
252
  for i, tool_call in enumerate(tool_calls_log, 1):
253
- final_response += f"{i}. `{tool_call['name']}` - {json.dumps(tool_call['arguments'])}\n"
254
- final_response += "\n---\n\n"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
255
 
256
- final_response += response_text
 
 
 
 
 
 
 
 
 
 
257
 
258
- return final_response
 
 
 
 
259
 
260
  except Exception as e:
261
- return f"❌ Error: {str(e)}"
262
 
263
  # ========== Gradio 界面 ==========
264
  with gr.Blocks(title="Financial & Market AI Assistant") as demo:
 
8
# ========== MCP service registry ==========
# One entry per backend service: a human-readable name, the endpoint URL,
# and the wire protocol used to reach it ("fastmcp" = standard MCP HTTP
# JSON-RPC, "gradio" = Gradio REST/SSE API).
_FINANCIAL_SERVICE = {
    "name": "SEC Financial Reports",
    "url": "https://jc321-easyreportdatemcp.hf.space/mcp",
    "type": "fastmcp",  # standard FastMCP (HTTP JSON-RPC)
}
_MARKET_SERVICE = {
    "name": "Market & Stock Data (Finnhub)",
    "url": "https://jc321-marketandstockmcp.hf.space",
    "type": "gradio",  # Gradio API
}
MCP_SERVICES = {
    "financial": _FINANCIAL_SERVICE,
    "market": _MARKET_SERVICE,
}
20
 
 
112
  }
113
  ]
114
 
115
# Tool routing: tool name -> service config dict (see MCP_SERVICES).
# Built from per-service tool lists so adding a tool is a one-line change.
_FINANCIAL_TOOLS = (
    "advanced_search_company",
    "get_latest_financial_data",
    "extract_financial_metrics",
)
_MARKET_TOOLS = (
    "get_quote",
    "get_market_news",
    "get_company_news",
)

TOOL_ROUTING = {tool: MCP_SERVICES["financial"] for tool in _FINANCIAL_TOOLS}
TOOL_ROUTING.update({tool: MCP_SERVICES["market"] for tool in _MARKET_TOOLS})
124
 
125
  # ========== 初始化 LLM 客户端 ==========
 
147
 
148
  Automatically use the right tools and provide clear, data-driven insights."""
149
 
150
# ========== Core function: dispatch an MCP tool call ==========
def call_mcp_tool(tool_name, arguments):
    """Invoke an MCP tool, routing to the right protocol handler.

    Looks the tool up in TOOL_ROUTING, then delegates to either the
    FastMCP (JSON-RPC) or Gradio (REST/SSE) transport. Any exception is
    caught at this boundary and returned as an error dict so the LLM loop
    never crashes on a failed tool call.
    """
    service_config = TOOL_ROUTING.get(tool_name)
    if not service_config:
        return {"error": f"Unknown tool: {tool_name}"}

    service_type = service_config["type"]
    service_url = service_config["url"]

    print(f"\n[DEBUG] Calling tool: {tool_name}")
    print(f"[DEBUG] Service type: {service_type}, URL: {service_url}")

    # Dispatch table: service type -> transport function.
    handlers = {
        "fastmcp": _call_fastmcp,      # standard MCP JSON-RPC
        "gradio": _call_gradio_api,    # Gradio API
    }

    try:
        handler = handlers.get(service_type)
        if handler is None:
            return {"error": f"Unknown service type: {service_type}"}
        return handler(service_url, tool_name, arguments)
    except Exception as e:
        error_msg = f"Exception: {str(e)}"
        print(f"[DEBUG] {error_msg}")
        return {"error": error_msg}
177
+
178
+
179
def _call_fastmcp(service_url, tool_name, arguments):
    """Call a FastMCP service using the standard MCP JSON-RPC protocol.

    Sends a ``tools/call`` request and unwraps the JSON-RPC envelope so the
    model only sees the tool's payload, not protocol framing.

    Args:
        service_url: Full URL of the FastMCP endpoint.
        tool_name: Name of the MCP tool to invoke.
        arguments: Dict of tool arguments.

    Returns:
        The parsed tool result on success, otherwise a dict with an
        ``"error"`` key.
    """
    response = requests.post(
        service_url,
        json={
            "jsonrpc": "2.0",
            "method": "tools/call",
            "params": {"name": tool_name, "arguments": arguments},
            "id": 1,
        },
        headers={"Content-Type": "application/json"},
        timeout=60,
    )

    if response.status_code != 200:
        error_msg = f"HTTP {response.status_code}: {response.text[:200]}"
        print(f"[DEBUG] {error_msg}")
        return {"error": error_msg}

    data = response.json()
    print(f"[DEBUG] FastMCP raw response: {json.dumps(data, ensure_ascii=False)[:500]}")

    # JSON-RPC errors come back with HTTP 200 and an "error" member instead
    # of "result" — surface them explicitly rather than passing the raw
    # envelope on to the model.
    if isinstance(data, dict) and "error" in data and "result" not in data:
        return {"error": data["error"]}

    # Unwrap the JSON-RPC envelope: only the "result" field matters.
    if isinstance(data, dict) and "result" in data:
        result = data["result"]

        # MCP protocol shape: {"content": [{"type": "text", "text": "..."}]}
        if isinstance(result, dict) and "content" in result:
            content = result["content"]

            # Extract the text of the first content item.
            if isinstance(content, list) and len(content) > 0:
                first_item = content[0]
                if isinstance(first_item, dict) and "text" in first_item:
                    text_data = first_item["text"]

                    # The text is frequently a JSON string — try to parse it
                    # so the model gets structured data instead of a blob.
                    try:
                        parsed_data = json.loads(text_data)
                        print(f"[DEBUG] Parsed data: {json.dumps(parsed_data, ensure_ascii=False)[:300]}")
                        return parsed_data
                    except (json.JSONDecodeError, TypeError):
                        # Not JSON — hand back the raw text.
                        print(f"[DEBUG] Returning text as-is")
                        return {"text": text_data}

        # Result is not in the "content" shape — return it unchanged.
        print(f"[DEBUG] Returning result directly")
        return result

    # No "result" member at all — return the whole response body.
    print(f"[DEBUG] No result field, returning full response")
    return data
232
+
233
+
234
def _call_gradio_api(service_url, tool_name, arguments):
    """Call a Gradio API endpoint (Gradio's built-in MCP service).

    Uses Gradio's two-step REST API: POST /call/{fn} to submit the call
    (returns an event_id), then GET /call/{fn}/{event_id} and read the SSE
    stream for the result payload.

    Returns a dict with either "text" (the raw result string) or "error".
    """
    # Map our public tool names onto the Gradio endpoint function names.
    gradio_tool_map = {
        "get_quote": "test_quote_tool",
        "get_market_news": "test_market_news_tool",
        "get_company_news": "test_company_news_tool"
    }

    gradio_fn_name = gradio_tool_map.get(tool_name)
    if not gradio_fn_name:
        return {"error": f"No Gradio mapping for tool: {tool_name}"}

    # Step 1: submit the call request.
    call_url = f"{service_url}/call/{gradio_fn_name}"

    # Build the positional-argument list in the order each Gradio endpoint
    # expects — presumably matching the remote function signatures
    # (TODO confirm against the Gradio space's API page).
    if tool_name == "get_quote":
        data_params = [arguments.get("symbol", "")]
    elif tool_name == "get_market_news":
        data_params = [arguments.get("category", "general")]
    elif tool_name == "get_company_news":
        data_params = [
            arguments.get("symbol", ""),
            arguments.get("from_date", ""),
            arguments.get("to_date", "")
        ]
    else:
        data_params = []

    print(f"[DEBUG] Gradio call URL: {call_url}")
    print(f"[DEBUG] Gradio data params: {data_params}")

    # Submit the request.
    response = requests.post(
        call_url,
        json={"data": data_params},
        headers={"Content-Type": "application/json"},
        timeout=10
    )

    if response.status_code != 200:
        return {"error": f"Gradio call failed: HTTP {response.status_code}"}

    call_data = response.json()
    event_id = call_data.get("event_id")

    if not event_id:
        return {"error": "No event_id returned from Gradio"}

    print(f"[DEBUG] Got event_id: {event_id}")

    # Step 2: poll for the result via the SSE stream.
    result_url = f"{call_url}/{event_id}"
    result_response = requests.get(result_url, stream=True, timeout=30)

    if result_response.status_code != 200:
        return {"error": f"Failed to get result: HTTP {result_response.status_code}"}

    # Parse the SSE stream: take the first "data:" line that decodes to a
    # non-empty JSON list and use its first element as the result.
    result_text = ""
    for line in result_response.iter_lines():
        if line:
            line_str = line.decode('utf-8')
            if line_str.startswith('data: '):
                data_part = line_str[6:]  # strip the 'data: ' prefix
                try:
                    result_data = json.loads(data_part)
                    if isinstance(result_data, list) and len(result_data) > 0:
                        result_text = result_data[0]
                        print(f"[DEBUG] Gradio result: {result_text[:200]}")
                        break
                    # non-list or empty payloads (e.g. heartbeats) are skipped
                except json.JSONDecodeError:
                    continue

    if not result_text:
        return {"error": "No result received from Gradio"}

    # Normalize to the same shape the FastMCP path returns.
    return {"text": result_text, "_source": "gradio_api"}
314
 
315
+ # ========== 核心函数:AI 助手 ==========
316
  def chatbot_response(message, history):
317
+ """AI 助手主函数(流式输出)"""
318
  try:
319
  # 构建消息历史
320
  messages = [{"role": "system", "content": SYSTEM_PROMPT}]
321
 
322
+ # 添加对话历史(最近5轮)
323
  if history:
324
  for item in history[-5:]:
325
  if isinstance(item, dict):
 
331
 
332
  messages.append({"role": "user", "content": message})
333
 
334
+ # 工具调用日志和响应前缀
335
  tool_calls_log = []
336
+ response_prefix = ""
337
  max_iterations = 5
338
 
339
+ # LLM 调用循环(支持多轮工具调用)
340
  for iteration in range(max_iterations):
341
+ # 调用 LLM(非流式,用于工具调用判断)
342
  response = client.chat_completion(
343
  messages=messages,
344
  model="Qwen/Qwen2.5-72B-Instruct:novita",
345
  tools=MCP_TOOLS,
346
  max_tokens=3000,
347
  temperature=0.7,
348
+ tool_choice="auto",
349
+ stream=False
350
  )
351
 
352
  choice = response.choices[0]
 
375
 
376
  continue # 继续下一轮
377
  else:
378
+ # 无工具调用,准备流式输出最终答案
 
379
  break
380
 
381
+ # 构建响应前缀(模型信息+工具调用)
382
+ response_prefix += "<div style='padding: 10px; background: linear-gradient(135deg, #667eea 0%, #764ba2 100%); border-radius: 8px; margin-bottom: 12px; box-shadow: 0 2px 8px rgba(0,0,0,0.1);'>\n"
383
+ response_prefix += "<div style='color: white; font-size: 0.95em; font-weight: 500;'>🤖 <strong>Model:</strong> Qwen/Qwen2.5-72B-Instruct:novita</div>\n"
384
+ response_prefix += "</div>\n\n"
385
 
386
+ # 显示工具调用日志(精致卡片样式)
 
 
 
387
  if tool_calls_log:
388
+ response_prefix += "<div style='background: #f8f9fa; border-radius: 8px; padding: 12px; margin-bottom: 16px; border: 1px solid #e9ecef;'>\n"
389
+ response_prefix += "<div style='font-weight: 600; color: #495057; margin-bottom: 10px; font-size: 1em;'>🛠️ MCP Tools Used</div>\n"
390
+
391
  for i, tool_call in enumerate(tool_calls_log, 1):
392
+ # 工具图标映射
393
+ tool_icons = {
394
+ "advanced_search_company": "🔍",
395
+ "get_latest_financial_data": "📊",
396
+ "extract_financial_metrics": "📈",
397
+ "get_quote": "💹",
398
+ "get_market_news": "📰",
399
+ "get_company_news": "📢"
400
+ }
401
+ icon = tool_icons.get(tool_call['name'], "⚙️")
402
+
403
+ response_prefix += f"<div style='background: white; padding: 8px 12px; margin: 6px 0; border-radius: 6px; border-left: 3px solid #28a745; font-size: 0.9em;'>\n"
404
+ response_prefix += f"<span style='color: #28a745; font-weight: 600;'>{icon} {tool_call['name']}</span>\n"
405
+ response_prefix += f"<div style='color: #6c757d; margin-top: 4px; font-family: monospace; font-size: 0.85em;'>{json.dumps(tool_call['arguments'], ensure_ascii=False)}</div>\n"
406
+ response_prefix += "</div>\n"
407
+
408
+ response_prefix += "</div>\n\n"
409
+ response_prefix += "<div style='border-top: 2px solid #dee2e6; margin: 16px 0;'></div>\n\n"
410
 
411
+ # 流式输出最终答案
412
+ yield response_prefix
413
+
414
+ # 流式调用 LLM
415
+ stream = client.chat_completion(
416
+ messages=messages,
417
+ model="Qwen/Qwen2.5-72B-Instruct:novita",
418
+ max_tokens=3000,
419
+ temperature=0.7,
420
+ stream=True
421
+ )
422
 
423
+ accumulated_text = ""
424
+ for chunk in stream:
425
+ if chunk.choices[0].delta.content:
426
+ accumulated_text += chunk.choices[0].delta.content
427
+ yield response_prefix + accumulated_text
428
 
429
  except Exception as e:
430
+ yield f"❌ Error: {str(e)}"
431
 
432
  # ========== Gradio 界面 ==========
433
  with gr.Blocks(title="Financial & Market AI Assistant") as demo: