JC321 committed on
Commit
2cc2c71
·
verified ·
1 Parent(s): c9bd1a2

Upload 3 files

Browse files
Files changed (2) hide show
  1. README.md +2 -2
  2. app.py +16 -12
README.md CHANGED
@@ -16,7 +16,7 @@ A Gradio-based AI-powered web application for querying SEC financial data throug
16
 
17
  ## ✨ Features
18
 
19
- - 🤖 **Intelligent AI Assistant**: Chat naturally with **Qwen/Qwen2.5-VL-72B-Instruct:nebius** model
20
  - 🛠️ **Automatic Tool Calling**: AI automatically selects and calls MCP tools based on your questions
21
  - 🔍 Search companies by name or ticker symbol
22
  - 📈 View latest financial data
@@ -96,7 +96,7 @@ SEC EDGAR data via MCP Server: https://huggingface.co/spaces/JC321/EasyReportDat
96
 
97
  - **Frontend**: Gradio 6.0.1
98
  - **Backend**: Python with requests
99
- - **AI Model**: Qwen/Qwen2.5-VL-72B-Instruct:nebius (via Hugging Face Inference API)
100
  - **MCP Protocol**: FastMCP with HTTP transport (stateless)
101
  - **Data Source**: SEC EDGAR via MCP Server
102
 
 
16
 
17
  ## ✨ Features
18
 
19
+ - 🤖 **Intelligent AI Assistant**: Chat naturally with **Qwen/Qwen2.5-72B-Instruct** model (supports tool calling)
20
  - 🛠️ **Automatic Tool Calling**: AI automatically selects and calls MCP tools based on your questions
21
  - 🔍 Search companies by name or ticker symbol
22
  - 📈 View latest financial data
 
96
 
97
  - **Frontend**: Gradio 6.0.1
98
  - **Backend**: Python with requests
99
+ - **AI Model**: Qwen/Qwen2.5-72B-Instruct (via Hugging Face Inference API)
100
  - **MCP Protocol**: FastMCP with HTTP transport (stateless)
101
  - **Data Source**: SEC EDGAR via MCP Server
102
 
app.py CHANGED
@@ -35,19 +35,22 @@ def create_session_with_retry():
35
  session = create_session_with_retry()
36
 
37
  # 初始化 Hugging Face Inference Client
38
- # 使用 Qwen/Qwen2.5-VL-72B-Instruct:nebius 模型(更强大的视觉语言模型)
39
  try:
40
  # Hugging Face Space 会自动提供 HF_TOKEN 环境变量
41
- # 无需手动配置 Secret!
42
- hf_token = os.environ.get("HF_TOKEN")
 
43
  if hf_token:
44
  client = InferenceClient(api_key=hf_token)
45
- print(f"✅ Hugging Face client initialized with Qwen/Qwen2.5-VL-72B-Instruct:nebius model")
46
- print(f" Using auto-generated HF Space token")
47
  else:
48
- # 如果没有 token,使用无认证模式(免费层,有限制)
49
  client = InferenceClient()
50
  print("⚠️ Using Hugging Face Inference API without authentication (rate limited)")
 
 
51
  except Exception as e:
52
  print(f"❌ Warning: Failed to initialize Hugging Face client: {e}")
53
  client = None
@@ -679,11 +682,11 @@ You can handle any financial question - from simple data queries to complex mult
679
 
680
  response = client.chat_completion(
681
  messages=messages,
682
- model="Qwen/Qwen2.5-VL-72B-Instruct:nebius", # 使用更强大的视觉语言模型
683
  tools=MCP_TOOLS,
684
  max_tokens=3000,
685
  temperature=0.7,
686
- tool_choice="auto" # 让模型自动决定是否使用工具
687
  )
688
 
689
  print(f"✅ LLM response received (iteration {iteration})")
@@ -745,7 +748,7 @@ You can handle any financial question - from simple data queries to complex mult
745
  try:
746
  response = client.chat_completion(
747
  messages=messages,
748
- model="Qwen/Qwen2.5-VL-72B-Instruct:nebius",
749
  max_tokens=3000,
750
  temperature=0.7
751
  )
@@ -760,7 +763,7 @@ You can handle any financial question - from simple data queries to complex mult
760
  final_response = ""
761
 
762
  # 显示调试信息:使用的模型
763
- final_response += f"<div style='padding: 8px; background: #e3f2fd; border-left: 3px solid #2196f3; margin-bottom: 10px; font-size: 0.9em;'>🤖 <strong>Model:</strong> Qwen/Qwen2.5-VL-72B-Instruct:nebius | <strong>Iterations:</strong> {iteration}</div>\n\n"
764
 
765
  # 如果有工具调用,显示调用日志
766
  if tool_calls_log:
@@ -844,7 +847,7 @@ with gr.Blocks(title="SEC Financial Data Query Assistant") as demo:
844
  # 显示 AI 功能说明
845
  gr.Markdown("""
846
  <div style='padding: 15px; background: #d4edda; border-left: 4px solid #28a745; margin: 10px 0; border-radius: 4px;'>
847
- <strong>✅ AI Assistant Enabled:</strong> Powered by Qwen/Qwen2.5-VL-72B-Instruct:nebius model with automatic MCP tool calling.
848
  <br>
849
  <strong>💬 Ask me anything:</strong> I can understand natural language and automatically fetch financial data when needed!
850
  </div>
@@ -924,5 +927,6 @@ if __name__ == "__main__":
924
  demo.launch(
925
  server_name="0.0.0.0",
926
  server_port=7860,
927
- show_error=True
 
928
  )
 
35
  session = create_session_with_retry()
36
 
37
  # 初始化 Hugging Face Inference Client
38
+ # 使用 Qwen/Qwen2.5-72B-Instruct 模型(支持 tool calling)
39
  try:
40
  # Hugging Face Space 会自动提供 HF_TOKEN 环境变量
41
+ # 也支持手动在 Settings > Secrets 中配置 HF_TOKEN
42
+ hf_token = os.environ.get("HF_TOKEN") or os.environ.get("HUGGING_FACE_HUB_TOKEN")
43
+
44
  if hf_token:
45
  client = InferenceClient(api_key=hf_token)
46
+ print(f"✅ Hugging Face client initialized with Qwen/Qwen2.5-72B-Instruct model")
47
+ print(f" Using authenticated access (HF_TOKEN detected)")
48
  else:
49
+ # 如果没有 token,使用无认证模式(免费层,有速率限制)
50
  client = InferenceClient()
51
  print("⚠️ Using Hugging Face Inference API without authentication (rate limited)")
52
+ print("💡 To remove rate limits, add HF_TOKEN in Space Settings > Repository secrets")
53
+ print(" Get your token from: https://huggingface.co/settings/tokens")
54
  except Exception as e:
55
  print(f"❌ Warning: Failed to initialize Hugging Face client: {e}")
56
  client = None
 
682
 
683
  response = client.chat_completion(
684
  messages=messages,
685
+ model="Qwen/Qwen2.5-72B-Instruct", # 支持 tool calling
686
  tools=MCP_TOOLS,
687
  max_tokens=3000,
688
  temperature=0.7,
689
+ tool_choice="auto"
690
  )
691
 
692
  print(f"✅ LLM response received (iteration {iteration})")
 
748
  try:
749
  response = client.chat_completion(
750
  messages=messages,
751
+ model="Qwen/Qwen2.5-72B-Instruct",
752
  max_tokens=3000,
753
  temperature=0.7
754
  )
 
763
  final_response = ""
764
 
765
  # 显示调试信息:使用的模型
766
+ final_response += f"<div style='padding: 8px; background: #e3f2fd; border-left: 3px solid #2196f3; margin-bottom: 10px; font-size: 0.9em;'>🤖 <strong>Model:</strong> Qwen/Qwen2.5-72B-Instruct | <strong>Iterations:</strong> {iteration}</div>\n\n"
767
 
768
  # 如果有工具调用,显示调用日志
769
  if tool_calls_log:
 
847
  # 显示 AI 功能说明
848
  gr.Markdown("""
849
  <div style='padding: 15px; background: #d4edda; border-left: 4px solid #28a745; margin: 10px 0; border-radius: 4px;'>
850
+ <strong>✅ AI Assistant Enabled:</strong> Powered by Qwen/Qwen2.5-72B-Instruct model with automatic MCP tool calling.
851
  <br>
852
  <strong>💬 Ask me anything:</strong> I can understand natural language and automatically fetch financial data when needed!
853
  </div>
 
927
  demo.launch(
928
  server_name="0.0.0.0",
929
  server_port=7860,
930
+ show_error=True,
931
+ ssr_mode=False # 禁用 SSR 模式,避免 asyncio 文件描述符错误
932
  )