JC321 committed on
Commit
88d78b9
·
verified ·
1 Parent(s): 02f9722

Upload app.py

Browse files
Files changed (1) hide show
  1. app.py +71 -9
app.py CHANGED
@@ -37,11 +37,23 @@ session = create_session_with_retry()
37
  # 初始化 Hugging Face Inference Client
38
  # 使用环境变量或者免费的公开模型
39
  HF_TOKEN = os.getenv("HF_TOKEN", None) # 可选:如果需要访问私有模型
40
- try:
41
- client = InferenceClient(token=HF_TOKEN)
42
- except Exception as e:
43
- print(f"Warning: Failed to initialize Hugging Face client: {e}")
44
- client = None
 
 
 
 
 
 
 
 
 
 
 
 
45
 
46
  # 定义可用的 MCP 工具
47
  MCP_TOOLS = [
@@ -666,6 +678,7 @@ Always be helpful, accurate, and cite the data sources when providing financial
666
  try:
667
  # 检查 client 是否可用
668
  if client is None:
 
669
  return fallback_chatbot_response(message)
670
 
671
  response = client.chat_completion(
@@ -676,17 +689,22 @@ Always be helpful, accurate, and cite the data sources when providing financial
676
  temperature=0.7
677
  )
678
 
 
 
679
  choice = response.choices[0]
680
 
681
  # 检查是否有工具调用
682
  if choice.message.tool_calls:
683
  # 有工具调用
 
684
  messages.append(choice.message)
685
 
686
  for tool_call in choice.message.tool_calls:
687
  tool_name = tool_call.function.name
688
  tool_args = json.loads(tool_call.function.arguments)
689
 
 
 
690
  # 记录工具调用
691
  tool_calls_log.append({
692
  "name": tool_name,
@@ -696,6 +714,8 @@ Always be helpful, accurate, and cite the data sources when providing financial
696
  # 调用 MCP 工具
697
  tool_result = call_mcp_tool(tool_name, tool_args)
698
 
 
 
699
  # 将工具结果添加到消息列表
700
  messages.append({
701
  "role": "tool",
@@ -708,11 +728,23 @@ Always be helpful, accurate, and cite the data sources when providing financial
708
  continue
709
  else:
710
  # 没有工具调用,直接返回回答
 
711
  response_text = choice.message.content
712
  break
713
 
 
 
 
 
 
 
 
 
 
714
  except Exception as e:
715
- # 如果 LLM API 失败,退回到简单逻辑
 
 
716
  return fallback_chatbot_response(message)
717
 
718
  # 构建最终响应
@@ -794,6 +826,30 @@ def query_with_status(company, query_type):
794
  with gr.Blocks(title="SEC Financial Data Query Assistant") as demo:
795
  gr.Markdown("# 🤖 SEC Financial Data Query Assistant")
796
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
797
  with gr.Tab("AI Assistant"):
798
  # 使用 Gradio ChatInterface(兼容 4.44.1)
799
  chat = gr.ChatInterface(
@@ -801,10 +857,16 @@ with gr.Blocks(title="SEC Financial Data Query Assistant") as demo:
801
  examples=[
802
  "Show me Apple's latest financial data",
803
  "What's NVIDIA's 3-year financial trend?",
804
- "Get Microsoft's 5-year financial trends",
805
- "How is Tesla performing financially?"
 
 
 
 
806
  ],
807
- cache_examples=False
 
 
808
  )
809
 
810
  with gr.Tab("Direct Query"):
 
37
  # 初始化 Hugging Face Inference Client
38
  # 使用环境变量或者免费的公开模型
39
  HF_TOKEN = os.getenv("HF_TOKEN", None) # 可选:如果需要访问私有模型
40
+
41
+ # 尝试初始化 client
42
+ if HF_TOKEN:
43
+ try:
44
+ client = InferenceClient(token=HF_TOKEN)
45
+ print("✅ Hugging Face client initialized successfully with token")
46
+ except Exception as e:
47
+ print(f"⚠️ Warning: Failed to initialize Hugging Face client with token: {e}")
48
+ client = None
49
+ else:
50
+ # 没有 token,使用公开访问(有速率限制)
51
+ try:
52
+ client = InferenceClient()
53
+ print("⚠️ Using Hugging Face client without token (rate limited)")
54
+ except Exception as e:
55
+ print(f"❌ Warning: Failed to initialize Hugging Face client: {e}")
56
+ client = None
57
 
58
  # 定义可用的 MCP 工具
59
  MCP_TOOLS = [
 
678
  try:
679
  # 检查 client 是否可用
680
  if client is None:
681
+ print("⚠️ LLM client not available, using fallback")
682
  return fallback_chatbot_response(message)
683
 
684
  response = client.chat_completion(
 
689
  temperature=0.7
690
  )
691
 
692
+ print(f"✅ LLM response received (iteration {iteration})")
693
+
694
  choice = response.choices[0]
695
 
696
  # 检查是否有工具调用
697
  if choice.message.tool_calls:
698
  # 有工具调用
699
+ print(f"🔧 Tool calls detected: {len(choice.message.tool_calls)}")
700
  messages.append(choice.message)
701
 
702
  for tool_call in choice.message.tool_calls:
703
  tool_name = tool_call.function.name
704
  tool_args = json.loads(tool_call.function.arguments)
705
 
706
+ print(f" → Calling tool: {tool_name} with args: {tool_args}")
707
+
708
  # 记录工具调用
709
  tool_calls_log.append({
710
  "name": tool_name,
 
714
  # 调用 MCP 工具
715
  tool_result = call_mcp_tool(tool_name, tool_args)
716
 
717
+ print(f" ← Tool result received")
718
+
719
  # 将工具结果添加到消息列表
720
  messages.append({
721
  "role": "tool",
 
728
  continue
729
  else:
730
  # 没有工具调用,直接返回回答
731
+ print(f"💬 Final response generated")
732
  response_text = choice.message.content
733
  break
734
 
735
+ except ValueError as ve:
736
+ # API Key 相关错误
737
+ error_msg = str(ve)
738
+ if "api_key" in error_msg.lower() or "token" in error_msg.lower():
739
+ print(f"❌ LLM API authentication error: {ve}")
740
+ print("ℹ️ Falling back to simple response logic")
741
+ return fallback_chatbot_response(message)
742
+ else:
743
+ raise
744
  except Exception as e:
745
+ # 其他 LLM API 错误
746
+ print(f"❌ LLM API error: {e}")
747
+ print("ℹ️ Falling back to simple response logic")
748
  return fallback_chatbot_response(message)
749
 
750
  # 构建最终响应
 
826
  with gr.Blocks(title="SEC Financial Data Query Assistant") as demo:
827
  gr.Markdown("# 🤖 SEC Financial Data Query Assistant")
828
 
829
+ # 显示 HF_TOKEN 状态提示
830
+ if not HF_TOKEN:
831
+ gr.Markdown("""
832
+ <div style='padding: 15px; background: #fff3cd; border-left: 4px solid #ffc107; margin: 10px 0; border-radius: 4px;'>
833
+ <strong>⚠️ Note:</strong> AI Assistant is running in <strong>rate-limited mode</strong> without HF_TOKEN.
834
+ For better performance, set the <code>HF_TOKEN</code> environment variable in your Space settings.
835
+ <br><br>
836
+ <strong>How to get your token:</strong>
837
+ <ol>
838
+ <li>Go to <a href="https://huggingface.co/settings/tokens" target="_blank">Hugging Face Tokens</a></li>
839
+ <li>Create a new token (Read access is sufficient)</li>
840
+ <li>Add it as a Secret in your Space settings: Settings → Repository secrets → New secret</li>
841
+ <li>Name: <code>HF_TOKEN</code>, Value: <code>your_token_here</code></li>
842
+ <li>Restart the Space</li>
843
+ </ol>
844
+ </div>
845
+ """)
846
+ else:
847
+ gr.Markdown("""
848
+ <div style='padding: 15px; background: #d4edda; border-left: 4px solid #28a745; margin: 10px 0; border-radius: 4px;'>
849
+ <strong>✅ AI Mode Active:</strong> Full LLM capabilities enabled with Qwen/Qwen2.5-72B-Instruct model.
850
+ </div>
851
+ """)
852
+
853
  with gr.Tab("AI Assistant"):
854
  # 使用 Gradio ChatInterface(兼容 4.44.1)
855
  chat = gr.ChatInterface(
 
857
  examples=[
858
  "Show me Apple's latest financial data",
859
  "What's NVIDIA's 3-year financial trend?",
860
+ "Compare Tesla's revenue with its expenses",
861
+ "How is Microsoft performing financially?",
862
+ "What can you tell me about Amazon's recent earnings?",
863
+ "Give me Alibaba's 5-year financial overview",
864
+ "Hello! What can you help me with?",
865
+ "What's the weather like today?", # 测试非财务问题
866
  ],
867
+ cache_examples=False,
868
+ title="🤖 Intelligent Financial Assistant",
869
+ description="Ask me anything! I can help with financial data queries or general conversations. When you ask about companies, I'll automatically fetch real-time SEC EDGAR data."
870
  )
871
 
872
  with gr.Tab("Direct Query"):