GilbertoEwaldFilho committed on
Commit
8d7ad03
·
verified ·
1 Parent(s): 53a8408

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +42 -34
app.py CHANGED
@@ -1,11 +1,12 @@
1
  import os
2
- import re
3
  import gradio as gr
4
  import requests
5
  import pandas as pd
 
6
 
7
  from huggingface_hub import InferenceClient
8
- from smolagents import CodeAgent, DuckDuckGoSearchTool, HfApiModel
 
9
 
10
  # --- Constants ---
11
  DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
@@ -71,56 +72,63 @@ SYSTEM_PROMPT = (
71
 
72
class BasicAgent:
    """GAIA agent built on smolagents (HfApiModel + DuckDuckGoSearchTool),
    tuned for the GAIA EXACT MATCH scenario.
    """

    def __init__(self):
        print("Initializing GAIA agent with HfApiModel + DuckDuckGoSearchTool...")

        # smolagents' own search tool (backed by ddgs under the hood).
        self.search_tool = DuckDuckGoSearchTool()

        # Model called through the Hugging Face API; swap model_id if the
        # course recommends another one.
        self.model = HfApiModel(
            model_id="Qwen/Qwen2.5-Coder-32B-Instruct",
            max_new_tokens=256,
            temperature=0.2,
        )

        # CodeAgent: an agent able to call tools and write code.
        self.agent = CodeAgent(
            model=self.model,
            tools=[self.search_tool],
            add_base_tools=False,  # keep only the search tool, for simplicity
        )

    def __call__(self, question: str) -> str:
        """Run the agent on one question and return the cleaned answer.

        Returns "" when the underlying agent raises (best-effort behavior).
        """
        print(f"Processing question: {question[:80]}...")

        # EXACT MATCH instructions are embedded directly in the prompt.
        prompt = (
            "You are solving GAIA Level 1 questions.\n"
            "You have access to a web search tool and MUST use it whenever needed "
            "to obtain accurate, up-to-date information.\n"
            "RULES:\n"
            " - Return ONLY the final answer as a short string.\n"
            " - NO explanations.\n"
            " - NO 'Final answer', 'Answer:', etc.\n"
            " - If the answer is a number, output just the number.\n"
            "Question:\n"
            f"{question}\n"
        )

        try:
            raw = self.agent.run(prompt)
            final = clean_answer(raw)
            print(f"Raw answer: {raw!r}")
            print(f"Final cleaned answer: {final!r}")
            return final
        except Exception as e:
            # Best-effort: swallow agent failures and return an empty answer.
            print(f"Error while running agent: {e}")
            return ""
125
 
126
  # =========================================================
 
1
  import os
 
2
  import gradio as gr
3
  import requests
4
  import pandas as pd
5
+ import re
6
 
7
  from huggingface_hub import InferenceClient
8
+
9
+ from smolagents import CodeAgent, DuckDuckGoSearchTool, InferenceClientModel
10
 
11
  # --- Constants ---
12
  DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
 
72
 
73
class BasicAgent:
    """Simple agent that queries the Hugging Face InferenceClient directly
    to answer GAIA questions.

    Uses no external tools (no search), but is MUCH more stable in the Space.
    """

    def __init__(self):
        print("Initializing Simple GAIA Agent with InferenceClient...")

        # The HF token must be configured as a Space secret.
        hf_token = os.getenv("HF_TOKEN")
        if not hf_token:
            raise ValueError(
                "HF_TOKEN not found! Configure it como Secret em Settings → Variables."
            )

        # Model choice (any model compatible with the Inference API works).
        self.client = InferenceClient(
            model="Qwen/Qwen2.5-72B-Instruct",
            token=hf_token,
        )

        # Base prompt encoding the EXACT MATCH answer rules.
        self.system_instructions = (
            "You are solving GAIA benchmark questions.\n"
            "Rules:\n"
            "- Answer ONLY with the final answer.\n"
            "- No explanations, no reasoning, no extra words.\n"
            "- Do NOT write 'Final answer', 'Answer:', etc.\n"
            "- If the answer is a number, output just the number.\n"
            "- Your output will be compared with EXACT MATCH.\n"
        )

    def __call__(self, question: str) -> str:
        """Answer one question; returns "" if the inference call fails."""
        print(f"\n=== NEW QUESTION ===\n{question}\n")

        prompt = (
            f"{self.system_instructions}"
            f"\nQuestion:\n{question}"
            "\n\nAnswer (remember: ONLY the final answer):"
        )

        try:
            # text_generation is used to avoid issues with chat_completion.
            raw = self.client.text_generation(
                prompt,
                max_new_tokens=64,
                temperature=0.1,
                top_p=0.9,
                stop_sequences=["\n"],  # keep the reply from becoming a huge paragraph
            )
            print("RAW MODEL OUTPUT:", repr(raw))
            final = clean_answer(raw)
            print("CLEANED ANSWER:", repr(final))
            return final
        except Exception as e:
            # Best-effort: log and fall back to an empty answer.
            print("ERROR calling InferenceClient:", e)
            return ""
133
 
134
  # =========================================================