Ghisalbertifederico committed on
Commit
e193ac9
·
verified ·
1 Parent(s): 2ca3461

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +26 -19
app.py CHANGED
@@ -60,12 +60,8 @@ def _download_task_file(task_id: str, api_url: str = DEFAULT_API_URL) -> tuple[b
60
  # {},
61
  # ]:
62
  try:
63
- resp = requests.get(url, timeout=30)
64
- # resp = requests.get(url, headers=headers, timeout=30)
65
- # resp.raise_for_status()
66
- # with open(local_path, "wb") as fh:
67
- # fh.write(resp.content)
68
- # print(f"Downloaded task file: {local_path} ({len(resp.content)} bytes)")
69
  except requests.exceptions.HTTPError as e:
70
  status = e.response.status_code if e.response is not None else "?"
71
  print(f"Download attempt for {task_id} returned {status}")
@@ -227,8 +223,12 @@ def call_tools(state: AgentState) -> AgentState:
227
  state["context"] = web_search.invoke({"query": question})
228
  elif label == "research":
229
  print("[TOOL] web search")
230
- search_json = web_search.invoke({"query": question})
231
- wiki_text = wikipedia_search.invoke({"query": question})
 
 
 
 
232
  state["context"] = f"{search_json}\n\n{wiki_text}"
233
  else:
234
  print("[TOOL] reasoning only (no search)")
@@ -255,17 +255,24 @@ def synthesize_response(state: AgentState) -> AgentState:
255
  reasoning = _llm_answer.invoke(reasoning_prompt).content.strip()
256
  print(f"\n[REASONING]\n{reasoning}\n")
257
 
258
- # Pass 2: extract the final answer cleanly
259
- extract_prompt = [
260
- SystemMessage(content=get_prompt("extract_system")),
261
- HumanMessage(
262
- content=get_prompt(
263
- prompt_key="extract_user",
264
- reasoning=reasoning,
265
- )
266
- ),
267
- ]
268
- answer = _llm_answer.invoke(extract_prompt).content.strip()
 
 
 
 
 
 
 
269
  state["answer"] = answer
270
  return state
271
 
 
60
  # {},
61
  # ]:
62
  try:
63
+ headers = {"Authorization": f"Bearer {HF_TOKEN}"}
64
+ resp = requests.get(url, headers=headers, timeout=30)
 
 
 
 
65
  except requests.exceptions.HTTPError as e:
66
  status = e.response.status_code if e.response is not None else "?"
67
  print(f"Download attempt for {task_id} returned {status}")
 
223
  state["context"] = web_search.invoke({"query": question})
224
  elif label == "research":
225
  print("[TOOL] web search")
226
+ # Generate a focused search query (not the full question text)
227
+ search_query_prompt = f"Write a short Google search query (max 10 words) to answer this question. Output ONLY the query, nothing else.\n\nQuestion: {question}"
228
+ focused_query = _llm_router.invoke(search_query_prompt).content.strip().strip('"')
229
+ print(f"[TOOL] search query: {focused_query}")
230
+ search_json = web_search.invoke({"query": focused_query})
231
+ wiki_text = wikipedia_search.invoke({"query": focused_query})
232
  state["context"] = f"{search_json}\n\n{wiki_text}"
233
  else:
234
  print("[TOOL] reasoning only (no search)")
 
255
  reasoning = _llm_answer.invoke(reasoning_prompt).content.strip()
256
  print(f"\n[REASONING]\n{reasoning}\n")
257
 
258
+ # Try to extract FINAL ANSWER directly from reasoning text (avoids second LLM call hallucinating)
259
+ fa_match = re.search(r"FINAL ANSWER:\s*(.+)", reasoning, re.IGNORECASE)
260
+ if fa_match:
261
+ answer = fa_match.group(1).strip().split('\n')[0].strip()
262
+ elif reasoning.strip():
263
+ # Pass 2: ask LLM to extract only if no FINAL ANSWER marker found
264
+ extract_prompt = [
265
+ SystemMessage(content=get_prompt("extract_system")),
266
+ HumanMessage(
267
+ content=get_prompt(
268
+ prompt_key="extract_user",
269
+ reasoning=reasoning,
270
+ )
271
+ ),
272
+ ]
273
+ answer = _llm_answer.invoke(extract_prompt).content.strip()
274
+ else:
275
+ answer = "ERROR: no reasoning produced"
276
  state["answer"] = answer
277
  return state
278