abdulaziz744 committed on
Commit
eb094b9
·
verified ·
1 Parent(s): 6bf7b79

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +76 -44
app.py CHANGED
@@ -13,70 +13,102 @@ DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
13
  # ----- THIS IS WHERE YOU CAN BUILD WHAT YOU WANT ------
14
  import os
15
  from huggingface_hub import InferenceClient
16
- from langchain_core.tools import Tool
17
- from ddgs import DDGS
18
  from langchain_core.prompts import PromptTemplate
 
19
  import re
20
 
21
class BasicAgent:
    """ReAct-style agent for answering GAIA questions.

    Drives a hosted Llama-3.1-70B-Instruct model through a
    Thought -> Action -> Observation loop, optionally invoking a DuckDuckGo
    web-search tool, until the model emits a "Final Answer:" line or the
    step budget runs out.
    """

    def __init__(self):
        # Hosted LLM client; requires HF_TOKEN in the environment.
        self.client = InferenceClient(
            model="meta-llama/Meta-Llama-3.1-70B-Instruct",
            token=os.getenv("HF_TOKEN")
        )
        # Define tools.
        # BUG FIX: the original instantiated DuckDuckGoSearchRun(), a name
        # that is never imported (only `Tool` and `from ddgs import DDGS`
        # are), so construction raised NameError. Search now goes through
        # the imported DDGS client via _web_search below.
        self.tools = {
            "web_search": Tool(
                name="web_search",
                func=self._web_search,
                description="Search the web for current or historical facts. Input: query string."
            )
        }
        self.tool_names = list(self.tools.keys())

    def _web_search(self, query: str) -> str:
        """Run a DuckDuckGo text search and return results as plain text."""
        try:
            with DDGS() as ddgs:
                results = list(ddgs.text(query, max_results=5))
        except Exception as e:
            # Best effort: surface the failure to the model as an observation
            # instead of crashing the agent loop.
            return f"Search error: {str(e)[:400]}"
        if not results:
            return "No results found."
        return "\n\n".join(
            f"{r.get('title', 'N/A')}\n{r.get('body', 'N/A')}\n{r.get('href', 'N/A')}"
            for r in results
        )

    def __call__(self, question: str) -> str:
        """Answer `question`; returns a short normalized answer string."""
        # ReAct prompt: guides the model to think, act, observe.
        react_prompt = PromptTemplate.from_template("""
You are an AI agent solving GAIA questions. Answer exactly, no extra text.

Rules:
- Think step-by-step: Thought, Action (if needed), Observation, then repeat until Final Answer.
- Use tools only if necessary: Available tools: {tool_names}.
- For Action: Output 'Action: tool_name [input]'.
- Stop with 'Final Answer: <exact answer>'.
- Be precise, no punctuation unless needed.

Question: {question}

Thought:
""")
        prompt = react_prompt.format(question=question, tool_names=", ".join(self.tool_names))

        max_steps = 5  # step budget (Level 1 questions)
        response = ""
        for _ in range(max_steps):
            # Generate the next reasoning step, conditioned on everything so far.
            gen = self.client.text_generation(prompt + response, max_new_tokens=200, temperature=0.1)
            response += gen

            # Parse a tool call from the NEWLY generated text only.
            # BUG FIX: the original re-scanned the whole accumulated response
            # each iteration, so a single Action re-fired on every later step.
            action_match = re.search(r"Action: (\w+) \[(.*)\]", gen)
            if action_match:
                tool_name, tool_input = action_match.groups()
                if tool_name in self.tools:
                    obs = self.tools[tool_name].func(tool_input)
                    response += f"\nObservation: {obs[:1000]}...\nThought: "  # truncate long observations
                else:
                    response += "\nObservation: Invalid tool.\nThought: "
            # Stop as soon as a final answer is produced.
            if "Final Answer:" in response:
                answer = response.split("Final Answer:")[-1].strip().split("\n")[0].strip()
                return answer.replace(".", "").replace(",", "").strip()  # normalize punctuation

        return "Unable to answer"  # fallback when the step budget is exhausted
78
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
79
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
80
 
81
 
82
  def run_and_submit_all( profile: gr.OAuthProfile | None):
 
13
  # ----- THIS IS WHERE YOU CAN BUILD WHAT YOU WANT ------
14
  import os
15
  from huggingface_hub import InferenceClient
 
 
16
  from langchain_core.prompts import PromptTemplate
17
+ from ddgs import DDGS
18
  import re
19
 
20
class BasicAgent:
    """ReAct-style agent for GAIA Level 1 questions.

    Uses a hosted Llama-3.1-70B-Instruct model (HF Inference API) plus a
    DuckDuckGo search helper, looping Thought/Action/Observation until the
    model emits "Final Answer:" or the step budget is exhausted.
    """

    def __init__(self):
        print("BasicAgent initialized with Llama-3.1-70B-Instruct + DDGS search")
        # Hosted LLM client; requires HF_TOKEN in the environment.
        self.client = InferenceClient(
            model="meta-llama/Meta-Llama-3.1-70B-Instruct",
            token=os.getenv("HF_TOKEN")
        )

    def search_web(self, query: str) -> str:
        """Simple DuckDuckGo search helper"""
        try:
            with DDGS() as ddgs:
                results = list(ddgs.text(query, max_results=5))
                if not results:
                    return "No results found."
                formatted = []
                for r in results:
                    title = r.get("title", "N/A")
                    body = r.get("body", "N/A")
                    href = r.get("href", "N/A")
                    formatted.append(f"{title}\n{body}\n{href}")
                return "\n\n".join(formatted)
        except Exception as e:
            # Best effort: report the failure back to the model as text.
            return f"Search error: {str(e)[:400]}"

    def __call__(self, question: str) -> str:
        """Answer `question`; returns a short normalized answer string."""
        prompt_template = PromptTemplate.from_template("""
You are solving a GAIA Level 1 question. Your goal is to give **only** the exact final answer — no extra words.

Rules:
- Think step by step using format: Thought: ... → Action: web_search[query] Observation: ... Thought: ...
- Use the tool **only** when you need external information or verification.
- Tool format must be exactly: Action: web_search[your precise query]
- When confident → output exactly: Final Answer: <short exact answer>
- No punctuation at the end unless part of the answer.
- Keep observations short when reasoning.

Available tool: web_search

Question: {question}

Thought:
""")

        full_prompt_start = prompt_template.format(question=question)

        max_steps = 6
        history = ""

        for step in range(max_steps):
            current_prompt = full_prompt_start + history

            try:
                generation = self.client.text_generation(
                    current_prompt,
                    max_new_tokens=320,
                    temperature=0.12,
                    # BUG FIX: "Final Answer:" was also a stop sequence, which
                    # halted generation at the marker and cut off the answer
                    # text itself. Stop only before hallucinated observations.
                    stop_sequences=["Observation:"]
                )
            except Exception as e:
                print(f"Generation failed: {e}")
                return "Generation error"

            new_text = generation.strip()
            history += new_text + "\n"

            # Look for a tool call in the NEWLY generated text only.
            # BUG FIX: matching against the full history re-ran the same
            # search (and appended duplicate observations) on every later step.
            action_match = re.search(
                r"Action:\s*web_search\s*\[([^\]]+)\]",
                new_text,
                re.IGNORECASE | re.DOTALL
            )

            if action_match:
                query = action_match.group(1).strip()
                print(f"Tool call → {query}")
                observation = self.search_web(query)
                # Truncate long observations to keep the prompt bounded.
                obs_short = observation[:1400] + "..." if len(observation) > 1400 else observation
                history += f"\nObservation: {obs_short}\nThought: "

            # Look for final answer
            if "Final Answer:" in history:
                parts = history.split("Final Answer:")
                answer_part = parts[-1].strip()
                # Take first clean line, remove trailing punctuation if not needed
                answer = answer_part.split("\n")[0].strip()
                answer = answer.replace(".", "").replace(",", "").strip()
                print(f"→ Final answer: {answer}")
                return answer

        print("Max steps reached - no final answer")
        return "Unable to answer"
112
 
113
 
114
  def run_and_submit_all( profile: gr.OAuthProfile | None):