abdulaziz744 committed on
Commit
1063615
·
verified ·
1 Parent(s): 3e93230

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +52 -39
app.py CHANGED
@@ -13,55 +13,68 @@ DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
13
  # ----- THIS IS WHERE YOU CAN BUILD WHAT YOU WANT ------
14
  import os
15
  from huggingface_hub import InferenceClient
 
 
 
 
16
 
17
class BasicAgent:
    """Minimal GAIA agent: sends the question to a hosted Llama 3.1 model
    and post-processes the reply into a single bare, exact-match answer."""

    def __init__(self):
        print("BasicAgent initialized with Llama 3.1-8B Instruct.")
        # Make sure HF_TOKEN is set in your Space secrets
        self.client = InferenceClient(
            model="meta-llama/Llama-3.1-8B-Instruct",
            token=os.getenv("HF_TOKEN")
        )

    def __call__(self, question: str) -> str:
        """Return the model's answer as one bare line (GAIA exact-match style).

        Returns "AGENT ERROR" if the inference call fails for any reason.
        """
        print(f"Agent received question (first 50 chars): {question[:50]}...")

        # GAIA prompt optimized for exact match
        prompt = f"""
Answer the question **with only the exact answer**.
Rules:
- Only write the answer.
- No extra words, no explanations.
- No punctuation unless required.
- Do not write 'Final Answer'.

Question:
{question}

Answer:
"""

        try:
            # Use chat-completion API
            response = self.client.chat.completions.create(
                model="meta-llama/Llama-3.1-8B-Instruct",
                messages=[{"role": "user", "content": prompt}],
                max_tokens=32,    # Level 1 questions are short
                temperature=0.0,  # deterministic output
            )

            # Take first line only and strip extra spaces
            answer = response.choices[0].message.content.strip().split("\n")[0].strip()

            # BUG FIX: the previous replace(".", "").replace(",", "") deleted
            # *interior* punctuation too, corrupting numeric answers
            # ("3.14" -> "314", "1,000" -> "1000"). Normalize by stripping
            # punctuation from the ends only.
            answer = answer.strip(" .,")

        except Exception as e:
            print(f"Error generating answer: {e}")
            answer = "AGENT ERROR"

        print(f"Agent answer: {answer}")
        return answer
 
 
65
 
66
 
67
 
 
13
  # ----- THIS IS WHERE YOU CAN BUILD WHAT YOU WANT ------
14
  import os
15
  from huggingface_hub import InferenceClient
16
+ from langchain.tools import Tool
17
+ from langchain_community.utilities import DuckDuckGoSearchRun
18
+ from langchain.prompts import PromptTemplate
19
+ import re
20
 
21
class BasicAgent:
    """ReAct-style GAIA agent: a hosted Llama model reasons in
    Thought/Action/Observation steps and may call a web-search tool
    before emitting 'Final Answer: ...'."""

    def __init__(self):
        # Make sure HF_TOKEN is set in your Space secrets
        self.client = InferenceClient(
            model="meta-llama/Meta-Llama-3.1-70B-Instruct",
            token=os.getenv("HF_TOKEN")
        )
        # Define tools
        self.search = DuckDuckGoSearchRun()  # Web search tool
        self.tools = {
            "web_search": Tool(
                name="web_search",
                func=self.search.run,
                description="Search the web for current or historical facts. Input: query string."
            )
        }
        self.tool_names = list(self.tools.keys())

    def __call__(self, question: str) -> str:
        """Run up to `max_steps` ReAct iterations and return the bare answer.

        Returns "Unable to answer" if no 'Final Answer:' appears in time.
        """
        # ReAct prompt: guides model to think, act, observe. Plain str.format
        # is sufficient here — PromptTemplate was needless indirection for a
        # static template.
        template = """
You are an AI agent solving GAIA questions. Answer exactly, no extra text.

Rules:
- Think step-by-step: Thought, Action (if needed), Observation, then repeat until Final Answer.
- Use tools only if necessary: Available tools: {tool_names}.
- For Action: Output 'Action: tool_name [input]'.
- Stop with 'Final Answer: <exact answer>'.
- Be precise, no punctuation unless needed.

Question: {question}

Thought:
"""
        prompt = template.format(question=question, tool_names=", ".join(self.tool_names))

        max_steps = 5  # Limit for Level 1
        response = ""
        for _ in range(max_steps):
            # Generate next step, conditioning on everything produced so far.
            gen = self.client.text_generation(prompt + response, max_new_tokens=200, temperature=0.1)
            response += gen

            # BUG FIX: parse the Action from the *newly generated* chunk only.
            # Searching the whole accumulated `response` kept re-matching the
            # first Action on every iteration, re-running the same tool and
            # never reaching any later Action.
            action_match = re.search(r"Action: (\w+) \[(.*)\]", gen)
            if action_match:
                tool_name, tool_input = action_match.groups()
                if tool_name in self.tools:
                    obs = self.tools[tool_name].func(tool_input)
                    response += f"\nObservation: {obs[:1000]}...\nThought: "  # Truncate long obs
                else:
                    response += "\nObservation: Invalid tool.\nThought: "
            # Check for Final Answer
            if "Final Answer:" in response:
                answer = response.split("Final Answer:")[-1].strip().split("\n")[0].strip()
                # BUG FIX: strip punctuation from the ends only; the old
                # replace(".", "").replace(",", "") deleted interior
                # characters and corrupted numbers ("3.14" -> "314").
                return answer.strip(" .,")

        return "Unable to answer"  # Fallback
 
79
 
80