abdulaziz744 committed on
Commit
6079cbe
·
verified ·
1 Parent(s): df3260b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +23 -13
app.py CHANGED
@@ -11,21 +11,27 @@ DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
11
 
12
  # --- Basic Agent Definition ---
13
  # ----- THIS IS WHERE YOU CAN BUILD WHAT YOU WANT ------
 
 
 
14
  class BasicAgent:
15
  def __init__(self):
16
- print("BasicAgent initialized.")
 
17
  self.client = InferenceClient(
18
- model="mistralai/Mistral-7B-Instruct-v0.2",
19
  token=os.getenv("HF_TOKEN")
20
  )
21
 
22
  def __call__(self, question: str) -> str:
23
  print(f"Agent received question (first 50 chars): {question[:50]}...")
24
 
 
25
  prompt = f"""
26
- Answer the question EXACTLY.
27
 
28
  Rules:
 
29
  - No explanations
30
  - No extra words
31
  - No punctuation unless required
@@ -35,21 +41,25 @@ Question:
35
  {question}
36
  """
37
 
38
- response = self.client.chat.completions.create(
39
- messages=[
40
- {"role": "system", "content": "You return only the final answer."},
41
- {"role": "user", "content": prompt}
42
- ],
43
- max_tokens=64,
44
- temperature=0.0,
45
- )
 
 
 
 
 
46
 
47
- answer = response.choices[0].message.content
48
- answer = answer.strip().replace("\n", "")
49
  print(f"Agent answer: {answer}")
50
  return answer
51
 
52
 
 
53
  def run_and_submit_all( profile: gr.OAuthProfile | None):
54
  """
55
  Fetches all questions, runs the BasicAgent on them, submits all answers,
 
11
 
12
  # --- Basic Agent Definition ---
13
  # ----- THIS IS WHERE YOU CAN BUILD WHAT YOU WANT ------
14
import os

from huggingface_hub import InferenceClient


class BasicAgent:
    """Minimal agent that answers benchmark questions via a hosted LLM.

    Uses the Hugging Face Inference API with an instruction-tuned
    Llama 3.1 model. Requires the ``HF_TOKEN`` environment variable
    (e.g. a Space secret) for authenticated access.
    """

    # Single source of truth for the model id — previously the id was
    # duplicated between the client constructor and each chat call.
    MODEL_ID = "meta-llama/Llama-3.1-8B-Instruct"

    def __init__(self):
        print("BasicAgent initialized with Llama 3.1-8B Instruct.")
        # os.getenv returns None when HF_TOKEN is unset; the client would
        # then only fail later with an opaque auth error — warn up front.
        token = os.getenv("HF_TOKEN")
        if not token:
            print("Warning: HF_TOKEN is not set; inference calls may fail.")
        self.client = InferenceClient(
            model=self.MODEL_ID,
            token=token,
        )

    def __call__(self, question: str) -> str:
        """Return a terse, exact-match-style answer to *question*.

        Falls back to the literal string ``"AGENT ERROR"`` when the
        inference call raises, so one bad question does not abort an
        entire submission run.
        """
        print(f"Agent received question (first 50 chars): {question[:50]}...")

        # GAIA-style exact-match prompt: forbid explanations / extra words.
        prompt = f"""
You are answering a benchmark question.

Rules:
- Answer EXACTLY
- No explanations
- No extra words
- No punctuation unless required

Question:
{question}
"""

        # Use chat-completion for the instruction-tuned Llama 3.1 model.
        try:
            response = self.client.chat.completions.create(
                model=self.MODEL_ID,
                messages=[{"role": "user", "content": prompt}],
                max_tokens=128,
                temperature=0.0,
            )
            raw = response.choices[0].message.content
            # Collapse ALL whitespace (incl. newlines) to single spaces:
            # the previous strip().replace("\n", " ") could leave double
            # spaces, which breaks exact-match scoring.
            answer = " ".join(raw.split())
        except Exception as e:
            # Broad catch is deliberate: network/auth/model errors should
            # degrade to a sentinel answer, not crash the submission loop.
            print(f"Error generating answer: {e}")
            answer = "AGENT ERROR"

        print(f"Agent answer: {answer}")
        return answer
60
 
61
 
62
+
63
  def run_and_submit_all( profile: gr.OAuthProfile | None):
64
  """
65
  Fetches all questions, runs the BasicAgent on them, submits all answers,