Snaseem2026 committed on
Commit
98bd787
·
verified ·
1 Parent(s): a2546f7

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +24 -25
app.py CHANGED
@@ -9,7 +9,7 @@ import re
9
  DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
10
 
11
  # --- Enhanced Agent ---
12
- class EnhancedAgent:
13
  def __init__(self):
14
  print("Initializing Enhanced Agent...")
15
 
@@ -25,7 +25,7 @@ class EnhancedAgent:
25
  from duckduckgo_search import DDGS
26
  self.search = DDGS()
27
  print("βœ… Search ready")
28
- except:
29
  self.search = None
30
  print("⚠️ Search unavailable")
31
 
@@ -34,7 +34,7 @@ class EnhancedAgent:
34
  if not self.search:
35
  return ""
36
 
37
- try:
38
  results = list(self.search.text(query, max_results=max_results))
39
  if not results:
40
  return ""
@@ -58,24 +58,24 @@ class EnhancedAgent:
58
  patterns_to_remove = [
59
  r"^according to.*? [,:]",
60
  r"^based on.*?[,:]",
61
- r"^the answer is: ? ",
62
- r"^answer: ?",
63
- r"^final answer:?",
64
- r"^in summary:?",
65
- r"^therefore:?",
66
- r"^thus:?",
67
- r"^so:?",
68
  ]
69
 
70
  cleaned = text. strip()
71
- for pattern in patterns_to_remove:
72
  cleaned = re.sub(pattern, "", cleaned, flags=re.IGNORECASE).strip()
73
 
74
  # If answer has multiple sentences, often the last one is the direct answer
75
  sentences = cleaned.split('.')
76
  if len(sentences) > 2:
77
  # Check if last sentence looks like a direct answer
78
- last = sentences[-1]. strip()
79
  if last and len(last) < 100:
80
  return last
81
 
@@ -86,7 +86,7 @@ class EnhancedAgent:
86
  print(f"Q: {question[: 150]}")
87
 
88
  # Determine if we need search
89
- needs_search = any(keyword in question.lower() for keyword in [
90
  'current', 'latest', 'recent', 'today', 'now', '2024', '2025', '2026',
91
  'who is', 'what is', 'where is', 'when did', 'how many'
92
  ])
@@ -99,7 +99,7 @@ class EnhancedAgent:
99
  print(f"βœ… Search: {len(search_context)} chars")
100
 
101
  # Enhanced system prompt with better instructions
102
- system_prompt = """You are an expert AI that provides accurate, direct answers.
103
 
104
  CRITICAL RULES:
105
  1. Give ONLY the final answer - no explanations unless asked
@@ -119,7 +119,7 @@ Q: "Who is the CEO of Tesla?" β†’ A: "Elon Musk"
119
  Q: "What is 15 + 27?" β†’ A: "42"
120
  Q: "How many planets in the solar system?" β†’ A: "8"
121
 
122
- DO NOT start with "The answer is" or "According to" - just give the answer directly! """
123
 
124
  # Build prompt
125
  messages = [{"role": "system", "content": system_prompt}]
@@ -146,7 +146,7 @@ DO NOT start with "The answer is" or "According to" - just give the answer direc
146
  temperature=0.05 # Very low for maximum accuracy
147
  )
148
 
149
- raw_answer = response.choices[0].message.content.strip()
150
 
151
  # Clean up the answer
152
  answer = self.extract_answer(raw_answer)
@@ -160,7 +160,7 @@ DO NOT start with "The answer is" or "According to" - just give the answer direc
160
  answer = line
161
  break
162
 
163
- print(f"βœ… A: {answer[: 200]}")
164
  return answer
165
 
166
  except Exception as e:
@@ -169,14 +169,14 @@ DO NOT start with "The answer is" or "According to" - just give the answer direc
169
  # Last resort: try a simpler call
170
  try:
171
  simple_prompt = f"Answer this question concisely:\n\n{question}\n\nAnswer:"
172
- response = self. client.text_generation(
173
  simple_prompt,
174
  model="mistralai/Mixtral-8x7B-Instruct-v0.1",
175
  max_new_tokens=200,
176
  temperature=0.1
177
  )
178
  return response.strip()
179
- except:
180
  return "Unable to generate answer."
181
 
182
  return "Unable to generate answer."
@@ -229,7 +229,7 @@ def run_and_submit_all(profile: gr.OAuthProfile | None):
229
  task_id = item.get("task_id")
230
  question_text = item. get("question")
231
 
232
- if not task_id or not question_text:
233
  continue
234
 
235
  print(f"[{idx}/{total}] {task_id[: 8]}...")
@@ -247,7 +247,7 @@ def run_and_submit_all(profile: gr.OAuthProfile | None):
247
  "task_id": task_id,
248
  "submitted_answer": "Error processing question"
249
  })
250
- results_log.append((idx, question_text[: 60], f"Error: {str(e)[: 50]}"))
251
 
252
  print(f"\n{'='*70}")
253
  print(f"βœ… Processed all {len(answers_payload)} questions")
@@ -284,7 +284,7 @@ def run_and_submit_all(profile: gr.OAuthProfile | None):
284
 
285
  ### πŸ† Score: **{score}%**
286
 
287
- {'### πŸŽ“ Amazing! You completed Unit 4 of the Hugging Face Agents Course!' if passed else f'### πŸ“ˆ You got {score}% - need 30% to pass. The agent is now much better, try again! '}
288
 
289
  **Details:**
290
  - πŸ‘€ User: `{username}`
@@ -330,12 +330,11 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
330
  submit_button = gr.Button(
331
  "πŸš€ Run Evaluation & Submit",
332
  variant="primary",
333
- size="lg",
334
- scale=2
335
  )
336
 
337
  output_text = gr.Markdown()
338
- output_table = gr.Dataframe(label="πŸ“ Results Preview", wrap=True, height=400)
339
 
340
  submit_button.click(
341
  run_and_submit_all,
 
9
  DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
10
 
11
  # --- Enhanced Agent ---
12
+ class EnhancedAgent:
13
  def __init__(self):
14
  print("Initializing Enhanced Agent...")
15
 
 
25
  from duckduckgo_search import DDGS
26
  self.search = DDGS()
27
  print("βœ… Search ready")
28
+ except:
29
  self.search = None
30
  print("⚠️ Search unavailable")
31
 
 
34
  if not self.search:
35
  return ""
36
 
37
+ try:
38
  results = list(self.search.text(query, max_results=max_results))
39
  if not results:
40
  return ""
 
58
  patterns_to_remove = [
59
  r"^according to.*? [,:]",
60
  r"^based on.*?[,:]",
61
+ r"^the answer is: ? ? ",
62
+ r"^answer: ? ?",
63
+ r"^final answer:? ?",
64
+ r"^in summary:? ?",
65
+ r"^therefore:? ?",
66
+ r"^thus:? ?",
67
+ r"^so:? ?",
68
  ]
69
 
70
  cleaned = text. strip()
71
+ for pattern in patterns_to_remove:
72
  cleaned = re.sub(pattern, "", cleaned, flags=re.IGNORECASE).strip()
73
 
74
  # If answer has multiple sentences, often the last one is the direct answer
75
  sentences = cleaned.split('.')
76
  if len(sentences) > 2:
77
  # Check if last sentence looks like a direct answer
78
+ last = sentences[-1].strip()
79
  if last and len(last) < 100:
80
  return last
81
 
 
86
  print(f"Q: {question[: 150]}")
87
 
88
  # Determine if we need search
89
+ needs_search = any(keyword in question. lower() for keyword in [
90
  'current', 'latest', 'recent', 'today', 'now', '2024', '2025', '2026',
91
  'who is', 'what is', 'where is', 'when did', 'how many'
92
  ])
 
99
  print(f"βœ… Search: {len(search_context)} chars")
100
 
101
  # Enhanced system prompt with better instructions
102
+ system_prompt = """You are an expert AI that provides accurate, direct answers.
103
 
104
  CRITICAL RULES:
105
  1. Give ONLY the final answer - no explanations unless asked
 
119
  Q: "What is 15 + 27?" β†’ A: "42"
120
  Q: "How many planets in the solar system?" β†’ A: "8"
121
 
122
+ DO NOT start with "The answer is" or "According to" - just give the answer directly!"""
123
 
124
  # Build prompt
125
  messages = [{"role": "system", "content": system_prompt}]
 
146
  temperature=0.05 # Very low for maximum accuracy
147
  )
148
 
149
+ raw_answer = response.choices[0].message.content. strip()
150
 
151
  # Clean up the answer
152
  answer = self.extract_answer(raw_answer)
 
160
  answer = line
161
  break
162
 
163
+ print(f"βœ… A: {answer[:200]}")
164
  return answer
165
 
166
  except Exception as e:
 
169
  # Last resort: try a simpler call
170
  try:
171
  simple_prompt = f"Answer this question concisely:\n\n{question}\n\nAnswer:"
172
+ response = self.client.text_generation(
173
  simple_prompt,
174
  model="mistralai/Mixtral-8x7B-Instruct-v0.1",
175
  max_new_tokens=200,
176
  temperature=0.1
177
  )
178
  return response.strip()
179
+ except:
180
  return "Unable to generate answer."
181
 
182
  return "Unable to generate answer."
 
229
  task_id = item.get("task_id")
230
  question_text = item. get("question")
231
 
232
+ if not task_id or not question_text:
233
  continue
234
 
235
  print(f"[{idx}/{total}] {task_id[: 8]}...")
 
247
  "task_id": task_id,
248
  "submitted_answer": "Error processing question"
249
  })
250
+ results_log.append((idx, question_text[: 60], f"Error: {str(e)[:50]}"))
251
 
252
  print(f"\n{'='*70}")
253
  print(f"βœ… Processed all {len(answers_payload)} questions")
 
284
 
285
  ### πŸ† Score: **{score}%**
286
 
287
+ {'### πŸŽ“ Amazing! You completed Unit 4 of the Hugging Face Agents Course!' if passed else f'### πŸ“ˆ You got {score}% - need 30% to pass. The agent is now much better, try again!'}
288
 
289
  **Details:**
290
  - πŸ‘€ User: `{username}`
 
330
  submit_button = gr.Button(
331
  "πŸš€ Run Evaluation & Submit",
332
  variant="primary",
333
+ size="lg"
 
334
  )
335
 
336
  output_text = gr.Markdown()
337
+ output_table = gr.Dataframe(label="πŸ“ Results Preview", wrap=True)
338
 
339
  submit_button.click(
340
  run_and_submit_all,