Shivangsinha committed on
Commit
fb7db6d
·
verified ·
1 Parent(s): 7d1c446

Update the final code to fix the "too many requests" error

Browse files
Files changed (1) hide show
  1. app.py +27 -18
app.py CHANGED
@@ -6,20 +6,17 @@ import inspect
6
  import pandas as pd
7
  from smolagents import (
8
  CodeAgent,
9
- LiteLLMModel,
10
  DuckDuckGoSearchTool,
11
  WikipediaSearchTool,
12
  PythonInterpreterTool,
13
  tool,
14
  )
15
 
16
- # (Keep Constants as is)
17
  # --- Constants ---
18
  DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
19
 
20
  # --- Basic Agent Definition ---
21
- # --- THIS IS WERE YOU CAN BUILD WHAT YOU WANT ------
22
-
23
  @tool
24
  def get_current_date_time() -> str:
25
  """Returns the current date and time in ISO format."""
@@ -29,26 +26,27 @@ def get_current_date_time() -> str:
29
  class BasicAgent:
30
  def __init__(self):
31
  print("BasicAgent initialized.")
32
- gemini_api_key = os.getenv("GEMINI_API_KEY")
33
- if not gemini_api_key:
34
- raise ValueError("GEMINI_API_KEY environment variable not set")
35
- self.model = LiteLLMModel(
36
- model_id="gemini/gemini-2.0-flash-lite",
37
- api_key=gemini_api_key,
38
  )
 
39
  self.tools = [
40
  DuckDuckGoSearchTool(),
41
  WikipediaSearchTool(),
42
  PythonInterpreterTool(),
43
  get_current_date_time,
44
  ]
 
45
  self.agent = CodeAgent(
46
  tools=self.tools,
47
  model=self.model,
48
  max_steps=8,
49
  additional_authorized_imports=["datetime", "re", "json", "math", "collections"],
50
  )
51
- print("BasicAgent ready with Gemini 2.0 Flash-Lite (CodeAgent).")
52
 
53
  def __call__(self, question: str) -> str:
54
  print(f"Agent received question: {question[:80]}...")
@@ -69,8 +67,7 @@ class BasicAgent:
69
  return f"Error: {err}"
70
  return "Error: Rate limit exceeded after retries"
71
 
72
-
73
- # --- The rest of the code (keep as-is) ---
74
  def run_and_submit_all(profile: gr.OAuthProfile | None):
75
  space_id = os.getenv("SPACE_ID")
76
  if profile:
@@ -79,17 +76,21 @@ def run_and_submit_all(profile: gr.OAuthProfile | None):
79
  else:
80
  print("User not logged in.")
81
  return "Please Login to Hugging Face with the button.", None
 
82
  api_url = DEFAULT_API_URL
83
  questions_url = f"{api_url}/questions"
84
  submit_url = f"{api_url}/submit"
 
85
  try:
86
  agent = BasicAgent()
87
  except Exception as e:
88
  print(f"Error instantiating agent: {e}")
89
  return f"Error initializing agent: {e}", None
 
90
  agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"
91
  print(f"Agent code: {agent_code}")
92
  print(f"Fetching questions from: {questions_url}")
 
93
  try:
94
  response = requests.get(questions_url, timeout=15)
95
  response.raise_for_status()
@@ -100,14 +101,17 @@ def run_and_submit_all(profile: gr.OAuthProfile | None):
100
  except Exception as e:
101
  print(f"Error fetching questions: {e}")
102
  return f"Error fetching questions: {e}", None
 
103
  results_log = []
104
  answers_payload = []
105
  print(f"Running agent on {len(questions_data)} questions...")
 
106
  for i, item in enumerate(questions_data):
107
  task_id = item.get("task_id")
108
  question_text = item.get("question")
109
  if not task_id or not question_text:
110
  continue
 
111
  try:
112
  submitted_answer = agent(question_text)
113
  answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
@@ -115,11 +119,16 @@ def run_and_submit_all(profile: gr.OAuthProfile | None):
115
  except Exception as e:
116
  print(f"Error on task {task_id}: {e}")
117
  results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": f"ERROR: {e}"})
118
- time.sleep(3)
 
 
 
119
  if not answers_payload:
120
  return "No answers.", pd.DataFrame(results_log)
 
121
  submission_data = {"username": username.strip(), "agent_code": agent_code, "answers": answers_payload}
122
  print(f"Submitting {len(answers_payload)} answers...")
 
123
  try:
124
  response = requests.post(submit_url, json=submission_data, timeout=60)
125
  response.raise_for_status()
@@ -137,22 +146,22 @@ def run_and_submit_all(profile: gr.OAuthProfile | None):
137
  print(f"Submission error: {e}")
138
  return f"Submission failed: {e}", pd.DataFrame(results_log)
139
 
140
-
141
  # --- Build Gradio UI ---
142
  with gr.Blocks() as demo:
143
  gr.Markdown("# Basic Agent Evaluation Runner")
144
  gr.Markdown(
145
  """
146
  **Instructions:**
147
- 1. Clone this space and set `GEMINI_API_KEY` in your Space secrets.
148
  2. Log in with your Hugging Face account below.
149
- 3. Click 'Run Evaluation & Submit' to start.
150
  """
151
  )
152
  gr.LoginButton()
153
  run_button = gr.Button("Run Evaluation & Submit All Answers")
154
  status_output = gr.Textbox(label="Run Status / Submission Result", lines=5, interactive=False)
155
  results_table = gr.DataFrame(label="Questions and Agent Answers", wrap=True)
 
156
  run_button.click(
157
  fn=run_and_submit_all,
158
  outputs=[status_output, results_table]
@@ -160,4 +169,4 @@ with gr.Blocks() as demo:
160
 
161
  if __name__ == "__main__":
162
  print("Starting Gradio app...")
163
- demo.launch(debug=True, share=False)
 
6
  import pandas as pd
7
  from smolagents import (
8
  CodeAgent,
9
+ HfApiModel,
10
  DuckDuckGoSearchTool,
11
  WikipediaSearchTool,
12
  PythonInterpreterTool,
13
  tool,
14
  )
15
 
 
16
  # --- Constants ---
17
  DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
18
 
19
  # --- Basic Agent Definition ---
 
 
20
  @tool
21
  def get_current_date_time() -> str:
22
  """Returns the current date and time in ISO format."""
 
26
  class BasicAgent:
27
  def __init__(self):
28
  print("BasicAgent initialized.")
29
+
30
+ # Using Hugging Face's free Serverless Inference API
31
+ # Qwen2.5-Coder-32B-Instruct is the default and highly recommended for this course
32
+ self.model = HfApiModel(
33
+ model_id="Qwen/Qwen2.5-Coder-32B-Instruct",
 
34
  )
35
+
36
  self.tools = [
37
  DuckDuckGoSearchTool(),
38
  WikipediaSearchTool(),
39
  PythonInterpreterTool(),
40
  get_current_date_time,
41
  ]
42
+
43
  self.agent = CodeAgent(
44
  tools=self.tools,
45
  model=self.model,
46
  max_steps=8,
47
  additional_authorized_imports=["datetime", "re", "json", "math", "collections"],
48
  )
49
+ print("BasicAgent ready with Qwen2.5-Coder-32B-Instruct (CodeAgent).")
50
 
51
  def __call__(self, question: str) -> str:
52
  print(f"Agent received question: {question[:80]}...")
 
67
  return f"Error: {err}"
68
  return "Error: Rate limit exceeded after retries"
69
 
70
+ # --- The rest of the code ---
 
71
  def run_and_submit_all(profile: gr.OAuthProfile | None):
72
  space_id = os.getenv("SPACE_ID")
73
  if profile:
 
76
  else:
77
  print("User not logged in.")
78
  return "Please Login to Hugging Face with the button.", None
79
+
80
  api_url = DEFAULT_API_URL
81
  questions_url = f"{api_url}/questions"
82
  submit_url = f"{api_url}/submit"
83
+
84
  try:
85
  agent = BasicAgent()
86
  except Exception as e:
87
  print(f"Error instantiating agent: {e}")
88
  return f"Error initializing agent: {e}", None
89
+
90
  agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"
91
  print(f"Agent code: {agent_code}")
92
  print(f"Fetching questions from: {questions_url}")
93
+
94
  try:
95
  response = requests.get(questions_url, timeout=15)
96
  response.raise_for_status()
 
101
  except Exception as e:
102
  print(f"Error fetching questions: {e}")
103
  return f"Error fetching questions: {e}", None
104
+
105
  results_log = []
106
  answers_payload = []
107
  print(f"Running agent on {len(questions_data)} questions...")
108
+
109
  for i, item in enumerate(questions_data):
110
  task_id = item.get("task_id")
111
  question_text = item.get("question")
112
  if not task_id or not question_text:
113
  continue
114
+
115
  try:
116
  submitted_answer = agent(question_text)
117
  answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
 
119
  except Exception as e:
120
  print(f"Error on task {task_id}: {e}")
121
  results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": f"ERROR: {e}"})
122
+
123
+ # Wait 10 seconds between questions to play nicely with HF inference servers
124
+ time.sleep(10)
125
+
126
  if not answers_payload:
127
  return "No answers.", pd.DataFrame(results_log)
128
+
129
  submission_data = {"username": username.strip(), "agent_code": agent_code, "answers": answers_payload}
130
  print(f"Submitting {len(answers_payload)} answers...")
131
+
132
  try:
133
  response = requests.post(submit_url, json=submission_data, timeout=60)
134
  response.raise_for_status()
 
146
  print(f"Submission error: {e}")
147
  return f"Submission failed: {e}", pd.DataFrame(results_log)
148
 
 
149
  # --- Build Gradio UI ---
150
  with gr.Blocks() as demo:
151
  gr.Markdown("# Basic Agent Evaluation Runner")
152
  gr.Markdown(
153
  """
154
  **Instructions:**
155
+ 1. Ensure you have your `HF_TOKEN` in your Space secrets (Settings -> Secrets).
156
  2. Log in with your Hugging Face account below.
157
+ 3. Click 'Run Evaluation & Submit' to start. Please be patient, as inference will take a few minutes to process 20 questions securely.
158
  """
159
  )
160
  gr.LoginButton()
161
  run_button = gr.Button("Run Evaluation & Submit All Answers")
162
  status_output = gr.Textbox(label="Run Status / Submission Result", lines=5, interactive=False)
163
  results_table = gr.DataFrame(label="Questions and Agent Answers", wrap=True)
164
+
165
  run_button.click(
166
  fn=run_and_submit_all,
167
  outputs=[status_output, results_table]
 
169
 
170
  if __name__ == "__main__":
171
  print("Starting Gradio app...")
172
+ demo.launch(debug=True, share=False)