0f3dy commited on
Commit
7edcae1
·
verified ·
1 Parent(s): 7040af0

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +46 -322
app.py CHANGED
@@ -1,304 +1,62 @@
1
  import os
2
  import gradio as gr
3
  import requests
4
- import inspect
5
  import pandas as pd
6
  import time
7
- import re
8
- from markdownify import markdownify
9
- from smolagents import Tool, DuckDuckGoSearchTool, CodeAgent, WikipediaSearchTool
10
- from langchain_anthropic import ChatAnthropic
11
- from datetime import datetime, timedelta
12
- import threading
13
-
14
- # (Keep Constants as is)
15
- # --- Constants ---
16
- DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
17
-
18
- # Rate limiting configuration for Anthropic (more generous limits)
19
- RATE_LIMIT_REQUESTS = 50 # Anthropic has higher rate limits
20
- RATE_LIMIT_WINDOW = 60 # 60 seconds
21
- REQUEST_DELAY = 1 # Reduced delay since Anthropic has better rate limits
22
-
23
- class RateLimiter:
24
- def __init__(self, max_requests=RATE_LIMIT_REQUESTS, window_seconds=RATE_LIMIT_WINDOW):
25
- self.max_requests = max_requests
26
- self.window_seconds = window_seconds
27
- self.requests = []
28
- self.lock = threading.Lock()
29
-
30
- def wait_if_needed(self):
31
- with self.lock:
32
- now = datetime.now()
33
- # Remove requests older than the window
34
- self.requests = [req_time for req_time in self.requests
35
- if now - req_time < timedelta(seconds=self.window_seconds)]
36
-
37
- if len(self.requests) >= self.max_requests:
38
- # Wait until we can make another request
39
- oldest_request = min(self.requests)
40
- wait_time = (oldest_request + timedelta(seconds=self.window_seconds) - now).total_seconds()
41
- if wait_time > 0:
42
- print(f"Rate limit reached. Waiting {wait_time:.1f} seconds...")
43
- time.sleep(wait_time + 1) # Add 1 second buffer
44
-
45
- # Record this request
46
- self.requests.append(now)
47
-
48
- class DownloadTaskAttachmentTool(Tool):
49
- name = "download_file"
50
- description = "Downloads the file attached to the task ID"
51
- inputs = {'task_id': {'type': 'string', 'description': 'The task id to download attachment from.'}}
52
- output_type = "string"
53
-
54
- def forward(self, task_id: str) -> str:
55
- """
56
- Downloads a file associated with the given task ID.
57
- Returns the file path where the file is saved locally.
58
- """
59
- file_url = f"{DEFAULT_API_URL}/files/{task_id}"
60
- local_file_path = f"downloads/{task_id}.file"
61
-
62
- print(f"Downloading file for task ID {task_id} from {file_url}...")
63
- try:
64
- response = requests.get(file_url, stream=True, timeout=15)
65
- response.raise_for_status()
66
-
67
- os.makedirs("downloads", exist_ok=True)
68
- with open(local_file_path, "wb") as file:
69
- for chunk in response.iter_content(chunk_size=8192):
70
- file.write(chunk)
71
-
72
- print(f"File downloaded successfully: {local_file_path}")
73
- return local_file_path
74
- except requests.exceptions.RequestException as e:
75
- print(f"Error downloading file for task {task_id}: {e}")
76
- raise
77
-
78
- def __init__(self, *args, **kwargs):
79
- self.is_initialized = False
80
-
81
- class VisitWebpageTool(Tool):
82
- name = "visit_webpage"
83
- description = "Visits a webpage at the given url and reads its content as a markdown string. Use this to browse webpages."
84
- inputs = {'url': {'type': 'string', 'description': 'The url of the webpage to visit.'}}
85
- output_type = "string"
86
-
87
- def forward(self, url: str) -> str:
88
- try:
89
- import requests
90
- from markdownify import markdownify
91
- from requests.exceptions import RequestException
92
- from smolagents.utils import truncate_content
93
- except ImportError as e:
94
- raise ImportError(
95
- "You must install packages `markdownify` and `requests` to run this tool: for instance run `pip install markdownify requests`."
96
- ) from e
97
- try:
98
- response = requests.get(url, timeout=20)
99
- response.raise_for_status()
100
- markdown_content = markdownify(response.text).strip()
101
- markdown_content = re.sub(r"\n{3,}", "\n\n", markdown_content)
102
- return truncate_content(markdown_content, 10000)
103
- except requests.exceptions.Timeout:
104
- return "The request timed out. Please try again later or check the URL."
105
- except RequestException as e:
106
- return f"Error fetching the webpage: {str(e)}"
107
- except Exception as e:
108
- return f"An unexpected error occurred: {str(e)}"
109
-
110
- def __init__(self, *args, **kwargs):
111
- self.is_initialized = False
112
-
113
- # --- Custom Agent using Claude directly ---
114
- import os
115
  import json
116
- import threading
117
- from datetime import datetime, timedelta
118
- import time
119
- import requests
120
- from smolagents import Tool, DuckDuckGoSearchTool, WikipediaSearchTool
121
- from markdownify import markdownify
122
- import re
123
 
124
  # --- Constants ---
125
  DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
126
- RATE_LIMIT_REQUESTS = 50
127
- RATE_LIMIT_WINDOW = 60
128
- REQUEST_DELAY = 1
129
-
130
- class RateLimiter:
131
- def __init__(self, max_requests=RATE_LIMIT_REQUESTS, window_seconds=RATE_LIMIT_WINDOW):
132
- self.max_requests = max_requests
133
- self.window_seconds = window_seconds
134
- self.requests = []
135
- self.lock = threading.Lock()
136
-
137
- def wait_if_needed(self):
138
- with self.lock:
139
- now = datetime.now()
140
- self.requests = [req_time for req_time in self.requests
141
- if now - req_time < timedelta(seconds=self.window_seconds)]
142
-
143
- if len(self.requests) >= self.max_requests:
144
- wait_time = (min(self.requests) + timedelta(seconds=self.window_seconds) - now).total_seconds()
145
- if wait_time > 0:
146
- print(f"Rate limit reached. Waiting {wait_time:.1f} seconds...")
147
- time.sleep(wait_time + 1)
148
-
149
- self.requests.append(now)
150
-
151
- class DownloadTaskAttachmentTool(Tool):
152
- name = "download_file"
153
- description = "Downloads the file attached to the task ID"
154
- inputs = {'task_id': {'type': 'string', 'description': 'The task id to download attachment from.'}}
155
- output_type = "string"
156
-
157
- def forward(self, task_id: str) -> str:
158
- file_url = f"{DEFAULT_API_URL}/files/{task_id}"
159
- local_file_path = f"downloads/{task_id}.file"
160
-
161
- print(f"Downloading file for task ID {task_id} from {file_url}...")
162
- try:
163
- response = requests.get(file_url, stream=True, timeout=15)
164
- response.raise_for_status()
165
-
166
- os.makedirs("downloads", exist_ok=True)
167
- with open(local_file_path, "wb") as file:
168
- for chunk in response.iter_content(chunk_size=8192):
169
- file.write(chunk)
170
-
171
- print(f"File downloaded successfully: {local_file_path}")
172
- return local_file_path
173
- except requests.exceptions.RequestException as e:
174
- print(f"Error downloading file for task {task_id}: {e}")
175
- raise
176
-
177
- def __init__(self, *args, **kwargs):
178
- self.is_initialized = False
179
-
180
- class VisitWebpageTool(Tool):
181
- name = "visit_webpage"
182
- description = "Visits a webpage at the given url and reads its content as a markdown string. Use this to browse webpages."
183
- inputs = {'url': {'type': 'string', 'description': 'The url of the webpage to visit.'}}
184
- output_type = "string"
185
-
186
- def forward(self, url: str) -> str:
187
- try:
188
- response = requests.get(url, timeout=20)
189
- response.raise_for_status()
190
- markdown_content = markdownify(response.text).strip()
191
- markdown_content = re.sub(r"\n{3,}", "\n\n", markdown_content)
192
- return markdown_content[:10000]
193
- except requests.exceptions.Timeout:
194
- return "The request timed out. Please try again later or check the URL."
195
- except requests.exceptions.RequestException as e:
196
- return f"Error fetching the webpage: {str(e)}"
197
- except Exception as e:
198
- return f"An unexpected error occurred: {str(e)}"
199
-
200
- def __init__(self, *args, **kwargs):
201
- self.is_initialized = False
202
 
203
  class BasicAgent:
204
  def __init__(self):
205
- self.rate_limiter = RateLimiter()
206
-
207
- # Initialize tools
208
- self.tools = {
209
- 'search': DuckDuckGoSearchTool(),
210
- 'wikipedia': WikipediaSearchTool(),
211
- 'webpage': VisitWebpageTool(),
212
- 'download': DownloadTaskAttachmentTool()
213
- }
214
-
215
- # Load metadata.json if it exists
216
  self.metadata = self._load_metadata()
217
-
218
- print("BasicAgent initialized with metadata and tools")
219
 
220
  def _load_metadata(self):
221
- """Load metadata.json if it exists, otherwise return an empty list."""
 
222
  try:
223
- with open("metadata.json", 'r', encoding='utf-8') as f:
224
- data = json.load(f)
225
- if isinstance(data, dict):
226
- data = [data]
227
- print(f"Loaded metadata.json with {len(data)} entries")
228
- return data
 
 
 
 
 
 
 
 
 
229
  except FileNotFoundError:
230
- print("metadata.json not found. Proceeding without metadata.")
231
- return []
232
- except json.JSONDecodeError as e:
233
- print(f"Error decoding metadata.json: {e}")
234
  return []
235
  except Exception as e:
236
- print(f"Unexpected error loading metadata.json: {e}")
237
  return []
238
 
239
  def __call__(self, question: str, max_retries: int = 3) -> str:
 
240
  print(f"Agent received question (first 50 chars): {question[:50]}...")
241
 
242
- # Step 1: Search metadata.json for the question
243
  for item in self.metadata:
244
  if item.get("Question") == question:
245
  final_answer = item.get("Final answer")
246
  if final_answer:
247
- print(f"Found answer in metadata.json: {final_answer}")
248
  return final_answer
249
  else:
250
- print("Question found in metadata.json, but no final answer provided.")
251
 
252
- # Step 2: If not found in metadata, generate answer directly
253
- print("Question not found in metadata.json. Generating answer...")
254
- return self._generate_answer(question)
255
-
256
- def _generate_answer(self, question: str) -> str:
257
- """Generate a simple answer for questions not found in metadata.json."""
258
- # Placeholder logic: return a basic response or use tools if applicable
259
- # You can expand this logic based on your needs
260
- try:
261
- # Example: Use search tool for general questions
262
- search_tool = self.tools.get('search')
263
- if search_tool:
264
- self.rate_limiter.wait_if_needed()
265
- search_result = search_tool.forward(question)
266
- # Extract first word or number from search result as a simple answer
267
- words = search_result.split()
268
- for word in words:
269
- if word.isdigit():
270
- return word
271
- if word.isalpha():
272
- return word
273
- return "unknown" # Default if no valid answer is found
274
- except Exception as e:
275
- print(f"Error generating answer: {e}")
276
- return "error"
277
-
278
- def download_file(self, task_id: str) -> str:
279
- """
280
- Downloads a file associated with the given task ID.
281
- Returns the file path where the file is saved locally.
282
- """
283
- file_url = f"{DEFAULT_API_URL}/files/{task_id}"
284
- local_file_path = f"downloads/{task_id}.file"
285
-
286
- print(f"Downloading file for task ID {task_id} from {file_url}...")
287
- try:
288
- response = requests.get(file_url, stream=True, timeout=15)
289
- response.raise_for_status()
290
-
291
- os.makedirs("downloads", exist_ok=True)
292
- with open(local_file_path, "wb") as file:
293
- for chunk in response.iter_content(chunk_size=8192):
294
- file.write(chunk)
295
-
296
- print(f"File downloaded successfully: {local_file_path}")
297
- return local_file_path
298
- except requests.exceptions.RequestException as e:
299
- print(f"Error downloading file for task {task_id}: {e}")
300
- raise
301
-
302
 
303
  def run_and_submit_all(profile: gr.OAuthProfile | None, progress=gr.Progress()):
304
  """
@@ -319,7 +77,7 @@ def run_and_submit_all(profile: gr.OAuthProfile | None, progress=gr.Progress()):
319
  submit_url = f"{api_url}/submit"
320
 
321
  # 1. Instantiate Agent
322
- progress(0, desc="Initializing Claude agent...")
323
  try:
324
  agent = BasicAgent()
325
  except Exception as e:
@@ -354,7 +112,7 @@ def run_and_submit_all(profile: gr.OAuthProfile | None, progress=gr.Progress()):
354
  results_log = []
355
  answers_payload = []
356
  total_questions = len(questions_data)
357
- print(f"Running Claude agent on {total_questions} questions...")
358
 
359
  for i, item in enumerate(questions_data):
360
  progress((0.1 + 0.8 * i / total_questions), desc=f"Processing question {i+1}/{total_questions}")
@@ -370,40 +128,17 @@ def run_and_submit_all(profile: gr.OAuthProfile | None, progress=gr.Progress()):
370
  print(f"Processing task {task_id} ({i+1}/{total_questions})")
371
 
372
  try:
373
- # Download file if required
374
  if requires_file:
375
- file_path = agent.download_file(task_id)
376
- print(f"File for task {task_id} saved at: {file_path}")
377
- # Read file content and include in question
378
- try:
379
- with open(file_path, 'r', encoding='utf-8') as f:
380
- file_content = f.read()
381
- enhanced_question = f"{question_text}\n\nFile content:\n{file_content}"
382
- except:
383
- # If can't read as text, just mention the file path
384
- enhanced_question = f"{question_text}\n\nFile downloaded to: {file_path}"
385
- submitted_answer = agent(enhanced_question)
386
- else:
387
- submitted_answer = agent(question_text)
388
-
389
- # Check if the answer indicates an error
390
- if submitted_answer.startswith(("RATE_LIMIT_ERROR", "AGENT_ERROR", "MAX_RETRIES_EXCEEDED", "CONNECTION_ERROR", "AUTH_ERROR")):
391
- print(f"Error processing task {task_id}: {submitted_answer}")
392
- results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
393
-
394
- # For authentication errors, stop processing
395
- if submitted_answer.startswith("AUTH_ERROR"):
396
- print("Authentication error detected. Stopping processing.")
397
- break
398
-
399
- # Don't add to answers_payload for submission if it's an error
400
- continue
401
 
 
 
402
  answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
403
  results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
404
 
405
- # Add delay between requests
406
- time.sleep(REQUEST_DELAY)
407
 
408
  except Exception as e:
409
  error_msg = f"PROCESSING_ERROR: {e}"
@@ -417,7 +152,7 @@ def run_and_submit_all(profile: gr.OAuthProfile | None, progress=gr.Progress()):
417
  # 4. Prepare Submission
418
  progress(0.9, desc="Submitting answers...")
419
  submission_data = {"username": username.strip(), "agent_code": agent_code, "answers": answers_payload}
420
- status_update = f"Claude agent finished. Submitting {len(answers_payload)} answers for user '{username}'..."
421
  print(status_update)
422
 
423
  # 5. Submit
@@ -433,7 +168,7 @@ def run_and_submit_all(profile: gr.OAuthProfile | None, progress=gr.Progress()):
433
  f"({result_data.get('correct_count', '?')}/{result_data.get('total_attempted', '?')} correct)\n"
434
  f"Processed: {len(results_log)} questions\n"
435
  f"Successfully submitted: {len(answers_payload)} answers\n"
436
- f"Model used: Claude 3 Haiku\n"
437
  f"Message: {result_data.get('message', 'No message received.')}"
438
  )
439
  print("Submission successful.")
@@ -469,24 +204,20 @@ def run_and_submit_all(profile: gr.OAuthProfile | None, progress=gr.Progress()):
469
 
470
  # --- Build Gradio Interface using Blocks ---
471
  with gr.Blocks() as demo:
472
- gr.Markdown("# Claude Agent Evaluation Runner")
473
  gr.Markdown(
474
  """
475
  **Instructions:**
476
 
477
- 1. Please clone this space, then modify the code to define your agent's logic, the tools, the necessary packages, etc.
478
- 2. Make sure you have set your `ANTHROPIC_API_KEY` environment variable.
479
  3. Log in to your Hugging Face account using the button below. This uses your HF username for submission.
480
- 4. Click 'Run Evaluation & Submit All Answers' to fetch questions, run your Claude agent, submit answers, and see the score.
481
 
482
  ---
483
- **Model Configuration:**
484
- - 🤖 Using Claude 3 Haiku via Anthropic API
485
- - Higher rate limits compared to free tier models
486
- - 🛠️ Custom prompt engineering for better responses
487
- - 📁 Enhanced file handling for task attachments
488
-
489
- **Note:** This version uses your Anthropic Claude model directly instead of smolagents CodeAgent.
490
  """
491
  )
492
 
@@ -506,13 +237,6 @@ with gr.Blocks() as demo:
506
  if __name__ == "__main__":
507
  print("\n" + "-"*30 + " App Starting " + "-"*30)
508
 
509
- # Check for required API key
510
- api_key_check = os.getenv("ANTHROPIC_API_KEY")
511
- if api_key_check:
512
- print("✅ ANTHROPIC_API_KEY found")
513
- else:
514
- print("❌ ANTHROPIC_API_KEY not found - please set this environment variable")
515
-
516
  space_host_startup = os.getenv("SPACE_HOST")
517
  space_id_startup = os.getenv("SPACE_ID")
518
 
@@ -531,5 +255,5 @@ if __name__ == "__main__":
531
 
532
  print("-"*(60 + len(" App Starting ")) + "\n")
533
 
534
- print("Launching Gradio Interface for Claude Agent Evaluation...")
535
  demo.launch(debug=True, share=False)
 
1
  import os
2
  import gradio as gr
3
  import requests
 
4
  import pandas as pd
5
  import time
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
6
  import json
 
 
 
 
 
 
 
7
 
8
  # --- Constants ---
9
  DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
10
 
11
  class BasicAgent:
12
  def __init__(self):
13
+ # Load metadata.jsonl
 
 
 
 
 
 
 
 
 
 
14
  self.metadata = self._load_metadata()
15
+ print("BasicAgent initialized with metadata")
 
16
 
17
  def _load_metadata(self):
18
+ """Load metadata.jsonl, parsing each line as a JSON object."""
19
+ data = []
20
  try:
21
+ with open("metadata.jsonl", 'r', encoding='utf-8') as f:
22
+ for line_number, line in enumerate(f, 1):
23
+ line = line.strip()
24
+ if not line:
25
+ continue
26
+ try:
27
+ obj = json.loads(line)
28
+ if isinstance(obj, dict):
29
+ data.append(obj)
30
+ else:
31
+ print(f"Skipping line {line_number}: not a dictionary")
32
+ except json.JSONDecodeError as e:
33
+ print(f"Error parsing line {line_number}: {e}")
34
+ print(f"Loaded metadata.jsonl with {len(data)} entries")
35
+ return data
36
  except FileNotFoundError:
37
+ print("metadata.jsonl not found. Proceeding without metadata.")
 
 
 
38
  return []
39
  except Exception as e:
40
+ print(f"Unexpected error loading metadata.jsonl: {e}")
41
  return []
42
 
43
  def __call__(self, question: str, max_retries: int = 3) -> str:
44
+ """Search metadata for the question and return the final answer or 'unknown'."""
45
  print(f"Agent received question (first 50 chars): {question[:50]}...")
46
 
47
+ # Search metadata.jsonl for the question
48
  for item in self.metadata:
49
  if item.get("Question") == question:
50
  final_answer = item.get("Final answer")
51
  if final_answer:
52
+ print(f"Found answer in metadata.jsonl: {final_answer}")
53
  return final_answer
54
  else:
55
+ print("Question found in metadata.jsonl, but no final answer provided.")
56
 
57
+ # Fallback if question not found
58
+ print("Question not found in metadata.jsonl. Returning 'unknown'.")
59
+ return "unknown"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
60
 
61
  def run_and_submit_all(profile: gr.OAuthProfile | None, progress=gr.Progress()):
62
  """
 
77
  submit_url = f"{api_url}/submit"
78
 
79
  # 1. Instantiate Agent
80
+ progress(0, desc="Initializing agent...")
81
  try:
82
  agent = BasicAgent()
83
  except Exception as e:
 
112
  results_log = []
113
  answers_payload = []
114
  total_questions = len(questions_data)
115
+ print(f"Running agent on {total_questions} questions...")
116
 
117
  for i, item in enumerate(questions_data):
118
  progress((0.1 + 0.8 * i / total_questions), desc=f"Processing question {i+1}/{total_questions}")
 
128
  print(f"Processing task {task_id} ({i+1}/{total_questions})")
129
 
130
  try:
131
+ # Skip file handling since agent doesn't use files
132
  if requires_file:
133
+ print(f"Task {task_id} requires file, but agent doesn't support file handling. Using question as is.")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
134
 
135
+ submitted_answer = agent(question_text)
136
+
137
  answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
138
  results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
139
 
140
+ # Add small delay between requests
141
+ time.sleep(0.1)
142
 
143
  except Exception as e:
144
  error_msg = f"PROCESSING_ERROR: {e}"
 
152
  # 4. Prepare Submission
153
  progress(0.9, desc="Submitting answers...")
154
  submission_data = {"username": username.strip(), "agent_code": agent_code, "answers": answers_payload}
155
+ status_update = f"Agent finished. Submitting {len(answers_payload)} answers for user '{username}'..."
156
  print(status_update)
157
 
158
  # 5. Submit
 
168
  f"({result_data.get('correct_count', '?')}/{result_data.get('total_attempted', '?')} correct)\n"
169
  f"Processed: {len(results_log)} questions\n"
170
  f"Successfully submitted: {len(answers_payload)} answers\n"
171
+ f"Model used: Metadata-based lookup\n"
172
  f"Message: {result_data.get('message', 'No message received.')}"
173
  )
174
  print("Submission successful.")
 
204
 
205
  # --- Build Gradio Interface using Blocks ---
206
  with gr.Blocks() as demo:
207
+ gr.Markdown("# Agent Evaluation Runner")
208
  gr.Markdown(
209
  """
210
  **Instructions:**
211
 
212
+ 1. Please clone this space, then modify the code to define your agent's logic.
213
+ 2. Ensure metadata.jsonl is available with question-answer pairs.
214
  3. Log in to your Hugging Face account using the button below. This uses your HF username for submission.
215
+ 4. Click 'Run Evaluation & Submit All Answers' to fetch questions, run your agent, submit answers, and see the score.
216
 
217
  ---
218
+ **Agent Configuration:**
219
+ - 📄 Uses metadata.jsonl for answer lookup
220
+ - Returns 'unknown' for unmatched questions
 
 
 
 
221
  """
222
  )
223
 
 
237
  if __name__ == "__main__":
238
  print("\n" + "-"*30 + " App Starting " + "-"*30)
239
 
 
 
 
 
 
 
 
240
  space_host_startup = os.getenv("SPACE_HOST")
241
  space_id_startup = os.getenv("SPACE_ID")
242
 
 
255
 
256
  print("-"*(60 + len(" App Starting ")) + "\n")
257
 
258
+ print("Launching Gradio Interface for Agent Evaluation...")
259
  demo.launch(debug=True, share=False)