bstraehle committed
Commit 6137156 · verified · 1 Parent(s): 3b60fba

Update app.py

Files changed (1): app.py (+5 −6)
app.py CHANGED
@@ -8,8 +8,7 @@ from agents.crew import run_crew
 from utils.utils import (
     QUESTION_TYPE_GAIA,
     QUESTION_TYPE_HLE,
-    #get_questions_from_dataset,
-    get_questions_from_file
+    get_questions_from_dataset
 )
 
 # MCP server functions
@@ -185,7 +184,7 @@ with gr.Blocks(elem_classes=["full-width-app"]) as gaia:
     with gr.Tabs():
         with gr.TabItem("GAIA Benchmark Level 1"):
             gr.Examples(
-                examples=get_questions_from_file(QUESTION_TYPE_GAIA, 1),
+                examples=get_questions_from_dataset(QUESTION_TYPE_GAIA, 1),
                 inputs=[question, ground_truth, file_name, openai_api_key, gemini_api_key, anthropic_api_key],
                 examples_per_page=3,
                 cache_examples=False
@@ -193,7 +192,7 @@ with gr.Blocks(elem_classes=["full-width-app"]) as gaia:
 
         with gr.TabItem("GAIA Benchmark Level 2"):
             gr.Examples(
-                examples=get_questions_from_file(QUESTION_TYPE_GAIA, 2),
+                examples=get_questions_from_dataset(QUESTION_TYPE_GAIA, 2),
                 inputs=[question, ground_truth, file_name, openai_api_key, gemini_api_key, anthropic_api_key],
                 examples_per_page=3,
                 cache_examples=False
@@ -201,7 +200,7 @@ with gr.Blocks(elem_classes=["full-width-app"]) as gaia:
 
         with gr.TabItem("GAIA Benchmark Level 3"):
             gr.Examples(
-                examples=get_questions_from_file(QUESTION_TYPE_GAIA, 3),
+                examples=get_questions_from_dataset(QUESTION_TYPE_GAIA, 3),
                 inputs=[question, ground_truth, file_name, openai_api_key, gemini_api_key, anthropic_api_key],
                 examples_per_page=3,
                 cache_examples=False
@@ -209,7 +208,7 @@ with gr.Blocks(elem_classes=["full-width-app"]) as gaia:
 
         with gr.TabItem("Humanity's Last Exam"):
             gr.Examples(
-                examples=get_questions_from_file(QUESTION_TYPE_HLE, 0),
+                examples=get_questions_from_dataset(QUESTION_TYPE_HLE, 0),
                 inputs=[question, ground_truth, file_name, openai_api_key, gemini_api_key, anthropic_api_key],
                 examples_per_page=3,
                 cache_examples=False
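The commit swaps the file-based loader for get_questions_from_dataset but does not include that helper's body. Below is a minimal sketch of what it could look like in utils/utils.py, assuming the questions are pulled from the Hugging Face Hub with the datasets library; the dataset IDs, configs, splits, and column names are assumptions for illustration, not taken from this repository.

```python
# Hypothetical sketch of utils/utils.py -- dataset IDs, configs, splits, and
# column names are assumptions; both datasets are gated and need HF auth.
from datasets import load_dataset

QUESTION_TYPE_GAIA = "gaia"
QUESTION_TYPE_HLE = "hle"

def get_questions_from_dataset(question_type: str, level: int) -> list[list]:
    """Return rows shaped for gr.Examples:
    [question, ground_truth, file_name, openai_key, gemini_key, anthropic_key]."""
    if question_type == QUESTION_TYPE_GAIA:
        # Assumed source: the GAIA validation split, filtered by level.
        ds = load_dataset("gaia-benchmark/GAIA", "2023_all", split="validation")
        return [
            [r["Question"], r["Final answer"], r.get("file_name", ""), "", "", ""]
            for r in ds
            if int(r["Level"]) == level
        ]
    # Assumed source for Humanity's Last Exam; the app passes level 0 here,
    # so the level argument is ignored for HLE.
    ds = load_dataset("cais/hle", split="test")
    return [[r["question"], r["answer"], "", "", "", ""] for r in ds]
```

Each returned row lines up positionally with the six components wired into gr.Examples; the trailing empty strings leave the API-key fields blank so users supply their own keys.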
 
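Since the diff only shows fragments of the UI, here is a self-contained sketch of the gr.Examples wiring the four tabs share, with a hard-coded row standing in for get_questions_from_dataset(...); the labels and the echo handler are placeholders for illustration, not the app's actual run_crew pipeline.

```python
# Minimal Gradio sketch of the gr.Examples pattern used in app.py.
# The example row and the handler below are placeholders, not app code.
import gradio as gr

def run(question, ground_truth, file_name, openai_api_key, gemini_api_key, anthropic_api_key):
    # Stand-in for run_crew(...): just echo the question and ground truth.
    return f"Q: {question} | expected: {ground_truth}"

with gr.Blocks() as demo:
    question = gr.Textbox(label="Question")
    ground_truth = gr.Textbox(label="Ground truth")
    file_name = gr.Textbox(label="File name")
    openai_api_key = gr.Textbox(label="OpenAI API key", type="password")
    gemini_api_key = gr.Textbox(label="Gemini API key", type="password")
    anthropic_api_key = gr.Textbox(label="Anthropic API key", type="password")
    answer = gr.Textbox(label="Answer")

    gr.Examples(
        examples=[["What is 2 + 2?", "4", "", "", "", ""]],  # stand-in row
        inputs=[question, ground_truth, file_name, openai_api_key, gemini_api_key, anthropic_api_key],
        examples_per_page=3,
        cache_examples=False,  # clicking an example only fills the inputs
    )
    gr.Button("Run").click(
        run,
        inputs=[question, ground_truth, file_name, openai_api_key, gemini_api_key, anthropic_api_key],
        outputs=answer,
    )

demo.launch()
```

With cache_examples=False, selecting an example only populates the input components; nothing runs until the user clicks Run, which matters for an app that calls external model APIs with user-supplied keys.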