bstraehle committed on
Commit
5f4828e
·
verified ·
1 Parent(s): 702ce31

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +5 -5
app.py CHANGED
@@ -8,7 +8,7 @@ from agents.crew import run_crew
8
  from utils.utils import (
9
  DATASET_TYPE_GAIA,
10
  DATASET_TYPE_HLE,
11
- get_dataset_from_file,
12
  get_dataset
13
  )
14
 
@@ -185,7 +185,7 @@ with gr.Blocks(elem_classes=["full-width-app"]) as gaia:
185
  with gr.Tabs():
186
  with gr.TabItem("GAIA Benchmark Level 1"):
187
  gr.Examples(
188
- examples=get_dataset_from_file(DATASET_TYPE_GAIA, 1),
189
  inputs=[question, ground_truth, file_name, openai_api_key, gemini_api_key, anthropic_api_key],
190
  examples_per_page=3,
191
  cache_examples=False
@@ -193,7 +193,7 @@ with gr.Blocks(elem_classes=["full-width-app"]) as gaia:
193
 
194
  with gr.TabItem("GAIA Benchmark Level 2"):
195
  gr.Examples(
196
- examples=get_dataset_from_file(DATASET_TYPE_GAIA, 2),
197
  inputs=[question, ground_truth, file_name, openai_api_key, gemini_api_key, anthropic_api_key],
198
  examples_per_page=3,
199
  cache_examples=False
@@ -201,7 +201,7 @@ with gr.Blocks(elem_classes=["full-width-app"]) as gaia:
201
 
202
  with gr.TabItem("GAIA Benchmark Level 3"):
203
  gr.Examples(
204
- examples=get_dataset_from_file(DATASET_TYPE_GAIA, 3),
205
  inputs=[question, ground_truth, file_name, openai_api_key, gemini_api_key, anthropic_api_key],
206
  examples_per_page=3,
207
  cache_examples=False
@@ -209,7 +209,7 @@ with gr.Blocks(elem_classes=["full-width-app"]) as gaia:
209
 
210
  with gr.TabItem("Humanity's Last Exam"):
211
  gr.Examples(
212
- examples=get_dataset_from_file(DATASET_TYPE_HLE, 0),
213
  inputs=[question, ground_truth, file_name, openai_api_key, gemini_api_key, anthropic_api_key],
214
  examples_per_page=3,
215
  cache_examples=False
 
8
  from utils.utils import (
9
  DATASET_TYPE_GAIA,
10
  DATASET_TYPE_HLE,
11
+ #get_dataset_from_file,
12
  get_dataset
13
  )
14
 
 
185
  with gr.Tabs():
186
  with gr.TabItem("GAIA Benchmark Level 1"):
187
  gr.Examples(
188
+ examples=get_dataset(DATASET_TYPE_GAIA, 1),
189
  inputs=[question, ground_truth, file_name, openai_api_key, gemini_api_key, anthropic_api_key],
190
  examples_per_page=3,
191
  cache_examples=False
 
193
 
194
  with gr.TabItem("GAIA Benchmark Level 2"):
195
  gr.Examples(
196
+ examples=get_dataset(DATASET_TYPE_GAIA, 2),
197
  inputs=[question, ground_truth, file_name, openai_api_key, gemini_api_key, anthropic_api_key],
198
  examples_per_page=3,
199
  cache_examples=False
 
201
 
202
  with gr.TabItem("GAIA Benchmark Level 3"):
203
  gr.Examples(
204
+ examples=get_dataset(DATASET_TYPE_GAIA, 3),
205
  inputs=[question, ground_truth, file_name, openai_api_key, gemini_api_key, anthropic_api_key],
206
  examples_per_page=3,
207
  cache_examples=False
 
209
 
210
  with gr.TabItem("Humanity's Last Exam"):
211
  gr.Examples(
212
+ examples=get_dataset(DATASET_TYPE_HLE, 0),
213
  inputs=[question, ground_truth, file_name, openai_api_key, gemini_api_key, anthropic_api_key],
214
  examples_per_page=3,
215
  cache_examples=False