Update script

- generate_evaluation_datasets.py +29 -8
- requirements.txt +2 -1
generate_evaluation_datasets.py

@@ -1,8 +1,17 @@
+import os
+from pathlib import Path
+
+import pandas as pd
 import typer
 from datasets import (Dataset, DatasetDict, get_dataset_config_names,
                       load_dataset)
+from dotenv import load_dotenv
 from huggingface_hub import list_datasets
-
+
+if Path(".env").is_file():
+    load_dotenv(".env")
+
+HF_TOKEN = os.getenv("HF_TOKEN")
 
 app = typer.Typer()
 
@@ -20,10 +29,14 @@ def convert(dataset_id: str):
     for config in configs:
         typer.echo(f"🛠️🛠️🛠️ Converting {dataset_id} with config {config} 🛠️🛠️🛠️")
         try:
-            raw_datasets = load_dataset(
+            raw_datasets = load_dataset(
+                dataset_id, name=config, use_auth_token=HF_TOKEN
+            )
         except:
             typer.echo(f"❌ Failed to load {dataset_id} with config {config}")
-            errors.append(
+            errors.append(
+                {"dataset_name": dataset_id, "config": config, "error_type": "load"}
+            )
             continue
         datasets_to_convert = DatasetDict()
 
@@ -38,7 +51,15 @@ def convert(dataset_id: str):
                 typer.echo(
                     f"❌ Skipping {dataset_name}/{config}/{split} due to missing columns: {', '.join(remainder_cols)}"
                 )
-                errors.append(
+                errors.append(
+                    {
+                        "dataset_name": dataset_id,
+                        "config": config,
+                        "split": split,
+                        "error_type": "missing_columns",
+                        "missing_columns": remainder_cols,
+                    }
+                )
             else:
                 # Add `input` column if it exists
                 if "input" in dataset.column_names:
@@ -73,12 +94,12 @@ def extract_evaluation_datasets():
     errors = []
     all_datasets = list_datasets()
     # Filter for GEM datasets
-    gem_datasets = [
+    gem_datasets = [
+        dataset for dataset in all_datasets if dataset.id.startswith("GEM/")
+    ]
     # Filter for blocklist
     blocklist = [
-        "
-        "RiSAWOZ", # Can't load
-        "CrossWOZ", # Can't load
+        "CrossWOZ", # Can't load
         "references", # This repo, so exclude!
     ]
     blocklist = ["GEM/" + dataset for dataset in blocklist]
requirements.txt

@@ -1,3 +1,4 @@
 datasets
 typer
-lxml
+lxml
+python-dotenv