|
|
import os |
|
|
import json |
|
|
from datasets import ( |
|
|
GeneratorBasedBuilder, |
|
|
DatasetInfo, |
|
|
Features, |
|
|
Value, |
|
|
Sequence, |
|
|
Split, |
|
|
SplitGenerator, |
|
|
BuilderConfig, |
|
|
) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Feature schema for a single experimental condition: the condition's name,
# the fully assembled sentence string, and the ordered regions it was built
# from (each region keeps its 1-based number and raw text).
SUITE_DATASET_CONDITION_SPEC = dict(
    condition_name=Value("string"),
    content=Value("string"),
    regions=Sequence(
        dict(
            region_number=Value("int32"),
            content=Value("string"),
        )
    ),
)
|
|
|
|
|
# Top-level feature schema: one record per test-suite item, carrying the
# suite name, the item number, all of the item's conditions, and the
# suite-level prediction formulas (repeated on every item of the suite).
SUITE_DATASET_SPEC = Features(
    dict(
        suite_name=Value("string"),
        item_number=Value("int32"),
        conditions=Sequence(SUITE_DATASET_CONDITION_SPEC),
        predictions=Sequence(Value("string")),
    )
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class SyntacticAgreement(GeneratorBasedBuilder):
    """Agreement syntax test suites in Spanish, Italian, Portuguese, and Russian.

    Each configuration downloads a per-language zip archive (one JSON file
    per test suite) from the Hugging Face Hub and yields one example per
    suite item.
    """

    BUILDER_CONFIG_CLASS = BuilderConfig
    # One config per language; each name maps to "<name>.zip" in the repo.
    BUILDER_CONFIGS = [
        BuilderConfig(
            name="spanish",
            description="Spanish syntax test suites",
        ),
        BuilderConfig(
            name="italian",
            description="Italian syntax test suites",
        ),
        BuilderConfig(
            name="portuguese",
            description="Portuguese syntax test suites",
        ),
        BuilderConfig(
            name="russian",
            description="Russian syntax test suites",
        ),
    ]

    def _info(self):
        """Return dataset metadata with the shared suite feature schema."""
        return DatasetInfo(
            description="Agreement syntax test suites in multiple languages.",
            features=SUITE_DATASET_SPEC,
            supervised_keys=None,
            homepage="",
            license="",
            citation="https://aclanthology.org/2025.depling-1.4/",
        )

    def _split_generators(self, dl_manager):
        """Download and extract this config's archive; single TRAIN split.

        Handles both archive layouts: JSON files at the archive root, or
        nested inside a "<config name>/" directory.
        """
        base_url = "https://huggingface.co/datasets/albalbalba/SyntacticAgreement/resolve/main"
        archive_url = f"{base_url}/{self.config.name}.zip"
        extracted_dir = dl_manager.download_and_extract(archive_url)

        # Some archives nest their files under a directory named after the
        # config. Use isdir (not exists) so a stray *file* with that name
        # cannot be mistaken for the data directory and crash os.listdir
        # later in _generate_examples.
        possible_nested = os.path.join(extracted_dir, self.config.name)
        if os.path.isdir(possible_nested):
            data_dir = possible_nested
        else:
            data_dir = extracted_dir

        return [
            SplitGenerator(
                name=Split.TRAIN,
                gen_kwargs={"data_dir": data_dir},
            ),
        ]

    @staticmethod
    def _join_regions(regions):
        """Assemble one display sentence from a condition's region strings.

        Joins region contents with single spaces, trims the leading space
        left by an empty initial region, collapses the double spaces left
        by empty interior regions, and removes the space before sentence
        punctuation / after the inverted question mark.
        """
        content = " ".join(region["content"] for region in regions).lstrip(" ")
        # BUG FIX: the original called content.replace(" ", " ") — a no-op
        # (single space replaced by single space). The intent, collapsing
        # the double spaces that empty regions leave behind, needs a
        # two-space pattern.
        return (
            content.replace("  ", " ")
            .replace(" .", ".")
            .replace(" ,", ",")
            .replace("¿ ", "¿")
        )

    def _generate_examples(self, data_dir):
        """Yield ``(key, example)`` pairs from every suite JSON in ``data_dir``.

        Keys are "<suite_name>_<item_number>". Suite-level prediction
        formulas are repeated on every item of that suite. Files are read
        in sorted order so example keys are deterministic across runs.
        """
        for filename in sorted(os.listdir(data_dir)):
            if not filename.endswith(".json"):
                continue
            filepath = os.path.join(data_dir, filename)
            with open(filepath, encoding="utf-8") as f:
                data = json.load(f)

            suite_name = data["meta"]["name"]
            predictions = [p["formula"] for p in data.get("predictions", [])]

            for item in data.get("items", []):
                item_number = item["item_number"]
                conditions = []
                for condition in item.get("conditions", []):
                    regions = condition["regions"]
                    conditions.append({
                        "condition_name": condition["condition_name"],
                        "content": self._join_regions(regions),
                        "regions": [
                            {
                                "region_number": region["region_number"],
                                "content": region["content"],
                            }
                            for region in regions
                        ],
                    })

                key = f"{suite_name}_{item_number}"
                yield key, {
                    "suite_name": suite_name,
                    "item_number": item_number,
                    "conditions": conditions,
                    "predictions": predictions,
                }
|
|
|