"""lm-polygraph wrapper for the xsum dataset (HuggingFace `datasets` builder).

Re-exports each split of the upstream `xsum` dataset as a two-column
(input, output) dataset suitable for lm-polygraph.
"""

import json
import os

import datasets

_DESCRIPTION = "lm-polygraph wrapper for xsum dataset"

_LANGS = ["en"]

_DATA_DIRECTORY = "."

VERSION = datasets.Version("0.0.1")

_SPLITS = ["train", "validation", "test"]

# Maps this wrapper onto the upstream dataset's name and column names.
_CONFIG = {
    "dataset": "xsum",
    "input_column": "document",
    "output_column": "summary",
}


def _prepare_dataset(dataset):
    """Return the (inputs, outputs) column pair for one split of the source dataset."""
    return dataset[_CONFIG["input_column"]], dataset[_CONFIG["output_column"]]


class PolygraphXsum(datasets.GeneratorBasedBuilder):
    """lm-polygraph wrapper for xsum dataset."""

    def _info(self):
        """Describe the exported schema: two string columns, "input" and "output"."""
        features = datasets.Features(
            {
                "input": datasets.Value("string"),
                "output": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
        )

    def _split_generators(self, dl_manager):
        """Load the upstream dataset, re-export each split as JSON lines, and
        return one SplitGenerator per split.
        """
        dataset = datasets.load_dataset(_CONFIG["dataset"], trust_remote_code=True)

        def download_custom_dataset(src_url: str, dst_path: str):
            # The "url" handed to download_custom is just the split name.
            split = src_url
            x, y = _prepare_dataset(dataset[split])
            # BUG FIX: original used bare `Dataset`, which was never imported
            # (NameError); qualify it through the `datasets` package.
            result_dataset = datasets.Dataset.from_dict({"input": x, "output": y})
            result_dataset.to_json(dst_path)

        downloaded_files = dl_manager.download_custom(_SPLITS, download_custom_dataset)

        # BUG FIX: the train generator passed an undefined `lang` kwarg
        # (NameError) while validation/test omitted the `lang` parameter that
        # _generate_examples required (TypeError). gen_kwargs are now uniform
        # across all three splits.
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": downloaded_files["train"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": downloaded_files["validation"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": downloaded_files["test"]},
            ),
        ]

    def _generate_examples(self, filepath):
        """Yield (key, example) pairs from a JSON-lines file produced by
        `download_custom_dataset` above.

        BUG FIX: the original passed file *contents* to `Dataset.from_json`
        (which expects a path, and `Dataset` was unimported) and then looped
        over an undefined name `item`. `Dataset.to_json` writes one JSON
        object per line, so the file is read back directly as JSON lines.
        """
        with open(filepath, encoding="utf-8") as f:
            for key, line in enumerate(f):
                yield key, json.loads(line)