| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| |
|
| | |
"""Wiki Long Subset."""
| |
|
| |
|
| | import json |
| |
|
| | import datasets |
| |
|
| | logger = datasets.logging.get_logger(__name__) |
| |
|
| |
|
| | _DESCRIPTION = """\ |
| | Dataset consisting of long wikipedia articles. |
| | """ |
| |
|
| | _URLS = { |
| | "train": [ |
| | "train/partition_0.jsonl", |
| | "train/partition_1.jsonl", |
| | "train/partition_2.jsonl", |
| | "train/partition_3.jsonl", |
| | "train/partition_4.jsonl", |
| | "train/partition_5.jsonl", |
| | "train/partition_6.jsonl", |
| | "train/partition_7.jsonl", |
| | "train/partition_8.jsonl", |
| | "train/partition_9.jsonl", |
| | "train/partition_10.jsonl", |
| | "train/partition_11.jsonl", |
| | ], |
| | "test": "test/partition_0.jsonl", |
| | } |
| |
|
| |
|
class WikiLongDatasetConfig(datasets.BuilderConfig):
    """BuilderConfig for the long-Wikipedia-articles dataset."""

    def __init__(self, **kwargs):
        """Initialize the config.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(**kwargs)

    @property
    def features(self):
        # Flat schema: every column of an article record is a plain string.
        return {
            column: datasets.Value("string")
            for column in ("id", "url", "title", "text")
        }
| |
|
| |
|
class WikiLongDataset(datasets.GeneratorBasedBuilder):
    """WikiLongDataset Classification dataset. Version 1.0."""

    BUILDER_CONFIG_CLASS = WikiLongDatasetConfig
    BUILDER_CONFIGS = [
        WikiLongDatasetConfig(
            version=datasets.Version("1.0.0", ""),
            description="Long Wikipedia Articles",
        )
    ]

    def _info(self):
        """Return dataset metadata; the schema comes from the active config."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(self.config.features),
        )

    def _split_generators(self, dl_manager):
        """Download the JSONL shards and declare the train/test splits."""
        downloaded = dl_manager.download_and_extract(_URLS)
        return [
            datasets.SplitGenerator(
                name=split_name, gen_kwargs={"filepath": downloaded[url_key]}
            )
            for split_name, url_key in (
                (datasets.Split.TRAIN, "train"),
                (datasets.Split.TEST, "test"),
            )
        ]

    def _generate_examples(self, filepath):
        """This function returns the examples in the raw (text) form."""
        logger.info("generating examples from = %s", filepath)
        # A single shard arrives as a bare string; normalize to a list so the
        # example key keeps incrementing across shards.
        shards = [filepath] if isinstance(filepath, str) else filepath
        example_idx = 0
        for shard in shards:
            with open(shard, encoding="utf-8") as handle:
                for line in handle:
                    yield example_idx, json.loads(line)
                    example_idx += 1
| |
|