| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| |
|
| | |
| | """Multi-Document Dataset.""" |
| |
|
| |
|
| | import json |
| |
|
| | import datasets |
| |
|
| |
|
# BibTeX entry for the dataset paper; surfaced to users via DatasetInfo.citation.
_CITATION = """
@article{lu2020multi,
title={Multi-Document: A Large-scale Dataset for Extreme Multi-document Summarization of Scientific Articles},
author={Arka Das, India},
journal={arXiv preprint arXiv:2010.14235},
year={2022}
}
"""

# Human-readable dataset summary; surfaced to users via DatasetInfo.description.
_DESCRIPTION = """
Multi-Document, a large-scale multi-document summarization dataset created from scientific articles. Multi-Document introduces a challenging multi-document summarization task: writing the related-work section of a paper based on its abstract and the articles it references.
"""
| |
|
| | _URL_TRAIN = "https://github.com/arka0821/multi_document_summarization/blob/master/data/train.txt" |
| | _URL_TEST = "https://github.com/arka0821/multi_document_summarization/blob/master/data/test.txt" |
| | _URL_VAL = "https://github.com/arka0821/multi_document_summarization/blob/master/data/val.txt" |
| |
|
| |
|
class MultiDocumentSum(datasets.GeneratorBasedBuilder):
    """Multi-Document: multi-document summarization of scientific articles.

    Each example pairs a target summary with the set of source documents it
    was written from; see ``_info`` for the exact feature schema.
    """

    VERSION = datasets.Version("1.1.0")

    # FIX: the parameter was misspelled ``selif`` in the original, so the
    # method signature did not actually bind ``self``.
    def _info(self):
        """Return the dataset metadata (features, homepage, citation)."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "docs": datasets.Sequence(
                        {
                            "id": datasets.Value("string"),
                            "text": datasets.Value("string"),
                        },
                    ),
                    "summary": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage="https://github.com/arka0821/multi_document_summarization",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the three split files and return one generator per split."""
        train_path = dl_manager.download_and_extract(_URL_TRAIN)
        test_path = dl_manager.download_and_extract(_URL_TEST)
        val_path = dl_manager.download_and_extract(_URL_VAL)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"path": train_path},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"path": test_path},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"path": val_path},
            ),
        ]

    def _generate_examples(self, path=None):
        """Yield ``(key, example)`` pairs from one JSON-lines split file.

        Fixes vs. the original implementation, which crashed on the first
        record: it treated each raw line (a ``str``) as a dict (``el.copy()``),
        referenced an undefined name ``tmp``, called ``f.close()`` inside the
        ``with`` block, and left debug ``print`` statements behind. Each line
        is now parsed with ``json.loads``.

        NOTE(review): assumes one JSON object per line with ``"id"``,
        ``"docs"`` (list of ``{"id", "text"}``) and ``"summary"`` keys,
        matching the feature schema declared in ``_info`` — confirm against
        the actual data files.
        """
        with open(path, encoding="utf-8") as f:
            for idx, line in enumerate(f):
                line = line.strip()
                if not line:  # tolerate blank/trailing lines
                    continue
                record = json.loads(line)
                yield idx, {
                    "id": record.get("id", str(idx)),
                    "docs": [
                        # Project only the declared fields so extra keys in
                        # the raw data cannot break the feature schema.
                        {"id": doc.get("id", ""), "text": doc.get("text", "")}
                        for doc in record.get("docs", [])
                    ],
                    "summary": record.get("summary", ""),
                }