| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | """VLSP: Very Long Scientific Papers""" |
| |
|
| |
|
| | import os |
| | import glob |
| |
|
| | import datasets |
| |
|
| |
|
# BibTeX citation for the dataset (none published yet).
_CITATION = """"""
# Short human-readable description shown in the dataset card / DatasetInfo.
_DESCRIPTION = """\
Very Long version of the scientific papers summarization dataset. Only includes theses over 10,000 tokens long.
"""
# Project page for the dataset.
_HOMEPAGE = "https://github.com/ghomasHudson/very_long_scientific_papers"
# Zip archive of the repository master branch; contains the data files.
_URL = "https://github.com/ghomasHudson/very_long_scientific_papers/archive/master.zip"
| |
|
class VLSP(datasets.GeneratorBasedBuilder):
    """VLSP: Very Long Scientific Papers.

    Summarization dataset of scientific theses longer than 10,000 tokens.
    Each example pairs a full article (``*.main.txt``) with a list of
    abstract variants found alongside it.
    """

    VERSION = datasets.Version("1.1.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="arxiv", description="Arxiv theses"),
    ]

    DEFAULT_CONFIG_NAME = "arxiv"

    def _info(self):
        """Return the dataset metadata (features, description, homepage)."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    # One entry per abstract variant present on disk
                    # (".abstract" and/or ".abstract-long").
                    "abstract": datasets.features.Sequence(
                        datasets.Value("string")
                    ),
                    "article": datasets.Value("string"),
                }
            ),
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download and extract the repo archive; expose the test split."""
        data_dir = dl_manager.download_and_extract(_URL)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": os.path.join(
                        data_dir, "very_long_scientific_papers-master", "deduped-test"
                    ),
                },
            )
        ]

    def _generate_examples(self, filepath):
        """Yield ``(key, example)`` pairs from the extracted data directory.

        Articles live in ``*.main.txt`` files; matching abstracts, when
        present, live alongside them with ``.abstract.`` /
        ``.abstract-long.`` in place of ``.main.``.
        """
        # Sort the glob result so example keys are deterministic across
        # filesystems (glob order is otherwise unspecified).
        article_files = sorted(glob.glob(os.path.join(filepath, "*.main.txt")))
        for key, fn in enumerate(article_files):
            summary = []
            for marker in (".abstract.", ".abstract-long."):
                summ_fn = fn.replace(".main.", marker)
                if os.path.exists(summ_fn):
                    # Context manager closes the handle promptly instead of
                    # relying on GC; explicit encoding avoids locale-dependent
                    # decoding of the text files.
                    with open(summ_fn, encoding="utf-8") as f:
                        summary.append(f.read())

            with open(fn, encoding="utf-8") as f:
                article = f.read()

            yield key, {
                "article": article,
                "abstract": summary,
            }