| """CNN/DailyMail Summarization dataset, non-anonymized version.""" |
|
|
| import hashlib |
| import os |
|
|
| import datasets |
|
|
|
|
| logger = datasets.logging.get_logger(__name__) |
|
|
|
|
| _DESCRIPTION = """\ |
| CNN/DailyMail non-anonymized summarization dataset. |
| |
| There are two features: |
| - article: text of news article, used as the document to be summarized |
| - highlights: joined text of highlights with <s> and </s> around each |
| highlight, which is the target summary |
| """ |


_CITATION = """\
@article{DBLP:journals/corr/SeeLM17,
  author    = {Abigail See and
               Peter J. Liu and
               Christopher D. Manning},
  title     = {Get To The Point: Summarization with Pointer-Generator Networks},
  journal   = {CoRR},
  volume    = {abs/1704.04368},
  year      = {2017},
  url       = {http://arxiv.org/abs/1704.04368},
  archivePrefix = {arXiv},
  eprint    = {1704.04368},
  timestamp = {Mon, 13 Aug 2018 16:46:08 +0200},
  biburl    = {https://dblp.org/rec/bib/journals/corr/SeeLM17},
  bibsource = {dblp computer science bibliography, https://dblp.org}
}

@inproceedings{hermann2015teaching,
  title={Teaching machines to read and comprehend},
  author={Hermann, Karl Moritz and Kocisky, Tomas and Grefenstette, Edward and Espeholt, Lasse and Kay, Will and Suleyman, Mustafa and Blunsom, Phil},
  booktitle={Advances in neural information processing systems},
  pages={1693--1701},
  year={2015}
}
"""


_DL_URLS = {
    # Story archives (referenced here by archive filename) and the official
    # split URL lists from the pointer-generator repository.
    "cnn_stories": "cnn_stories.tgz",
    "dm_stories": "dailymail_stories.tgz",
    "test_urls": "https://raw.githubusercontent.com/abisee/cnn-dailymail/master/url_lists/all_test.txt",
    "train_urls": "https://raw.githubusercontent.com/abisee/cnn-dailymail/master/url_lists/all_train.txt",
    "val_urls": "https://raw.githubusercontent.com/abisee/cnn-dailymail/master/url_lists/all_val.txt",
}


_HIGHLIGHTS = "highlights"
_ARTICLE = "article"

_SUPPORTED_VERSIONS = [
    datasets.Version("1.0.0", ""),
    datasets.Version("2.0.0", "Separate target sentences with newline."),
    datasets.Version("3.0.0", "Using cased version."),
]

_DEFAULT_VERSION = datasets.Version("3.0.0", "Using cased version.")


class CnnDailymailConfig(datasets.BuilderConfig):
    """BuilderConfig for CnnDailymail."""

    def __init__(self, **kwargs):
        """BuilderConfig for CnnDailymail.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(**kwargs)


def _get_url_hashes(path):
    """Get hashes of urls in file."""
    urls = _read_text_file(path)

    def url_hash(u):
        h = hashlib.sha1()
        try:
            u = u.encode("utf-8")
        except UnicodeEncodeError:  # encoding a str raises Encode, not Decode, errors
            logger.error("Cannot hash url: %s", u)
            u = u.encode("utf-8", "replace")
        h.update(u)
        return h.hexdigest()

    return {url_hash(u): True for u in urls}
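# Story files in the downloaded archives are named "<sha1(url)>.story", so the
# hashes computed above identify split membership directly. For example
# (illustrative URL, not taken from the split lists):
#   hashlib.sha1("http://www.cnn.com/some/article".encode("utf-8")).hexdigest()
# yields the 40-character hex stem that _get_hash_from_path() extracts below.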


def _get_hash_from_path(p):
    """Extract hash from path."""
    basename = os.path.basename(p)
    return basename[0 : basename.find(".story")]
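# Example (illustrative filename):
#   _get_hash_from_path("cnn/stories/0001d1afc2.story") -> "0001d1afc2"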


def _find_files(dl_paths, publisher, url_dict):
    """Find files corresponding to urls."""
    if publisher == "cnn":
        top_dir = os.path.join(dl_paths["cnn_stories"], "cnn", "stories")
    elif publisher == "dm":
        top_dir = os.path.join(dl_paths["dm_stories"], "dailymail", "stories")
    else:
        # logger.fatal only logs and returns, which would leave `top_dir`
        # unbound below; raise instead.
        raise ValueError(f"Unsupported publisher: {publisher}")
    files = sorted(os.listdir(top_dir))

    ret_files = []
    for p in files:
        if _get_hash_from_path(p) in url_dict:
            ret_files.append(os.path.join(top_dir, p))
    return ret_files


def _subset_filenames(dl_paths, split):
    """Get filenames for a particular split."""
    assert isinstance(dl_paths, dict), dl_paths
    if split == datasets.Split.TRAIN:
        urls = _get_url_hashes(dl_paths["train_urls"])
    elif split == datasets.Split.VALIDATION:
        urls = _get_url_hashes(dl_paths["val_urls"])
    elif split == datasets.Split.TEST:
        urls = _get_url_hashes(dl_paths["test_urls"])
    else:
        # As in _find_files, raise instead of only logging a fatal message.
        raise ValueError(f"Unsupported split: {split}")
    cnn = _find_files(dl_paths, "cnn", urls)
    dm = _find_files(dl_paths, "dm", urls)
    return cnn + dm


DM_SINGLE_CLOSE_QUOTE = "\u2019"
DM_DOUBLE_CLOSE_QUOTE = "\u201d"
# Acceptable ways to end a sentence.
END_TOKENS = [".", "!", "?", "...", "'", "`", '"', DM_SINGLE_CLOSE_QUOTE, DM_DOUBLE_CLOSE_QUOTE, ")"]


def _read_text_file(text_file):
    lines = []
    with open(text_file, "r", encoding="utf-8") as f:
        for line in f:
            lines.append(line.strip())
    return lines


def _get_art_abs(story_file, tfds_version):
    """Get abstract (highlights) and article from a story file path."""
    lines = _read_text_file(story_file)

    # Put periods at the ends of lines that are missing them; many image
    # captions in the raw data lack terminal punctuation and would otherwise
    # run into the surrounding article text.
    def fix_missing_period(line):
        """Adds a period to a line that is missing a period."""
        if "@highlight" in line:
            return line
        if not line:
            return line
        if line[-1] in END_TOKENS:
            return line
        return line + " ."

    lines = [fix_missing_period(line) for line in lines]
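    # For example (illustrative line):
    #   fix_missing_period("Image caption with no punctuation")
    #     -> "Image caption with no punctuation ."
    # "@highlight" markers, empty lines, and lines already ending in one of
    # END_TOKENS pass through unchanged.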

    # Split the text into article lines and highlight (summary) lines.
    article_lines = []
    highlights = []
    next_is_highlight = False
    for line in lines:
        if not line:
            continue
        elif line.startswith("@highlight"):
            next_is_highlight = True
        elif next_is_highlight:
            highlights.append(line)
        else:
            article_lines.append(line)

    article = " ".join(article_lines)

    # From version 2.0.0 on, target sentences are separated by newlines.
    if tfds_version >= "2.0.0":
        abstract = "\n".join(highlights)
    else:
        abstract = " ".join(highlights)

    return article, abstract
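# A ".story" file interleaves article text with "@highlight" markers, e.g.
# (made-up content):
#
#   London (CNN) -- Some opening sentence of the article.
#
#   @highlight
#
#   First summary point
#
#   @highlight
#
#   Second summary point
#
# For versions >= 2.0.0 this parses to:
#   article  = "London (CNN) -- Some opening sentence of the article."
#   abstract = "First summary point .\nSecond summary point ."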


class CnnDailymail(datasets.GeneratorBasedBuilder):
    """CNN/DailyMail non-anonymized summarization dataset."""

    BUILDER_CONFIGS = [
        CnnDailymailConfig(name=str(version), description="Plain text", version=version)
        for version in _SUPPORTED_VERSIONS
    ]
    # The default config name must be a class attribute of the builder to
    # take effect (a module-level constant is ignored).
    DEFAULT_CONFIG_NAME = "3.0.0"

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    _ARTICLE: datasets.Value("string"),
                    _HIGHLIGHTS: datasets.Value("string"),
                    "id": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage="https://github.com/abisee/cnn-dailymail",
            citation=_CITATION,
        )

    def _vocab_text_gen(self, paths):
        for _, ex in self._generate_examples(paths):
            yield " ".join([ex[_ARTICLE], ex[_HIGHLIGHTS]])

    def _split_generators(self, dl_manager):
        dl_paths = dl_manager.download_and_extract(_DL_URLS)
        train_files = _subset_filenames(dl_paths, datasets.Split.TRAIN)
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": train_files}),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"files": _subset_filenames(dl_paths, datasets.Split.VALIDATION)},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST, gen_kwargs={"files": _subset_filenames(dl_paths, datasets.Split.TEST)}
            ),
        ]

    def _generate_examples(self, files):
        for p in files:
            article, highlights = _get_art_abs(p, self.config.version)
            if not article or not highlights:
                continue
            fname = os.path.basename(p)
            yield fname, {
                _ARTICLE: article,
                _HIGHLIGHTS: highlights,
                "id": _get_hash_from_path(fname),
            }
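

if __name__ == "__main__":
    # Minimal smoke test (a sketch: assumes a `datasets` version that still
    # supports loading from a local script path, and that the archives named
    # in _DL_URLS are resolvable by the download manager).
    ds = datasets.load_dataset(__file__, "3.0.0", split="validation")
    print(len(ds))
    print(ds[0]["id"])
    print(ds[0]["highlights"])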