Tasks: Summarization
Modalities: Text
Formats: parquet
Sub-tasks: news-articles-summarization
Languages: English
Size: 100K - 1M
License:
Commit · dee4e7e
Parent(s): 0dbbea5
Support streaming cnn_dailymail dataset (#4188)

* Support streaming cnn_dailymail dataset
* Refactor URLS
* Fix dataset card
Commit from https://github.com/huggingface/datasets/commit/966d3bcd2eb01ede907062599db688feacf03762
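For context on what this change enables: once a loading script only uses `dl_manager.download` plus `dl_manager.iter_archive` (as in the diff below), the dataset can be read lazily instead of being downloaded and extracted up front. A minimal consumption sketch, assuming the standard `datasets.load_dataset` API and this dataset's usual "3.0.0" config name:

from datasets import load_dataset

# With streaming=True nothing is extracted to disk first; examples are
# yielded as the story archives are iterated.
ds = load_dataset("cnn_dailymail", "3.0.0", split="train", streaming=True)
print(next(iter(ds))["highlights"])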
- README.md +1 -1
- cnn_dailymail.py +40 -72
README.md CHANGED

@@ -16,7 +16,7 @@ source_datasets:
 task_categories:
 - summarization
 task_ids:
--
+- news-articles-summarization
 paperswithcode_id: cnn-daily-mail-1
 pretty_name: CNN / Daily Mail
 ---
cnn_dailymail.py CHANGED

@@ -25,6 +25,8 @@ import datasets
 logger = datasets.logging.get_logger(__name__)
 
 
+_HOMEPAGE = "https://github.com/abisee/cnn-dailymail"
+
 _DESCRIPTION = """\
 CNN/DailyMail non-anonymized summarization dataset.
 
@@ -63,13 +65,11 @@ _CITATION = """\
 """
 
 _DL_URLS = {
-    # pylint: disable=line-too-long
     "cnn_stories": "https://drive.google.com/uc?export=download&id=0BwmD_VLjROrfTHk4NFg2SndKcjQ",
     "dm_stories": "https://drive.google.com/uc?export=download&id=0BwmD_VLjROrfM1BxdkxVaTY2bWs",
-    "test_urls": "https://raw.githubusercontent.com/abisee/cnn-dailymail/master/url_lists/all_test.txt",
-    "train_urls": "https://raw.githubusercontent.com/abisee/cnn-dailymail/master/url_lists/all_train.txt",
-    "val_urls": "https://raw.githubusercontent.com/abisee/cnn-dailymail/master/url_lists/all_val.txt",
-    # pylint: enable=line-too-long
+    "train": "https://raw.githubusercontent.com/abisee/cnn-dailymail/master/url_lists/all_train.txt",
+    "validation": "https://raw.githubusercontent.com/abisee/cnn-dailymail/master/url_lists/all_val.txt",
+    "test": "https://raw.githubusercontent.com/abisee/cnn-dailymail/master/url_lists/all_test.txt",
 }
 
 _HIGHLIGHTS = "highlights"
@@ -104,7 +104,7 @@ class CnnDailymailConfig(datasets.BuilderConfig):
 
 def _get_url_hashes(path):
     """Get hashes of urls in file."""
-    urls = _read_text_file(path)
+    urls = _read_text_file_path(path)
 
     def url_hash(u):
         h = hashlib.sha1()
@@ -115,47 +115,12 @@ def _get_url_hashes(path):
         h.update(u)
         return h.hexdigest()
 
-    return {url_hash(u): True for u in urls}
+    return {url_hash(u) for u in urls}
 
 
 def _get_hash_from_path(p):
     """Extract hash from path."""
-    basename = os.path.basename(p)
-    return basename[0 : basename.find(".story")]
-
-
-def _find_files(dl_paths, publisher, url_dict):
-    """Find files corresponding to urls."""
-    if publisher == "cnn":
-        top_dir = os.path.join(dl_paths["cnn_stories"], "cnn", "stories")
-    elif publisher == "dm":
-        top_dir = os.path.join(dl_paths["dm_stories"], "dailymail", "stories")
-    else:
-        logger.fatal("Unsupported publisher: %s", publisher)
-    files = sorted(os.listdir(top_dir))
-
-    ret_files = []
-    for p in files:
-        if _get_hash_from_path(p) in url_dict:
-            ret_files.append(os.path.join(top_dir, p))
-    return ret_files
-
-
-def _subset_filenames(dl_paths, split):
-    """Get filenames for a particular split."""
-    assert isinstance(dl_paths, dict), dl_paths
-    # Get filenames for a split.
-    if split == datasets.Split.TRAIN:
-        urls = _get_url_hashes(dl_paths["train_urls"])
-    elif split == datasets.Split.VALIDATION:
-        urls = _get_url_hashes(dl_paths["val_urls"])
-    elif split == datasets.Split.TEST:
-        urls = _get_url_hashes(dl_paths["test_urls"])
-    else:
-        logger.fatal("Unsupported split: %s", split)
-    cnn = _find_files(dl_paths, "cnn", urls)
-    dm = _find_files(dl_paths, "dm", urls)
-    return cnn + dm
+    return os.path.splitext(os.path.basename(p))[0]
 
 
 DM_SINGLE_CLOSE_QUOTE = "\u2019"  # unicode
@@ -164,14 +129,16 @@ DM_DOUBLE_CLOSE_QUOTE = "\u201d"
 END_TOKENS = [".", "!", "?", "...", "'", "`", '"', DM_SINGLE_CLOSE_QUOTE, DM_DOUBLE_CLOSE_QUOTE, ")"]
 
 
-def _read_text_file(text_file):
-    lines = []
-    with open(text_file, "r", encoding="utf-8") as f:
-        for line in f:
-            lines.append(line.strip())
+def _read_text_file_path(path):
+    with open(path, "r", encoding="utf-8") as f:
+        lines = [line.strip() for line in f]
     return lines
 
 
+def _read_text_file(file):
+    return [line.decode("utf-8").strip() for line in file]
+
+
 def _get_art_abs(story_file, tfds_version):
     """Get abstract (highlights) and article from a story file path."""
     # Based on https://github.com/abisee/cnn-dailymail/blob/master/
@@ -231,7 +198,6 @@ class CnnDailymail(datasets.GeneratorBasedBuilder):
     ]
 
     def _info(self):
-        # Should return a datasets.DatasetInfo object
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
@@ -242,7 +208,7 @@ class CnnDailymail(datasets.GeneratorBasedBuilder):
                }
            ),
            supervised_keys=None,
-            homepage="https://github.com/abisee/cnn-dailymail",
+            homepage=_HOMEPAGE,
            citation=_CITATION,
        )
 
@@ -251,29 +217,31 @@ class CnnDailymail(datasets.GeneratorBasedBuilder):
         yield " ".join([ex[_ARTICLE], ex[_HIGHLIGHTS]])
 
     def _split_generators(self, dl_manager):
-        dl_paths = dl_manager.download_and_extract(_DL_URLS)
-        train_files = _subset_filenames(dl_paths, datasets.Split.TRAIN)
-        # Generate shared vocabulary
-
+        dl_paths = dl_manager.download(_DL_URLS)
         return [
-            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": train_files}),
-            datasets.SplitGenerator(
-                name=datasets.Split.VALIDATION,
-                gen_kwargs={"files": _subset_filenames(dl_paths, datasets.Split.VALIDATION)},
-            ),
             datasets.SplitGenerator(
-                name=datasets.Split.TEST, gen_kwargs={"files": _subset_filenames(dl_paths, datasets.Split.TEST)}
-            ),
+                name=split,
+                gen_kwargs={
+                    "urls_file": dl_paths[split],
+                    "cnn_stories_archive": dl_manager.iter_archive(dl_paths["cnn_stories"]),
+                    "dm_stories_archive": dl_manager.iter_archive(dl_paths["dm_stories"]),
+                },
+            )
+            for split in [datasets.Split.TRAIN, datasets.Split.VALIDATION, datasets.Split.TEST]
         ]
 
-    def _generate_examples(self, files):
-        for p in files:
-            article, highlights = _get_art_abs(p, self.config.version)
-            if not article or not highlights:
-                continue
-            fname = os.path.basename(p)
-            yield fname, {
-                _ARTICLE: article,
-                _HIGHLIGHTS: highlights,
-                "id": _get_hash_from_path(fname),
-            }
+    def _generate_examples(self, urls_file, cnn_stories_archive, dm_stories_archive):
+        urls = _get_url_hashes(urls_file)
+        idx = 0
+        for path, file in cnn_stories_archive:
+            hash_from_path = _get_hash_from_path(path)
+            if hash_from_path in urls:
+                article, highlights = _get_art_abs(file, self.config.version)
+                if not article or not highlights:
+                    continue
+                yield idx, {
+                    _ARTICLE: article,
+                    _HIGHLIGHTS: highlights,
+                    "id": hash_from_path,
+                }
+                idx += 1