Sebastian Gehrmann
committed on
Commit
·
00fd900
1
Parent(s):
1c10c7d
xsum.py
CHANGED
|
@@ -4,12 +4,20 @@ import os
|
|
| 4 |
import datasets
|
| 5 |
|
| 6 |
_CITATION = """\
|
| 7 |
-
@
|
| 8 |
-
|
| 9 |
-
|
| 10 |
-
|
| 11 |
-
|
| 12 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 13 |
}
|
| 14 |
"""
|
| 15 |
|
|
@@ -70,6 +78,7 @@ class Xsum(datasets.GeneratorBasedBuilder):
|
|
| 70 |
def _split_generators(self, dl_manager):
|
| 71 |
"""Returns SplitGenerators."""
|
| 72 |
dl_dir = dl_manager.download_and_extract(_URLs)
|
|
|
|
| 73 |
challenge_sets = [
|
| 74 |
("challenge_train_sample", "train_xsum_RandomSample500.json"),
|
| 75 |
("challenge_validation_sample", "validation_xsum_RandomSample500.json"),
|
|
|
|
| 4 |
import datasets
|
| 5 |
|
| 6 |
_CITATION = """\
|
| 7 |
+
@inproceedings{narayan-etal-2018-dont,
|
| 8 |
+
title = "Don{'}t Give Me the Details, Just the Summary! Topic-Aware Convolutional Neural Networks for Extreme Summarization",
|
| 9 |
+
author = "Narayan, Shashi and
|
| 10 |
+
Cohen, Shay B. and
|
| 11 |
+
Lapata, Mirella",
|
| 12 |
+
booktitle = "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing",
|
| 13 |
+
month = oct # "-" # nov,
|
| 14 |
+
year = "2018",
|
| 15 |
+
address = "Brussels, Belgium",
|
| 16 |
+
publisher = "Association for Computational Linguistics",
|
| 17 |
+
url = "https://aclanthology.org/D18-1206",
|
| 18 |
+
doi = "10.18653/v1/D18-1206",
|
| 19 |
+
pages = "1797--1807",
|
| 20 |
+
abstract = "We introduce {``}extreme summarization{''}, a new single-document summarization task which does not favor extractive strategies and calls for an abstractive modeling approach. The idea is to create a short, one-sentence news summary answering the question {``}What is the article about?{''}. We collect a real-world, large-scale dataset for this task by harvesting online articles from the British Broadcasting Corporation (BBC). We propose a novel abstractive model which is conditioned on the article{'}s topics and based entirely on convolutional neural networks. We demonstrate experimentally that this architecture captures long-range dependencies in a document and recognizes pertinent content, outperforming an oracle extractive system and state-of-the-art abstractive approaches when evaluated automatically and by humans.",
|
| 21 |
}
|
| 22 |
"""
|
| 23 |
|
|
|
|
| 78 |
def _split_generators(self, dl_manager):
|
| 79 |
"""Returns SplitGenerators."""
|
| 80 |
dl_dir = dl_manager.download_and_extract(_URLs)
|
| 81 |
+
print(dl_dir)
|
| 82 |
challenge_sets = [
|
| 83 |
("challenge_train_sample", "train_xsum_RandomSample500.json"),
|
| 84 |
("challenge_validation_sample", "validation_xsum_RandomSample500.json"),
|