Update files from the datasets library (from 1.2.0)
Browse files — Release notes: https://github.com/huggingface/datasets/releases/tag/1.2.0
- wiki_split.py +8 -9
wiki_split.py
CHANGED
|
@@ -26,10 +26,13 @@ One million English sentences, each split into two sentences that together prese
|
|
| 26 |
Google's WikiSplit dataset was constructed automatically from the publicly available Wikipedia revision history. Although
|
| 27 |
the dataset contains some inherent noise, it can serve as valuable training data for models that split or merge sentences.
|
| 28 |
"""
|
| 29 |
-
|
| 30 |
-
|
| 31 |
-
|
| 32 |
-
|
|
|
|
|
|
|
|
|
|
| 33 |
|
| 34 |
|
| 35 |
class WikiSplit(datasets.GeneratorBasedBuilder):
|
|
@@ -66,11 +69,7 @@ class WikiSplit(datasets.GeneratorBasedBuilder):
|
|
| 66 |
# TODO(wiki_split): Downloads the data and defines the splits
|
| 67 |
# dl_manager is a datasets.download.DownloadManager that can be used to
|
| 68 |
# download and extract URLs
|
| 69 |
-
urls_to_download = {
|
| 70 |
-
"train": os.path.join(_URL, _TRAIN_FILE),
|
| 71 |
-
"test": os.path.join(_URL, _TEST_FILE),
|
| 72 |
-
"dev": os.path.join(_URL, _DEV_FILE),
|
| 73 |
-
}
|
| 74 |
dl_dir = dl_manager.download_and_extract(urls_to_download)
|
| 75 |
return [
|
| 76 |
datasets.SplitGenerator(
|
|
|
|
| 26 |
Google's WikiSplit dataset was constructed automatically from the publicly available Wikipedia revision history. Although
|
| 27 |
the dataset contains some inherent noise, it can serve as valuable training data for models that split or merge sentences.
|
| 28 |
"""
|
| 29 |
+
|
| 30 |
+
_URL = "https://github.com/google-research-datasets/wiki-split/raw/master/"
|
| 31 |
+
_URLS = {
|
| 32 |
+
"train": _URL + "train.tsv.zip",
|
| 33 |
+
"test": _URL + "test.tsv",
|
| 34 |
+
"dev": _URL + "validation.tsv",
|
| 35 |
+
}
|
| 36 |
|
| 37 |
|
| 38 |
class WikiSplit(datasets.GeneratorBasedBuilder):
|
|
|
|
| 69 |
# TODO(wiki_split): Downloads the data and defines the splits
|
| 70 |
# dl_manager is a datasets.download.DownloadManager that can be used to
|
| 71 |
# download and extract URLs
|
| 72 |
+
urls_to_download = _URLS
|
|
|
|
|
|
|
|
|
|
|
|
|
| 73 |
dl_dir = dl_manager.download_and_extract(urls_to_download)
|
| 74 |
return [
|
| 75 |
datasets.SplitGenerator(
|