Datasets:
Update files from the datasets library (from 1.16.0)
Browse files. Release notes: https://github.com/huggingface/datasets/releases/tag/1.16.0
- README.md +19 -2
- bookcorpusopen.py +12 -12
README.md
CHANGED
|
@@ -1,10 +1,27 @@
|
|
| 1 |
---
|
|
|
|
|
|
|
|
|
|
|
|
|
| 2 |
languages:
|
| 3 |
- en
|
| 4 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 5 |
---
|
| 6 |
|
| 7 |
-
# Dataset Card for
|
| 8 |
|
| 9 |
## Table of Contents
|
| 10 |
- [Dataset Description](#dataset-description)
|
|
|
|
| 1 |
---
|
| 2 |
+
annotations_creators:
|
| 3 |
+
- no-annotation
|
| 4 |
+
language_creators:
|
| 5 |
+
- found
|
| 6 |
languages:
|
| 7 |
- en
|
| 8 |
+
licenses:
|
| 9 |
+
- unknown
|
| 10 |
+
multilinguality:
|
| 11 |
+
- monolingual
|
| 12 |
+
pretty_name: BookCorpusOpen
|
| 13 |
+
size_categories:
|
| 14 |
+
- 10K<n<100K
|
| 15 |
+
source_datasets:
|
| 16 |
+
- original
|
| 17 |
+
task_categories:
|
| 18 |
+
- sequence-modeling
|
| 19 |
+
task_ids:
|
| 20 |
+
- language-modeling
|
| 21 |
+
paperswithcode_id: bookcorpus
|
| 22 |
---
|
| 23 |
|
| 24 |
+
# Dataset Card for BookCorpusOpen
|
| 25 |
|
| 26 |
## Table of Contents
|
| 27 |
- [Dataset Description](#dataset-description)
|
bookcorpusopen.py
CHANGED
|
@@ -17,9 +17,8 @@
|
|
| 17 |
"""The BookCorpus dataset based on Shawn Presser's work https://github.com/soskek/bookcorpus/issues/27 """
|
| 18 |
|
| 19 |
|
| 20 |
-
import glob
|
| 21 |
import os
|
| 22 |
-
import
|
| 23 |
|
| 24 |
import datasets
|
| 25 |
|
|
@@ -65,6 +64,8 @@ class BookCorpusOpenConfig(datasets.BuilderConfig):
|
|
| 65 |
class BookCorpusOpen(datasets.GeneratorBasedBuilder):
|
| 66 |
"""BookCorpus dataset."""
|
| 67 |
|
|
|
|
|
|
|
| 68 |
BUILDER_CONFIGS = [
|
| 69 |
BookCorpusOpenConfig(
|
| 70 |
name="plain_text",
|
|
@@ -87,19 +88,18 @@ class BookCorpusOpen(datasets.GeneratorBasedBuilder):
|
|
| 87 |
)
|
| 88 |
|
| 89 |
def _split_generators(self, dl_manager):
|
| 90 |
-
|
| 91 |
|
| 92 |
return [
|
| 93 |
-
datasets.SplitGenerator(
|
|
|
|
|
|
|
| 94 |
]
|
| 95 |
|
| 96 |
-
def _generate_examples(self,
|
| 97 |
-
glob_target = os.path.join(directory, "**/*.epub.txt")
|
| 98 |
-
book_files = glob.glob(glob_target, recursive=True)
|
| 99 |
-
book_files = sorted(book_files)
|
| 100 |
_id = 0
|
| 101 |
-
for book_file_path in book_files:
|
| 102 |
-
|
| 103 |
-
|
| 104 |
-
yield _id, {"title":
|
| 105 |
_id += 1
|
|
|
|
| 17 |
"""The BookCorpus dataset based on Shawn Presser's work https://github.com/soskek/bookcorpus/issues/27 """
|
| 18 |
|
| 19 |
|
|
|
|
| 20 |
import os
|
| 21 |
+
from fnmatch import fnmatch
|
| 22 |
|
| 23 |
import datasets
|
| 24 |
|
|
|
|
| 64 |
class BookCorpusOpen(datasets.GeneratorBasedBuilder):
|
| 65 |
"""BookCorpus dataset."""
|
| 66 |
|
| 67 |
+
DEFAULT_WRITER_BATCH_SIZE = 256 # documents are full books and are quite heavy
|
| 68 |
+
|
| 69 |
BUILDER_CONFIGS = [
|
| 70 |
BookCorpusOpenConfig(
|
| 71 |
name="plain_text",
|
|
|
|
| 88 |
)
|
| 89 |
|
| 90 |
def _split_generators(self, dl_manager):
|
| 91 |
+
archive = dl_manager.download(_DOWNLOAD_URL)
|
| 92 |
|
| 93 |
return [
|
| 94 |
+
datasets.SplitGenerator(
|
| 95 |
+
name=datasets.Split.TRAIN, gen_kwargs={"book_files": dl_manager.iter_archive(archive)}
|
| 96 |
+
),
|
| 97 |
]
|
| 98 |
|
| 99 |
+
def _generate_examples(self, book_files):
|
|
|
|
|
|
|
|
|
|
| 100 |
_id = 0
|
| 101 |
+
for book_file_path, f in book_files:
|
| 102 |
+
name = os.path.basename(book_file_path)
|
| 103 |
+
if fnmatch(name, "*.epub.txt"):
|
| 104 |
+
yield _id, {"title": name, "text": f.read().decode("utf-8")},
|
| 105 |
_id += 1
|