add files
Browse files- README.md +28 -1
- data/train/data-00000-of-00008.arrow +3 -0
- data/train/data-00001-of-00008.arrow +3 -0
- data/train/data-00002-of-00008.arrow +3 -0
- data/train/data-00003-of-00008.arrow +3 -0
- data/train/data-00004-of-00008.arrow +3 -0
- data/train/data-00005-of-00008.arrow +3 -0
- data/train/data-00006-of-00008.arrow +3 -0
- data/train/data-00007-of-00008.arrow +3 -0
- guteberg_download.py +259 -0
- gutenberg_metadata.csv +0 -0
- make_ds_gutenberg.py +30 -0
README.md
CHANGED
|
@@ -1,3 +1,30 @@
|
|
| 1 |
---
|
| 2 |
-
license:
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 3 |
---
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
---
|
| 2 |
+
license: cc-by-nc-sa-4.0
|
| 3 |
+
task_categories:
|
| 4 |
+
- image-text-to-text
|
| 5 |
+
language:
|
| 6 |
+
- en
|
| 7 |
+
size_categories:
|
| 8 |
+
- 1K<n<10K
|
| 9 |
+
configs:
|
| 10 |
+
- config_name: default
|
| 11 |
+
data_files:
|
| 12 |
+
- split: train
|
| 13 |
+
path: data/train/*
|
| 14 |
---
|
| 15 |
+
|
| 16 |
+
# Gutenberg 8K
|
| 17 |
+
|
| 18 |
+
This is a subset of the files available at [15000 Gutenberg Books](https://www.kaggle.com/datasets/mateibejan/15000-gutenberg-books)
|
| 19 |
+
|
| 20 |
+
## About Dataset
|
| 21 |
+
|
| 22 |
+
The Gutenberg dataset represents a corpus of over 15,000 book texts, their authors and titles. The data has been scraped from the Project Gutenberg website using a custom script to parse all bookshelves. The text download links for the books have been stored in the gutenberg_metadata.csv file, alongside their respective book's title, author and bookshelf (category). The download links have been scraped with respect to the category the Gutenberg website associated them with. Some books from the website are not part of the dataset, as they have not yet been categorized.
|
| 23 |
+
|
| 24 |
+
The text data itself can be downloaded using the gutenberg_download.py script, which will parse the metadata file, download the text data for each book and save the results as a csv file. The final csv file containing the book texts, the authors, the titles and the categories will have a size of around 5 GB.
|
| 25 |
+
|
| 26 |
+
Please note that some books are audiobooks. We have decided to keep these in the dataset in case someone wishes to work with the audio data instead of text.
|
| 27 |
+
|
| 28 |
+
## Acknowledgement
|
| 29 |
+
|
| 30 |
+
All credits to the original [15000 Gutenberg Books](https://www.kaggle.com/datasets/mateibejan/15000-gutenberg-books) project.
|
data/train/data-00000-of-00008.arrow
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:0e2892e8d6422683c77738a00ae16942da202efcebf8b60d6b1fe87a2759793d
|
| 3 |
+
size 394751544
|
data/train/data-00001-of-00008.arrow
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:adadc09fd6e0eec0970c3136ed6f640de5a1f654e81fa98f20baf580461fe126
|
| 3 |
+
size 411709888
|
data/train/data-00002-of-00008.arrow
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:7bb64a3549634f236c7bfcc46c99162aae4a5f57f266727a8b4bffada9b45686
|
| 3 |
+
size 398712168
|
data/train/data-00003-of-00008.arrow
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:11fb6d616ca5f82b1cfbdce3424d3632c98fbcc45561936362b9bbae99b3ec0e
|
| 3 |
+
size 413012448
|
data/train/data-00004-of-00008.arrow
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:7c4f357c8e4343b3a119e3757cfba3f69d6636c85c1327745e04a4780502a42e
|
| 3 |
+
size 369482624
|
data/train/data-00005-of-00008.arrow
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:3cfe7fa230b344ecb11316c286f868fb4f02175f9b1da6fee72216adef47f19c
|
| 3 |
+
size 413897096
|
data/train/data-00006-of-00008.arrow
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:6ab0f47605da7fb717cead981f181ca5b4af4dc1d5f8d2f9c829adcff3c7abce
|
| 3 |
+
size 391903120
|
data/train/data-00007-of-00008.arrow
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:84045656c7b8b3f6f0ff7b6a1a99dba20be934f3adf737958dc835384f919c17
|
| 3 |
+
size 402591120
|
guteberg_download.py
ADDED
|
@@ -0,0 +1,259 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
|
| 3 |
+
# os.system('apt install libdb5.3-dev')
|
| 4 |
+
# os.system('pip install gutenberg')
|
| 5 |
+
# os.system('pip install requests')
|
| 6 |
+
|
| 7 |
+
import pandas as pd
|
| 8 |
+
import requests
|
| 9 |
+
import numpy as np
|
| 10 |
+
from bs4 import BeautifulSoup
|
| 11 |
+
from urllib.request import urlopen
|
| 12 |
+
|
| 13 |
+
from tqdm import tqdm
|
| 14 |
+
import json
|
| 15 |
+
# from gutenberg.acquire import load_etext
|
| 16 |
+
# from gutenberg.cleanup import strip_headers
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
# Sentinel lines used by strip_headers() below to locate Project Gutenberg
# boilerplate. Ported from the `gutenberg` package (c-w/gutenberg); matching
# is prefix-based (line.startswith), so leading whitespace in an entry is
# significant. NOTE(review): the diff render may have collapsed runs of
# leading spaces in a few entries — verify against the upstream package.

# A line starting with any of these marks the END of the PG *header*:
# everything collected before it is discarded.
TEXT_START_MARKERS = frozenset((
    "*END*THE SMALL PRINT",
    "*** START OF THE PROJECT GUTENBERG",
    "*** START OF THIS PROJECT GUTENBERG",
    "This etext was prepared by",
    "E-text prepared by",
    "Produced by",
    "Distributed Proofreading Team",
    "Proofreading Team at http://www.pgdp.net",
    "http://gallica.bnf.fr)",
    " http://archive.org/details/",
    "http://www.pgdp.net",
    "by The Internet Archive)",
    "by The Internet Archive/Canadian Libraries",
    "by The Internet Archive/American Libraries",
    "public domain material from the Internet Archive",
    "Internet Archive)",
    "Internet Archive/Canadian Libraries",
    "Internet Archive/American Libraries",
    "material from the Google Print project",
    "*END THE SMALL PRINT",
    "***START OF THE PROJECT GUTENBERG",
    "This etext was produced by",
    "*** START OF THE COPYRIGHTED",
    "The Project Gutenberg",
    "http://gutenberg.spiegel.de/ erreichbar.",
    "Project Runeberg publishes",
    "Beginning of this Project Gutenberg",
    "Project Gutenberg Online Distributed",
    "Gutenberg Online Distributed",
    "the Project Gutenberg Online Distributed",
    "Project Gutenberg TEI",
    "This eBook was prepared by",
    "http://gutenberg2000.de erreichbar.",
    "This Etext was prepared by",
    "This Project Gutenberg Etext was prepared by",
    "Gutenberg Distributed Proofreaders",
    "Project Gutenberg Distributed Proofreaders",
    "the Project Gutenberg Online Distributed Proofreading Team",
    "**The Project Gutenberg",
    "*SMALL PRINT!",
    "More information about this book is at the top of this file.",
    "tells you about restrictions in how the file may be used.",
    "l'authorization à les utilizer pour preparer ce texte.",
    "of the etext through OCR.",
    "*****These eBooks Were Prepared By Thousands of Volunteers!*****",
    "We need your donations more than ever!",
    " *** START OF THIS PROJECT GUTENBERG",
    "**** SMALL PRINT!",
    '["Small Print" V.',
    ' (http://www.ibiblio.org/gutenberg/',
    'and the Project Gutenberg Online Distributed Proofreading Team',
    'Mary Meehan, and the Project Gutenberg Online Distributed Proofreading',
    ' this Project Gutenberg edition.',
))


# A line starting with any of these marks the START of the PG *footer*:
# output stops as soon as one is seen (after the first 100 kept lines).
TEXT_END_MARKERS = frozenset((
    "*** END OF THE PROJECT GUTENBERG",
    "*** END OF THIS PROJECT GUTENBERG",
    "***END OF THE PROJECT GUTENBERG",
    "End of the Project Gutenberg",
    "End of The Project Gutenberg",
    "Ende dieses Project Gutenberg",
    "by Project Gutenberg",
    "End of Project Gutenberg",
    "End of this Project Gutenberg",
    "Ende dieses Projekt Gutenberg",
    " ***END OF THE PROJECT GUTENBERG",
    "*** END OF THE COPYRIGHTED",
    "End of this is COPYRIGHTED",
    "Ende dieses Etextes ",
    "Ende dieses Project Gutenber",
    "Ende diese Project Gutenberg",
    "**This is a COPYRIGHTED Project Gutenberg Etext, Details Above**",
    "Fin de Project Gutenberg",
    "The Project Gutenberg Etext of ",
    "Ce document fut presente en lecture",
    "Ce document fut présenté en lecture",
    "More information about this book is at the top of this file.",
    "We need your donations more than ever!",
    "END OF PROJECT GUTENBERG",
    " End of the Project Gutenberg",
    " *** END OF THIS PROJECT GUTENBERG",
))


# Start/end delimiters for embedded legalese sections that are dropped
# wholesale from the middle of the text.
LEGALESE_START_MARKERS = frozenset(("<<THIS ELECTRONIC VERSION OF",))


LEGALESE_END_MARKERS = frozenset(("SERVICE THAT CHARGES FOR DOWNLOAD",))
|
| 110 |
+
|
| 111 |
+
|
| 112 |
+
def strip_headers(text):
    """Remove lines that are part of the Project Gutenberg header or footer.

    Note: this function is a port of the C++ utility by Johannes Krugel. The
    original version of the code can be found at:
    http://www14.in.tum.de/spp1307/src/strip_headers.cpp

    Args:
        text (unicode): The body of the text to clean up.

    Returns:
        unicode: The text with any non-text content removed.

    """
    lines = text.splitlines()
    sep = str(os.linesep)

    out = []
    # `i` counts lines *kept* so far, not lines seen: it only increments when
    # a line is appended to `out` below.
    i = 0
    footer_found = False
    ignore_section = False

    for line in lines:
        reset = False

        # Header markers are only honoured within the first ~600 kept lines;
        # identical wording deep inside a book is left alone.
        if i <= 600:
            # Check if the header ends here
            if any(line.startswith(token) for token in TEXT_START_MARKERS):
                reset = True

            # If it's the end of the header, delete the output produced so far.
            # May be done several times, if multiple lines occur indicating the
            # end of the header
            if reset:
                out = []
                continue

        # Footer detection only begins after 100 kept lines, so footer-like
        # phrases in the front matter cannot truncate the whole book.
        if i >= 100:
            # Check if the footer begins here
            if any(line.startswith(token) for token in TEXT_END_MARKERS):
                footer_found = True

            # If it's the beginning of the footer, stop output
            if footer_found:
                break

        # Embedded legalese sections are skipped wholesale, delimiters included.
        if any(line.startswith(token) for token in LEGALESE_START_MARKERS):
            ignore_section = True
            continue
        elif any(line.startswith(token) for token in LEGALESE_END_MARKERS):
            ignore_section = False
            continue

        if not ignore_section:
            # NOTE(review): rstrip(sep) strips any of sep's characters (e.g.
            # '\r' and '\n' separately on Windows), not the sequence; this
            # matches the upstream `gutenberg` package behaviour.
            out.append(line.rstrip(sep))
            i += 1

    return sep.join(out)
|
| 169 |
+
|
| 170 |
+
|
| 171 |
+
# only removes funny tokens for English texts
def remove_funny_tokens(text):
    """Collapse whitespace and replace mangled UTF-8 escape residue.

    The download pipeline strips backslashes out of literal byte escapes, so
    curly quotes and em-dashes survive as sequences like ``xe2x80x9c``.
    Quote residues are mapped to an apostrophe, the rest to a space.

    Args:
        text (str): English text possibly containing escape residue.

    Returns:
        str: the text with residue replaced and whitespace normalised.
    """
    tokens = text.split()
    sample = ' '.join(' '.join(tokens).replace('xe2x80x9c', ' ').replace('xe2x80x9d', ' ')
                      .replace('xe2x80x94', ' ').replace('xe2x80x99', "'")
                      .replace('xe2x80x98', "'").split())
    return sample


# clean newlines, carriage returns and tabs
def clean_text(text):
    """Remove literal escape sequences (``\\n``, ``\\r``, ``\\t``) and stray backslashes.

    Fixes over the previous implementation:
      * the last character of ``text`` is no longer silently dropped (the old
        loop iterated ``range(len(text) - 1)``);
      * no wrap-around read of ``text[-1]`` when examining the first character;
      * single O(n) pass instead of per-character list indexing.

    Args:
        text (str): text containing literal two-character escape sequences.

    Returns:
        str: cleaned text, whitespace-normalised via remove_funny_tokens().
    """
    cleaned_chars = []
    i = 0
    n = len(text)
    while i < n:
        ch = text[i]
        if ch == '\\':
            # Skip the backslash, plus the escape letter when this is a
            # literal \n, \r or \t pair; lone backslashes are just dropped.
            if i + 1 < n and text[i + 1] in ('n', 'r', 't'):
                i += 2
            else:
                i += 1
            continue
        cleaned_chars.append(ch)
        i += 1

    return remove_funny_tokens(''.join(cleaned_chars))
|
| 204 |
+
|
| 205 |
+
# Download every book listed in the metadata CSV and store one JSON record
# per book under ./books/, keyed by its Gutenberg ID. Already-downloaded
# books are skipped, so the script is safe to re-run after interruption.
df_metadata = pd.read_csv('gutenberg_metadata.csv')
num_books = len(df_metadata)
os.makedirs("books", exist_ok=True)

ok_files = 0
for key, row in tqdm(df_metadata.iterrows(), desc="Downloading Books", total=num_books, ncols=80):
    data = {}
    data['Author'] = row['Author']
    data['Title'] = row['Title']
    data['Link'] = row['Link']

    # The Gutenberg book ID is the last path segment of the catalogue link,
    # e.g. https://www.gutenberg.org/ebooks/1342 -> 1342.
    book_id = int(row['Link'].split('/')[-1])
    data['ID'] = book_id
    data['Bookshelf'] = row['Bookshelf']

    save_fn = os.path.join("books", f"{book_id}.json")
    # Resume support: skip books already saved by a previous run.
    if os.path.exists(save_fn):
        continue

    text = np.nan
    try:
        # Scrape the catalogue page for the "Plain Text UTF-8" download link,
        # then fetch and clean the raw text.
        page = requests.get(row['Link'])
        soup = BeautifulSoup(page.content, 'html.parser')
        text_link = 'http://www.gutenberg.org' + soup.find_all("a", string="Plain Text UTF-8")[0]['href']
        http_response_object = urlopen(text_link)

        # Properly decode bytes to string with UTF-8 encoding
        raw_bytes = http_response_object.read()
        text = raw_bytes.decode('utf-8', errors='replace')
        text = strip_headers(text)
        # Flatten all newlines/tabs/carriage returns to single spaces.
        text = ' '.join(' '.join(' '.join(text.split('\n')).split('\t')).split('\r'))
        text = ' '.join(text.split())
        text = clean_text(text)
    except KeyboardInterrupt:
        raise
    except Exception:
        # Best-effort download: log the failure and move on to the next book
        # (no Plain Text link, network error, unparseable page, ...).
        print("Couldn't acquire text for " + row['Title'] + ' with ID ' + str(book_id) + '. Link: ' + row['Link'])
        continue

    try:
        data['Text'] = ' '.join(text.split(' '))
    except KeyboardInterrupt:
        raise
    except Exception:
        # e.g. `text` still np.nan because cleaning produced no string.
        print("Couldn't save data for " + row['Title'] + ' with ID ' + str(book_id) + '. Link: ' + row['Link'])
        continue

    with open(save_fn, "w", encoding="utf-8") as writer:
        json.dump(data, writer, indent=2, ensure_ascii=False)
    ok_files += 1

print(f"Downloaded files: {ok_files}/{num_books}")
# df_data = pd.DataFrame(data, columns = ['Title', 'Author', 'Link', 'ID', 'Bookshelf', 'Text'])

# df_data.to_csv('./gutenberg_data.csv', index=False)
|
gutenberg_metadata.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
make_ds_gutenberg.py
ADDED
|
@@ -0,0 +1,30 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from glob import glob
|
| 2 |
+
import json
|
| 3 |
+
import os
|
| 4 |
+
|
| 5 |
+
from datasets import Dataset
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
def main():
    """Assemble the per-book JSON files into a HF Dataset and write it as
    Arrow shards under <workdir>/datasets/data/train."""
    workdir = "./gutenberg"
    book_paths = glob(os.path.join(workdir, "books", "*.json"))

    def records():
        # Stream one record per downloaded book.
        for path in book_paths:
            with open(path, "r", encoding="utf-8") as fh:
                record = json.load(fh)
            # Coerce to str so the Arrow schema stays uniform across books
            # (these fields are not guaranteed to be strings in the JSON).
            record["Bookshelf"] = str(record["Bookshelf"])
            record["Author"] = str(record["Author"])
            yield record

    ds = Dataset.from_generator(records)
    # Normalise column names to lowercase (Author -> author, etc.).
    ds = ds.rename_columns({name: name.lower() for name in ds.features})

    ds.save_to_disk(os.path.join(workdir, "datasets", "data", "train"), max_shard_size="400MB")
    return


if __name__ == "__main__":
    main()
|