vuiseng9 committed
Commit d882056 · 1 Parent(s): 6a2beed

Baseline clone
Files changed (4)
  1. README.md +31 -0
  2. dataset_infos.json +1 -0
  3. openwebtext-10k.py +89 -0
  4. process.txt +68 -0
README.md ADDED
@@ -0,0 +1,31 @@
+ 10K slice of OpenWebText - an open-source replication of the WebText dataset from OpenAI.
+
+ This is a small subset representing the first 10K records from the original dataset - created for testing.
+
+ The full 8M-record dataset is [here](https://huggingface.co/datasets/openwebtext).
+
+ ```
+ $ python -c "from datasets import load_dataset; ds=load_dataset('stas/openwebtext-10k'); print(ds)"
+ DatasetDict({
+     train: Dataset({
+         features: ['text'],
+         num_rows: 10000
+     })
+ })
+ ```
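+
+ To peek at a record (a minimal sketch; the 200-character slice is just for display):
+
+ ```
+ from datasets import load_dataset
+ ds = load_dataset("stas/openwebtext-10k", split="train")
+ print(ds[0]["text"][:200])  # first 200 characters of the first record
+ ```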
+
+ * Records: 10,000
+ * Compressed size: ~15MB
+ * Uncompressed size: ~50MB
+
+ To convert to jsonlines:
+
+ ```
+ from datasets import load_dataset
+ dataset_name = "stas/openwebtext-10k"
+ name = dataset_name.split('/')[-1]
+ ds = load_dataset(dataset_name, split='train')
+ ds.to_json(f"{name}.jsonl", orient="records", lines=True)
+ ```
+
+ For how this subset was created, see the [instructions file](https://huggingface.co/datasets/stas/openwebtext-10k/blob/main/process.txt).
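+
+ To read the converted jsonlines file back (a sketch using only the standard library):
+
+ ```
+ import json
+ with open("openwebtext-10k.jsonl") as f:
+     records = [json.loads(line) for line in f]
+ print(len(records))  # 10000
+ ```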
dataset_infos.json ADDED
@@ -0,0 +1 @@
+ {"plain_text": {"description": "An open-source replication of the WebText dataset from OpenAI.\n\nThis is a small subset representing the first 10K records from the original dataset - created for testing.\n\nThe full 8M-record dataset is at https://huggingface.co/datasets/openwebtext\n", "citation": "@misc{Gokaslan2019OpenWeb,\n title={OpenWebText Corpus},\n author={Aaron Gokaslan*, Vanya Cohen*, Ellie Pavlick, Stefanie Tellex},\n howpublished={\\url{http://Skylion007.github.io/OpenWebTextCorpus}},\n year={2019}\n}\n", "homepage": "https://skylion007.github.io/OpenWebTextCorpus/", "license": "", "features": {"text": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "openwebtext10k", "config_name": "plain_text", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 49670861, "num_examples": 10000, "dataset_name": "openwebtext10k"}}, "download_checksums": {"https://cdn-datasets.huggingface.co/nlp/datasets/openwebtext/openwebtext-10k.tar.xz": {"num_bytes": 14723792, "checksum": "1dd150ffa3361ab32fa9f129d1b5ce20ac48728be16be436558f844d1761c572"}}, "download_size": 14723792, "post_processing_size": null, "dataset_size": 49670861, "size_in_bytes": 64394653}}
openwebtext-10k.py ADDED
@@ -0,0 +1,89 @@
+ # coding=utf-8
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """The Open WebText Corpus"""
+
+
+ import os
+ import re
+ from itertools import chain
+
+ import datasets
+
+
+ _CITATION = """\
+ @misc{Gokaslan2019OpenWeb,
+   title={OpenWebText Corpus},
+   author={Aaron Gokaslan*, Vanya Cohen*, Ellie Pavlick, Stefanie Tellex},
+   howpublished={\\url{http://Skylion007.github.io/OpenWebTextCorpus}},
+   year={2019}
+ }
+ """
+
+ _DESCRIPTION = """\
+ An open-source replication of the WebText dataset from OpenAI.
+
+ This is a small subset representing the first 10K records from the original dataset - created for testing.
+
+ The full 8M-record dataset is at https://huggingface.co/datasets/openwebtext
+ """
+
+ _URL = "https://cdn-datasets.huggingface.co/nlp/datasets/openwebtext/openwebtext-10k.tar.xz"
+
+
+ class Openwebtext10k(datasets.GeneratorBasedBuilder):
+     """The Open WebText dataset."""
+
+     BUILDER_CONFIGS = [
+         datasets.BuilderConfig(
+             name="plain_text",
+             description="Plain text",
+             version=datasets.Version("1.0.0"),
+         )
+     ]
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features({"text": datasets.Value("string")}),
+             homepage="https://skylion007.github.io/OpenWebTextCorpus/",
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         dl_dir = dl_manager.download_and_extract(_URL)
+         owt_dir = os.path.join(dl_dir, "openwebtext-10k")
+         subset_xzs = [
+             os.path.join(owt_dir, file_name)
+             for file_name in sorted(os.listdir(owt_dir))
+             if file_name.endswith("xz")  # filter out ...xz.lock
+         ]
+         # decompress the per-record archives in parallel, on ~75% of the available cores
+         ex_dirs = dl_manager.extract(subset_xzs, num_proc=round(os.cpu_count() * 0.75))
+         nested_txt_files = [
+             [
+                 os.path.join(ex_dir, txt_file_name)
+                 for txt_file_name in sorted(os.listdir(ex_dir))
+                 if txt_file_name.endswith("txt")
+             ]
+             for ex_dir in ex_dirs
+         ]
+         txt_files = chain(*nested_txt_files)
+         return [
+             datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"txt_files": txt_files}),
+         ]
+
+     def _generate_examples(self, txt_files):
+         """Yields examples."""
+         for idx, filepath in enumerate(txt_files):
+             with open(filepath, encoding="utf-8") as f:
+                 # collapse runs of 3+ newlines into a single paragraph break
+                 yield idx, {"text": re.sub("\n\n\n+", "\n\n", f.read()).strip()}
process.txt ADDED
@@ -0,0 +1,68 @@
+ # --------------------------------------------------------------------------
+ # Original process as follows (until the end)
+
+ # this is a small derivative of the 8M-record openwebtext dataset, for testing
+
+ # how this build script and dataset_infos.json were generated
+
+ mkdir openwebtext-10k
+ cd openwebtext-10k
+
+ # data
+ wget https://zenodo.org/record/3834942/files/openwebtext.tar.xz
+ tar xf openwebtext.tar.xz
+ cd openwebtext
+ # zero-pad the archive numbers so lexicographic sort matches numeric order
+ rename.pl 's|-|-00|; s|-00(\d\d\d)|-$1|; s|-00(\d\d)|-0$1|;' *xz
+
+ # now open the first 30 archives
+ mkdir subset
+ cp urlsf_subset00-0[0-2]*_data.xz subset
+ cd subset
+ find . -name "*xz" -exec tar xf {} \;
+ mkdir 10k
+ find . -name "*txt" | sort | head -10000 | xargs mv -t 10k
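+ # optional sanity check (not part of the original run): confirm the move
+ find 10k -name "*txt" | wc -l    # expect 10000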
+ tar cfJ 10k.xz -C 10k .
+ mkdir openwebtext-10k
+ mv 10k.xz openwebtext-10k
+ tar cfJ openwebtext-10k.tar.xz openwebtext-10k
+ # the openwebtext subdir gets created on the fly
+ aws s3 cp openwebtext-10k.tar.xz s3://datasets.huggingface.co/nlp/datasets/openwebtext/
+
+ # script
+ wget https://raw.githubusercontent.com/huggingface/datasets/master/datasets/openwebtext/openwebtext.py
+ mv openwebtext.py openwebtext-10k.py
+ perl -pi -e 's|openwebtext|openwebtext-10k|g' openwebtext-10k.py
+ perl -pi -e 's|https://zenodo.org/record/3834942/files/|https://cdn-datasets.huggingface.co/nlp/datasets/openwebtext/|g' openwebtext-10k.py
+ perl -pi -e 's|Openwebtext|Openwebtext10k|g' openwebtext-10k.py
+
+ # manually check that the script is correct - edit the descriptions
+
+ # create a new dataset entry on the hub
+ https://huggingface.co/new-dataset
+
+ # once created, clone it
+ git clone https://huggingface.co/datasets/stas/openwebtext-10k
+ cp openwebtext-10k.py process.txt openwebtext-10k
+ cd openwebtext-10k
+
+ git add openwebtext-10k.py process.txt
+ git commit -m "build script" openwebtext-10k.py process.txt
+ git push
+
+ # test and generate config file
+ cd ..
+ datasets-cli test ./openwebtext-10k --save_infos --all_configs
+
+ # add and push the generated config
+ cd openwebtext-10k
+ git add dataset_infos.json
+ git commit -m "add dataset_infos.json" dataset_infos.json
+ git push
+
+ # test that the dataset is working
+ python -c "from datasets import load_dataset; ds=load_dataset('stas/openwebtext-10k'); print(ds)"
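+
+ # expected output (matches the README):
+ # DatasetDict({
+ #     train: Dataset({
+ #         features: ['text'],
+ #         num_rows: 10000
+ #     })
+ # })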