pszemraj commited on
Commit
cf3f7be
·
verified ·
0 Parent(s):

Super-squash branch 'main' using huggingface_hub

Browse files
.gitattributes ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.lz4 filter=lfs diff=lfs merge=lfs -text
12
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
13
+ *.model filter=lfs diff=lfs merge=lfs -text
14
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
15
+ *.npy filter=lfs diff=lfs merge=lfs -text
16
+ *.npz filter=lfs diff=lfs merge=lfs -text
17
+ *.onnx filter=lfs diff=lfs merge=lfs -text
18
+ *.ot filter=lfs diff=lfs merge=lfs -text
19
+ *.parquet filter=lfs diff=lfs merge=lfs -text
20
+ *.pb filter=lfs diff=lfs merge=lfs -text
21
+ *.pickle filter=lfs diff=lfs merge=lfs -text
22
+ *.pkl filter=lfs diff=lfs merge=lfs -text
23
+ *.pt filter=lfs diff=lfs merge=lfs -text
24
+ *.pth filter=lfs diff=lfs merge=lfs -text
25
+ *.rar filter=lfs diff=lfs merge=lfs -text
26
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
27
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
28
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
29
+ *.tar filter=lfs diff=lfs merge=lfs -text
30
+ *.tflite filter=lfs diff=lfs merge=lfs -text
31
+ *.tgz filter=lfs diff=lfs merge=lfs -text
32
+ *.wasm filter=lfs diff=lfs merge=lfs -text
33
+ *.xz filter=lfs diff=lfs merge=lfs -text
34
+ *.zip filter=lfs diff=lfs merge=lfs -text
35
+ *.zst filter=lfs diff=lfs merge=lfs -text
36
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
37
+ # Audio files - uncompressed
38
+ *.pcm filter=lfs diff=lfs merge=lfs -text
39
+ *.sam filter=lfs diff=lfs merge=lfs -text
40
+ *.raw filter=lfs diff=lfs merge=lfs -text
41
+ # Audio files - compressed
42
+ *.aac filter=lfs diff=lfs merge=lfs -text
43
+ *.flac filter=lfs diff=lfs merge=lfs -text
44
+ *.mp3 filter=lfs diff=lfs merge=lfs -text
45
+ *.ogg filter=lfs diff=lfs merge=lfs -text
46
+ *.wav filter=lfs diff=lfs merge=lfs -text
47
+ # Image files - uncompressed
48
+ *.bmp filter=lfs diff=lfs merge=lfs -text
49
+ *.gif filter=lfs diff=lfs merge=lfs -text
50
+ *.png filter=lfs diff=lfs merge=lfs -text
51
+ *.tiff filter=lfs diff=lfs merge=lfs -text
52
+ # Image files - compressed
53
+ *.jpg filter=lfs diff=lfs merge=lfs -text
54
+ *.jpeg filter=lfs diff=lfs merge=lfs -text
55
+ *.webp filter=lfs diff=lfs merge=lfs -text
56
+ # Video files - compressed
57
+ *.mp4 filter=lfs diff=lfs merge=lfs -text
58
+ *.webm filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,84 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ language:
3
+ - en
4
+ license: mit
5
+ size_categories:
6
+ - 10K<n<100K
7
+ task_categories:
8
+ - text2text-generation
9
+ dataset_info:
10
+ - config_name: default
11
+ features:
12
+ - name: pageid
13
+ dtype: int64
14
+ - name: title
15
+ dtype: string
16
+ - name: revid
17
+ dtype: int64
18
+ - name: description
19
+ dtype: string
20
+ - name: categories
21
+ sequence: string
22
+ - name: inputs
23
+ dtype: string
24
+ - name: targets
25
+ dtype: string
26
+ splits:
27
+ - name: train
28
+ num_bytes: 829905155
29
+ num_examples: 44754
30
+ download_size: 489718761
31
+ dataset_size: 829905155
32
+ - config_name: instruct
33
+ features:
34
+ - name: inputs
35
+ dtype: string
36
+ - name: targets
37
+ dtype: string
38
+ - name: _task_name
39
+ dtype: string
40
+ splits:
41
+ - name: train
42
+ num_bytes: 815113125
43
+ num_examples: 44754
44
+ download_size: 480497634
45
+ dataset_size: 815113125
46
+ configs:
47
+ - config_name: default
48
+ data_files:
49
+ - split: train
50
+ path: data/train-*
51
+ - config_name: instruct
52
+ data_files:
53
+ - split: train
54
+ path: instruct/train-*
55
+ source_datasets: euirim/goodwiki
56
+ ---
57
+
58
+ # goodwiki-text2text-completion
59
+
60
+ Wikipedia articles split pseudo-randomly into input/target pairs at paragraph and/or markdown-header boundaries.
61
+
62
+ - inputs/outputs are in columns `inputs`/`targets`
63
+ - the `instruct` config additionally prepends task prompts to the text in `inputs`
64
+ - Source data: `euirim/goodwiki`
65
+
66
+
67
+ ```yml
68
+ dataset_info:
69
+ features:
70
+ - name: pageid
71
+ dtype: int64
72
+ - name: title
73
+ dtype: string
74
+ - name: revid
75
+ dtype: int64
76
+ - name: description
77
+ dtype: string
78
+ - name: categories
79
+ sequence: string
80
+ - name: inputs
81
+ dtype: string
82
+ - name: targets
83
+ dtype: string
84
+ ```
data/train-00000-of-00002.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:173185a1ff4699592aa4bba0d43dd45c3f3ad36f363e0a9f376470665555533b
3
+ size 222853522
data/train-00001-of-00002.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:27f42926608de3435bb1087cba67e6dd231f4850e42e05a17ac9b420f0d469e8
3
+ size 266865239
dataset-preprocessor.py ADDED
@@ -0,0 +1,54 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from datasets import load_dataset
2
+ import re
3
+ import random
4
+
5
def split_into_paragraphs(text):
    """Split *text* at blank lines or immediately before markdown headers.

    Splits occur on double newlines and (zero-width) right before any line
    starting with ``#``. Returns the stripped, non-empty segments in
    document order.
    """
    pattern = re.compile(r'\n\n|(?=^#)', re.MULTILINE)
    stripped = (segment.strip() for segment in pattern.split(text))
    return [segment for segment in stripped if segment]
9
+
10
def create_input_output_pairs(example):
    """Split an example's paragraph list into an ``inputs``/``targets`` pair.

    Roughly the first half of the paragraphs (the exact count is drawn
    pseudo-randomly from ``n//2 - 1 .. n//2 + 1``) becomes the prompt; the
    remainder becomes the completion target.

    Fix over the original: the random draw could select *all* paragraphs
    (e.g. n=2 -> randint(0, 2) may return 2), leaving ``targets`` empty.
    The count is now clamped to ``[1, n-1]`` whenever more than one
    paragraph exists, so the target side is non-empty when possible.

    Args:
        example: mapping with a ``'paragraphs'`` list of strings.

    Returns:
        dict with ``'inputs'`` and ``'targets'`` strings (space-joined
        paragraphs, matching the original behavior).
    """
    paragraphs = example['paragraphs']
    n_paragraphs = len(paragraphs)

    # Degenerate case: nothing to split.
    if n_paragraphs == 0:
        return {'inputs': '', 'targets': ''}

    # Randomly select about half of the paragraphs for input, then clamp so
    # at least one paragraph lands on each side when n_paragraphs > 1.
    n_input = random.randint(n_paragraphs // 2 - 1, n_paragraphs // 2 + 1)
    n_input = max(1, min(n_input, max(1, n_paragraphs - 1)))

    return {
        'inputs': ' '.join(paragraphs[:n_input]),
        'targets': ' '.join(paragraphs[n_input:]),
    }
24
+
25
def preprocess_dataset(dataset_name, text_column='text'):
    """Load *dataset_name* and turn each row's text into inputs/targets.

    The ``text_column`` is first exploded into a ``paragraphs`` list, then
    each example is split into a prompt half (``inputs``) and a completion
    half (``targets``).

    Args:
        dataset_name: name/path understood by ``datasets.load_dataset``.
        text_column: column holding the raw article text.

    Returns:
        The mapped dataset with ``inputs`` and ``targets`` columns.
    """
    raw = load_dataset(dataset_name)

    # Explode the raw text into paragraph lists, dropping the source column.
    with_paragraphs = raw.map(
        lambda row: {'paragraphs': split_into_paragraphs(row[text_column])},
        remove_columns=[text_column],
    )

    # Turn each paragraph list into a prompt/completion pair.
    return with_paragraphs.map(
        create_input_output_pairs,
        remove_columns=['paragraphs'],
    )
42
+
43
# Usage example
if __name__ == "__main__":
    # Replace 'your_dataset' with the actual dataset name
    dataset_name = 'your_dataset'

    processed = preprocess_dataset(dataset_name)

    # Show a handful of rows as a sanity check.
    print(processed['train'][:5])

    # Persist the result next to the working directory.
    processed.save_to_disk("preprocessed_dataset")
instruct/train-00000-of-00002.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bce615933e87c1273ebea366c359d95aeb9d65f440bf55309ff3d1ff573e939a
3
+ size 218343792
instruct/train-00001-of-00002.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:023decf77e191850ba543bda25ac0e23b0a1fe51ccb16611bb09dc77ee6b419a
3
+ size 262153842
text2text-completion-task.ipynb ADDED
The diff for this file is too large to render. See raw diff