mswedrowski committed on
Commit
6b81375
·
1 Parent(s): 0559f2a

Add script

Browse files
Files changed (2) hide show
  1. data/multiwiki_90k.zip +2 -2
  2. multiwiki_90k.py +141 -0
data/multiwiki_90k.zip CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:2305bc8fea0b7fe5395e2d3c5ec782489b77ce1382ef739eb729126ef67e6953
3
- size 196806760
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4efa835f9a45779eee3170609e5c6029894687458f00da3ec9245d87000fb085
3
+ size 195744272
multiwiki_90k.py ADDED
@@ -0,0 +1,141 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import os
17
+ from typing import Generator, Tuple, Dict, List, TextIO
18
+ from pathlib import Path
19
+
20
+ import numpy as np
21
+ import datasets
22
+ from datasets import DownloadManager
23
+ from datasets.info import SupervisedKeysData
24
+
25
# Short summary surfaced in the dataset card / `datasets.DatasetInfo`.
_DESCRIPTION = """Multi-Wiki90k: Multilingual benchmark dataset for paragraph
segmentation"""
_HOMEPAGE = "https://clarin-pl.eu"
# Download URL of the zipped corpus, keyed by builder-config name.
_URLs = {
    "1.0": "https://huggingface.co/datasets/clarin-pl/multiwiki_90k/resolve/main/data/multiwiki_90k.zip",
}
# Label names for the per-sentence ClassLabel feature.
# NOTE(review): the first class is the letter "O" (as in BIO-style tagging),
# not the digit "0", while the generator appends integer labels 0/1 that map
# to these names by index — confirm the "O" spelling is intentional.
_CLASSES = ["O", "1"]

# Marker line that separates paragraphs in the raw data files.
_PARAGRAPH_SEPARATOR = '=========='
34
+
35
+
36
class MultiWiki(datasets.GeneratorBasedBuilder):
    """Multi-Wiki90k: multilingual paragraph-segmentation benchmark.

    Each example is one Wikipedia article, represented as a sequence of
    sentences ("text") plus a parallel sequence of labels ("label") marking
    which sentences close a paragraph.
    """

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="1.0",
            version=VERSION,
            description="Multilingual benchmark dataset for paragraph segmentation containing 90k of wikipedia "
            "articles in 9 languages. The selection of languages was made from the most popular "
            "representatives of the Romance, Slavic and Germanic language groups. These languages are: "
            "German, English, Spanish, French, Italian, Dutch, Polish, Portuguese and Russian.",
        ),
    ]

    DEFAULT_CONFIG_NAME = "1.0"

    def _info(self) -> datasets.DatasetInfo:
        """Declare the schema: a sentence sequence and a parallel label sequence."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "label": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=_CLASSES, num_classes=len(_CLASSES)
                        )
                    ),
                    "text": datasets.Sequence(
                        datasets.features.Value("string")
                    ),
                }
            ),
            homepage=_HOMEPAGE,
            supervised_keys=SupervisedKeysData(input='text', output='label')
        )

    @staticmethod
    def _doc_to_vector(doc: TextIO) -> np.ndarray:
        """Return a column vector of labels, one per sentence of *doc*.

        A sentence gets label 1 when the next line is the paragraph separator
        (it is the last sentence of its paragraph), otherwise 0. Separator
        lines themselves produce no label.

        NOTE(review): the loop stops at ``len(lines) - 1``, which assumes each
        file ends with a separator line so the final line never needs its own
        label — confirm against the data. The result has shape ``(n, 1)``;
        ``Sequence(ClassLabel)`` typically expects a flat sequence — confirm
        downstream handling.
        """
        vec = []
        # readlines() exhausts the stream: callers must rewind (seek(0))
        # before reading the same handle again.
        lines = doc.readlines()
        for i in range(len(lines) - 1):
            if _PARAGRAPH_SEPARATOR in lines[i]:
                pass  # separator lines are not sentences; emit no label
            elif _PARAGRAPH_SEPARATOR in lines[i + 1]:
                vec.append(1)  # last sentence of a paragraph
            else:
                vec.append(0)
        return np.array(vec).reshape(len(vec), 1)

    @staticmethod
    def _doc_get_sentences(doc: TextIO) -> np.ndarray:
        """Return every non-separator line of *doc* as an array of sentences."""
        sentences = []
        for line in doc.readlines():
            if _PARAGRAPH_SEPARATOR not in line:
                sentences.append(line)
        return np.array(sentences)

    def _split_generators(
        self, dl_manager: DownloadManager
    ) -> List[datasets.SplitGenerator]:
        """Download/extract the archive and map its subdirectories to splits.

        The archive is expected to contain ``train``, ``test`` and ``valid``
        directories of plain-text documents.
        """
        my_urls = _URLs[self.config.name]
        data_dir = dl_manager.download_and_extract(my_urls)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "dirpath": os.path.join(data_dir),
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "dirpath": os.path.join(data_dir),
                    "split": "test",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "dirpath": os.path.join(data_dir),
                    "split": "valid",
                },
            ),
        ]

    def _generate_examples(
        self,
        dirpath: str,
        split: str,
    ) -> Generator[Tuple[str, Dict[str, np.ndarray]], None, None]:
        """Yield ``(filename, example)`` pairs for every document in the split.

        Bug fix: the original passed the same handle to both helpers, but
        ``_doc_to_vector`` exhausts the stream via ``readlines()``, so
        ``_doc_get_sentences`` then read from EOF and every example got an
        empty ``"text"`` sequence. The stream is now rewound between the two
        passes.
        """
        for file_path in Path(f'{dirpath}/{split}').glob('*'):
            # Explicit UTF-8: the corpus is multilingual, so relying on the
            # platform default encoding would break on non-UTF-8 locales.
            with open(file_path, encoding="utf-8") as f:
                label = self._doc_to_vector(f)
                f.seek(0)  # rewind: _doc_to_vector consumed the whole stream
                text = self._doc_get_sentences(f)
            yield file_path.name, {
                "label": label,
                "text": text,
            }