moska commited on
Commit
f94bcdc
·
1 Parent(s): 94d90a1

feat: initial loading version

Browse files
.gitattributes CHANGED
@@ -58,3 +58,21 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
58
  # Video files - compressed
59
  *.mp4 filter=lfs diff=lfs merge=lfs -text
60
  *.webm filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
58
  # Video files - compressed
59
  *.mp4 filter=lfs diff=lfs merge=lfs -text
60
  *.webm filter=lfs diff=lfs merge=lfs -text
61
+ data/glex_test.jsonl filter=lfs diff=lfs merge=lfs -text
62
+ data/wsd_et_test.jsonl filter=lfs diff=lfs merge=lfs -text
63
+ data/wsd_hr_test.jsonl filter=lfs diff=lfs merge=lfs -text
64
+ data/wsd_it_test.jsonl filter=lfs diff=lfs merge=lfs -text
65
+ data/wsd_test.jsonl filter=lfs diff=lfs merge=lfs -text
66
+ data/wsd_ca_test.jsonl filter=lfs diff=lfs merge=lfs -text
67
+ data/wsd_da_test.jsonl filter=lfs diff=lfs merge=lfs -text
68
+ data/wsd_ja_test.jsonl filter=lfs diff=lfs merge=lfs -text
69
+ data/wsd_sl_test.jsonl filter=lfs diff=lfs merge=lfs -text
70
+ data/wsd_gl_test.jsonl filter=lfs diff=lfs merge=lfs -text
71
+ data/wsd_hu_test.jsonl filter=lfs diff=lfs merge=lfs -text
72
+ data/wsd_nl_test.jsonl filter=lfs diff=lfs merge=lfs -text
73
+ data/wsd_bg_test.jsonl filter=lfs diff=lfs merge=lfs -text
74
+ data/wsd_en_test.jsonl filter=lfs diff=lfs merge=lfs -text
75
+ data/wsd_es_test.jsonl filter=lfs diff=lfs merge=lfs -text
76
+ data/wsd_eu_test.jsonl filter=lfs diff=lfs merge=lfs -text
77
+ data/wsd_de_test.jsonl filter=lfs diff=lfs merge=lfs -text
78
+ data/wsd_fr_test.jsonl filter=lfs diff=lfs merge=lfs -text
data/glex_test.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c827d5902f432c8529820679d72d14e14107b447c8c397b140792870fd6f00d9
3
+ size 11823262
data/wsd_bg_test.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f4cb2e188de2491bdd223c84d851427ae4f277f0d7314a531e536128196c31dd
3
+ size 3061292
data/wsd_ca_test.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ac9bb4d6be410760fe4f44616681e05bfbda6f749c4b4f1eb42462db89070225
3
+ size 531972
data/wsd_da_test.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:62471656e06089fc9c702a33f2ed4e8f20ffe65832adcf350769664c8655ed8d
3
+ size 1354910
data/wsd_de_test.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:77f80fe3b4970ced2de3be3945f30b93819f4e04b4724e6627c0b0e6c06ca5d4
3
+ size 534408
data/wsd_en_test.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5af6e32a185f234336964eb2f520a62a14d039004d4e7404db20e3dffbbed5a4
3
+ size 3379854
data/wsd_es_test.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:afeaa4d249a26c28a5db54591e4214b72e725dfb296d1535d6c53ab60ebbaa3e
3
+ size 1068750
data/wsd_et_test.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d1dc289b0c774d97d8116b6685f5d12700ddc8371fba6efffc3bb72e2ebbdec6
3
+ size 634051
data/wsd_eu_test.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d8f77a5b6e814cfdfdc73b73445f28246391d2e52237c0f2f9425254377f8a52
3
+ size 590709
data/wsd_fr_test.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5597b924f73e9dd3dba8c23ef63b63e388707490e03d35c2da433e8c9a14f130
3
+ size 760330
data/wsd_gl_test.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0eabb279a67872ec56f8973842da1ca4acf1807cf2c0ab505e75ff976b0b2b9c
3
+ size 577704
data/wsd_hr_test.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:799a879b4711acf3897ddcb31e047f93d4b8c7d23e6dfbf04d0c989251c2f483
3
+ size 1925744
data/wsd_hu_test.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0356f457fdad2358e1e802e65063067ab25ceeff961653edb1c6c55d2a3a1958
3
+ size 1465706
data/wsd_it_test.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1d992cf3f83a0d5d7dd770ea9e916d70ec947086885be23e1d8bc7f0626bc370
3
+ size 1363102
data/wsd_ja_test.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:81cc66686669a6f44161714c4b4ba6310e041801a3982f66926d6dbb6c840e93
3
+ size 2069385
data/wsd_nl_test.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:91e29be2334d94406ae903a3963aa3ba9c34d09599ab6814ad5a94d1d5d189a7
3
+ size 1350916
data/wsd_sl_test.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:81bdaa2dcd893e33e0e4b19ddecf8de6860975a05528535e5110d5596bb0331a
3
+ size 1004397
data/wsd_test.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:527e0e5f6f73e18e58fe1f863fcc6d8e612245f236a0e1419e498e6100149596
3
+ size 178957026
wsd_entitylinker_test.py ADDED
@@ -0,0 +1,201 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import json
from typing import Sequence

import datasets

logger = datasets.logging.get_logger(__name__)

_CITATION = """\
@InProceedings{10.1007/978-3-031-08754-7_70,
author="Janz, Arkadiusz
and Dziob, Agnieszka
and Oleksy, Marcin
and Baran, Joanna",
editor="Groen, Derek
and de Mulatier, Cl{\'e}lia
and Paszynski, Maciej
and Krzhizhanovskaya, Valeria V.
and Dongarra, Jack J.
and Sloot, Peter M. A.",
title="A Unified Sense Inventory for Word Sense Disambiguation in Polish",
booktitle="Computational Science -- ICCS 2022",
year="2022",
publisher="Springer International Publishing",
address="Cham",
pages="682--689",
isbn="978-3-031-08754-7"
}
"""
_DESCRIPTION = """\
Polish WSD training data manually annotated by experts according to plWordNet-4.2.
"""

_LICENSE = "cc-by-4.0"

# Available data slices: two Polish corpora ("glex", "wsd") followed by
# the per-language WSD test sets.
_CORPUS_NAMES = [
    "glex",
    "wsd",
    "bg",
    "ca",
    "da",
    "de",
    "en",
    "es",
    "et",
    "eu",
    "fr",
    "gl",
    "hr",
    "hu",
    "it",
    "ja",
    "nl",
    "sl",
]

# Relative jsonl path for each corpus. Insertion order intentionally mirrors
# _CORPUS_NAMES ("glex", "wsd", then the language codes) because the "all"
# config reads the files in _URLS.values() order.
_URLS = {
    "glex": "data/glex_test.jsonl",
    "wsd": "data/wsd_test.jsonl",
}
_URLS.update((lang, f"data/wsd_{lang}_test.jsonl") for lang in _CORPUS_NAMES[2:])
class WsdEntityLinkerBuilderConfig(datasets.BuilderConfig):
    """BuilderConfig describing one corpus slice of the WSD entity-linker data.

    Args:
        data_urls: jsonl file paths (relative to the repository root) that
            make up this configuration.
        corpus: one of ``_CORPUS_NAMES`` or ``"all"``.
        data_type: suffix appended to the config name; defaults to ``"text"``
            so callers may omit it (the resulting names match
            ``DEFAULT_CONFIG_NAME = "all_text"``). It was previously a
            required argument, which made any call that left it out fail
            with a ``TypeError``.

    Raises:
        ValueError: if ``corpus`` is not a known corpus name.
    """

    def __init__(
        self,
        data_urls: Sequence[str],
        corpus: str,
        data_type: str = "text",
        **kwargs,
    ):
        super().__init__(
            name=f"{corpus}_{data_type}",
            version=datasets.Version("1.0.0"),
            **kwargs,
        )

        self.data_type = data_type
        self.corpus = corpus
        self.data_urls = data_urls

        # Fail fast on typos: only declared corpora (or "all") are accepted.
        if self.corpus not in (*_CORPUS_NAMES, "all"):
            raise ValueError(
                f"Corpus name `{self.corpus}` is not available. Enter one of: {(*_CORPUS_NAMES, 'all')}"
            )
class WsdEntityLinkerDataset(datasets.GeneratorBasedBuilder):
    """WSD Entity Linker test data"""

    # One config per corpus. BUG FIX: the original iterated
    # `itertools.product(_CORPUS_NAMES)`, which yields 1-tuples such as
    # ("glex",); `_URLS[corpus_name]` then raised KeyError before any config
    # could be built. Iterating the plain list is what was intended.
    BUILDER_CONFIGS = [
        WsdEntityLinkerBuilderConfig(
            corpus=corpus_name,
            data_type="text",
            data_urls=[_URLS[corpus_name]],
            description=f"Data part covering `{corpus_name}`.",
        )
        for corpus_name in _CORPUS_NAMES
    ]
    # Extra config that concatenates every corpus file, in _URLS order.
    BUILDER_CONFIGS.append(
        WsdEntityLinkerBuilderConfig(
            corpus="all",
            data_type="text",
            data_urls=list(_URLS.values()),
            description="Data part covering `all` corpora ",
        )
    )

    DEFAULT_CONFIG_NAME = "all_text"

    def _info(self) -> datasets.DatasetInfo:
        """Declare the feature schema shared by every corpus file."""
        features = datasets.Features(
            {
                "id": datasets.Value("string"),
                "meta": datasets.Features(
                    {
                        "left_context": datasets.Value("string"),
                        "right_context": datasets.Value("string"),
                        "mention": datasets.Value("string"),
                    }
                ),
                # Gold answers: each output carries a list of provenance
                # records pointing at Wikidata entities.
                "output": datasets.features.Sequence(
                    datasets.Features(
                        {
                            "provenance": datasets.features.Sequence(
                                datasets.Features(
                                    {
                                        "wikidata_id": datasets.Value("string"),
                                    }
                                )
                            ),
                        }
                    )
                ),
                "lemma": datasets.Value("string"),
                # Cross-wordnet links (BabelNet, Princeton WordNet, plWordNet).
                "provenance_to_other_wordnets": datasets.features.Sequence(
                    datasets.Features(
                        {
                            "provenance_other_wordnets": datasets.features.Sequence(
                                datasets.Features(
                                    {
                                        "bn_syn_id": datasets.Value("string"),
                                        "PWN_syn_id": datasets.Value("string"),
                                        "plWN_syn_id": datasets.Value("string"),
                                    }
                                )
                            ),
                        }
                    )
                ),
                "source": datasets.features.Sequence(datasets.Value("string")),
                "input": datasets.Value("string"),
            }
        )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            license=_LICENSE,
            features=features,
            supervised_keys=None,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Resolve the config's data files and expose them as one split."""
        # Resolve paths via dl_manager so the script also works when the data
        # files live on the Hub rather than next to a local checkout; for
        # local files this returns the paths unchanged.
        filepaths = dl_manager.download(self.config.data_urls)
        return [
            datasets.SplitGenerator(
                # NOTE(review): files are named *_test.jsonl — Split.TEST may
                # be more accurate, but TRAIN is kept so existing callers that
                # load dataset["train"] keep working.
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepaths": filepaths,
                },
            ),
        ]

    def _generate_examples(self, filepaths: Sequence[str]):
        """Yield (key, example) pairs, reading each jsonl file line by line.

        Keys are a running counter across all files so they stay unique when
        several corpora are concatenated in the "all" config.
        """
        key_iter = 0
        for filepath in filepaths:
            with open(filepath, encoding="utf-8") as f:
                for line in f:
                    yield key_iter, json.loads(line)
                    key_iter += 1