holylovenia committed on
Commit
ca564e2
·
1 Parent(s): 1ac417b

Upload multilexnorm.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. multilexnorm.py +180 -0
multilexnorm.py ADDED
@@ -0,0 +1,180 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from pathlib import Path
2
+ from typing import Dict, List, Tuple
3
+
4
+ import datasets
5
+
6
+ from nusacrowd.utils import schemas
7
+ from nusacrowd.utils.configs import NusantaraConfig
8
+ from nusacrowd.utils.constants import Tasks
9
+
10
# BibTeX entry for the MultiLexNorm shared task paper.
# Fixed: the original `title` field opened a brace that was never closed,
# producing malformed BibTeX.
_CITATION = """\
@inproceedings{multilexnorm,
    title = "{MultiLexNorm}: A Shared Task on Multilingual Lexical Normalization",
    author = "van der Goot, Rob and Ramponi et al.",
    booktitle = "Proceedings of the 7th Workshop on Noisy User-generated Text (W-NUT 2021)",
    year = "2021",
    publisher = "Association for Computational Linguistics",
    address = "Punta Cana, Dominican Republic"
}
"""

_DATASETNAME = "multilexnorm"

_DESCRIPTION = """\
MultiLexNorm is a benchmark dataset for multilingual lexical normalization
covering 12 language variants. This loader targets the Indonesian-English
(code-switched) variant.
"""

_HOMEPAGE = "https://bitbucket.org/robvanderg/multilexnorm/src/master/"

_LOCAL = False
_LANGUAGES = ["ind"]  # We follow ISO639-3 language code (https://iso639-3.sil.org/code_tables/639/data)
_LICENSE = "CC-BY-NC-SA 4.0"

# Raw ".norm" files (one "<raw token>\t<normalized token>" pair per line,
# sentences separated by blank lines), pinned to a fixed upstream commit.
_URLS = {
    "train": "https://bitbucket.org/robvanderg/multilexnorm/raw/e92e5b8f111fea15c7c88aebd4c058f6a1ca8d74/data/iden/train.norm",
    "validation": "https://bitbucket.org/robvanderg/multilexnorm/raw/e92e5b8f111fea15c7c88aebd4c058f6a1ca8d74/data/iden/dev.norm",
    "test": "https://bitbucket.org/robvanderg/multilexnorm/raw/e92e5b8f111fea15c7c88aebd4c058f6a1ca8d74/data/iden/test.norm",
}

_SUPPORTED_TASKS = [Tasks.MULTILEXNORM]

_SOURCE_VERSION = "1.0.0"

_NUSANTARA_VERSION = "1.0.0"
class MultiLexNorm(datasets.GeneratorBasedBuilder):
    """MultiLexNorm: lexical normalization for the Indonesian-English variant.

    Lexical normalization is the translation of noisy social media text to
    its canonical form, e.g.::

        new pix comming tomoroe
        new pictures coming tomorrow
    """

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    NUSANTARA_VERSION = datasets.Version(_NUSANTARA_VERSION)

    BUILDER_CONFIGS = [
        NusantaraConfig(
            name="multilexnorm_source",
            version=_SOURCE_VERSION,
            description="multilexnorm source schema",
            schema="source",
            subset_id="multilexnorm",
        ),
        NusantaraConfig(
            name="multilexnorm_nusantara_t2t",
            version=_NUSANTARA_VERSION,
            description="multilexnorm Nusantara schema",
            schema="nusantara_t2t",
            subset_id="multilexnorm",
        ),
    ]

    DEFAULT_CONFIG_NAME = "multilexnorm_source"

    def _info(self) -> datasets.DatasetInfo:
        """Declare the feature schema for the selected config."""
        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "src_sent": datasets.Value("string"),
                    "id": datasets.Value("string"),
                    "norm_sent": datasets.Value("string"),
                }
            )
        elif self.config.schema == "nusantara_t2t":
            features = schemas.text2text_features

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """Download the three .norm files and map them to train/test/dev splits."""
        data_files = {split: Path(dl_manager.download_and_extract(url)) for split, url in _URLS.items()}

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": data_files["train"],
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": data_files["test"],
                    "split": "test",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": data_files["validation"],
                    "split": "dev",
                },
            ),
        ]

    @staticmethod
    def _read_sentences(filepath: Path):
        """Yield sentences from a CoNLL-like .norm file.

        Each non-blank line is "<raw token>\\t<normalized token>"; a blank
        line ends a sentence. Yields lists of [raw, normalized] token pairs.
        A missing normalization column is padded with "" (the original
        behavior); lines with more than two columns are reported and their
        first two columns kept.
        """
        cur_sent = []
        # `with` ensures the file handle is closed (the original leaked it).
        with open(filepath, encoding="utf-8") as f:
            for line in f:
                tok = line.strip("\n").split("\t")
                if tok == [""] or tok == []:
                    # Sentence boundary. Guard against consecutive blank
                    # lines, which previously produced empty examples.
                    if cur_sent:
                        yield cur_sent
                        cur_sent = []
                else:
                    if len(tok) > 2:
                        # Fixed: the original concatenated str + Path here,
                        # which raised TypeError on this error path.
                        print(f"erroneous input, line:\n{line}\n in file {filepath} contains more than two elements")
                    if len(tok) == 1:
                        tok.append("")
                    cur_sent.append(tok)
        # Flush the final sentence: the original silently dropped it when
        # the file did not end with a blank line.
        if cur_sent:
            yield cur_sent

    def _generate_examples(self, filepath: Path, split: str) -> Tuple[int, Dict]:
        """Yield (key, example) pairs for the requested schema.

        The parsing logic was duplicated across the two schema branches in
        the original; both now share ``_read_sentences``. The leftover
        debug ``print(filepath)`` was removed.
        """
        for i, sent in enumerate(self._read_sentences(filepath)):
            src_sent = " ".join(raw for raw, _ in sent)
            norm_sent = " ".join(norm for _, norm in sent)

            if self.config.schema == "source":
                yield i, {
                    "id": str(i),
                    "src_sent": src_sent,
                    "norm_sent": norm_sent,
                }
            elif self.config.schema == "nusantara_t2t":
                yield i, {
                    "id": str(i),
                    "text_1": src_sent,
                    "text_2": norm_sent,
                    "text_1_name": "src_sent",
                    "text_2_name": "norm_sent",
                }