ramybaly committed
Commit d0f678c · 1 Parent(s): f1cb494

added the dataset script and text files

Files changed (5)
  1. .gitattributes +3 -0
  2. conll2012.py +239 -0
  3. test.txt +3 -0
  4. train.txt +3 -0
  5. validation.txt +3 -0
.gitattributes CHANGED
@@ -14,3 +14,6 @@
  *.pb filter=lfs diff=lfs merge=lfs -text
  *.pt filter=lfs diff=lfs merge=lfs -text
  *.pth filter=lfs diff=lfs merge=lfs -text
+ train.txt filter=lfs diff=lfs merge=lfs -text
+ validation.txt filter=lfs diff=lfs merge=lfs -text
+ test.txt filter=lfs diff=lfs merge=lfs -text
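
These attribute lines route the three text files through Git LFS, so the blobs committed further down in this diff are small pointer stubs (version / oid / size lines) rather than the data itself. A minimal sketch of reading such a pointer back into its fields, assuming a hypothetical local path to the checked-out stub:

# Parse a Git LFS pointer stub into a dict; each line is "key value".
def read_lfs_pointer(path):
    fields = {}
    with open(path, encoding="utf-8") as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return fields

# "train.txt" here is a hypothetical path to the pointer stub, not the data.
pointer = read_lfs_pointer("train.txt")
print(pointer["oid"], pointer["size"])  # e.g. sha256:9b05... 36758959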
conll2012.py ADDED
@@ -0,0 +1,239 @@
+ # coding=utf-8
+ # Copyright 2020 HuggingFace Datasets Authors.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ # Lint as: python3
+ # CoNLL-2012 Shared Task: Modeling Multilingual Unrestricted Coreference in OntoNotes
+
+ import datasets
+
+
+ logger = datasets.logging.get_logger(__name__)
+
+
+ _CITATION = """\
+ @inproceedings{pradhan2012conll,
+     title={CoNLL-2012 shared task: Modeling multilingual unrestricted coreference in OntoNotes},
+     author={Pradhan, Sameer and Moschitti, Alessandro and Xue, Nianwen and Uryupina, Olga and Zhang, Yuchen},
+     booktitle={Joint Conference on EMNLP and CoNLL - Shared Task},
+     pages={1--40},
+     year={2012}
+ }
+ """
+
+ _DESCRIPTION = """\
+ The CoNLL-2012 shared task involved predicting coreference in English, Chinese, and Arabic, using the final version, v5.0,
+ of the OntoNotes corpus. It was a follow-on to the English-only task organized in 2011. Until the creation of the OntoNotes
+ corpus, resources in this sub-field of language processing were limited to noun phrase coreference, often on a restricted
+ set of entities, such as the ACE entities. OntoNotes provides a large-scale corpus of general anaphoric coreference not
+ restricted to noun phrases or to a specified set of entity types, and covers multiple languages. OntoNotes also provides
+ additional layers of integrated annotation, capturing additional shallow semantic structure. This paper describes the
+ OntoNotes annotation (coreference and other layers) and then describes the parameters of the shared task including the
+ format, pre-processing information, and evaluation criteria, and presents and discusses the results achieved by the
+ participating systems. The task of coreference has had a complex evaluation history. Potentially many evaluation conditions
+ have, in the past, made it difficult to judge the improvement in new algorithms over previously reported results. Having a
+ standard test set and standard evaluation parameters, all based on a resource that provides multiple integrated annotation
+ layers (syntactic parses, semantic roles, word senses, named entities and coreference) and in multiple languages could
+ support joint modeling and help ground and energize ongoing research in the task of entity and event coreference.
+ For more details see https://aclanthology.org/W12-4501.pdf
+ """
+
+
+ class Conll2012Config(datasets.BuilderConfig):
+     """BuilderConfig for Conll2012."""
+
+     def __init__(self, **kwargs):
+         """BuilderConfig for Conll2012.
+
+         Args:
+             **kwargs: keyword arguments forwarded to super.
+         """
+         super(Conll2012Config, self).__init__(**kwargs)
+
+
+ class Conll2012(datasets.GeneratorBasedBuilder):
+     """Conll2012 dataset."""
+
+     BUILDER_CONFIGS = [
+         Conll2012Config(name="conll2012", version=datasets.Version("1.0.0"), description="Conll2012 dataset"),
+     ]
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "id": datasets.Value("string"),
+                     "tokens": datasets.Sequence(datasets.Value("string")),
+                     "pos_tags": datasets.Sequence(
+                         datasets.features.ClassLabel(
+                             names=[
+                                 "$", "''", "*", ",", "-LRB-", "-RRB-", ".", ":", "ADD", "AFX",
+                                 "CC", "CD", "DT", "EX", "FW", "HYPH", "IN", "JJ", "JJR", "JJS",
+                                 "LS", "MD", "NFP", "NN", "NNP", "NNPS", "NNS", "PDT", "POS",
+                                 "PRP", "PRP$", "RB", "RBR", "RBS", "RP", "SYM", "TO", "UH",
+                                 "VB", "VBD", "VBG", "VBN", "VBP", "VBZ", "VERB", "WDT", "WP",
+                                 "WP$", "WRB", "XX", "``",
+                             ]
+                         )
+                     ),
+                     "ner_tags": datasets.Sequence(
+                         datasets.features.ClassLabel(
+                             names=[
+                                 "O",
+                                 "B-CARDINAL", "B-DATE", "B-EVENT", "B-FAC", "B-GPE", "B-LANGUAGE",
+                                 "B-LAW", "B-LOC", "B-MONEY", "B-NORP", "B-ORDINAL", "B-ORG",
+                                 "B-PERCENT", "B-PERSON", "B-PRODUCT", "B-QUANTITY", "B-TIME",
+                                 "B-WORK_OF_ART",
+                                 "I-CARDINAL", "I-DATE", "I-EVENT", "I-FAC", "I-GPE", "I-LANGUAGE",
+                                 "I-LAW", "I-LOC", "I-MONEY", "I-NORP", "I-ORDINAL", "I-ORG",
+                                 "I-PERCENT", "I-PERSON", "I-PRODUCT", "I-QUANTITY", "I-TIME",
+                                 "I-WORK_OF_ART",
+                             ]
+                         )
+                     ),
+                 }
+             ),
+             supervised_keys=None,
+             homepage="https://catalog.ldc.upenn.edu/LDC2013T19",
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         """Returns SplitGenerators."""
+         urls_to_download = {
+             # "train": f"{_URL}{_TRAINING_FILE}",
+             # "validation": f"{_URL}{_DEV_FILE}",
+             # "test": f"{_URL}{_TEST_FILE}",
+             "train": "train.txt",
+             "validation": "validation.txt",
+             "test": "test.txt",
+         }
+         downloaded_files = dl_manager.download_and_extract(urls_to_download)
+
+         return [
+             datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
+             datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["validation"]}),
+             datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
+         ]
+
+     def _generate_examples(self, filepath):
+         logger.info("⏳ Generating examples from = %s", filepath)
+
+         with open(filepath, encoding="utf-8") as f:
+             lines = f.readlines()
+
+         guid = 0
+         tokens = []
+         pos_tags = []
+         ner_tags = []
+
+         for line in lines:
+             if line.startswith("-DOCSTART-") or line.strip() == "":
+                 if tokens:
+                     yield guid, {
+                         "id": str(guid),
+                         "tokens": tokens,
+                         "pos_tags": pos_tags,
+                         "ner_tags": ner_tags,
+                     }
+                     guid += 1
+                     tokens = []
+                     pos_tags = []
+                     ner_tags = []
+             else:
+                 # conll2012 columns are tab-separated: token, POS tag, (unused), NER tag
+                 splits = line.split("\t")
+                 tokens.append(splits[0])
+                 pos_tags.append(splits[1])
+                 ner_tags.append(splits[3].rstrip())
+         # last example, unless the file ended with a blank line
+         if tokens:
+             yield guid, {
+                 "id": str(guid),
+                 "tokens": tokens,
+                 "pos_tags": pos_tags,
+                 "ner_tags": ner_tags,
+             }
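
For reference, a minimal sketch of loading the dataset through this script with the datasets library. It assumes the script and the three text files sit together in the working directory, since `_split_generators` points at the relative paths train.txt / validation.txt / test.txt:

from datasets import load_dataset

# Hypothetical local invocation; train.txt / validation.txt / test.txt must be
# the real data files (LFS-resolved), not the pointer stubs committed below.
ds = load_dataset("conll2012.py")
example = ds["train"][0]
print(example["tokens"])
# ClassLabel stores tags as integers; map them back to names for inspection.
ner_names = ds["train"].features["ner_tags"].feature.names
print([ner_names[i] for i in example["ner_tags"]])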
test.txt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7ca6d86e6a641c0bb3c56638ce88abdcc56a243a3e678205d7fcd978108f1e9b
+ size 3821129
train.txt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9b05b94a76652c7d53979b6f854631afa8a8f0a50793f408c198af37d9323c19
+ size 36758959
validation.txt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5c78bc284b229a2d6d95ccb9190015aacfb75d8be4cdc773de24ad63007878e0
+ size 5107622
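
Only these LFS pointers are committed; the split files themselves live in LFS storage. From the indexing in `_generate_examples` (columns 0, 1 and 3 of each tab-separated line), a data line carries a token, its POS tag, an unused column, and its NER tag, with blank lines separating sentences. A sketch with made-up content to illustrate the expected shape:

# Two hypothetical lines in the layout _generate_examples expects:
# token <TAB> pos_tag <TAB> (unused) <TAB> ner_tag, blank line between sentences.
sample = "John\tNNP\t-\tB-PERSON\nslept\tVBD\t-\tO\n\n"
for line in sample.splitlines():
    if line.strip():
        token, pos, _, ner = line.split("\t")
        print(token, pos, ner)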