ArneBinder committed on
Commit 9df1364 · verified · 1 Parent(s): 312df95

use pie-modules instead of pytorch-ie

see https://github.com/ArneBinder/pie-datasets/pull/204 for further information

Files changed (3)
  1. README.md +47 -0
  2. conll2012_ontonotesv5.py +176 -47
  3. requirements.txt +2 -0
README.md ADDED
@@ -0,0 +1,47 @@
+# PIE Dataset Card for CoNLL2012 shared task data based on OntoNotes 5.0
+
+This is a [PyTorch-IE](https://github.com/ChristophAlt/pytorch-ie) (PIE) wrapper for the
+[CoNLL 2012 OntoNotes v.5.0 Huggingface dataset loading script](https://huggingface.co/datasets/conll2012_ontonotesv5).
+
+## Dataset Variants
+
+This dataset contains data in three languages and two versions:
+
+- `arabic_v4`
+- `chinese_v4`
+- `english_v4`
+- `english_v12`
+
+## Data Schema
+
+The document type for this dataset is `Conll2012OntonotesV5Document` which defines the following data fields:
+
+- `id` (str)
+- `tokens` (tuple)
+- `pos_tags` (list)
+- `metadata` (dictionary, optional)
+
+and the following annotation layers:
+
+- `entities` (annotation type: `LabeledSpan`, target: `tokens`)
+- `parts` (annotation type: `LabeledSpan`, target: `tokens`)
+- `predicates` (annotation type: `Predicate`, target: `tokens`)
+- `sentences` (annotation type: `Span`, target: `tokens`)
+- `coref_mentions` (annotation type: `Span`, target: `tokens`)
+- `coref_clusters` (annotation type: `SpanSet`, target: `coref_mentions`)
+- `srl_arguments` (annotation type: `Span`, target: `tokens`)
+- `srl_relations` (annotation type: `NaryRelation`, target: `srl_arguments`)
+- `word_senses` (annotation type: `LabeledSpan`, target: `tokens`)
+- `speakers` (annotation type: `Attribute`, target: `sentences`)
+- `parse_trees` (annotation type: `Attribute`, target: `sentences`)
+
+See [here](https://github.com/ArneBinder/pie-modules/blob/main/src/pie_modules/annotations.py) for the annotation type definitions.
+
+## Document Converters
+
+The dataset provides document converters for the following target document types:
+
+- `pie_modules.documents.TextDocumentWithLabeledSpansAndLabeledPartitions`
+
+See [here](https://github.com/ArneBinder/pie-modules/blob/main/src/pie_modules/documents.py) for the document type
+definitions.
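
For illustration, loading this wrapper and applying the converter listed in the README above might look roughly like the following minimal sketch (not part of the commit; the repository id is a placeholder, and the `load_dataset` / `to_document_type` calls are assumed from the pie-datasets documentation):

```python
# Minimal usage sketch (assumption, not part of this commit): load the PIE-wrapped
# dataset and convert it to the text-based document type named in the README above.
from pie_datasets import load_dataset
from pie_modules.documents import TextDocumentWithLabeledSpansAndLabeledPartitions

# placeholder repository id and config name
dataset = load_dataset("pie/conll2012_ontonotesv5", name="english_v4")
converted = dataset.to_document_type(TextDocumentWithLabeledSpansAndLabeledPartitions)

doc = converted["train"][0]
print(list(doc.labeled_spans)[:3])       # entity annotations as LabeledSpan over the joined text
print(list(doc.labeled_partitions)[:3])  # sentence partitions
```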
conll2012_ontonotesv5.py CHANGED
@@ -1,12 +1,16 @@
 import dataclasses
 from collections import defaultdict
-from typing import Any, Callable, Dict, List, Optional, Tuple
+from typing import Any, Dict, List, Optional, Tuple

 import datasets
-import pytorch_ie
-from pytorch_ie.annotations import LabeledSpan, NaryRelation, Span
-from pytorch_ie.core import Annotation, AnnotationList, Document, annotation_field
-from pytorch_ie.documents import TextDocumentWithLabeledEntitiesAndLabeledPartitions
+from pie_core import Annotation, AnnotationLayer, annotation_field
+from pie_modules.annotations import LabeledSpan, NaryRelation, Span
+from pie_modules.documents import (
+    TextDocumentWithLabeledSpansAndLabeledPartitions,
+    TokenBasedDocument,
+)
+
+from pie_datasets import GeneratorBasedBuilder


 @dataclasses.dataclass(eq=True, frozen=True)
@@ -19,7 +23,7 @@ class SpanSet(Annotation):
         object.__setattr__(
             self,
             "spans",
-            tuple(sorted(set(s for s in self.spans), key=lambda s: (s.start, s.end))),
+            tuple(sorted({s for s in self.spans}, key=lambda s: (s.start, s.end))),
         )


@@ -37,21 +41,19 @@ class Predicate(Span):


 @dataclasses.dataclass
-class Conll2012OntonotesV5Document(Document):
-    tokens: List[str]
-    id: str
-    pos_tags: List[str]
-    sentences: AnnotationList[Span] = annotation_field(target="tokens")
-    parse_trees: AnnotationList[Attribute] = annotation_field(target="sentences")
-    speakers: AnnotationList[Attribute] = annotation_field(target="sentences")
-    parts: AnnotationList[LabeledSpan] = annotation_field(target="tokens")
-    coref_mentions: AnnotationList[Span] = annotation_field(target="tokens")
-    coref_clusters: AnnotationList[SpanSet] = annotation_field(target="coref_mentions")
-    srl_arguments: AnnotationList[Span] = annotation_field(target="tokens")
-    srl_relations: AnnotationList[NaryRelation] = annotation_field(target="srl_arguments")
-    entities: AnnotationList[LabeledSpan] = annotation_field(target="tokens")
-    predicates: AnnotationList[Predicate] = annotation_field(target="tokens")
-    word_senses: AnnotationList[LabeledSpan] = annotation_field(target="tokens")
+class Conll2012OntonotesV5Document(TokenBasedDocument):
+    pos_tags: Optional[List[str]] = None
+    sentences: AnnotationLayer[Span] = annotation_field(target="tokens")
+    parse_trees: AnnotationLayer[Attribute] = annotation_field(target="sentences")
+    speakers: AnnotationLayer[Attribute] = annotation_field(target="sentences")
+    parts: AnnotationLayer[LabeledSpan] = annotation_field(target="tokens")
+    coref_mentions: AnnotationLayer[Span] = annotation_field(target="tokens")
+    coref_clusters: AnnotationLayer[SpanSet] = annotation_field(target="coref_mentions")
+    srl_arguments: AnnotationLayer[Span] = annotation_field(target="tokens")
+    srl_relations: AnnotationLayer[NaryRelation] = annotation_field(target="srl_arguments")
+    entities: AnnotationLayer[LabeledSpan] = annotation_field(target="tokens")
+    predicates: AnnotationLayer[Predicate] = annotation_field(target="tokens")
+    word_senses: AnnotationLayer[LabeledSpan] = annotation_field(target="tokens")


 def bio2spans(bio: List[str], offset: int = 0) -> List[LabeledSpan]:
@@ -99,10 +101,9 @@ def bio2spans(bio: List[str], offset: int = 0) -> List[LabeledSpan]:

 def example_to_document(
     example: Dict[str, Any],
-    entities_int2str: Callable[[int], str],
-    pos_tags_int2str: Optional[Callable[[int], str]] = None,
+    entity_labels: datasets.ClassLabel,
+    pos_tag_labels: Optional[datasets.ClassLabel] = None,
 ) -> Conll2012OntonotesV5Document:
-
     sentences = []
     tokens = []
     pos_tags = []
@@ -112,6 +113,7 @@ def example_to_document(
     predicates = []
     coref_mentions = []
     coref_clusters = []
+    coref_cluster_ids = []
     srl_arguments = []
     srl_relations = []
     word_senses = []
@@ -125,10 +127,12 @@ def example_to_document(
         current_sentence = Span(start=sentence_offset, end=sentence_offset + len(current_tokens))
         sentences.append(current_sentence)

-        if pos_tags_int2str is not None:
+        if pos_tag_labels is not None:
             pos_tags.extend(
-                [pos_tags_int2str(pos_tag_id) for pos_tag_id in sentence_dict["pos_tags"]]
+                [pos_tag_labels.int2str(pos_tag_id) for pos_tag_id in sentence_dict["pos_tags"]]
             )
+            if pos_tag_labels.int2str is None:
+                raise ValueError("pos_tag_labels.int2str is None.")
         else:
             pos_tags.extend(sentence_dict["pos_tags"])
         parse_trees.append(
@@ -138,7 +142,7 @@ def example_to_document(
             Attribute(target_annotation=current_sentence, label=sentence_dict["speaker"])
         )
         named_entities_bio = [
-            entities_int2str(entity_id) for entity_id in sentence_dict["named_entities"]
+            entity_labels.int2str(entity_id) for entity_id in sentence_dict["named_entities"]
         ]
         entities.extend(bio2spans(bio=named_entities_bio, offset=len(tokens)))

@@ -146,7 +150,7 @@ def example_to_document(
             zip(sentence_dict["predicate_lemmas"], sentence_dict["predicate_framenet_ids"])
         ):
             token_idx = sentence_offset + idx
-            if predicate_lemma_value is not None:
+            if predicate_lemma_value is not None or predicate_framenet_id is not None:
                 predicate = Predicate(
                     start=token_idx,
                     end=token_idx + 1,
@@ -165,7 +169,9 @@ def example_to_document(
         current_coref_clusters = [
             SpanSet(spans=tuple(spans)) for spans in coref_clusters_dict.values()
         ]
+        current_coref_cluster_ids = [cluster_id for cluster_id in coref_clusters_dict.keys()]
         coref_clusters.extend(current_coref_clusters)
+        coref_cluster_ids.extend(current_coref_cluster_ids)

         # handle srl_frames
         for frame_dict in sentence_dict["srl_frames"]:
@@ -189,7 +195,8 @@ def example_to_document(
             token_idx = sentence_offset + idx
             if word_sense is not None:
                 word_senses.append(
-                    LabeledSpan(start=token_idx, end=token_idx + 1, label=str(int(word_sense)))
+                    # LabeledSpan(start=token_idx, end=token_idx + 1, label=str(int(word_sense)))
+                    LabeledSpan(start=token_idx, end=token_idx + 1, label=str(word_sense))
                 )

         # handle parts
@@ -210,11 +217,12 @@ def example_to_document(
     parts.append(LabeledSpan(start=last_start, end=len(tokens), label=str(last_part_id)))

     doc = Conll2012OntonotesV5Document(
-        tokens=tokens,
+        tokens=tuple(tokens),
         id=example["document_id"],
         pos_tags=pos_tags,
     )
     # add the annotations to the document
+    doc.parts.extend(parts)
     doc.sentences.extend(sentences)
     doc.parse_trees.extend(parse_trees)
     doc.speakers.extend(speakers)
@@ -225,14 +233,136 @@ def example_to_document(
     doc.srl_arguments.extend(srl_arguments)
     doc.srl_relations.extend(srl_relations)
     doc.word_senses.extend(word_senses)
+    doc.metadata["coref_cluster_ids"] = coref_cluster_ids

     return doc


-def convert_to_text_document_with_labeled_entities_and_labeled_partitions(
+def document_to_example(
+    document: Conll2012OntonotesV5Document,
+    entity_labels: datasets.ClassLabel,
+    pos_tag_labels: Optional[datasets.ClassLabel] = None,
+) -> Dict[str, Any]:
+    example = {
+        "document_id": document.id,
+        "sentences": [],
+    }
+
+    for idx, sentence in enumerate(document.sentences):
+        sent_start = sentence.start
+        sent_end = sentence.end
+        sent_len = sent_end - sent_start
+
+        predicate_lemmas = [None] * sent_len
+        predicate_framenet_ids = [None] * sent_len
+        for pred in document.predicates:
+            if sent_start <= pred.start and pred.end <= sent_end:
+                pred_len = pred.end - pred.start
+                predicate_lemmas[pred.start - sent_start : pred.end - sent_start] = [
+                    pred.lemma
+                ] * pred_len
+                if pred.framenet_id is not None:
+                    predicate_framenet_ids[pred.start - sent_start : pred.end - sent_start] = [
+                        pred.framenet_id
+                    ] * pred_len
+
+        word_senses = [None] * sent_len
+        for sense in document.word_senses:
+            if sent_start <= sense.start and sense.end <= sent_end:
+                word_senses[sense.start - sent_start : sense.end - sent_start] = [
+                    float(sense.label)
+                ] * (sense.end - sense.start)
+
+        named_entities = [0] * sent_len
+        for ent in document.entities:
+            if sent_start <= ent.start and ent.end <= sent_end:
+                ent_len = ent.end - ent.start
+                named_entities[ent.start - sent_start] = entity_labels.str2int("B-" + ent.label)
+                if ent_len > 1:
+                    named_entities[ent.start - sent_start + 1 : ent.end - sent_start] = [
+                        entity_labels.str2int("I-" + ent.label)
+                    ] * (ent_len - 1)
+
+        srl_frames = []
+        for srl_rel in document.srl_relations:
+            span_start = min([span.start for span in srl_rel.arguments])
+            span_end = max([span.end for span in srl_rel.arguments])
+            if sent_start <= span_start and span_end <= sent_end:
+                verb = None
+                frames = ["O"] * sent_len
+                for arg, role in zip(srl_rel.arguments, srl_rel.roles):
+                    frames[arg.start - sent_start] = "B-" + role
+                    if arg.end - arg.start > 1:
+                        frames[arg.start - sent_start + 1 : arg.end - sent_start] = [
+                            "I-" + role
+                        ] * (arg.end - arg.start - 1)
+                    # english_v4 and arabic_v4 contain some weird role names (in addition to "V") for the verb
+                    if role in [
+                        "V",
+                        "ARG0(V",
+                        "ARG1(V",
+                        "C-ARG0(V",
+                        "C-ARG1(V",
+                        "C-ARG2(V",
+                        "R-ARG0(V",
+                        "R-ARG1(V",
+                    ]:
+                        verb = document.tokens[arg.start]
+                if verb is None:
+                    raise ValueError(f"Verb not found for SRL relation: {srl_rel}")
+                srl_frames.append({"verb": verb, "frames": frames})
+
+        coref_spans = []
+        for cluster, id in zip(document.coref_clusters, document.metadata["coref_cluster_ids"]):
+            span_start = min([span.start for span in cluster.spans])
+            span_end = max([span.end for span in cluster.spans])
+            if sent_start <= span_start and span_end <= sent_end:
+                current_coref = [
+                    [id, span.start - sent_start, span.end - sent_start - 1]
+                    for span in cluster.spans
+                ]
+                coref_spans.extend(current_coref)
+
+        for part in document.parts:
+            if part.start <= sent_start and sent_end <= part.end:
+                part_id = int(part.label)
+
+        pos_tags = []
+        if pos_tag_labels is not None:
+            pos_tags.extend(
+                [
+                    pos_tag_labels.str2int(pos_tag)
+                    for pos_tag in document.pos_tags[sent_start:sent_end]
+                ]
+            )
+            if pos_tag_labels.int2str is None:
+                raise ValueError("pos_tag_labels.str2int is None.")
+        else:
+            pos_tags = document.pos_tags[sent_start:sent_end]
+
+        example_sentence = {
+            "part_id": part_id,
+            "words": list(document.tokens[sent_start:sent_end]),
+            "pos_tags": pos_tags,
+            "parse_tree": document.parse_trees[idx].label,
+            "predicate_lemmas": predicate_lemmas,
+            "predicate_framenet_ids": predicate_framenet_ids,
+            "word_senses": word_senses,
+            "speaker": document.speakers[idx].label,
+            "named_entities": named_entities,
+            "srl_frames": srl_frames,
+            "coref_spans": coref_spans,
+        }
+
+        example["sentences"].append(example_sentence)
+
+    return example
+
+
+def convert_to_text_document_with_labeled_spans_and_labeled_partitions(
     doc: Conll2012OntonotesV5Document,
     token_separator: str = " ",
-) -> TextDocumentWithLabeledEntitiesAndLabeledPartitions:
+) -> TextDocumentWithLabeledSpansAndLabeledPartitions:
     start = 0
     token_offsets: List[Tuple[int, int]] = []
     for token in doc.tokens:
@@ -258,9 +388,9 @@ def convert_to_text_document_with_labeled_entities_and_labeled_partitions(
         char_offset_sentence = LabeledSpan(start=char_start, end=char_end, label="sentence")
         sentence_map[(sentence.start, sentence.end)] = char_offset_sentence

-    new_doc = TextDocumentWithLabeledEntitiesAndLabeledPartitions(text=text, id=doc.id)
-    new_doc.entities.extend(entity_map.values())
-    new_doc.partitions.extend(sentence_map.values())
+    new_doc = TextDocumentWithLabeledSpansAndLabeledPartitions(text=text, id=doc.id)
+    new_doc.labeled_spans.extend(entity_map.values())
+    new_doc.labeled_partitions.extend(sentence_map.values())

     return new_doc

@@ -280,7 +410,7 @@ class Conll2012OntonotesV5Config(datasets.BuilderConfig):
         assert conll_version in ["v4", "v12"]
         if conll_version == "v12":
             assert language == "english"
-        super(Conll2012OntonotesV5Config, self).__init__(
+        super().__init__(
             name=f"{language}_{conll_version}",
             description=f"{conll_version} of CoNLL formatted OntoNotes dataset for {language}.",
             version=datasets.Version("1.0.0"),  # hf dataset script version
@@ -290,14 +420,15 @@ class Conll2012OntonotesV5Config(datasets.BuilderConfig):
         self.conll_version = conll_version


-class Conll2012Ontonotesv5(pytorch_ie.data.builder.GeneratorBasedBuilder):
+class Conll2012Ontonotesv5(GeneratorBasedBuilder):
     DOCUMENT_TYPE = Conll2012OntonotesV5Document

     DOCUMENT_CONVERTERS = {
-        TextDocumentWithLabeledEntitiesAndLabeledPartitions: convert_to_text_document_with_labeled_entities_and_labeled_partitions
+        TextDocumentWithLabeledSpansAndLabeledPartitions: convert_to_text_document_with_labeled_spans_and_labeled_partitions
     }

-    BASE_DATASET_PATH = "DFKI-SLT/conll2012_ontonotesv5"
+    BASE_DATASET_PATH = "conll2012_ontonotesv5"
+    BASE_DATASET_REVISION = "1161216f7e7185a4b2f4d0a4e0734dc7919dfa15"

     BUILDER_CONFIGS = [
         Conll2012OntonotesV5Config(
@@ -315,13 +446,11 @@ class Conll2012Ontonotesv5(pytorch_ie.data.builder.GeneratorBasedBuilder):
     def _generate_document_kwargs(self, dataset):
         pos_tags_feature = dataset.features["sentences"][0]["pos_tags"].feature
         return dict(
-            entities_int2str=dataset.features["sentences"][0]["named_entities"].feature.int2str,
-            pos_tags_int2str=pos_tags_feature.int2str
-            if isinstance(pos_tags_feature, datasets.ClassLabel)
-            else None,
+            entity_labels=dataset.features["sentences"][0]["named_entities"].feature,
+            pos_tag_labels=(
+                pos_tags_feature if isinstance(pos_tags_feature, datasets.ClassLabel) else None
+            ),
         )

-    def _generate_document(self, example, entities_int2str, pos_tags_int2str):
-        return example_to_document(
-            example, entities_int2str=entities_int2str, pos_tags_int2str=pos_tags_int2str
-        )
+    def _generate_document(self, example, **document_kwargs):
+        return example_to_document(example, **document_kwargs)
 
 
requirements.txt ADDED
@@ -0,0 +1,2 @@
+pie-datasets>=0.10.11,<0.11.0
+pie-modules>=0.15.9,<0.16.0