christophalt committed on
Commit
360aef6
·
1 Parent(s): aa6a999

Upload all datasets to hub

Browse files

Commit from https://github.com/huggingface/datasets/pie/commit/44e2b49f756ae55906addbd4cbcb339698ea0e7c

Files changed (2) hide show
  1. dummy/wnut_17/1.0.0/dummy_data.zip +3 -0
  2. wnut_17.py +57 -0
dummy/wnut_17/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c7d6728b17fbc087bdd3825a08c61501697ccf7d9b78a164a1c601b4e0399598
3
+ size 1637
wnut_17.py ADDED
@@ -0,0 +1,57 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from dataclasses import dataclass
2
+
3
+ import datasets
4
+ import pytorch_ie.data.builder
5
+ from pytorch_ie.annotations import LabeledSpan
6
+ from pytorch_ie.core import AnnotationList, annotation_field
7
+ from pytorch_ie.documents import TextDocument
8
+ from pytorch_ie.utils.span import tokens_and_tags_to_text_and_labeled_spans
9
+
10
+
11
class WNUT_17Config(datasets.BuilderConfig):
    """Builder configuration for the WNUT 17 Emerging Entities dataset."""

    def __init__(self, **kwargs):
        """Create a WNUT 17 builder configuration.

        Args:
            **kwargs: passed through unchanged to ``datasets.BuilderConfig``.
        """
        super().__init__(**kwargs)
20
+
21
+
22
@dataclass
class WNUT17Document(TextDocument):
    """Text document type produced by the WNUT 17 builder: plain text plus named-entity spans."""

    # Labeled entity spans anchored on this document's ``text`` field.
    entities: AnnotationList[LabeledSpan] = annotation_field(target="text")
25
+
26
+
27
class WNUT17(pytorch_ie.data.builder.GeneratorBasedBuilder):
    """PIE dataset builder for the WNUT 17 Emerging Entities dataset.

    Wraps the Hugging Face ``wnut_17`` dataset and converts each token/tag
    example into a :class:`WNUT17Document` with labeled entity spans.
    """

    DOCUMENT_TYPE = WNUT17Document

    BASE_DATASET_PATH = "wnut_17"

    BUILDER_CONFIGS = [
        WNUT_17Config(
            name="wnut_17",
            version=datasets.Version("1.0.0"),
            description="The WNUT 17 Emerging Entities Dataset",
        ),
    ]

    def _generate_document_kwargs(self, dataset):
        """Expose the tag-id-to-label resolver of the source dataset."""
        tag_feature = dataset.features["ner_tags"].feature
        return {"int_to_str": tag_feature.int2str}

    def _generate_document(self, example, int_to_str):
        """Convert one token/tag example into a WNUT17Document with entity spans."""
        # Map integer tag ids to their string labels before span extraction.
        labels = [int_to_str(raw_tag) for raw_tag in example["ner_tags"]]
        text, spans = tokens_and_tags_to_text_and_labeled_spans(
            tokens=example["tokens"], tags=labels
        )

        document = WNUT17Document(text=text, id=example["id"])
        # Attach entities ordered by their start offset in the text.
        for entity in sorted(spans, key=lambda entity: entity.start):
            document.entities.append(entity)

        return document