redhijosbello committed on
Commit
f28420f
·
1 Parent(s): e5d146b

files uploaded

Browse files
Files changed (4) hide show
  1. dev.conll +0 -0
  2. test.conll +0 -0
  3. train.conll +0 -0
  4. wl.py +116 -0
dev.conll ADDED
The diff for this file is too large to render. See raw diff
 
test.conll ADDED
The diff for this file is too large to render. See raw diff
 
train.conll ADDED
The diff for this file is too large to render. See raw diff
 
wl.py ADDED
@@ -0,0 +1,116 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ import datasets
3
+
4
+
5
+ logger = datasets.logging.get_logger(__name__)
6
+
7
+
8
+ _LICENSE = "Creative Commons Attribution 4.0 International"
9
+
10
+ _VERSION = "1.1.0"
11
+
12
+ _URL = "https://huggingface.co/datasets/plncmm/wl/resolve/main/"
13
+ _TRAINING_FILE = "train.conll"
14
+ _DEV_FILE = "dev.conll"
15
+ _TEST_FILE = "test.conll"
16
+
17
class WLConfig(datasets.BuilderConfig):
    """BuilderConfig for the WL dataset.

    A thin wrapper around :class:`datasets.BuilderConfig`; every keyword
    argument is forwarded unchanged to the base class.
    """

    def __init__(self, **kwargs):
        """Forward all keyword arguments to ``datasets.BuilderConfig``."""
        super().__init__(**kwargs)
+
23
+
24
class WL(datasets.GeneratorBasedBuilder):
    """WL dataset: a CoNLL-style NER corpus with BIO tags.

    Each example is a sentence with parallel ``tokens`` and ``ner_tags``
    sequences; the tag set covers clinical entity types (diseases,
    medications, body parts, procedures, ...).
    """

    BUILDER_CONFIGS = [
        WLConfig(
            name="WL",
            version=datasets.Version(_VERSION),
            description="WL dataset"),
    ]

    def _info(self):
        """Return the dataset schema: string id, token sequence, BIO tag sequence."""
        return datasets.DatasetInfo(
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    "ner_tags": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=[
                                "O",
                                "B-Disease",
                                "I-Disease",
                                "B-Medication",
                                "I-Medication",
                                "B-Abbreviation",
                                "I-Abbreviation",
                                "B-Body_Part",
                                "I-Body_Part",
                                "B-Family_Member",
                                "I-Family_Member",
                                "B-Sign_or_Symptom",
                                "I-Sign_or_Symptom",
                                "B-Laboratory_or_Test_Result",
                                "I-Laboratory_or_Test_Result",
                                "B-Clinical_Finding",
                                "I-Clinical_Finding",
                                "B-Diagnostic_Procedure",
                                "I-Diagnostic_Procedure",
                                "B-Laboratory_Procedure",
                                "I-Laboratory_Procedure",
                                "B-Therapeutic_Procedure",
                                "I-Therapeutic_Procedure",
                            ]
                        )
                    ),
                }
            ),
            supervised_keys=None,
            # _LICENSE was defined at module level but never used; surface it here.
            license=_LICENSE,
        )

    def _split_generators(self, dl_manager):
        """Download the three CoNLL files and return train/validation/test splits."""
        urls_to_download = {
            "train": f"{_URL}{_TRAINING_FILE}",
            "dev": f"{_URL}{_DEV_FILE}",
            "test": f"{_URL}{_TEST_FILE}",
        }
        downloaded_files = dl_manager.download_and_extract(urls_to_download)

        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
        ]

    def _generate_examples(self, filepath):
        """Yield ``(guid, example)`` pairs parsed from a CoNLL file.

        Sentences are separated by blank lines.  Each non-blank line is
        space-separated; the first field is the token and the last field
        is the NER tag (intermediate columns, if any, are ignored).

        Args:
            filepath: Path to a local CoNLL-formatted text file.

        Yields:
            Tuples of ``(guid, {"id", "tokens", "ner_tags"})``.
        """
        logger.info("⏳ Generating examples from = %s", filepath)
        with open(filepath, encoding="utf-8") as f:
            guid = 0
            # NOTE: the original also initialized an unused `pos_tags` list;
            # it was dead code and has been removed.
            tokens = []
            ner_tags = []
            for line in f:
                if line == "\n":
                    if tokens:
                        yield guid, {
                            "id": str(guid),
                            "tokens": tokens,
                            "ner_tags": ner_tags,
                        }
                        guid += 1
                        tokens = []
                        ner_tags = []
                else:
                    splits = line.split(" ")
                    tokens.append(splits[0])
                    ner_tags.append(splits[-1].rstrip())
            # Last example: only emit if the file did not end with a blank
            # line — the original unconditionally yielded here, producing a
            # spurious empty example for files terminated by "\n".
            if tokens:
                yield guid, {
                    "id": str(guid),
                    "tokens": tokens,
                    "ner_tags": ner_tags,
                }