Update MANTRAGSC.py
Browse files- MANTRAGSC.py +39 -0
MANTRAGSC.py
CHANGED
|
@@ -85,6 +85,39 @@ _DATASET_TYPES = {
|
|
| 85 |
"patents": "Patent",
|
| 86 |
}
|
| 87 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 88 |
@dataclass
|
| 89 |
class DrBenchmarkConfig(datasets.BuilderConfig):
|
| 90 |
name: str = None
|
|
@@ -134,6 +167,11 @@ class MANTRAGSC(datasets.GeneratorBasedBuilder):
|
|
| 134 |
names = names,
|
| 135 |
)
|
| 136 |
),
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 137 |
}
|
| 138 |
)
|
| 139 |
|
|
@@ -258,6 +296,7 @@ class MANTRAGSC(datasets.GeneratorBasedBuilder):
|
|
| 258 |
"id": u["@id"],
|
| 259 |
"tokens": [t["token"] for t in tokens],
|
| 260 |
"ner_tags": [n[0] for n in ner_tags],
|
|
|
|
| 261 |
}
|
| 262 |
|
| 263 |
all_res.append(obj)
|
|
|
|
| 85 |
"patents": "Patent",
|
| 86 |
}
|
| 87 |
|
| 88 |
+
class StringIndex:
    """Bucketed vocabulary index for fast out-of-vocabulary (OOV) lookups.

    Terms are grouped by ``(first character, length)`` so a query only has
    to be checked against the one small bucket it could possibly belong to.
    """

    def __init__(self, vocab):
        """Build the index from an iterable of terms.

        Args:
            vocab: iterable of vocabulary terms; empty strings are skipped.
        """
        # Maps (first_char, term_length) -> set of terms with that signature.
        # Sets give O(1) membership in find(); the original list buckets
        # made each lookup O(len(bucket)).
        self.vocab_struct = {}

        print("Start building the index!")
        for term in vocab:
            if not term:
                continue
            # Index terms by their first letter and length.
            self.vocab_struct.setdefault((term[0], len(term)), set()).add(term)
        print("Finished building the index!")

    def find(self, t):
        """Return ``"is_not_oov"`` if *t* was indexed, else ``"is_oov"``.

        An empty string is reported as OOV instead of raising IndexError
        (the original crashed on ``t[0]`` for ``t == ""``).
        """
        if not t:
            return "is_oov"
        bucket = self.vocab_struct.get((t[0], len(t)))
        if bucket is None:
            return "is_oov"
        return "is_not_oov" if t in bucket else "is_oov"
|
| 118 |
+
|
| 119 |
+
# Module-level OOV index built once from the NACHOS lowercased vocabulary.
# Use a context manager so the file handle is closed deterministically
# (the original open() leaked the handle), and pin the encoding.
with open("./vocabulary_nachos_lowercased.txt", "r", encoding="utf-8") as _vocab_file:
    _VOCAB = StringIndex(vocab=_vocab_file.read().split("\n"))
|
| 120 |
+
|
| 121 |
@dataclass
|
| 122 |
class DrBenchmarkConfig(datasets.BuilderConfig):
|
| 123 |
name: str = None
|
|
|
|
| 167 |
names = names,
|
| 168 |
)
|
| 169 |
),
|
| 170 |
+
"is_oov": datasets.Sequence(
|
| 171 |
+
datasets.features.ClassLabel(
|
| 172 |
+
names=['is_not_oov', 'is_oov'],
|
| 173 |
+
),
|
| 174 |
+
),
|
| 175 |
}
|
| 176 |
)
|
| 177 |
|
|
|
|
| 296 |
"id": u["@id"],
|
| 297 |
"tokens": [t["token"] for t in tokens],
|
| 298 |
"ner_tags": [n[0] for n in ner_tags],
|
| 299 |
+
"is_oov": [_VOCAB.find(t["token"].lower()) for t in tokens],
|
| 300 |
}
|
| 301 |
|
| 302 |
all_res.append(obj)
|