schlevik committed
Commit 56aff8b · 1 Parent(s): ffb662c
change the bigbio text format to correct text
essai.py
CHANGED

@@ -128,95 +128,122 @@ class ESSAI(datasets.GeneratorBasedBuilder):

     def _generate_examples(self, datadir):
         key = 0
-        for file in ["ESSAI_neg.txt", "ESSAI_spec.txt"]:
[old lines 132-182 were not captured when this page was rendered; only stray fragments ("id": key, / "text": / key += 1) survive]
-        elif self.config.schema == "bigbio_kb":
-            for doc_id in set(dic["id_docs"]):
-                idces = np.argwhere(dic["id_docs"] == doc_id)[:, 0]
-                text = [dic["words"][id] for id in idces]
-                POS_tags_ = [dic["POS_tags"][id] for id in idces]
-
-                data = {
-                    "id": str(key),
-                    "document_id": doc_id,
-                    "passages": [],
-                    "entities": [],
-                    "relations": [],
-                    "events": [],
-                    "coreferences": [],
                 }
                 key += 1

-                data["passages"] = [
-                    {
-                        "id": str(key + i),
-                        "type": "sentence",
-                        "text": [text[i]],
-                        "offsets": [[i, i + 1]],
-                    }
-                    for i in range(len(text))
-                ]
-                key += len(text)
-
-                for i in range(len(text)):
-                    entity = {
-                        "id": key,
-                        "type": "POS_tag",
-                        "text": [POS_tags_[i]],
-                        "offsets": [[i, i + 1]],
-                        "normalized": [],
-                    }
-                    data["entities"].append(entity)
-                    key += 1
-
-                yield key, data

     def _generate_examples(self, datadir):
         key = 0
+        # for file in ["ESSAI_neg.txt", "ESSAI_spec.txt"]:
+        filepath = os.path.join(datadir, "ESSAI_neg.txt")
+
+        filepath2 = os.path.join(datadir, 'ESSAI_spec.txt')
+        # label = "negation" if "neg" in file else "speculation"
+        id_docs = []
+        id_docs_2 = []
+        id_words = []
+        words = []
+        lemmas = []
+        POS_tags = []
+        NER_tags = []
+        NER_tags_2 = []
+
+        with open(filepath) as f:
+            for line in f.readlines():
+                line_content = line.split("\t")
+                if len(line_content) > 1:
+                    id_docs.append(line_content[0])
+                    id_words.append(line_content[1])
+                    words.append(line_content[2])
+                    lemmas.append(line_content[3])
+                    POS_tags.append(line_content[4])
+                    NER_tags.append(line_content[5].strip())
+
+        with open(filepath2) as f:
+            for line in f.readlines():
+                line_content = line.split("\t")
+                if len(line_content) > 1:
+                    id_docs_2.append(line_content[0])
+                    NER_tags_2.append(line_content[5].strip())
+
+        dic = {
+            "id_docs": np.array(list(map(int, id_docs))),
+            "id_words": id_words,
+            "words": words,
+            "lemmas": lemmas,
+            "POS_tags": POS_tags,
+            "NER_tags": NER_tags
+        }
+        dic2 = {
+            "id_docs": np.array(list(map(int, id_docs_2))),
+            "NER_tags": NER_tags_2
+        }
+        if self.config.schema == "source":
+            for doc_id in set(dic["id_docs"]):
+                idces = np.argwhere(dic["id_docs"] == doc_id)[:, 0]
+                text = [dic["words"][id] for id in idces]
+                text_lemmas = [dic["lemmas"][id] for id in idces]
+                POS_tags_ = [dic["POS_tags"][id] for id in idces]
+                yield key, {
+                    "id": key,
+                    "document_id": doc_id,
+                    "text": text,
+                    "lemmas": text_lemmas,
+                    "POS_tags": POS_tags_,
+                    "labels": [],
+                }
+                key += 1
+        elif self.config.schema == "bigbio_text":
+            for doc_id in set(dic["id_docs"]):
+                idces = np.argwhere(dic["id_docs"] == doc_id)[:, 0]
+                idces_2 = np.argwhere(dic2["id_docs"] == doc_id)[:, 0]
+
+                text = " ".join([dic["words"][id] for id in idces])
+                label_tokens = [dic["NER_tags"][id] for id in idces]
+                label2_tokens = [dic2["NER_tags"][id] for id in idces_2]
+                label_ = []
+                if not all(l == '***' for l in label_tokens):
+                    label_.append("negation")
+                if not all(l == '***' for l in label2_tokens):
+                    label_.append("speculation")
+                yield key, {
+                    "id": key,
+                    "document_id": doc_id,
+                    "text": text,
+                    "labels": label_,
+                }
+                key += 1
+        elif self.config.schema == "bigbio_kb":
+            for doc_id in set(dic["id_docs"]):
+                idces = np.argwhere(dic["id_docs"] == doc_id)[:, 0]
+                text = [dic["words"][id] for id in idces]
+                POS_tags_ = [dic["POS_tags"][id] for id in idces]
+
+                data = {
+                    "id": str(key),
+                    "document_id": doc_id,
+                    "passages": [],
+                    "entities": [],
+                    "relations": [],
+                    "events": [],
+                    "coreferences": [],
+                }
+                key += 1
+
+                data["passages"] = [
+                    {
+                        "id": str(key + i),
+                        "type": "sentence",
+                        "text": [text[i]],
+                        "offsets": [[i, i + 1]],
                     }
+                    for i in range(len(text))
+                ]
+                key += len(text)
+
+                for i in range(len(text)):
+                    entity = {
                         "id": key,
+                        "type": "POS_tag",
+                        "text": [POS_tags_[i]],
+                        "offsets": [[i, i + 1]],
+                        "normalized": [],
                     }
+                    data["entities"].append(entity)
                     key += 1

+                yield key, data
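
For reference, the document-level labelling that this commit introduces for the bigbio_text schema can be exercised on its own. The sketch below is illustrative and not part of the commit: the toy rows, the "neg_cue" tag value and the French tokens are made up, but the grouping by document id via np.argwhere and the rule "any tag other than '***' marks the document" follow the added code.

import numpy as np

# Toy rows in the 6-column, tab-separated layout the loader expects:
# doc id, word id, word, lemma, POS tag, NER tag ('***' means no cue on this token).
neg_rows = [
    (1, 1, "Pas", "pas", "ADV", "neg_cue"),
    (1, 2, "d'effet", "effet", "NC", "***"),
    (2, 1, "Effet", "effet", "NC", "***"),
]
spec_rows = [
    (1, 1, "Pas", "pas", "ADV", "***"),
    (1, 2, "d'effet", "effet", "NC", "***"),
    (2, 1, "Effet", "effet", "NC", "***"),
]

id_docs = np.array([r[0] for r in neg_rows])
words = [r[2] for r in neg_rows]
ner_neg = [r[5] for r in neg_rows]
id_docs_2 = np.array([r[0] for r in spec_rows])
ner_spec = [r[5] for r in spec_rows]

key = 0
for doc_id in sorted(set(id_docs)):
    idces = np.argwhere(id_docs == doc_id)[:, 0]
    idces_2 = np.argwhere(id_docs_2 == doc_id)[:, 0]
    # One example per document: tokens are joined into a single string,
    # and a label is attached as soon as any token carries a cue tag.
    text = " ".join(words[i] for i in idces)
    labels = []
    if not all(ner_neg[i] == "***" for i in idces):
        labels.append("negation")
    if not all(ner_spec[i] == "***" for i in idces_2):
        labels.append("speculation")
    print(key, {"id": key, "document_id": int(doc_id), "text": text, "labels": labels})
    key += 1

# Expected output: document 1 is labelled ["negation"], document 2 gets no label.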
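
A possible way to exercise the loader end to end, assuming BigBio-style config names (e.g. "essai_bigbio_text"), a locally available copy of the ESSAI corpus, and a datasets version that still loads dataset scripts; none of these details are shown in this diff.

import datasets

ds = datasets.load_dataset(
    "essai.py",                 # path to this loader script
    name="essai_bigbio_text",   # hypothetical config name
    data_dir="/path/to/ESSAI",  # directory holding ESSAI_neg.txt and ESSAI_spec.txt
    split="train",
)
print(ds[0]["text"], ds[0]["labels"])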