Datasets: JiajunJerryHuang committed
Commit: 30903b5
Parent(s): d9814f8
Commit message: first
Files changed: test/test.py (+290, -0)

test/test.py (ADDED)
@@ -0,0 +1,290 @@
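"""Two-stage HotpotQA (distractor setting) test script: a RoBERTa retriever
(RobertaRetriever) selects a pair of documents for each question, then a
RoBERTa reader (RobertaReader) predicts supporting facts and the answer over
that pair. Predictions are written to --pred_filename as
{"answer": {...}, "sp": {...}}."""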
import argparse
import json
import re
import string

import jsonlines
import numpy as np
import torch
from tqdm import tqdm
from transformers import RobertaTokenizerFast

from model.RobertaReader import RobertaReader
from model.RobertaRetriever import RobertaRetriever

parser = argparse.ArgumentParser()
parser.add_argument("--Hotpot_test_path", type=str, default="hotpot_dev_distractor_v1.json")
parser.add_argument("--pred_filename", type=str, default="pred.json")
parser.add_argument("--re_checkpoint", type=str)
parser.add_argument("--qa_checkpoint", type=str)
args = parser.parse_args()

device = "cuda"
seed = 42

# Reproducibility
def set_seed(seed):
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)


set_seed(seed)


def normalize_answer(s):
    """Standard HotpotQA answer normalization: lowercase, strip punctuation
    and articles, collapse whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))

class HotpotRETestPipe:
    """Tokenizes questions and all candidate documents for the retriever stage."""

    def __init__(self, tokenizer, file_path):
        self.max_length = 512
        self.tokenizer = tokenizer
        self.file_path = file_path
        self.question_ids_list, self.question_length_list = [], []
        self.doc_length_list, self.document_ids_list, self.doc_num_list = [], [], []

    def load_data(self):
        fields = ["_id", "question", "context"]
        datadict = {field: [] for field in fields}
        with jsonlines.open(self.file_path) as f:
            for data in f:
                for i in range(len(data)):
                    for field in fields:
                        datadict[field].append(data[i][field])
        self.datadict = datadict

    def question_tokenize(self, question):
        tokenized_question = self.tokenizer(question, truncation=True, max_length=self.max_length)
        question_ids = tokenized_question["input_ids"]
        return question_ids, len(question_ids)

    def document_tokenize(self, context):
        doc_length = []
        document_ids = []
        doc_num = len(context)
        for i in range(doc_num):
            # Each context entry is [title, [sentence, ...]]; title and sentences
            # are concatenated into a single passage.
            tokenized_document = self.tokenizer(
                "".join([context[i][0]] + context[i][1]),
                truncation=True,
                max_length=self.max_length,
            )
            doc_length.append(len(tokenized_document["input_ids"][1:]))  # without [CLS]
            document_ids.append(tokenized_document["input_ids"][1:])  # without [CLS]
        return doc_length, document_ids, doc_num

    def process(self):
        self.load_data()
        for qes in self.datadict["question"]:
            question_ids, question_length = self.question_tokenize(qes)
            self.question_ids_list.append(question_ids)
            self.question_length_list.append(question_length)
        for doc in self.datadict["context"]:
            doc_length, document_ids, doc_num = self.document_tokenize(doc)
            self.doc_length_list.append(doc_length)
            self.document_ids_list.append(document_ids)
            self.doc_num_list.append(doc_num)
        return (
            self.datadict["_id"],
            self.question_ids_list,
            self.question_length_list,
            self.document_ids_list,
            self.doc_length_list,
            self.doc_num_list,
        )

class HotpotQATestPipe:
    """Builds reader inputs from the two documents the retriever selected per question."""

    def __init__(self, tokenizer, file_path, selected_pair_dict):
        self.max_length = 512
        self.tokenizer = tokenizer
        self.file_path = file_path
        self.selected_pair_dict = selected_pair_dict
        self.title_dict = {}
        self.DOC1_SEP_num_dict = {}
        self.input_ids_list, self.attention_mask_list, self.SEP_index_list = [], [], []
        # Sentence separator token.
        self.SEP = "</e>"
        self.SEP_id = tokenizer.convert_tokens_to_ids(self.SEP)
        # Document separator token.
        self.DOC = "</d>"
        self.DOC_id = tokenizer.convert_tokens_to_ids(self.DOC)

    def load_data(self):
        fields = ["_id", "question", "context"]
        datadict = {field: [] for field in fields}
        with jsonlines.open(self.file_path) as f:
            for data in f:
                for i in range(len(data)):
                    for field in fields:
                        datadict[field].append(data[i][field])
        self.datadict = datadict
        self.length = len(datadict["_id"])

    def _tokenize(self):
        for i in range(self.length):
            _id = self.datadict["_id"][i]
            question = self.datadict["question"][i]
            context_list = self.datadict["context"][i]
            doc1 = context_list[self.selected_pair_dict[_id][0][0]]
            doc2 = context_list[self.selected_pair_dict[_id][0][1]]
            self.title_dict[_id] = [doc1[0], doc2[0]]
            self.DOC1_SEP_num_dict[_id] = len(doc1[1])
            # Prefix each title with </d> and each sentence with </e>.
            context1 = [self.DOC + " " + doc1[0]] + [self.SEP + " " + c for c in doc1[1]]
            context2 = [self.DOC + " " + doc2[0]] + [self.SEP + " " + c for c in doc2[1]]
            context = " ".join(context1 + context2)
            output = self.tokenizer(
                question,
                context,
                truncation=True,
                max_length=self.max_length,
                return_offsets_mapping=True,
            )
            self.input_ids_list.append(output["input_ids"])
            self.attention_mask_list.append(output["attention_mask"])

    def find_SEP(self, input_ids):
        # Positions of the </e> sentence markers inside one tokenized example.
        return [i for i, token_id in enumerate(input_ids) if token_id == self.SEP_id]

    def process(self):
        self.load_data()
        self._tokenize()
        for i in range(self.length):
            self.SEP_index_list.append(self.find_SEP(self.input_ids_list[i]))
        return (
            self.datadict["_id"],
            self.title_dict,
            self.DOC1_SEP_num_dict,
            self.input_ids_list,
            self.attention_mask_list,
            self.SEP_index_list,
        )

def padding(input_list):
    # Right-pad every sequence to the longest one; the true lengths are passed
    # to the model separately.
    max_length = max(len(x) for x in input_list)
    for i in range(len(input_list)):
        input_list[i] = input_list[i] + [0] * (max_length - len(input_list[i]))
    return input_list

def test_re():
    # Stage 1: score candidate documents per question with the retriever and
    # keep the best document pair.
    re_tokenizer = RobertaTokenizerFast.from_pretrained("roberta-large")
    re_model = RobertaRetriever.from_pretrained("roberta-large")
    print("RE Checkpoint:", args.re_checkpoint, "Data:", args.Hotpot_test_path)
    re_model.load_state_dict(torch.load(args.re_checkpoint))
    re_model.to(device)
    re_model.eval()
    repipe = HotpotRETestPipe(tokenizer=re_tokenizer, file_path=args.Hotpot_test_path)
    id_list, question_ids_list, question_length_list, document_ids_list, doc_length_list, doc_num_list = repipe.process()
    selected_pair_dict = {}
    length = len(id_list)
    for i in tqdm(range(length), desc="RE Testing:"):
        question_ids = torch.LongTensor([question_ids_list[i]]).to(device)
        question_length = torch.LongTensor([question_length_list[i]]).to(device)
        document_ids = torch.LongTensor([padding(document_ids_list[i])]).to(device)
        doc_length = torch.LongTensor([doc_length_list[i]]).to(device)
        doc_num = torch.LongTensor([doc_num_list[i]]).to(device)

        with torch.no_grad():
            re_output = re_model(
                question_ids=question_ids,
                document_ids=document_ids,
                question_length=question_length,
                doc_length=doc_length,
                doc_num=doc_num,
            )
        selected_pair_dict[id_list[i]] = re_output["selected_pair"].cpu().tolist()
        torch.cuda.empty_cache()
    return selected_pair_dict

def test_qa(selected_pair_dict):
    # Stage 2: run the reader over each selected pair to predict supporting
    # facts and the final answer.
    sp_pred = {}
    answer_pred = {}
    SEP = "</e>"
    DOC = "</d>"
    qa_tokenizer = RobertaTokenizerFast.from_pretrained("roberta-large")
    qa_tokenizer.add_tokens([SEP, DOC])
    qa_model = RobertaReader.from_pretrained("roberta-large")
    qa_model.resize_token_embeddings(len(qa_tokenizer))
    print("QA Checkpoint:", args.qa_checkpoint, "Data:", args.Hotpot_test_path)
    qa_model.load_state_dict(torch.load(args.qa_checkpoint))
    qa_model.to(device)
    qa_model.eval()
    qapipe = HotpotQATestPipe(tokenizer=qa_tokenizer, file_path=args.Hotpot_test_path, selected_pair_dict=selected_pair_dict)
    id_list, selected_title_dict, DOC1_SEP_num_dict, input_ids_list, attention_mask_list, SEP_index_list = qapipe.process()
    length = len(id_list)
    for i in tqdm(range(length), desc="QA Testing:"):
        input_ids = torch.LongTensor([input_ids_list[i]]).to(device)
        attention_mask = torch.LongTensor([attention_mask_list[i]]).to(device)
        SEP_index = torch.LongTensor([SEP_index_list[i]]).to(device)
        with torch.no_grad():
            qa_output = qa_model(input_ids=input_ids, attention_mask=attention_mask, sentence_index=SEP_index)

        # Supporting facts: sentence s belongs to doc1 while s is below the
        # number of doc1 sentences, otherwise to doc2 (re-indexed from 0).
        sentence_predictions = qa_output["sentence_predictions"].cpu().tolist()[0]
        title1 = selected_title_dict[id_list[i]][0]
        title2 = selected_title_dict[id_list[i]][1]
        sp = []
        for s in range(len(sentence_predictions)):
            if sentence_predictions[s] == 1:
                if s < DOC1_SEP_num_dict[id_list[i]]:
                    sp.append([title1, s])
                else:
                    sp.append([title2, s - DOC1_SEP_num_dict[id_list[i]]])
        sp_pred[id_list[i]] = sp

        # Answer: type 0 -> "no", type 1 -> "yes", otherwise decode the
        # predicted span.
        anstype = torch.argmax(qa_output["type_logits"]).item()
        if anstype == 0:
            answer_pred[id_list[i]] = "no"
        elif anstype == 1:
            answer_pred[id_list[i]] = "yes"
        else:
            start = torch.argmax(qa_output["start_logits"]).item()
            end = torch.argmax(qa_output["end_logits"]).item()
            span_id = input_ids_list[i][start : end + 1]
            if not span_id:  # a slice is never None, but it is empty when end < start
                answer_pred[id_list[i]] = ""
            else:
                answer_pred[id_list[i]] = normalize_answer(qa_tokenizer.decode(span_id))
        torch.cuda.empty_cache()
    return sp_pred, answer_pred

def main():
    selected_pair_dict = test_re()
    sp_pred, answer_pred = test_qa(selected_pair_dict)
    with open(args.pred_filename, "w", encoding="utf-8") as f:
        json.dump({"answer": answer_pred, "sp": sp_pred}, f, ensure_ascii=False)


if __name__ == "__main__":
    main()
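# Example invocation (a sketch; the checkpoint paths below are illustrative,
# not files shipped with this commit):
#   python test/test.py \
#       --Hotpot_test_path hotpot_dev_distractor_v1.json \
#       --pred_filename pred.json \
#       --re_checkpoint checkpoints/re_model.pt \
#       --qa_checkpoint checkpoints/qa_model.pt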