# Hugging Face Hub upload metadata (not code):
# Dorothydu's picture
# Add files using upload-large-folder tool
# 8f79b61 verified
from transformers import AutoTokenizer, BertForSequenceClassification, Trainer, TrainingArguments, BertConfig, BertTokenizer
from torch.utils.data import DataLoader
from data_loader.load_data import *
import pandas as pd
import torch
import pickle as pickle
import numpy as np
import argparse
from tqdm import tqdm
from config import YamlConfigManager
from pprint import pprint
from model.model import MultilabeledSequenceModel
def load_state(model_path):
    """Load a checkpoint state_dict, normalizing ``nn.DataParallel`` keys.

    Checkpoints saved from a multi-GPU ``nn.DataParallel`` model prefix every
    parameter key with ``module.``; strip that prefix so the weights load into
    a single-GPU model. Stripping is a no-op for single-GPU checkpoints.

    Note: the original code attempted this with try/except, but the except
    branch was unreachable — ``torch.load`` succeeds for multi-GPU files too,
    so the prefix was never stripped. Now the normalization always runs.

    Args:
        model_path: Path to a file produced by ``torch.save``.

    Returns:
        dict: state_dict with any leading ``module.`` prefixes removed.
    """
    # map_location='cpu' lets CPU-only hosts load GPU-saved checkpoints;
    # the model is moved to the target device later by the caller.
    state_dict = torch.load(model_path, map_location='cpu')
    return {k[7:] if k.startswith('module.') else k: v
            for k, v in state_dict.items()}
def inference(tokenized_sent, device, states, tokenizer_len, cfg):
    """Run ensemble inference, averaging softmax probabilities across folds.

    Args:
        tokenized_sent: Dataset yielding dicts with ``input_ids``,
            ``attention_mask`` and ``token_type_ids`` tensors.
        device: torch.device to run the models on.
        states: list of state_dicts, one per trained fold.
        tokenizer_len: tokenizer vocabulary size, used to size the embedding.
        cfg: config object; ``cfg.values.model_name`` selects the backbone.

    Returns:
        np.ndarray of shape (num_samples, 42): fold-averaged class
        probabilities for every test example.
    """
    dataloader = DataLoader(tokenized_sent, batch_size=64, shuffle=False)

    # Build and load each fold's model ONCE. The original reconstructed and
    # re-loaded every model inside the per-batch loop, paying the full model
    # construction + weight-loading cost batches*folds times instead of folds.
    models = []
    for state in states:
        # 42 relation classes; dropout 0.0 because we only run eval.
        model = MultilabeledSequenceModel(cfg.values.model_name, 42, tokenizer_len, 0.0)
        model.load_state_dict(state)
        model.eval()
        model.to(device)
        models.append(model)

    probs = []
    with torch.no_grad():
        for data in tqdm(dataloader):
            fold_preds = []
            for model in models:
                outputs = model(
                    input_ids=data['input_ids'].to(device),
                    attention_mask=data['attention_mask'].to(device),
                    token_type_ids=data['token_type_ids'].to(device)
                )
                fold_preds.append(outputs.softmax(1).to('cpu').numpy())
            # Average the fold probabilities for this batch.
            probs.append(np.mean(fold_preds, axis=0))
    return np.concatenate(probs)
def load_test_dataset(dataset_dir, tokenizer):
    """Read the test tsv at *dataset_dir* and tokenize it.

    Returns:
        (tokenized_features, labels): tokenizer output for the test split and
        the raw label column as a numpy array.
    """
    raw = load_data(dataset_dir)
    labels = raw['label'].values
    tokenized = tokenized_dataset(raw, tokenizer)
    return tokenized, labels
def main(cfg):
    """Run 5-fold ensemble inference on the test set and write the submission.

    Loads one checkpoint per fold (``./model_{fold}.bin``), averages the
    folds' probabilities over the test tsv, and writes the argmax class ids
    to ``/opt/ml/submission.csv`` with a single ``pred`` column.
    """
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

    # Tokenizer must match the backbone named in the config.
    tokenizer = AutoTokenizer.from_pretrained(cfg.values.model_name)

    # Build the tokenized test dataset.
    test_path = "/opt/ml/input/data/test/test.tsv"
    tokenized, labels = load_test_dataset(test_path, tokenizer)
    dataset = RE_Dataset(tokenized, labels)

    # One saved state_dict per fold.
    fold_states = [load_state(f'./model_{fold}.bin') for fold in range(5)]
    probs = inference(dataset, device, fold_states, len(tokenizer), cfg)

    # Submission format: a single 'pred' column with predicted class ids.
    predictions = np.argmax(probs, axis=-1)
    submission = pd.DataFrame(predictions, columns=['pred'])
    submission.to_csv('/opt/ml/submission.csv', index=False)
if __name__ == '__main__':
    # CLI: the yaml file to read and the named config section within it.
    parser = argparse.ArgumentParser()
    parser.add_argument('--config_file_path', type=str, default='/opt/ml/code/config.yml')
    parser.add_argument('--config', type=str, default='base')
    cli_args = parser.parse_args()

    config = YamlConfigManager(cli_args.config_file_path, cli_args.config)
    # Echo the resolved config so the run is reproducible from logs.
    pprint(config.values)
    print('\n')
    main(config)