# Provenance: uploaded via Hugging Face upload-large-folder tool (commit 8f79b61, verified).
import pickle as pickle
import os
import pandas as pd
import torch
# Dataset wrapper.
class RE_Dataset(torch.utils.data.Dataset):
    """Torch Dataset pairing pre-tokenized encodings with integer labels."""

    def __init__(self, tokenized_dataset, labels):
        # tokenized_dataset: dict-like of field name -> tensor with one row
        # per example (as returned by a HF tokenizer with return_tensors="pt").
        # labels: sequence of integer class ids, one per example.
        self.tokenized_dataset = tokenized_dataset
        self.labels = labels

    def __getitem__(self, idx):
        """Return example *idx* as a dict of detached tensors plus 'labels'."""
        sample = {}
        for name, tensor in self.tokenized_dataset.items():
            sample[name] = tensor[idx].clone().detach()
        sample['labels'] = torch.tensor(self.labels[idx])
        return sample

    def __len__(self):
        # Number of examples equals the number of labels.
        return len(self.labels)
# ์ฒ˜์Œ ๋ถˆ๋Ÿฌ์˜จ tsv ํŒŒ์ผ์„ ์›ํ•˜๋Š” ํ˜•ํƒœ์˜ DataFrame์œผ๋กœ ๋ณ€๊ฒฝ ์‹œ์ผœ์ค๋‹ˆ๋‹ค.
# ๋ณ€๊ฒฝํ•œ DataFrame ํ˜•ํƒœ๋Š” baseline code description ์ด๋ฏธ์ง€๋ฅผ ์ฐธ๊ณ ํ•ด์ฃผ์„ธ์š”.
def preprocessing_dataset(dataset, label_type):
label = []
for i in dataset[8]:
if i == 'blind':
label.append(100)
else:
label.append(label_type[i])
out_dataset = pd.DataFrame({'sentence':dataset[1],'entity_01':dataset[2],'entity_02':dataset[5],'label':label,})
return out_dataset
# tsv ํŒŒ์ผ์„ ๋ถˆ๋Ÿฌ์˜ต๋‹ˆ๋‹ค.
def load_data(dataset_dir):
# load label_type, classes
with open('/opt/ml/input/data/label_type.pkl', 'rb') as f:
label_type = pickle.load(f)
# load dataset
dataset = pd.read_csv(dataset_dir, delimiter='\t', header=None)
# preprecessing dataset
dataset = preprocessing_dataset(dataset, label_type)
return dataset
# bert input์„ ์œ„ํ•œ tokenizing.
# tip! ๋‹ค์–‘ํ•œ ์ข…๋ฅ˜์˜ tokenizer์™€ special token๋“ค์„ ํ™œ์šฉํ•˜๋Š” ๊ฒƒ์œผ๋กœ๋„ ์ƒˆ๋กœ์šด ์‹œ๋„๋ฅผ ํ•ด๋ณผ ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค.
# baseline code์—์„œ๋Š” 2๊ฐ€์ง€ ๋ถ€๋ถ„์„ ํ™œ์šฉํ–ˆ์Šต๋‹ˆ๋‹ค.
def tokenized_dataset(dataset, tokenizer):
concat_entity = []
for e01, e02 in zip(dataset['entity_01'], dataset['entity_02']):
temp = ''
temp = e01 + '[SEP]' + e02
concat_entity.append(temp)
tokenized_sentences = tokenizer(
concat_entity,
list(dataset['sentence']),
return_tensors="pt",
padding=True,
truncation=True,
max_length=100,
add_special_tokens=True,
)
return tokenized_sentences