code stringlengths 17 6.64M |
|---|
def _CalACC(model, dataloader):
    """Run *model* over *dataloader* (batch size 1) and collect accuracy.

    Returns (accuracy, predicted-label list, true-label list).
    """
    model.eval()
    pred_list, label_list = [], []
    correct = 0
    with torch.no_grad():
        for data in dataloader:
            # Prediction
            batch_input_tokens, batch_labels, batch_speaker_tokens = data
            batch_input_tokens = batch_input_tokens.cuda()
            batch_labels = batch_labels.cuda()
            pred_logits = model(batch_input_tokens, batch_speaker_tokens)
            # Calculation: .item() assumes exactly one sample per batch
            pred_label = pred_logits.argmax(1).item()
            true_label = batch_labels.item()
            pred_list.append(pred_label)
            label_list.append(true_label)
            correct += int(pred_label == true_label)
    # valid as accuracy only because each batch holds one sample
    acc = correct / len(dataloader)
    return acc, pred_list, label_list
|
def _SaveModel(model, path):
if (not os.path.exists(path)):
os.makedirs(path)
torch.save(model.state_dict(), os.path.join(path, 'model.bin'))
|
def encode_right_truncated(text, tokenizer, max_length=511):
    """Tokenize *text*, keep only the last *max_length* tokens, and
    prepend the CLS token id (BERT/RoBERTa style)."""
    tail = tokenizer.tokenize(text)[-max_length:]
    return [tokenizer.cls_token_id] + tokenizer.convert_tokens_to_ids(tail)
|
def padding(ids_list, tokenizer):
    """Right-pad every id sequence to the batch maximum and stack into a tensor."""
    max_len = max((len(ids) for ids in ids_list), default=0)
    padded = [ids + [tokenizer.pad_token_id] * (max_len - len(ids)) for ids in ids_list]
    return torch.tensor(padded)
|
def encode_right_truncated_gpt(text, tokenizer, max_length=511):
    """Tokenize *text*, keep only the last *max_length* tokens, and
    append the CLS token id at the end (GPT reads the final position)."""
    tail = tokenizer.tokenize(text)[-max_length:]
    return tokenizer.convert_tokens_to_ids(tail) + [tokenizer.cls_token_id]
|
def padding_gpt(ids_list, tokenizer):
    """Left-pad every id sequence to the batch maximum and stack into a tensor.

    GPT classifies from the final position, so padding goes on the left.
    """
    max_len = max((len(ids) for ids in ids_list), default=0)
    padded = [[tokenizer.pad_token_id] * (max_len - len(ids)) + ids for ids in ids_list]
    return torch.tensor(padded)
|
def make_batch_roberta(sessions):
    """Collate sessions into RoBERTa inputs.

    Each session is (dialog_data, label_list, sentidict) with dialog_data =
    (context_speaker, context, emotion, sentiment).  Returns
    (padded input tokens, label tensor, per-sample padded speaker-utterance tokens).
    """
    batch_input, batch_labels, batch_speaker_tokens = [], [], []
    for session in sessions:
        context_speaker, context, emotion, sentiment = session[0]
        label_list = session[1]
        now_speaker = context_speaker[-1]

        speaker_utt_list = []
        parts = []
        for turn, (speaker, utt) in enumerate(zip(context_speaker, context)):
            # '<sN>' tags mark who said each utterance
            parts.append('<s' + str(speaker + 1) + '> ')
            parts.append(utt + ' ')
            # earlier utterances by the current (last) speaker feed the speaker track
            if turn < len(context_speaker) - 1 and speaker == now_speaker:
                speaker_utt_list.append(encode_right_truncated(utt, roberta_tokenizer))

        concat_string = ''.join(parts).strip()
        batch_input.append(encode_right_truncated(concat_string, roberta_tokenizer))

        # more than 3 labels means the emotion task; otherwise sentiment
        if len(label_list) > 3:
            batch_labels.append(label_list.index(emotion))
        else:
            batch_labels.append(label_list.index(sentiment))
        batch_speaker_tokens.append(padding(speaker_utt_list, roberta_tokenizer))

    batch_input_tokens = padding(batch_input, roberta_tokenizer)
    return (batch_input_tokens, torch.tensor(batch_labels), batch_speaker_tokens)
|
def make_batch_bert(sessions):
    """Collate sessions into BERT inputs.

    Each session is (dialog_data, label_list, sentidict) with dialog_data =
    (context_speaker, context, emotion, sentiment).  Returns
    (padded input tokens, label tensor, per-sample padded speaker-utterance tokens).
    """
    batch_input, batch_labels, batch_speaker_tokens = [], [], []
    for session in sessions:
        context_speaker, context, emotion, sentiment = session[0]
        label_list = session[1]
        now_speaker = context_speaker[-1]

        speaker_utt_list = []
        parts = []
        for turn, (speaker, utt) in enumerate(zip(context_speaker, context)):
            # '<sN>' tags mark who said each utterance
            parts.append('<s' + str(speaker + 1) + '> ')
            parts.append(utt + ' ')
            # earlier utterances by the current (last) speaker feed the speaker track
            if turn < len(context_speaker) - 1 and speaker == now_speaker:
                speaker_utt_list.append(encode_right_truncated(utt, bert_tokenizer))

        concat_string = ''.join(parts).strip()
        batch_input.append(encode_right_truncated(concat_string, bert_tokenizer))

        # more than 3 labels means the emotion task; otherwise sentiment
        if len(label_list) > 3:
            batch_labels.append(label_list.index(emotion))
        else:
            batch_labels.append(label_list.index(sentiment))
        batch_speaker_tokens.append(padding(speaker_utt_list, bert_tokenizer))

    batch_input_tokens = padding(batch_input, bert_tokenizer)
    return (batch_input_tokens, torch.tensor(batch_labels), batch_speaker_tokens)
|
def make_batch_gpt(sessions):
    """Collate sessions into GPT inputs (left-padded, CLS appended last).

    Each session is (dialog_data, label_list, sentidict) with dialog_data =
    (context_speaker, context, emotion, sentiment).  Returns
    (padded input tokens, label tensor, per-sample padded speaker-utterance tokens).
    """
    batch_input, batch_labels, batch_speaker_tokens = [], [], []
    for session in sessions:
        context_speaker, context, emotion, sentiment = session[0]
        label_list = session[1]
        now_speaker = context_speaker[-1]

        speaker_utt_list = []
        parts = []
        for turn, (speaker, utt) in enumerate(zip(context_speaker, context)):
            # '<sN>' tags mark who said each utterance
            parts.append('<s' + str(speaker + 1) + '> ')
            parts.append(utt + ' ')
            # earlier utterances by the current (last) speaker feed the speaker track
            if turn < len(context_speaker) - 1 and speaker == now_speaker:
                speaker_utt_list.append(encode_right_truncated_gpt(utt, gpt_tokenizer, max_length=511))

        concat_string = ''.join(parts).strip()
        batch_input.append(encode_right_truncated_gpt(concat_string, gpt_tokenizer, max_length=511))

        # more than 3 labels means the emotion task; otherwise sentiment
        if len(label_list) > 3:
            batch_labels.append(label_list.index(emotion))
        else:
            batch_labels.append(label_list.index(sentiment))
        batch_speaker_tokens.append(padding_gpt(speaker_utt_list, gpt_tokenizer))

    batch_input_tokens = padding_gpt(batch_input, gpt_tokenizer)
    return (batch_input_tokens, torch.tensor(batch_labels), batch_speaker_tokens)
|
class MELD_loader(Dataset):
    """Dataset over a MELD transcript file.

    Each non-header line is 'speaker<TAB>utterance<TAB>emotion<TAB>sentiment';
    a blank line separates dialogues.  __getitem__ yields
    (dialog, labelList, sentidict) where dialog is
    [context_speaker, context, emotion, sentiment] for the conversation
    prefix ending at that turn.
    """

    def __init__(self, txt_file, dataclass):
        self.dialogs = []
        f = open(txt_file, 'r')
        dataset = f.readlines()
        f.close()
        temp_speakerList = []
        context = []
        context_speaker = []
        self.speakerNum = []
        # canonical emotion names ('sadness' -> 'sad'); the rest map to themselves
        emodict = {'anger': 'anger', 'disgust': 'disgust', 'fear': 'fear', 'joy': 'joy', 'neutral': 'neutral', 'sadness': 'sad', 'surprise': 'surprise'}
        self.sentidict = {'positive': ['joy'], 'negative': ['anger', 'disgust', 'fear', 'sadness'], 'neutral': ['neutral', 'surprise']}
        self.emoSet = set()
        self.sentiSet = set()
        for (i, data) in enumerate(dataset):
            # the first two lines of the file are header rows
            if (i < 2):
                continue
            # blank line => dialogue boundary: reset the running context
            if ((data == '\n') and (len(self.dialogs) > 0)):
                self.speakerNum.append(len(temp_speakerList))
                temp_speakerList = []
                context = []
                context_speaker = []
                continue
            (speaker, utt, emo, senti) = data.strip().split('\t')
            context.append(utt)
            if (speaker not in temp_speakerList):
                temp_speakerList.append(speaker)
            speakerCLS = temp_speakerList.index(speaker)
            context_speaker.append(speakerCLS)
            # one item per turn: the prefix up to and including this utterance
            self.dialogs.append([context_speaker[:], context[:], emodict[emo], senti])
            self.emoSet.add(emodict[emo])
            self.sentiSet.add(senti)
        self.emoList = sorted(self.emoSet)
        self.sentiList = sorted(self.sentiSet)
        # the label space depends on the task: emotion classes or sentiment classes
        if (dataclass == 'emotion'):
            self.labelList = self.emoList
        else:
            self.labelList = self.sentiList
        self.speakerNum.append(len(temp_speakerList))

    def __len__(self):
        return len(self.dialogs)

    def __getitem__(self, idx):
        return (self.dialogs[idx], self.labelList, self.sentidict)
|
class Emory_loader(Dataset):
    """Dataset over an EmoryNLP transcript file.

    Each line is 'speaker<TAB>utterance<TAB>emotion'; a blank line separates
    dialogues.  __getitem__ yields (dialog, labelList, sentidict) where
    dialog is [context_speaker, context, emotion, sentiment] for the
    conversation prefix ending at that turn.
    """

    def __init__(self, txt_file, dataclass):
        self.dialogs = []
        f = open(txt_file, 'r')
        dataset = f.readlines()
        f.close()
        'sentiment'
        # emotion-tag -> sentiment grouping
        pos = ['Joyful', 'Peaceful', 'Powerful']
        neg = ['Mad', 'Sad', 'Scared']
        neu = ['Neutral']
        emodict = {'Joyful': 'joy', 'Mad': 'mad', 'Peaceful': 'peaceful', 'Powerful': 'powerful', 'Neutral': 'neutral', 'Sad': 'sad', 'Scared': 'scared'}
        self.sentidict = {'positive': pos, 'negative': neg, 'neutral': neu}
        temp_speakerList = []
        context = []
        context_speaker = []
        self.speakerNum = []
        self.emoSet = set()
        self.sentiSet = set()
        for (i, data) in enumerate(dataset):
            # blank line => dialogue boundary: reset the running context
            if ((data == '\n') and (len(self.dialogs) > 0)):
                self.speakerNum.append(len(temp_speakerList))
                temp_speakerList = []
                context = []
                context_speaker = []
                continue
            (speaker, utt, emo) = data.strip().split('\t')
            context.append(utt)
            if (emo in pos):
                senti = 'positive'
            elif (emo in neg):
                senti = 'negative'
            elif (emo in neu):
                senti = 'neutral'
            else:
                # NOTE(review): an unknown tag leaves `senti` holding the previous
                # row's value (or unbound on the first row); emodict[emo] below
                # would raise KeyError in that case anyway
                print('ERROR emotion&sentiment')
            if (speaker not in temp_speakerList):
                temp_speakerList.append(speaker)
            speakerCLS = temp_speakerList.index(speaker)
            context_speaker.append(speakerCLS)
            # one item per turn: the prefix up to and including this utterance
            self.dialogs.append([context_speaker[:], context[:], emodict[emo], senti])
            self.emoSet.add(emodict[emo])
            self.sentiSet.add(senti)
        self.emoList = sorted(self.emoSet)
        self.sentiList = sorted(self.sentiSet)
        # the label space depends on the task: emotion classes or sentiment classes
        if (dataclass == 'emotion'):
            self.labelList = self.emoList
        else:
            self.labelList = self.sentiList
        self.speakerNum.append(len(temp_speakerList))

    def __len__(self):
        return len(self.dialogs)

    def __getitem__(self, idx):
        return (self.dialogs[idx], self.labelList, self.sentidict)
|
class IEMOCAP_loader(Dataset):
    """Dataset over an IEMOCAP transcript.

    Lines are 'speaker<TAB>utterance fields...<TAB>emotion'; a blank line
    separates dialogues.  __getitem__ yields (dialog, labelList, sentidict)
    where dialog = [context_speaker, context, emotion, sentiment] for the
    conversation prefix ending at that turn.
    """

    def __init__(self, txt_file, dataclass):
        self.dialogs = []
        # 'with' guarantees the handle is closed even if parsing raises
        with open(txt_file, 'r') as f:
            dataset = f.readlines()
        temp_speakerList = []
        context = []
        context_speaker = []
        self.speakerNum = []
        pos = ['exc', 'hap']
        neg = ['ang', 'fru', 'sad']
        neu = ['neu']
        emodict = {'ang': 'angry', 'exc': 'excited', 'fru': 'frustrated', 'hap': 'happy', 'neu': 'neutral', 'sad': 'sad'}
        self.sentidict = {'positive': pos, 'negative': neg, 'neutral': neu}
        self.emoSet = set()
        self.sentiSet = set()
        for (i, data) in enumerate(dataset):
            # blank line => dialogue boundary: reset the running context
            if ((data == '\n') and (len(self.dialogs) > 0)):
                self.speakerNum.append(len(temp_speakerList))
                temp_speakerList = []
                context = []
                context_speaker = []
                continue
            # split once instead of three times per line
            fields = data.strip().split('\t')
            speaker = fields[0]
            utt = ' '.join(fields[1:-1])
            emo = fields[-1]
            context.append(utt)
            if emo in pos:
                senti = 'positive'
            elif emo in neg:
                senti = 'negative'
            elif emo in neu:
                senti = 'neutral'
            else:
                # unknown tag: emodict[emo] below will raise KeyError anyway
                print('ERROR emotion&sentiment')
            if speaker not in temp_speakerList:
                temp_speakerList.append(speaker)
            speakerCLS = temp_speakerList.index(speaker)
            context_speaker.append(speakerCLS)
            # one item per turn: the prefix up to and including this utterance
            self.dialogs.append([context_speaker[:], context[:], emodict[emo], senti])
            self.emoSet.add(emodict[emo])
            # BUG FIX: sentiSet was never populated, so sentiList (and the
            # sentiment-task labelList) was always empty
            self.sentiSet.add(senti)
        self.emoList = sorted(self.emoSet)
        self.sentiList = sorted(self.sentiSet)
        # the label space depends on the task: emotion classes or sentiment classes
        if dataclass == 'emotion':
            self.labelList = self.emoList
        else:
            self.labelList = self.sentiList
        self.speakerNum.append(len(temp_speakerList))

    def __len__(self):
        return len(self.dialogs)

    def __getitem__(self, idx):
        return (self.dialogs[idx], self.labelList, self.sentidict)
|
class DD_loader(Dataset):
    """Dataset over a DailyDialog transcript.

    Lines are 'speaker<TAB>utterance fields...<TAB>emotion'; a blank line
    separates dialogues.  __getitem__ yields (dialog, labelList, sentidict)
    where dialog = [context_speaker, context, emotion, sentiment] for the
    conversation prefix ending at that turn.
    """

    def __init__(self, txt_file, dataclass):
        self.dialogs = []
        # 'with' guarantees the handle is closed even if parsing raises
        with open(txt_file, 'r') as f:
            dataset = f.readlines()
        temp_speakerList = []
        context = []
        context_speaker = []
        self.speakerNum = []
        self.emoSet = set()
        self.sentiSet = set()
        pos = ['happiness']
        neg = ['anger', 'disgust', 'fear', 'sadness']
        neu = ['neutral', 'surprise']
        emodict = {'anger': 'anger', 'disgust': 'disgust', 'fear': 'fear', 'happiness': 'happy', 'neutral': 'neutral', 'sadness': 'sad', 'surprise': 'surprise'}
        self.sentidict = {'positive': pos, 'negative': neg, 'neutral': neu}
        for (i, data) in enumerate(dataset):
            # blank line => dialogue boundary: reset the running context
            if ((data == '\n') and (len(self.dialogs) > 0)):
                self.speakerNum.append(len(temp_speakerList))
                temp_speakerList = []
                context = []
                context_speaker = []
                continue
            # split once instead of three times per line
            fields = data.strip().split('\t')
            speaker = fields[0]
            utt = ' '.join(fields[1:-1])
            emo = fields[-1]
            if emo in pos:
                senti = 'positive'
            elif emo in neg:
                senti = 'negative'
            elif emo in neu:
                senti = 'neutral'
            else:
                # unknown tag: emodict[emo] below will raise KeyError anyway
                print('ERROR emotion&sentiment')
            context.append(utt)
            if speaker not in temp_speakerList:
                temp_speakerList.append(speaker)
            speakerCLS = temp_speakerList.index(speaker)
            context_speaker.append(speakerCLS)
            # one item per turn: the prefix up to and including this utterance
            self.dialogs.append([context_speaker[:], context[:], emodict[emo], senti])
            self.emoSet.add(emodict[emo])
            # BUG FIX: sentiSet was never populated, so sentiList (and the
            # sentiment-task labelList) was always empty
            self.sentiSet.add(senti)
        self.emoList = sorted(self.emoSet)
        self.sentiList = sorted(self.sentiSet)
        # the label space depends on the task: emotion classes or sentiment classes
        if dataclass == 'emotion':
            self.labelList = self.emoList
        else:
            self.labelList = self.sentiList
        self.speakerNum.append(len(temp_speakerList))

    def __len__(self):
        return len(self.dialogs)

    def __getitem__(self, idx):
        return (self.dialogs[idx], self.labelList, self.sentidict)
|
def CELoss(pred_outs, labels):
    """Cross-entropy loss over class logits.

    pred_outs: [batch, clsNum] logits
    labels: [batch] class indices
    """
    criterion = nn.CrossEntropyLoss()
    return criterion(pred_outs, labels)
|
def main():
    """Train an ERC model and evaluate on dev/test each epoch.

    All configuration comes from the module-level `args` namespace; the best
    checkpoint (chosen by dev f-score) is saved under <dataset>_models/...
    """
    'Dataset Loading'
    batch_size = args.batch
    dataset = args.dataset
    dataclass = args.cls  # 'emotion' or sentiment task
    sample = args.sample  # fraction of training batches used per epoch
    model_type = args.pretrained
    dataType = 'multi'
    if (dataset == 'MELD'):
        if args.dyadic:
            dataType = 'dyadic'
        else:
            dataType = 'multi'
        data_path = (('../dataset/MELD/' + dataType) + '/')
        DATA_loader = MELD_loader
    elif (dataset == 'EMORY'):
        data_path = '../dataset/EMORY/'
        DATA_loader = Emory_loader
    elif (dataset == 'iemocap'):
        data_path = '../dataset/iemocap/'
        DATA_loader = IEMOCAP_loader
    elif (dataset == 'dailydialog'):
        data_path = '../dataset/dailydialog/'
        DATA_loader = DD_loader
    # NOTE(review): an unrecognized dataset leaves data_path/DATA_loader unbound
    if (model_type == 'roberta-large'):
        make_batch = make_batch_roberta
    elif (model_type == 'bert-large-uncased'):
        make_batch = make_batch_bert
    else:
        make_batch = make_batch_gpt
    train_path = ((data_path + dataset) + '_train.txt')
    dev_path = ((data_path + dataset) + '_dev.txt')
    test_path = ((data_path + dataset) + '_test.txt')
    train_dataset = DATA_loader(train_path, dataclass)
    if (sample < 1.0):
        # when subsampling, keep order deterministic so the sampled prefix is stable
        logger.info('shuffle False')
        train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=False, num_workers=4, collate_fn=make_batch)
    else:
        train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=4, collate_fn=make_batch)
    train_sample_num = int((len(train_dataloader) * sample))
    dev_dataset = DATA_loader(dev_path, dataclass)
    dev_dataloader = DataLoader(dev_dataset, batch_size=1, shuffle=False, num_workers=4, collate_fn=make_batch)
    test_dataset = DATA_loader(test_path, dataclass)
    test_dataloader = DataLoader(test_dataset, batch_size=1, shuffle=False, num_workers=4, collate_fn=make_batch)
    'logging and path'
    save_path = os.path.join((dataset + '_models'), model_type, dataclass, str(sample))
    print('###Save Path### ', save_path)
    log_path = os.path.join(save_path, 'train.log')
    if (not os.path.exists(save_path)):
        os.makedirs(save_path)
    fileHandler = logging.FileHandler(log_path)
    logger.addHandler(streamHandler)
    logger.addHandler(fileHandler)
    logger.setLevel(level=logging.DEBUG)
    'Model Loading'
    # GPT-style models classify from the last token; BERT/RoBERTa from the first
    if ('gpt2' in model_type):
        last = True
    else:
        last = False
    print('DataClass: ', dataclass, '!!!')
    clsNum = len(train_dataset.labelList)
    model = ERC_model(model_type, clsNum, last)
    model = model.cuda()
    model.train()
    'Training Setting'
    training_epochs = args.epoch
    save_term = int((training_epochs / 5))  # NOTE(review): computed but never used
    max_grad_norm = args.norm
    lr = args.lr
    # linear schedule: warm up for one epoch's worth of samples
    num_training_steps = (len(train_dataset) * training_epochs)
    num_warmup_steps = len(train_dataset)
    optimizer = torch.optim.AdamW(model.parameters(), lr=lr)
    scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps)
    'Input & Label Setting'
    (best_dev_fscore, best_test_fscore) = (0, 0)
    (best_dev_fscore_macro, best_dev_fscore_micro, best_test_fscore_macro, best_test_fscore_micro) = (0, 0, 0, 0)
    best_epoch = 0
    for epoch in tqdm(range(training_epochs)):
        model.train()
        for (i_batch, data) in enumerate(train_dataloader):
            # stop after the sampled number of batches for this epoch
            if (i_batch > train_sample_num):
                print('check: ', i_batch, train_sample_num)
                break
            'Prediction'
            (batch_input_tokens, batch_labels) = data
            (batch_input_tokens, batch_labels) = (batch_input_tokens.cuda(), batch_labels.cuda())
            try:
                pred_logits = model(batch_input_tokens)
            except:
                # NOTE(review): bare except that drops into the debugger —
                # hides the real error; remove for unattended runs
                pdb.set_trace()
            'Loss calculation & training'
            loss_val = CELoss(pred_logits, batch_labels)
            loss_val.backward()
            torch.nn.utils.clip_grad_norm_(model.parameters(), max_grad_norm)
            optimizer.step()
            scheduler.step()
            optimizer.zero_grad()
        'Dev & Test evaluation'
        model.eval()
        if (dataset == 'dailydialog'):
            # dailydialog reports macro + micro f1; the micro average excludes
            # label index 4 — presumably the dominant neutral class, confirm
            # against the sorted label list
            (dev_prek, dev_pred_list, dev_label_list) = _CalACC(model, dev_dataloader)
            (dev_pre_macro, dev_rec_macro, dev_fbeta_macro, _) = precision_recall_fscore_support(dev_label_list, dev_pred_list, average='macro')
            (dev_pre_micro, dev_rec_micro, dev_fbeta_micro, _) = precision_recall_fscore_support(dev_label_list, dev_pred_list, labels=[0, 1, 2, 3, 5, 6], average='micro')
            dev_fscore = (dev_fbeta_macro + dev_fbeta_micro)
            'Best Score & Model Save'
            if (dev_fscore > (best_dev_fscore_macro + best_dev_fscore_micro)):
                best_dev_fscore_macro = dev_fbeta_macro
                best_dev_fscore_micro = dev_fbeta_micro
                # evaluate on test only when dev improves
                (test_prek, test_pred_list, test_label_list) = _CalACC(model, test_dataloader)
                (test_pre_macro, test_rec_macro, test_fbeta_macro, _) = precision_recall_fscore_support(test_label_list, test_pred_list, average='macro')
                (test_pre_micro, test_rec_micro, test_fbeta_micro, _) = precision_recall_fscore_support(test_label_list, test_pred_list, labels=[0, 1, 2, 3, 5, 6], average='micro')
                best_epoch = epoch
                _SaveModel(model, save_path)
        else:
            # other datasets use a single weighted f1 on dev
            (dev_prek, dev_pred_list, dev_label_list) = _CalACC(model, dev_dataloader)
            (dev_pre, dev_rec, dev_fbeta, _) = precision_recall_fscore_support(dev_label_list, dev_pred_list, average='weighted')
            'Best Score & Model Save'
            if (dev_fbeta > best_dev_fscore):
                best_dev_fscore = dev_fbeta
                (test_prek, test_pred_list, test_label_list) = _CalACC(model, test_dataloader)
                (test_pre, test_rec, test_fbeta, _) = precision_recall_fscore_support(test_label_list, test_pred_list, average='weighted')
                best_epoch = epoch
                _SaveModel(model, save_path)
        logger.info('Epoch: {}'.format(epoch))
        if (dataset == 'dailydialog'):
            logger.info('Devleopment ## precision: {}, macro-fscore: {}, micro-fscore: {}'.format(dev_prek, dev_fbeta_macro, dev_fbeta_micro))
            logger.info('')
        else:
            logger.info('Devleopment ## precision: {}, precision: {}, recall: {}, fscore: {}'.format(dev_prek, dev_pre, dev_rec, dev_fbeta))
            logger.info('')
    # NOTE(review): the test_* names are unbound if the dev score never improved
    # (e.g. zero epochs) — this would raise NameError here
    if (dataset == 'dailydialog'):
        logger.info('Final Fscore ## test-precision: {}, test-macro: {}, test-micro: {}, test_epoch: {}'.format(test_prek, test_fbeta_macro, test_fbeta_micro, best_epoch))
    else:
        logger.info('Final Fscore ## test-precision: {}, test-fscore: {}, test_epoch: {}'.format(test_prek, test_fbeta, best_epoch))
|
def _CalACC(model, dataloader):
    """Evaluate *model* over *dataloader* (batch size 1).

    Returns ([p@1, p@2, p@3] as percentages, pred_list, label_list), where
    a hit within the top k contributes weight 1/k to p@k.
    """
    model.eval()
    pred_list, label_list = [], []
    correct = 0
    p1num, p2num, p3num = 0, 0, 0
    with torch.no_grad():
        for data in dataloader:
            # Prediction
            batch_input_tokens, batch_labels = data
            batch_input_tokens = batch_input_tokens.cuda()
            batch_labels = batch_labels.cuda()
            pred_logits = model(batch_input_tokens)
            # Calculation: class indices ranked by descending logit
            # ([0] assumes exactly one sample per batch)
            indices = pred_logits.sort(descending=True).indices.tolist()[0]
            pred_label = indices[0]
            true_label = batch_labels.item()
            pred_list.append(pred_label)
            label_list.append(true_label)
            if pred_label == true_label:
                correct += 1
            # Calculation precision: weighted hits within the top 1/2/3
            if true_label in indices[:1]:
                p1num += 1
            if true_label in indices[:2]:
                p2num += (1 / 2)
            if true_label in indices[:3]:
                p3num += (1 / 3)
    total = len(dataloader)
    p1 = round(p1num / total * 100, 2)
    p2 = round(p2num / total * 100, 2)
    p3 = round(p3num / total * 100, 2)
    return ([p1, p2, p3], pred_list, label_list)
|
def _SaveModel(model, path):
if (not os.path.exists(path)):
os.makedirs(path)
torch.save(model.state_dict(), os.path.join(path, 'model.bin'))
|
def encode_right_truncated(text, tokenizer, max_length=511):
    """Tokenize *text*, keep only the last *max_length* tokens, and
    prepend the CLS token id (BERT/RoBERTa style)."""
    tail = tokenizer.tokenize(text)[-max_length:]
    return [tokenizer.cls_token_id] + tokenizer.convert_tokens_to_ids(tail)
|
def padding(ids_list, tokenizer):
    """Right-pad every id sequence to the batch maximum and stack into a tensor."""
    max_len = max((len(ids) for ids in ids_list), default=0)
    padded = [ids + [tokenizer.pad_token_id] * (max_len - len(ids)) for ids in ids_list]
    return torch.tensor(padded)
|
def encode_right_truncated_gpt(text, tokenizer, max_length=511):
    """Tokenize *text*, keep only the last *max_length* tokens, and
    append the CLS token id at the end (GPT reads the final position)."""
    tail = tokenizer.tokenize(text)[-max_length:]
    return tokenizer.convert_tokens_to_ids(tail) + [tokenizer.cls_token_id]
|
def padding_gpt(ids_list, tokenizer):
    """Left-pad every id sequence to the batch maximum and stack into a tensor.

    GPT classifies from the final position, so padding goes on the left.
    """
    max_len = max((len(ids) for ids in ids_list), default=0)
    padded = [[tokenizer.pad_token_id] * (max_len - len(ids)) + ids for ids in ids_list]
    return torch.tensor(padded)
|
def make_batch_roberta(sessions):
    """Collate sessions into (padded RoBERTa input tokens, label tensor).

    Each session is (dialog_data, label_list, sentidict) with dialog_data =
    (context_speaker, context, emotion, sentiment).
    """
    batch_input, batch_labels = [], []
    for session in sessions:
        context_speaker, context, emotion, sentiment = session[0]
        label_list = session[1]
        # NOTE: the original also built a speaker_utt_list here (tokenizing every
        # current-speaker utterance) but never used it — dead work, removed.
        inputString = ''
        for speaker, utt in zip(context_speaker, context):
            # '<sN>' tags mark who said each utterance
            inputString += ('<s' + str(speaker + 1) + '> ')
            inputString += (utt + ' ')
        concat_string = inputString.strip()
        batch_input.append(encode_right_truncated(concat_string, roberta_tokenizer))
        # more than 3 labels means the emotion task; otherwise sentiment
        if len(label_list) > 3:
            batch_labels.append(label_list.index(emotion))
        else:
            batch_labels.append(label_list.index(sentiment))
    batch_input_tokens = padding(batch_input, roberta_tokenizer)
    batch_labels = torch.tensor(batch_labels)
    return (batch_input_tokens, batch_labels)
|
def make_batch_bert(sessions):
    """Collate sessions into (padded BERT input tokens, label tensor).

    Each session is (dialog_data, label_list, sentidict) with dialog_data =
    (context_speaker, context, emotion, sentiment).
    """
    batch_input, batch_labels = [], []
    for session in sessions:
        context_speaker, context, emotion, sentiment = session[0]
        label_list = session[1]
        # NOTE: the original also built a speaker_utt_list here (tokenizing every
        # current-speaker utterance) but never used it — dead work, removed.
        inputString = ''
        for speaker, utt in zip(context_speaker, context):
            # '<sN>' tags mark who said each utterance
            inputString += ('<s' + str(speaker + 1) + '> ')
            inputString += (utt + ' ')
        concat_string = inputString.strip()
        batch_input.append(encode_right_truncated(concat_string, bert_tokenizer))
        # more than 3 labels means the emotion task; otherwise sentiment
        if len(label_list) > 3:
            batch_labels.append(label_list.index(emotion))
        else:
            batch_labels.append(label_list.index(sentiment))
    batch_input_tokens = padding(batch_input, bert_tokenizer)
    batch_labels = torch.tensor(batch_labels)
    return (batch_input_tokens, batch_labels)
|
def make_batch_gpt(sessions):
    """Collate sessions into (left-padded GPT input tokens, label tensor).

    Each session is (dialog_data, label_list, sentidict) with dialog_data =
    (context_speaker, context, emotion, sentiment).
    """
    batch_input, batch_labels = [], []
    for session in sessions:
        context_speaker, context, emotion, sentiment = session[0]
        label_list = session[1]
        # NOTE: the original also built a speaker_utt_list here (tokenizing every
        # current-speaker utterance) but never used it — dead work, removed.
        inputString = ''
        for speaker, utt in zip(context_speaker, context):
            # '<sN>' tags mark who said each utterance
            inputString += ('<s' + str(speaker + 1) + '> ')
            inputString += (utt + ' ')
        concat_string = inputString.strip()
        batch_input.append(encode_right_truncated_gpt(concat_string, gpt_tokenizer, max_length=511))
        # more than 3 labels means the emotion task; otherwise sentiment
        if len(label_list) > 3:
            batch_labels.append(label_list.index(emotion))
        else:
            batch_labels.append(label_list.index(sentiment))
    batch_input_tokens = padding_gpt(batch_input, gpt_tokenizer)
    batch_labels = torch.tensor(batch_labels)
    return (batch_input_tokens, batch_labels)
|
class MELD_loader(Dataset):
    """Dataset over a MELD transcript file.

    Each non-header line is 'speaker<TAB>utterance<TAB>emotion<TAB>sentiment';
    a blank line separates dialogues.  __getitem__ yields
    (dialog, labelList, sentidict) where dialog is
    [context_speaker, context, emotion, sentiment] for the conversation
    prefix ending at that turn.
    """

    def __init__(self, txt_file, dataclass):
        self.dialogs = []
        f = open(txt_file, 'r')
        dataset = f.readlines()
        f.close()
        temp_speakerList = []
        context = []
        context_speaker = []
        self.speakerNum = []
        # canonical emotion names ('sadness' -> 'sad'); the rest map to themselves
        emodict = {'anger': 'anger', 'disgust': 'disgust', 'fear': 'fear', 'joy': 'joy', 'neutral': 'neutral', 'sadness': 'sad', 'surprise': 'surprise'}
        self.sentidict = {'positive': ['joy'], 'negative': ['anger', 'disgust', 'fear', 'sadness'], 'neutral': ['neutral', 'surprise']}
        self.emoSet = set()
        self.sentiSet = set()
        for (i, data) in enumerate(dataset):
            # the first two lines of the file are header rows
            if (i < 2):
                continue
            # blank line => dialogue boundary: reset the running context
            if ((data == '\n') and (len(self.dialogs) > 0)):
                self.speakerNum.append(len(temp_speakerList))
                temp_speakerList = []
                context = []
                context_speaker = []
                continue
            (speaker, utt, emo, senti) = data.strip().split('\t')
            context.append(utt)
            if (speaker not in temp_speakerList):
                temp_speakerList.append(speaker)
            speakerCLS = temp_speakerList.index(speaker)
            context_speaker.append(speakerCLS)
            # one item per turn: the prefix up to and including this utterance
            self.dialogs.append([context_speaker[:], context[:], emodict[emo], senti])
            self.emoSet.add(emodict[emo])
            self.sentiSet.add(senti)
        self.emoList = sorted(self.emoSet)
        self.sentiList = sorted(self.sentiSet)
        # the label space depends on the task: emotion classes or sentiment classes
        if (dataclass == 'emotion'):
            self.labelList = self.emoList
        else:
            self.labelList = self.sentiList
        self.speakerNum.append(len(temp_speakerList))

    def __len__(self):
        return len(self.dialogs)

    def __getitem__(self, idx):
        return (self.dialogs[idx], self.labelList, self.sentidict)
|
class Emory_loader(Dataset):
    """Dataset over an EmoryNLP transcript file.

    Each line is 'speaker<TAB>utterance<TAB>emotion'; a blank line separates
    dialogues.  __getitem__ yields (dialog, labelList, sentidict) where
    dialog is [context_speaker, context, emotion, sentiment] for the
    conversation prefix ending at that turn.
    """

    def __init__(self, txt_file, dataclass):
        self.dialogs = []
        f = open(txt_file, 'r')
        dataset = f.readlines()
        f.close()
        'sentiment'
        # emotion-tag -> sentiment grouping
        pos = ['Joyful', 'Peaceful', 'Powerful']
        neg = ['Mad', 'Sad', 'Scared']
        neu = ['Neutral']
        emodict = {'Joyful': 'joy', 'Mad': 'mad', 'Peaceful': 'peaceful', 'Powerful': 'powerful', 'Neutral': 'neutral', 'Sad': 'sad', 'Scared': 'scared'}
        self.sentidict = {'positive': pos, 'negative': neg, 'neutral': neu}
        temp_speakerList = []
        context = []
        context_speaker = []
        self.speakerNum = []
        self.emoSet = set()
        self.sentiSet = set()
        for (i, data) in enumerate(dataset):
            # blank line => dialogue boundary: reset the running context
            if ((data == '\n') and (len(self.dialogs) > 0)):
                self.speakerNum.append(len(temp_speakerList))
                temp_speakerList = []
                context = []
                context_speaker = []
                continue
            (speaker, utt, emo) = data.strip().split('\t')
            context.append(utt)
            if (emo in pos):
                senti = 'positive'
            elif (emo in neg):
                senti = 'negative'
            elif (emo in neu):
                senti = 'neutral'
            else:
                # NOTE(review): an unknown tag leaves `senti` holding the previous
                # row's value (or unbound on the first row); emodict[emo] below
                # would raise KeyError in that case anyway
                print('ERROR emotion&sentiment')
            if (speaker not in temp_speakerList):
                temp_speakerList.append(speaker)
            speakerCLS = temp_speakerList.index(speaker)
            context_speaker.append(speakerCLS)
            # one item per turn: the prefix up to and including this utterance
            self.dialogs.append([context_speaker[:], context[:], emodict[emo], senti])
            self.emoSet.add(emodict[emo])
            self.sentiSet.add(senti)
        self.emoList = sorted(self.emoSet)
        self.sentiList = sorted(self.sentiSet)
        # the label space depends on the task: emotion classes or sentiment classes
        if (dataclass == 'emotion'):
            self.labelList = self.emoList
        else:
            self.labelList = self.sentiList
        self.speakerNum.append(len(temp_speakerList))

    def __len__(self):
        return len(self.dialogs)

    def __getitem__(self, idx):
        return (self.dialogs[idx], self.labelList, self.sentidict)
|
class IEMOCAP_loader(Dataset):
    """Dataset over an IEMOCAP transcript.

    Lines are 'speaker<TAB>utterance fields...<TAB>emotion'; a blank line
    separates dialogues.  __getitem__ yields (dialog, labelList, sentidict)
    where dialog = [context_speaker, context, emotion, sentiment] for the
    conversation prefix ending at that turn.
    """

    def __init__(self, txt_file, dataclass):
        self.dialogs = []
        # 'with' guarantees the handle is closed even if parsing raises
        with open(txt_file, 'r') as f:
            dataset = f.readlines()
        temp_speakerList = []
        context = []
        context_speaker = []
        self.speakerNum = []
        pos = ['exc', 'hap']
        neg = ['ang', 'fru', 'sad']
        neu = ['neu']
        emodict = {'ang': 'angry', 'exc': 'excited', 'fru': 'frustrated', 'hap': 'happy', 'neu': 'neutral', 'sad': 'sad'}
        self.sentidict = {'positive': pos, 'negative': neg, 'neutral': neu}
        self.emoSet = set()
        self.sentiSet = set()
        for (i, data) in enumerate(dataset):
            # blank line => dialogue boundary: reset the running context
            if ((data == '\n') and (len(self.dialogs) > 0)):
                self.speakerNum.append(len(temp_speakerList))
                temp_speakerList = []
                context = []
                context_speaker = []
                continue
            # split once instead of three times per line
            fields = data.strip().split('\t')
            speaker = fields[0]
            utt = ' '.join(fields[1:-1])
            emo = fields[-1]
            context.append(utt)
            if emo in pos:
                senti = 'positive'
            elif emo in neg:
                senti = 'negative'
            elif emo in neu:
                senti = 'neutral'
            else:
                # unknown tag: emodict[emo] below will raise KeyError anyway
                print('ERROR emotion&sentiment')
            if speaker not in temp_speakerList:
                temp_speakerList.append(speaker)
            speakerCLS = temp_speakerList.index(speaker)
            context_speaker.append(speakerCLS)
            # one item per turn: the prefix up to and including this utterance
            self.dialogs.append([context_speaker[:], context[:], emodict[emo], senti])
            self.emoSet.add(emodict[emo])
            # BUG FIX: sentiSet was never populated, so sentiList (and the
            # sentiment-task labelList) was always empty
            self.sentiSet.add(senti)
        self.emoList = sorted(self.emoSet)
        self.sentiList = sorted(self.sentiSet)
        # the label space depends on the task: emotion classes or sentiment classes
        if dataclass == 'emotion':
            self.labelList = self.emoList
        else:
            self.labelList = self.sentiList
        self.speakerNum.append(len(temp_speakerList))

    def __len__(self):
        return len(self.dialogs)

    def __getitem__(self, idx):
        return (self.dialogs[idx], self.labelList, self.sentidict)
|
class DD_loader(Dataset):
    """Dataset over a DailyDialog transcript.

    Lines are 'speaker<TAB>utterance fields...<TAB>emotion'; a blank line
    separates dialogues.  __getitem__ yields (dialog, labelList, sentidict)
    where dialog = [context_speaker, context, emotion, sentiment] for the
    conversation prefix ending at that turn.
    """

    def __init__(self, txt_file, dataclass):
        self.dialogs = []
        # 'with' guarantees the handle is closed even if parsing raises
        with open(txt_file, 'r') as f:
            dataset = f.readlines()
        temp_speakerList = []
        context = []
        context_speaker = []
        self.speakerNum = []
        self.emoSet = set()
        self.sentiSet = set()
        pos = ['happiness']
        neg = ['anger', 'disgust', 'fear', 'sadness']
        neu = ['neutral', 'surprise']
        emodict = {'anger': 'anger', 'disgust': 'disgust', 'fear': 'fear', 'happiness': 'happy', 'neutral': 'neutral', 'sadness': 'sad', 'surprise': 'surprise'}
        self.sentidict = {'positive': pos, 'negative': neg, 'neutral': neu}
        for (i, data) in enumerate(dataset):
            # blank line => dialogue boundary: reset the running context
            if ((data == '\n') and (len(self.dialogs) > 0)):
                self.speakerNum.append(len(temp_speakerList))
                temp_speakerList = []
                context = []
                context_speaker = []
                continue
            # split once instead of three times per line
            fields = data.strip().split('\t')
            speaker = fields[0]
            utt = ' '.join(fields[1:-1])
            emo = fields[-1]
            if emo in pos:
                senti = 'positive'
            elif emo in neg:
                senti = 'negative'
            elif emo in neu:
                senti = 'neutral'
            else:
                # unknown tag: emodict[emo] below will raise KeyError anyway
                print('ERROR emotion&sentiment')
            context.append(utt)
            if speaker not in temp_speakerList:
                temp_speakerList.append(speaker)
            speakerCLS = temp_speakerList.index(speaker)
            context_speaker.append(speakerCLS)
            # one item per turn: the prefix up to and including this utterance
            self.dialogs.append([context_speaker[:], context[:], emodict[emo], senti])
            self.emoSet.add(emodict[emo])
            # BUG FIX: sentiSet was never populated, so sentiList (and the
            # sentiment-task labelList) was always empty
            self.sentiSet.add(senti)
        self.emoList = sorted(self.emoSet)
        self.sentiList = sorted(self.sentiSet)
        # the label space depends on the task: emotion classes or sentiment classes
        if dataclass == 'emotion':
            self.labelList = self.emoList
        else:
            self.labelList = self.sentiList
        self.speakerNum.append(len(temp_speakerList))

    def __len__(self):
        return len(self.dialogs)

    def __getitem__(self, idx):
        return (self.dialogs[idx], self.labelList, self.sentidict)
|
def CELoss(pred_outs, labels):
    """Cross-entropy loss over class logits.

    pred_outs: [batch, clsNum] logits
    labels: [batch] class indices
    """
    criterion = nn.CrossEntropyLoss()
    return criterion(pred_outs, labels)
|
def main():
    """Train an ERC model and evaluate on dev/test each epoch.

    All configuration comes from the module-level `args` namespace; the best
    checkpoint (chosen by dev f-score) is saved under <dataset>_models/...
    """
    'Dataset Loading'
    batch_size = args.batch
    dataset = args.dataset
    dataclass = args.cls  # 'emotion' or sentiment task
    sample = args.sample  # fraction of training batches used per epoch
    model_type = args.pretrained
    dataType = 'multi'
    if (dataset == 'MELD'):
        if args.dyadic:
            dataType = 'dyadic'
        else:
            dataType = 'multi'
        data_path = (('../dataset/MELD/' + dataType) + '/')
        DATA_loader = MELD_loader
    elif (dataset == 'EMORY'):
        data_path = '../dataset/EMORY/'
        DATA_loader = Emory_loader
    elif (dataset == 'iemocap'):
        data_path = '../dataset/iemocap/'
        DATA_loader = IEMOCAP_loader
    elif (dataset == 'dailydialog'):
        data_path = '../dataset/dailydialog/'
        DATA_loader = DD_loader
    # NOTE(review): an unrecognized dataset leaves data_path/DATA_loader unbound
    if (model_type == 'roberta-large'):
        make_batch = make_batch_roberta
    elif (model_type == 'bert-large-uncased'):
        make_batch = make_batch_bert
    else:
        make_batch = make_batch_gpt
    train_path = ((data_path + dataset) + '_train.txt')
    dev_path = ((data_path + dataset) + '_dev.txt')
    test_path = ((data_path + dataset) + '_test.txt')
    train_dataset = DATA_loader(train_path, dataclass)
    if (sample < 1.0):
        # when subsampling, keep order deterministic so the sampled prefix is stable
        logger.info('shuffle False')
        train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=False, num_workers=4, collate_fn=make_batch)
    else:
        train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=4, collate_fn=make_batch)
    train_sample_num = int((len(train_dataloader) * sample))
    dev_dataset = DATA_loader(dev_path, dataclass)
    dev_dataloader = DataLoader(dev_dataset, batch_size=1, shuffle=False, num_workers=4, collate_fn=make_batch)
    test_dataset = DATA_loader(test_path, dataclass)
    test_dataloader = DataLoader(test_dataset, batch_size=1, shuffle=False, num_workers=4, collate_fn=make_batch)
    'logging and path'
    save_path = os.path.join((dataset + '_models'), model_type, dataclass, str(sample))
    print('###Save Path### ', save_path)
    log_path = os.path.join(save_path, 'train.log')
    if (not os.path.exists(save_path)):
        os.makedirs(save_path)
    fileHandler = logging.FileHandler(log_path)
    logger.addHandler(streamHandler)
    logger.addHandler(fileHandler)
    logger.setLevel(level=logging.DEBUG)
    'Model Loading'
    # GPT-style models classify from the last token; BERT/RoBERTa from the first
    if ('gpt2' in model_type):
        last = True
    else:
        last = False
    print('DataClass: ', dataclass, '!!!')
    clsNum = len(train_dataset.labelList)
    model = ERC_model(model_type, clsNum, last)
    model = model.cuda()
    model.train()
    'Training Setting'
    training_epochs = args.epoch
    save_term = int((training_epochs / 5))  # NOTE(review): computed but never used
    max_grad_norm = args.norm
    lr = args.lr
    # linear schedule: warm up for one epoch's worth of samples
    num_training_steps = (len(train_dataset) * training_epochs)
    num_warmup_steps = len(train_dataset)
    optimizer = torch.optim.AdamW(model.parameters(), lr=lr)
    scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps)
    'Input & Label Setting'
    (best_dev_fscore, best_test_fscore) = (0, 0)
    (best_dev_fscore_macro, best_dev_fscore_micro, best_test_fscore_macro, best_test_fscore_micro) = (0, 0, 0, 0)
    best_epoch = 0
    for epoch in tqdm(range(training_epochs)):
        model.train()
        for (i_batch, data) in enumerate(train_dataloader):
            # stop after the sampled number of batches for this epoch
            if (i_batch > train_sample_num):
                print('check: ', i_batch, train_sample_num)
                break
            'Prediction'
            (batch_input_tokens, batch_labels) = data
            (batch_input_tokens, batch_labels) = (batch_input_tokens.cuda(), batch_labels.cuda())
            try:
                pred_logits = model(batch_input_tokens)
            except:
                # NOTE(review): bare except that drops into the debugger —
                # hides the real error; remove for unattended runs
                pdb.set_trace()
            'Loss calculation & training'
            loss_val = CELoss(pred_logits, batch_labels)
            loss_val.backward()
            torch.nn.utils.clip_grad_norm_(model.parameters(), max_grad_norm)
            optimizer.step()
            scheduler.step()
            optimizer.zero_grad()
        'Dev & Test evaluation'
        model.eval()
        if (dataset == 'dailydialog'):
            # dailydialog reports macro + micro f1; the micro average excludes
            # label index 4 — presumably the dominant neutral class, confirm
            # against the sorted label list
            (dev_prek, dev_pred_list, dev_label_list) = _CalACC(model, dev_dataloader)
            (dev_pre_macro, dev_rec_macro, dev_fbeta_macro, _) = precision_recall_fscore_support(dev_label_list, dev_pred_list, average='macro')
            (dev_pre_micro, dev_rec_micro, dev_fbeta_micro, _) = precision_recall_fscore_support(dev_label_list, dev_pred_list, labels=[0, 1, 2, 3, 5, 6], average='micro')
            dev_fscore = (dev_fbeta_macro + dev_fbeta_micro)
            'Best Score & Model Save'
            if (dev_fscore > (best_dev_fscore_macro + best_dev_fscore_micro)):
                best_dev_fscore_macro = dev_fbeta_macro
                best_dev_fscore_micro = dev_fbeta_micro
                # evaluate on test only when dev improves
                (test_prek, test_pred_list, test_label_list) = _CalACC(model, test_dataloader)
                (test_pre_macro, test_rec_macro, test_fbeta_macro, _) = precision_recall_fscore_support(test_label_list, test_pred_list, average='macro')
                (test_pre_micro, test_rec_micro, test_fbeta_micro, _) = precision_recall_fscore_support(test_label_list, test_pred_list, labels=[0, 1, 2, 3, 5, 6], average='micro')
                best_epoch = epoch
                _SaveModel(model, save_path)
        else:
            # other datasets use a single weighted f1 on dev
            (dev_prek, dev_pred_list, dev_label_list) = _CalACC(model, dev_dataloader)
            (dev_pre, dev_rec, dev_fbeta, _) = precision_recall_fscore_support(dev_label_list, dev_pred_list, average='weighted')
            'Best Score & Model Save'
            if (dev_fbeta > best_dev_fscore):
                best_dev_fscore = dev_fbeta
                (test_prek, test_pred_list, test_label_list) = _CalACC(model, test_dataloader)
                (test_pre, test_rec, test_fbeta, _) = precision_recall_fscore_support(test_label_list, test_pred_list, average='weighted')
                best_epoch = epoch
                _SaveModel(model, save_path)
        logger.info('Epoch: {}'.format(epoch))
        if (dataset == 'dailydialog'):
            logger.info('Devleopment ## precision: {}, macro-fscore: {}, micro-fscore: {}'.format(dev_prek, dev_fbeta_macro, dev_fbeta_micro))
            logger.info('')
        else:
            logger.info('Devleopment ## precision: {}, precision: {}, recall: {}, fscore: {}'.format(dev_prek, dev_pre, dev_rec, dev_fbeta))
            logger.info('')
    # NOTE(review): the test_* names are unbound if the dev score never improved
    # (e.g. zero epochs) — this would raise NameError here
    if (dataset == 'dailydialog'):
        logger.info('Final Fscore ## test-precision: {}, test-macro: {}, test-micro: {}, test_epoch: {}'.format(test_prek, test_fbeta_macro, test_fbeta_micro, best_epoch))
    else:
        logger.info('Final Fscore ## test-precision: {}, test-fscore: {}, test_epoch: {}'.format(test_prek, test_fbeta, best_epoch))
|
def _CalACC(model, dataloader):
    """Evaluate `model` on `dataloader`; returns ([P@1, P@2, P@3], predictions, gold labels).

    Precision@k credits 1/k whenever the gold label appears among the top-k logits.
    Assumes batch_size == 1 (uses .item() on the labels).
    """
    model.eval()
    preds, golds = [], []
    hits = 0
    pk_scores = [0.0, 0.0, 0.0]  # accumulators for P@1, P@2, P@3
    with torch.no_grad():
        for data in dataloader:
            input_tokens, labels = data
            input_tokens, labels = input_tokens.cuda(), labels.cuda()
            logits = model(input_tokens)
            ranked = logits.sort(descending=True).indices.tolist()[0]
            guess = ranked[0]
            gold = labels.item()
            preds.append(guess)
            golds.append(gold)
            if guess == gold:
                hits += 1
            # precision@k over the top-3 ranked classes
            for k in range(1, 4):
                if gold in ranked[:k]:
                    pk_scores[k - 1] += 1 / k
    total = len(dataloader)
    pk = [round(score / total * 100, 2) for score in pk_scores]
    return (pk, preds, golds)
|
def _SaveModel(model, path):
if (not os.path.exists(path)):
os.makedirs(path)
torch.save(model.state_dict(), os.path.join(path, 'model.bin'))
|
def encode_right_truncated(text, tokenizer, max_length=511):
    """Tokenize `text`, keep only the last `max_length` tokens, and prepend the CLS id."""
    tail = tokenizer.tokenize(text)[-max_length:]
    return [tokenizer.cls_token_id] + tokenizer.convert_tokens_to_ids(tail)
|
def padding(ids_list, tokenizer):
    """Right-pad each id sequence to the batch maximum length and stack into a tensor."""
    longest = max((len(ids) for ids in ids_list), default=0)
    padded = [ids + [tokenizer.pad_token_id] * (longest - len(ids)) for ids in ids_list]
    return torch.tensor(padded)
|
def encode_right_truncated_gpt(text, tokenizer, max_length=511):
    """Tokenize `text`, keep only the last `max_length` tokens, and append the CLS id (GPT-style)."""
    tail = tokenizer.tokenize(text)[-max_length:]
    return tokenizer.convert_tokens_to_ids(tail) + [tokenizer.cls_token_id]
|
def padding_gpt(ids_list, tokenizer):
    """Left-pad each id sequence to the batch maximum length (GPT-style) and stack into a tensor."""
    longest = max((len(ids) for ids in ids_list), default=0)
    padded = [[tokenizer.pad_token_id] * (longest - len(ids)) + ids for ids in ids_list]
    return torch.tensor(padded)
|
def make_batch_roberta(sessions):
    """Collate sessions into (padded RoBERTa input ids, label index tensor)."""
    batch_input, batch_labels = [], []
    for session in sessions:
        context_speaker, context, emotion, sentiment = session[0]
        label_list = session[1]
        final_speaker = context_speaker[-1]
        speaker_utt_list = []
        input_string = ''
        for turn, (speaker, utt) in enumerate(zip(context_speaker, context)):
            # each utterance is prefixed with its speaker tag (<s1>, <s2>, ...)
            input_string += f'<s{speaker + 1}> {utt} '
            if turn < len(context_speaker) - 1 and speaker == final_speaker:
                # collected but unused in this collate variant
                speaker_utt_list.append(encode_right_truncated(utt, roberta_tokenizer))
        batch_input.append(encode_right_truncated(input_string.strip(), roberta_tokenizer))
        # more than 3 label names implies emotion classes; otherwise 3-way sentiment
        target = emotion if len(label_list) > 3 else sentiment
        batch_labels.append(label_list.index(target))
    return (padding(batch_input, roberta_tokenizer), torch.tensor(batch_labels))
|
def make_batch_bert(sessions):
    """Collate sessions into (padded BERT input ids, label index tensor)."""
    batch_input, batch_labels = [], []
    for session in sessions:
        context_speaker, context, emotion, sentiment = session[0]
        label_list = session[1]
        final_speaker = context_speaker[-1]
        speaker_utt_list = []
        input_string = ''
        for turn, (speaker, utt) in enumerate(zip(context_speaker, context)):
            # each utterance is prefixed with its speaker tag (<s1>, <s2>, ...)
            input_string += f'<s{speaker + 1}> {utt} '
            if turn < len(context_speaker) - 1 and speaker == final_speaker:
                # collected but unused in this collate variant
                speaker_utt_list.append(encode_right_truncated(utt, bert_tokenizer))
        batch_input.append(encode_right_truncated(input_string.strip(), bert_tokenizer))
        # more than 3 label names implies emotion classes; otherwise 3-way sentiment
        target = emotion if len(label_list) > 3 else sentiment
        batch_labels.append(label_list.index(target))
    return (padding(batch_input, bert_tokenizer), torch.tensor(batch_labels))
|
def make_batch_gpt(sessions):
    """Collate sessions into (left-padded GPT input ids, label index tensor)."""
    batch_input, batch_labels = [], []
    for session in sessions:
        context_speaker, context, emotion, sentiment = session[0]
        label_list = session[1]
        final_speaker = context_speaker[-1]
        speaker_utt_list = []
        input_string = ''
        for turn, (speaker, utt) in enumerate(zip(context_speaker, context)):
            # each utterance is prefixed with its speaker tag (<s1>, <s2>, ...)
            input_string += f'<s{speaker + 1}> {utt} '
            if turn < len(context_speaker) - 1 and speaker == final_speaker:
                # collected but unused in this collate variant
                speaker_utt_list.append(encode_right_truncated_gpt(utt, gpt_tokenizer, max_length=511))
        batch_input.append(encode_right_truncated_gpt(input_string.strip(), gpt_tokenizer, max_length=511))
        # more than 3 label names implies emotion classes; otherwise 3-way sentiment
        target = emotion if len(label_list) > 3 else sentiment
        batch_labels.append(label_list.index(target))
    return (padding_gpt(batch_input, gpt_tokenizer), torch.tensor(batch_labels))
|
class MELD_loader(Dataset):
    """MELD conversation dataset: one sample per utterance, each carrying the full dialogue context so far."""
    def __init__(self, txt_file, dataclass):
        """Parse a tab-separated MELD transcript.

        File format: two header lines, then one line per utterance with fields
        speaker / utterance / emotion / sentiment separated by tabs; a blank
        line marks the end of a dialogue.
        dataclass: 'emotion' selects emotion labels; anything else selects sentiment labels.
        """
        self.dialogs = []
        f = open(txt_file, 'r')
        dataset = f.readlines()
        f.close()
        temp_speakerList = []
        context = []
        context_speaker = []
        self.speakerNum = []
        # normalize raw emotion names ('sadness' -> 'sad'); others map to themselves
        emodict = {'anger': 'anger', 'disgust': 'disgust', 'fear': 'fear', 'joy': 'joy', 'neutral': 'neutral', 'sadness': 'sad', 'surprise': 'surprise'}
        self.sentidict = {'positive': ['joy'], 'negative': ['anger', 'disgust', 'fear', 'sadness'], 'neutral': ['neutral', 'surprise']}
        self.emoSet = set()
        self.sentiSet = set()
        for (i, data) in enumerate(dataset):
            if (i < 2):
                # skip the two header lines
                continue
            if ((data == '\n') and (len(self.dialogs) > 0)):
                # blank line: dialogue boundary -- reset the running context
                self.speakerNum.append(len(temp_speakerList))
                temp_speakerList = []
                context = []
                context_speaker = []
                continue
            (speaker, utt, emo, senti) = data.strip().split('\t')
            context.append(utt)
            if (speaker not in temp_speakerList):
                temp_speakerList.append(speaker)
            speakerCLS = temp_speakerList.index(speaker)
            context_speaker.append(speakerCLS)
            # one sample per utterance: (speaker ids so far, utterances so far, emotion, sentiment)
            self.dialogs.append([context_speaker[:], context[:], emodict[emo], senti])
            self.emoSet.add(emodict[emo])
            self.sentiSet.add(senti)
        self.emoList = sorted(self.emoSet)
        self.sentiList = sorted(self.sentiSet)
        if (dataclass == 'emotion'):
            self.labelList = self.emoList
        else:
            self.labelList = self.sentiList
        self.speakerNum.append(len(temp_speakerList))
    def __len__(self):
        """Number of utterance-level samples."""
        return len(self.dialogs)
    def __getitem__(self, idx):
        """Return (dialog sample, active label list, sentiment-to-emotions map)."""
        return (self.dialogs[idx], self.labelList, self.sentidict)
|
class Emory_loader(Dataset):
    """EmoryNLP conversation dataset: one sample per utterance with cumulative dialogue context."""
    def __init__(self, txt_file, dataclass):
        """Parse a tab-separated EmoryNLP transcript (speaker / utterance / emotion per line; blank line ends a dialogue).

        dataclass: 'emotion' selects emotion labels; anything else selects sentiment labels.
        """
        self.dialogs = []
        f = open(txt_file, 'r')
        dataset = f.readlines()
        f.close()
        'sentiment'
        # emotion-to-sentiment grouping
        pos = ['Joyful', 'Peaceful', 'Powerful']
        neg = ['Mad', 'Sad', 'Scared']
        neu = ['Neutral']
        # canonical lowercase label names used by the model
        emodict = {'Joyful': 'joy', 'Mad': 'mad', 'Peaceful': 'peaceful', 'Powerful': 'powerful', 'Neutral': 'neutral', 'Sad': 'sad', 'Scared': 'scared'}
        self.sentidict = {'positive': pos, 'negative': neg, 'neutral': neu}
        temp_speakerList = []
        context = []
        context_speaker = []
        self.speakerNum = []
        self.emoSet = set()
        self.sentiSet = set()
        for (i, data) in enumerate(dataset):
            if ((data == '\n') and (len(self.dialogs) > 0)):
                # blank line: dialogue boundary -- reset the running context
                self.speakerNum.append(len(temp_speakerList))
                temp_speakerList = []
                context = []
                context_speaker = []
                continue
            (speaker, utt, emo) = data.strip().split('\t')
            context.append(utt)
            if (emo in pos):
                senti = 'positive'
            elif (emo in neg):
                senti = 'negative'
            elif (emo in neu):
                senti = 'neutral'
            else:
                # unknown emotion: senti keeps its previous value (or is unbound on the first line)
                print('ERROR emotion&sentiment')
            if (speaker not in temp_speakerList):
                temp_speakerList.append(speaker)
            speakerCLS = temp_speakerList.index(speaker)
            context_speaker.append(speakerCLS)
            # one sample per utterance: (speaker ids so far, utterances so far, emotion, sentiment)
            self.dialogs.append([context_speaker[:], context[:], emodict[emo], senti])
            self.emoSet.add(emodict[emo])
            self.sentiSet.add(senti)
        self.emoList = sorted(self.emoSet)
        self.sentiList = sorted(self.sentiSet)
        if (dataclass == 'emotion'):
            self.labelList = self.emoList
        else:
            self.labelList = self.sentiList
        self.speakerNum.append(len(temp_speakerList))
    def __len__(self):
        """Number of utterance-level samples."""
        return len(self.dialogs)
    def __getitem__(self, idx):
        """Return (dialog sample, active label list, sentiment-to-emotions map)."""
        return (self.dialogs[idx], self.labelList, self.sentidict)
|
class IEMOCAP_loader(Dataset):
    """IEMOCAP conversation dataset: one sample per utterance with cumulative dialogue context."""

    def __init__(self, txt_file, dataclass):
        """Parse a tab-separated IEMOCAP transcript.

        Each line is speaker, utterance field(s), emotion code, separated by
        tabs; a blank line ends a dialogue.
        dataclass: 'emotion' selects emotion labels; anything else selects sentiment labels.
        """
        self.dialogs = []
        with open(txt_file, 'r') as f:
            dataset = f.readlines()
        temp_speakerList = []
        context = []
        context_speaker = []
        self.speakerNum = []
        # emotion-code-to-sentiment grouping
        pos = ['exc', 'hap']
        neg = ['ang', 'fru', 'sad']
        neu = ['neu']
        # expand emotion codes to the full label names used by the model
        emodict = {'ang': 'angry', 'exc': 'excited', 'fru': 'frustrated', 'hap': 'happy', 'neu': 'neutral', 'sad': 'sad'}
        self.sentidict = {'positive': pos, 'negative': neg, 'neutral': neu}
        self.emoSet = set()
        self.sentiSet = set()
        for (i, data) in enumerate(dataset):
            if ((data == '\n') and (len(self.dialogs) > 0)):
                # blank line: dialogue boundary -- reset the running context
                self.speakerNum.append(len(temp_speakerList))
                temp_speakerList = []
                context = []
                context_speaker = []
                continue
            fields = data.strip().split('\t')
            speaker = fields[0]
            utt = ' '.join(fields[1:(-1)])
            emo = fields[(-1)]
            context.append(utt)
            if (emo in pos):
                senti = 'positive'
            elif (emo in neg):
                senti = 'negative'
            elif (emo in neu):
                senti = 'neutral'
            else:
                # unknown emotion: senti keeps its previous value (or is unbound on the first line)
                print('ERROR emotion&sentiment')
            if (speaker not in temp_speakerList):
                temp_speakerList.append(speaker)
            speakerCLS = temp_speakerList.index(speaker)
            context_speaker.append(speakerCLS)
            # one sample per utterance: (speaker ids so far, utterances so far, emotion, sentiment)
            self.dialogs.append([context_speaker[:], context[:], emodict[emo], senti])
            self.emoSet.add(emodict[emo])
            # BUG FIX: sentiSet was never populated (unlike MELD_loader/Emory_loader),
            # so self.sentiList stayed empty and sentiment-class training crashed on
            # label_list.index(sentiment) in the collate functions.
            self.sentiSet.add(senti)
        self.emoList = sorted(self.emoSet)
        self.sentiList = sorted(self.sentiSet)
        if (dataclass == 'emotion'):
            self.labelList = self.emoList
        else:
            self.labelList = self.sentiList
        self.speakerNum.append(len(temp_speakerList))

    def __len__(self):
        """Number of utterance-level samples."""
        return len(self.dialogs)

    def __getitem__(self, idx):
        """Return (dialog sample, active label list, sentiment-to-emotions map)."""
        return (self.dialogs[idx], self.labelList, self.sentidict)
|
class DD_loader(Dataset):
    """DailyDialog dataset: one sample per utterance with cumulative dialogue context."""

    def __init__(self, txt_file, dataclass):
        """Parse a tab-separated DailyDialog transcript.

        Each line is speaker, utterance field(s), emotion, separated by tabs;
        a blank line ends a dialogue.
        dataclass: 'emotion' selects emotion labels; anything else selects sentiment labels.
        """
        self.dialogs = []
        with open(txt_file, 'r') as f:
            dataset = f.readlines()
        temp_speakerList = []
        context = []
        context_speaker = []
        self.speakerNum = []
        self.emoSet = set()
        self.sentiSet = set()
        # emotion-to-sentiment grouping
        pos = ['happiness']
        neg = ['anger', 'disgust', 'fear', 'sadness']
        neu = ['neutral', 'surprise']
        # normalize raw emotion names ('happiness' -> 'happy', 'sadness' -> 'sad')
        emodict = {'anger': 'anger', 'disgust': 'disgust', 'fear': 'fear', 'happiness': 'happy', 'neutral': 'neutral', 'sadness': 'sad', 'surprise': 'surprise'}
        self.sentidict = {'positive': pos, 'negative': neg, 'neutral': neu}
        for (i, data) in enumerate(dataset):
            if ((data == '\n') and (len(self.dialogs) > 0)):
                # blank line: dialogue boundary -- reset the running context
                self.speakerNum.append(len(temp_speakerList))
                temp_speakerList = []
                context = []
                context_speaker = []
                continue
            fields = data.strip().split('\t')
            speaker = fields[0]
            utt = ' '.join(fields[1:(-1)])
            emo = fields[(-1)]
            if (emo in pos):
                senti = 'positive'
            elif (emo in neg):
                senti = 'negative'
            elif (emo in neu):
                senti = 'neutral'
            else:
                # unknown emotion: senti keeps its previous value (or is unbound on the first line)
                print('ERROR emotion&sentiment')
            context.append(utt)
            if (speaker not in temp_speakerList):
                temp_speakerList.append(speaker)
            speakerCLS = temp_speakerList.index(speaker)
            context_speaker.append(speakerCLS)
            # one sample per utterance: (speaker ids so far, utterances so far, emotion, sentiment)
            self.dialogs.append([context_speaker[:], context[:], emodict[emo], senti])
            self.emoSet.add(emodict[emo])
            # BUG FIX: sentiSet was never populated (unlike MELD_loader/Emory_loader),
            # so self.sentiList stayed empty and sentiment-class training crashed on
            # label_list.index(sentiment) in the collate functions.
            self.sentiSet.add(senti)
        self.emoList = sorted(self.emoSet)
        self.sentiList = sorted(self.sentiSet)
        if (dataclass == 'emotion'):
            self.labelList = self.emoList
        else:
            self.labelList = self.sentiList
        self.speakerNum.append(len(temp_speakerList))

    def __len__(self):
        """Number of utterance-level samples."""
        return len(self.dialogs)

    def __getitem__(self, idx):
        """Return (dialog sample, active label list, sentiment-to-emotions map)."""
        return (self.dialogs[idx], self.labelList, self.sentidict)
|
def CELoss(pred_outs, labels):
    """Mean cross-entropy over the batch.

    pred_outs: [batch, clsNum] raw logits
    labels: [batch] integer class ids
    """
    criterion = nn.CrossEntropyLoss()
    return criterion(pred_outs, labels)
|
def main():
    """Train an ERC model on the dataset chosen via the global `args`, evaluating on dev each epoch and saving the best checkpoint (relies on module-level `args`, `logger`, `streamHandler`, loader classes and collate functions)."""
    'Dataset Loading'
    batch_size = args.batch
    dataset = args.dataset
    dataclass = args.cls
    sample = args.sample
    context_type = args.context_type
    speaker_type = args.speaker_type
    freeze = args.freeze
    dataType = 'multi'
    if (dataset == 'MELD'):
        if args.dyadic:
            dataType = 'dyadic'
        else:
            dataType = 'multi'
        data_path = (('../dataset/MELD/' + dataType) + '/')
        DATA_loader = MELD_loader
    elif (dataset == 'EMORY'):
        data_path = '../dataset/EMORY/'
        DATA_loader = Emory_loader
    elif (dataset == 'iemocap'):
        data_path = '../dataset/iemocap/'
        DATA_loader = IEMOCAP_loader
    elif (dataset == 'dailydialog'):
        data_path = '../dataset/dailydialog/'
        DATA_loader = DD_loader
    # pick the collate function matching the (context encoder, speaker encoder) pair
    if ((context_type == 'roberta-large') and (speaker_type == 'bert-large-uncased')):
        make_batch = make_batch_roberta_bert
    elif ((context_type == 'roberta-large') and ('gpt' in speaker_type)):
        make_batch = make_batch_roberta_gpt
    elif ((context_type == 'bert-large-uncased') and (speaker_type == 'roberta-large')):
        make_batch = make_batch_bert_roberta
    else:
        # NOTE(review): make_batch stays undefined here, so the DataLoader
        # construction below raises NameError for unsupported encoder pairs
        print('batch error')
    if freeze:
        freeze_type = 'freeze'
    else:
        freeze_type = 'no_freeze'
    train_path = ((data_path + dataset) + '_train.txt')
    dev_path = ((data_path + dataset) + '_dev.txt')
    test_path = ((data_path + dataset) + '_test.txt')
    train_dataset = DATA_loader(train_path, dataclass)
    train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=4, collate_fn=make_batch)
    # number of TRAIN BATCHES actually consumed per epoch (compared against i_batch below)
    train_sample_num = int((len(train_dataset) * sample))
    dev_dataset = DATA_loader(dev_path, dataclass)
    dev_dataloader = DataLoader(dev_dataset, batch_size=1, shuffle=False, num_workers=4, collate_fn=make_batch)
    test_dataset = DATA_loader(test_path, dataclass)
    test_dataloader = DataLoader(test_dataset, batch_size=1, shuffle=False, num_workers=4, collate_fn=make_batch)
    'logging and path'
    save_path = os.path.join((dataset + '_models'), ((context_type + '_') + speaker_type), freeze_type, dataclass)
    print('###Save Path### ', save_path)
    log_path = os.path.join(save_path, 'train.log')
    if (not os.path.exists(save_path)):
        os.makedirs(save_path)
    fileHandler = logging.FileHandler(log_path)
    logger.addHandler(streamHandler)
    logger.addHandler(fileHandler)
    logger.setLevel(level=logging.DEBUG)
    'Model Loading'
    print('DataClass: ', dataclass, '!!!')
    clsNum = len(train_dataset.labelList)
    model = ERC_model(context_type, speaker_type, clsNum, freeze)
    model = model.cuda()
    model.train()
    'Training Setting'
    training_epochs = args.epoch
    save_term = int((training_epochs / 5))
    max_grad_norm = args.norm
    lr = args.lr
    # linear warmup over one epoch worth of steps, then linear decay
    num_training_steps = (len(train_dataset) * training_epochs)
    num_warmup_steps = len(train_dataset)
    optimizer = torch.optim.AdamW(model.train_params, lr=lr)
    scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps)
    'Input & Label Setting'
    (best_dev_fscore, best_test_fscore) = (0, 0)
    (best_dev_fscore_macro, best_dev_fscore_micro, best_test_fscore_macro, best_test_fscore_micro) = (0, 0, 0, 0)
    best_epoch = 0
    for epoch in tqdm(range(training_epochs)):
        model.train()
        for (i_batch, data) in enumerate(train_dataloader):
            if (i_batch > train_sample_num):
                # stop early when the per-epoch batch budget is exhausted
                print(i_batch, train_sample_num)
                break
            'Prediction'
            (batch_input_tokens, batch_labels, batch_speaker_tokens) = data
            (batch_input_tokens, batch_labels) = (batch_input_tokens.cuda(), batch_labels.cuda())
            pred_logits = model(batch_input_tokens, batch_speaker_tokens)
            'Loss calculation & training'
            loss_val = CELoss(pred_logits, batch_labels)
            loss_val.backward()
            torch.nn.utils.clip_grad_norm_(model.parameters(), max_grad_norm)
            optimizer.step()
            scheduler.step()
            optimizer.zero_grad()
        'Dev & Test evaluation'
        model.eval()
        if (dataset == 'dailydialog'):
            # DailyDialog convention: micro-F1 excludes class index 4, which is
            # 'neutral' in the sorted DD_loader emotion list
            (dev_acc, dev_pred_list, dev_label_list) = _CalACC(model, dev_dataloader)
            (dev_pre_macro, dev_rec_macro, dev_fbeta_macro, _) = precision_recall_fscore_support(dev_label_list, dev_pred_list, average='macro')
            (dev_pre_micro, dev_rec_micro, dev_fbeta_micro, _) = precision_recall_fscore_support(dev_label_list, dev_pred_list, labels=[0, 1, 2, 3, 5, 6], average='micro')
            dev_fscore = (dev_fbeta_macro + dev_fbeta_micro)
            'Best Score & Model Save'
            if (dev_fscore > (best_dev_fscore_macro + best_dev_fscore_micro)):
                best_dev_fscore_macro = dev_fbeta_macro
                best_dev_fscore_micro = dev_fbeta_micro
                (test_acc, test_pred_list, test_label_list) = _CalACC(model, test_dataloader)
                (test_pre_macro, test_rec_macro, test_fbeta_macro, _) = precision_recall_fscore_support(test_label_list, test_pred_list, average='macro')
                (test_pre_micro, test_rec_micro, test_fbeta_micro, _) = precision_recall_fscore_support(test_label_list, test_pred_list, labels=[0, 1, 2, 3, 5, 6], average='micro')
                best_epoch = epoch
                _SaveModel(model, save_path)
        else:
            (dev_acc, dev_pred_list, dev_label_list) = _CalACC(model, dev_dataloader)
            (dev_pre, dev_rec, dev_fbeta, _) = precision_recall_fscore_support(dev_label_list, dev_pred_list, average='weighted')
            'Best Score & Model Save'
            if (dev_fbeta > best_dev_fscore):
                best_dev_fscore = dev_fbeta
                (test_acc, test_pred_list, test_label_list) = _CalACC(model, test_dataloader)
                (test_pre, test_rec, test_fbeta, _) = precision_recall_fscore_support(test_label_list, test_pred_list, average='weighted')
                best_epoch = epoch
                _SaveModel(model, save_path)
        logger.info('Epoch: {}'.format(epoch))
        if (dataset == 'dailydialog'):
            logger.info('Devleopment ## accuracy: {}, macro-fscore: {}, micro-fscore: {}'.format(dev_acc, dev_fbeta_macro, dev_fbeta_micro))
            logger.info('')
        else:
            logger.info('Devleopment ## accuracy: {}, precision: {}, recall: {}, fscore: {}'.format(dev_acc, dev_pre, dev_rec, dev_fbeta))
            logger.info('')
    # NOTE(review): if the dev score never improves, test_acc/test_fbeta* are
    # unbound here and these lines raise NameError -- confirm intended
    if (dataset == 'dailydialog'):
        logger.info('Final Fscore ## test-accuracy: {}, test-macro: {}, test-micro: {}, test_epoch: {}'.format(test_acc, test_fbeta_macro, test_fbeta_micro, best_epoch))
    else:
        logger.info('Final Fscore ## test-accuracy: {}, test-fscore: {}, test_epoch: {}'.format(test_acc, test_fbeta, best_epoch))
|
def _CalACC(model, dataloader):
    """Evaluate `model` on `dataloader`; returns (accuracy, predictions, gold labels).

    Assumes batch_size == 1 (uses .item() on labels and argmax output).
    """
    model.eval()
    preds, golds = [], []
    n_correct = 0
    with torch.no_grad():
        for data in dataloader:
            input_tokens, labels, speaker_tokens = data
            input_tokens, labels = input_tokens.cuda(), labels.cuda()
            logits = model(input_tokens, speaker_tokens)
            guess = logits.argmax(1).item()
            gold = labels.item()
            preds.append(guess)
            golds.append(gold)
            n_correct += int(guess == gold)
    return (n_correct / len(dataloader), preds, golds)
|
def _SaveModel(model, path):
if (not os.path.exists(path)):
os.makedirs(path)
torch.save(model.state_dict(), os.path.join(path, 'model.bin'))
|
def encode_right_truncated(text, tokenizer, max_length=511):
    """Return CLS id followed by the ids of the last `max_length` tokens of `text`."""
    kept_tokens = tokenizer.tokenize(text)[-max_length:]
    return [tokenizer.cls_token_id] + tokenizer.convert_tokens_to_ids(kept_tokens)
|
def padding(ids_list, tokenizer):
    """Pad every id sequence on the right to the batch max length; returns a stacked tensor."""
    width = max((len(ids) for ids in ids_list), default=0)
    rows = [ids + [tokenizer.pad_token_id] * (width - len(ids)) for ids in ids_list]
    return torch.tensor(rows)
|
def encode_right_truncated_gpt(text, tokenizer, max_length=511):
    """Return the ids of the last `max_length` tokens of `text` followed by the CLS id (GPT-style)."""
    kept_tokens = tokenizer.tokenize(text)[-max_length:]
    return tokenizer.convert_tokens_to_ids(kept_tokens) + [tokenizer.cls_token_id]
|
def padding_gpt(ids_list, tokenizer):
    """Pad every id sequence on the left (GPT-style) to the batch max length; returns a stacked tensor."""
    width = max((len(ids) for ids in ids_list), default=0)
    rows = [[tokenizer.pad_token_id] * (width - len(ids)) + ids for ids in ids_list]
    return torch.tensor(rows)
|
def make_batch_roberta(sessions):
    """Collate: padded RoBERTa context inputs, label tensor, and per-session padded past utterances of the final speaker."""
    batch_input, batch_labels, batch_speaker_tokens = [], [], []
    for session in sessions:
        context_speaker, context, emotion, sentiment = session[0]
        label_list = session[1]
        final_speaker = context_speaker[-1]
        prior_utts = []
        input_string = ''
        for turn, (speaker, utt) in enumerate(zip(context_speaker, context)):
            # each utterance is prefixed with its speaker tag (<s1>, <s2>, ...)
            input_string += f'<s{speaker + 1}> {utt} '
            if turn < len(context_speaker) - 1 and speaker == final_speaker:
                # earlier utterances by the speaker of the final turn
                prior_utts.append(encode_right_truncated(utt, roberta_tokenizer))
        batch_input.append(encode_right_truncated(input_string.strip(), roberta_tokenizer))
        # more than 3 label names implies emotion classes; otherwise 3-way sentiment
        target = emotion if len(label_list) > 3 else sentiment
        batch_labels.append(label_list.index(target))
        batch_speaker_tokens.append(padding(prior_utts, roberta_tokenizer))
    return (padding(batch_input, roberta_tokenizer), torch.tensor(batch_labels), batch_speaker_tokens)
|
def make_batch_bert(sessions):
    """Collate: padded BERT context inputs, label tensor, and per-session padded past utterances of the final speaker."""
    batch_input, batch_labels, batch_speaker_tokens = [], [], []
    for session in sessions:
        context_speaker, context, emotion, sentiment = session[0]
        label_list = session[1]
        final_speaker = context_speaker[-1]
        prior_utts = []
        input_string = ''
        for turn, (speaker, utt) in enumerate(zip(context_speaker, context)):
            # each utterance is prefixed with its speaker tag (<s1>, <s2>, ...)
            input_string += f'<s{speaker + 1}> {utt} '
            if turn < len(context_speaker) - 1 and speaker == final_speaker:
                # earlier utterances by the speaker of the final turn
                prior_utts.append(encode_right_truncated(utt, bert_tokenizer))
        batch_input.append(encode_right_truncated(input_string.strip(), bert_tokenizer))
        # more than 3 label names implies emotion classes; otherwise 3-way sentiment
        target = emotion if len(label_list) > 3 else sentiment
        batch_labels.append(label_list.index(target))
        batch_speaker_tokens.append(padding(prior_utts, bert_tokenizer))
    return (padding(batch_input, bert_tokenizer), torch.tensor(batch_labels), batch_speaker_tokens)
|
def make_batch_gpt(sessions):
    """Collate: left-padded GPT context inputs, label tensor, and per-session padded past utterances of the final speaker."""
    batch_input, batch_labels, batch_speaker_tokens = [], [], []
    for session in sessions:
        context_speaker, context, emotion, sentiment = session[0]
        label_list = session[1]
        final_speaker = context_speaker[-1]
        prior_utts = []
        input_string = ''
        for turn, (speaker, utt) in enumerate(zip(context_speaker, context)):
            # each utterance is prefixed with its speaker tag (<s1>, <s2>, ...)
            input_string += f'<s{speaker + 1}> {utt} '
            if turn < len(context_speaker) - 1 and speaker == final_speaker:
                # earlier utterances by the speaker of the final turn
                prior_utts.append(encode_right_truncated_gpt(utt, gpt_tokenizer, max_length=511))
        batch_input.append(encode_right_truncated_gpt(input_string.strip(), gpt_tokenizer, max_length=511))
        # more than 3 label names implies emotion classes; otherwise 3-way sentiment
        target = emotion if len(label_list) > 3 else sentiment
        batch_labels.append(label_list.index(target))
        batch_speaker_tokens.append(padding_gpt(prior_utts, gpt_tokenizer))
    return (padding_gpt(batch_input, gpt_tokenizer), torch.tensor(batch_labels), batch_speaker_tokens)
|
def make_batch_roberta_bert(sessions):
    """Collate: RoBERTa-encoded context inputs, label tensor, and BERT-encoded past utterances of the final speaker."""
    batch_input, batch_labels, batch_speaker_tokens = [], [], []
    for session in sessions:
        context_speaker, context, emotion, sentiment = session[0]
        label_list = session[1]
        final_speaker = context_speaker[-1]
        prior_utts = []
        input_string = ''
        for turn, (speaker, utt) in enumerate(zip(context_speaker, context)):
            # each utterance is prefixed with its speaker tag (<s1>, <s2>, ...)
            input_string += f'<s{speaker + 1}> {utt} '
            if turn < len(context_speaker) - 1 and speaker == final_speaker:
                # speaker-track utterances go through the BERT tokenizer
                prior_utts.append(encode_right_truncated(utt, bert_tokenizer, max_length=511))
        batch_input.append(encode_right_truncated(input_string.strip(), roberta_tokenizer, max_length=511))
        # more than 3 label names implies emotion classes; otherwise 3-way sentiment
        target = emotion if len(label_list) > 3 else sentiment
        batch_labels.append(label_list.index(target))
        batch_speaker_tokens.append(padding(prior_utts, bert_tokenizer))
    return (padding(batch_input, roberta_tokenizer), torch.tensor(batch_labels), batch_speaker_tokens)
|
def make_batch_roberta_gpt(sessions):
    """Collate: RoBERTa-encoded context inputs, label tensor, and GPT-encoded past utterances of the final speaker."""
    batch_input, batch_labels, batch_speaker_tokens = [], [], []
    for session in sessions:
        context_speaker, context, emotion, sentiment = session[0]
        label_list = session[1]
        final_speaker = context_speaker[-1]
        prior_utts = []
        input_string = ''
        for turn, (speaker, utt) in enumerate(zip(context_speaker, context)):
            # each utterance is prefixed with its speaker tag (<s1>, <s2>, ...)
            input_string += f'<s{speaker + 1}> {utt} '
            if turn < len(context_speaker) - 1 and speaker == final_speaker:
                # speaker-track utterances go through the GPT tokenizer (left padding)
                prior_utts.append(encode_right_truncated_gpt(utt, gpt_tokenizer, max_length=511))
        batch_input.append(encode_right_truncated(input_string.strip(), roberta_tokenizer, max_length=511))
        # more than 3 label names implies emotion classes; otherwise 3-way sentiment
        target = emotion if len(label_list) > 3 else sentiment
        batch_labels.append(label_list.index(target))
        batch_speaker_tokens.append(padding_gpt(prior_utts, gpt_tokenizer))
    return (padding(batch_input, roberta_tokenizer), torch.tensor(batch_labels), batch_speaker_tokens)
|
def make_batch_bert_roberta(sessions):
    """Collate: BERT-encoded context inputs, label tensor, and RoBERTa-encoded past utterances of the final speaker."""
    batch_input, batch_labels, batch_speaker_tokens = [], [], []
    for session in sessions:
        context_speaker, context, emotion, sentiment = session[0]
        label_list = session[1]
        final_speaker = context_speaker[-1]
        prior_utts = []
        input_string = ''
        for turn, (speaker, utt) in enumerate(zip(context_speaker, context)):
            # each utterance is prefixed with its speaker tag (<s1>, <s2>, ...)
            input_string += f'<s{speaker + 1}> {utt} '
            if turn < len(context_speaker) - 1 and speaker == final_speaker:
                # speaker-track utterances go through the RoBERTa tokenizer
                prior_utts.append(encode_right_truncated(utt, roberta_tokenizer, max_length=511))
        batch_input.append(encode_right_truncated(input_string.strip(), bert_tokenizer, max_length=511))
        # more than 3 label names implies emotion classes; otherwise 3-way sentiment
        target = emotion if len(label_list) > 3 else sentiment
        batch_labels.append(label_list.index(target))
        batch_speaker_tokens.append(padding(prior_utts, roberta_tokenizer))
    return (padding(batch_input, bert_tokenizer), torch.tensor(batch_labels), batch_speaker_tokens)
|
class MELD_loader(Dataset):
    """MELD conversation dataset: one sample per utterance, each carrying the full dialogue context so far."""
    def __init__(self, txt_file, dataclass):
        """Parse a tab-separated MELD transcript.

        File format: two header lines, then one line per utterance with fields
        speaker / utterance / emotion / sentiment separated by tabs; a blank
        line marks the end of a dialogue.
        dataclass: 'emotion' selects emotion labels; anything else selects sentiment labels.
        """
        self.dialogs = []
        f = open(txt_file, 'r')
        dataset = f.readlines()
        f.close()
        temp_speakerList = []
        context = []
        context_speaker = []
        self.speakerNum = []
        # normalize raw emotion names ('sadness' -> 'sad'); others map to themselves
        emodict = {'anger': 'anger', 'disgust': 'disgust', 'fear': 'fear', 'joy': 'joy', 'neutral': 'neutral', 'sadness': 'sad', 'surprise': 'surprise'}
        self.sentidict = {'positive': ['joy'], 'negative': ['anger', 'disgust', 'fear', 'sadness'], 'neutral': ['neutral', 'surprise']}
        self.emoSet = set()
        self.sentiSet = set()
        for (i, data) in enumerate(dataset):
            if (i < 2):
                # skip the two header lines
                continue
            if ((data == '\n') and (len(self.dialogs) > 0)):
                # blank line: dialogue boundary -- reset the running context
                self.speakerNum.append(len(temp_speakerList))
                temp_speakerList = []
                context = []
                context_speaker = []
                continue
            (speaker, utt, emo, senti) = data.strip().split('\t')
            context.append(utt)
            if (speaker not in temp_speakerList):
                temp_speakerList.append(speaker)
            speakerCLS = temp_speakerList.index(speaker)
            context_speaker.append(speakerCLS)
            # one sample per utterance: (speaker ids so far, utterances so far, emotion, sentiment)
            self.dialogs.append([context_speaker[:], context[:], emodict[emo], senti])
            self.emoSet.add(emodict[emo])
            self.sentiSet.add(senti)
        self.emoList = sorted(self.emoSet)
        self.sentiList = sorted(self.sentiSet)
        if (dataclass == 'emotion'):
            self.labelList = self.emoList
        else:
            self.labelList = self.sentiList
        self.speakerNum.append(len(temp_speakerList))
    def __len__(self):
        """Number of utterance-level samples."""
        return len(self.dialogs)
    def __getitem__(self, idx):
        """Return (dialog sample, active label list, sentiment-to-emotions map)."""
        return (self.dialogs[idx], self.labelList, self.sentidict)
|
class Emory_loader(Dataset):
    """EmoryNLP conversation dataset: one sample per utterance with cumulative dialogue context."""
    def __init__(self, txt_file, dataclass):
        """Parse a tab-separated EmoryNLP transcript (speaker / utterance / emotion per line; blank line ends a dialogue).

        dataclass: 'emotion' selects emotion labels; anything else selects sentiment labels.
        """
        self.dialogs = []
        f = open(txt_file, 'r')
        dataset = f.readlines()
        f.close()
        'sentiment'
        # emotion-to-sentiment grouping
        pos = ['Joyful', 'Peaceful', 'Powerful']
        neg = ['Mad', 'Sad', 'Scared']
        neu = ['Neutral']
        # canonical lowercase label names used by the model
        emodict = {'Joyful': 'joy', 'Mad': 'mad', 'Peaceful': 'peaceful', 'Powerful': 'powerful', 'Neutral': 'neutral', 'Sad': 'sad', 'Scared': 'scared'}
        self.sentidict = {'positive': pos, 'negative': neg, 'neutral': neu}
        temp_speakerList = []
        context = []
        context_speaker = []
        self.speakerNum = []
        self.emoSet = set()
        self.sentiSet = set()
        for (i, data) in enumerate(dataset):
            if ((data == '\n') and (len(self.dialogs) > 0)):
                # blank line: dialogue boundary -- reset the running context
                self.speakerNum.append(len(temp_speakerList))
                temp_speakerList = []
                context = []
                context_speaker = []
                continue
            (speaker, utt, emo) = data.strip().split('\t')
            context.append(utt)
            if (emo in pos):
                senti = 'positive'
            elif (emo in neg):
                senti = 'negative'
            elif (emo in neu):
                senti = 'neutral'
            else:
                # unknown emotion: senti keeps its previous value (or is unbound on the first line)
                print('ERROR emotion&sentiment')
            if (speaker not in temp_speakerList):
                temp_speakerList.append(speaker)
            speakerCLS = temp_speakerList.index(speaker)
            context_speaker.append(speakerCLS)
            # one sample per utterance: (speaker ids so far, utterances so far, emotion, sentiment)
            self.dialogs.append([context_speaker[:], context[:], emodict[emo], senti])
            self.emoSet.add(emodict[emo])
            self.sentiSet.add(senti)
        self.emoList = sorted(self.emoSet)
        self.sentiList = sorted(self.sentiSet)
        if (dataclass == 'emotion'):
            self.labelList = self.emoList
        else:
            self.labelList = self.sentiList
        self.speakerNum.append(len(temp_speakerList))
    def __len__(self):
        """Number of utterance-level samples."""
        return len(self.dialogs)
    def __getitem__(self, idx):
        """Return (dialog sample, active label list, sentiment-to-emotions map)."""
        return (self.dialogs[idx], self.labelList, self.sentidict)
|
class IEMOCAP_loader(Dataset):
    """IEMOCAP conversation dataset: one sample per utterance with cumulative dialogue context."""

    def __init__(self, txt_file, dataclass):
        """Parse a tab-separated IEMOCAP transcript.

        Each line is speaker, utterance field(s), emotion code, separated by
        tabs; a blank line ends a dialogue.
        dataclass: 'emotion' selects emotion labels; anything else selects sentiment labels.
        """
        self.dialogs = []
        with open(txt_file, 'r') as f:
            dataset = f.readlines()
        temp_speakerList = []
        context = []
        context_speaker = []
        self.speakerNum = []
        # emotion-code-to-sentiment grouping
        pos = ['exc', 'hap']
        neg = ['ang', 'fru', 'sad']
        neu = ['neu']
        # expand emotion codes to the full label names used by the model
        emodict = {'ang': 'angry', 'exc': 'excited', 'fru': 'frustrated', 'hap': 'happy', 'neu': 'neutral', 'sad': 'sad'}
        self.sentidict = {'positive': pos, 'negative': neg, 'neutral': neu}
        self.emoSet = set()
        self.sentiSet = set()
        for (i, data) in enumerate(dataset):
            if ((data == '\n') and (len(self.dialogs) > 0)):
                # blank line: dialogue boundary -- reset the running context
                self.speakerNum.append(len(temp_speakerList))
                temp_speakerList = []
                context = []
                context_speaker = []
                continue
            fields = data.strip().split('\t')
            speaker = fields[0]
            utt = ' '.join(fields[1:(-1)])
            emo = fields[(-1)]
            context.append(utt)
            if (emo in pos):
                senti = 'positive'
            elif (emo in neg):
                senti = 'negative'
            elif (emo in neu):
                senti = 'neutral'
            else:
                # unknown emotion: senti keeps its previous value (or is unbound on the first line)
                print('ERROR emotion&sentiment')
            if (speaker not in temp_speakerList):
                temp_speakerList.append(speaker)
            speakerCLS = temp_speakerList.index(speaker)
            context_speaker.append(speakerCLS)
            # one sample per utterance: (speaker ids so far, utterances so far, emotion, sentiment)
            self.dialogs.append([context_speaker[:], context[:], emodict[emo], senti])
            self.emoSet.add(emodict[emo])
            # BUG FIX: sentiSet was never populated (unlike MELD_loader/Emory_loader),
            # so self.sentiList stayed empty and sentiment-class training crashed on
            # label_list.index(sentiment) in the collate functions.
            self.sentiSet.add(senti)
        self.emoList = sorted(self.emoSet)
        self.sentiList = sorted(self.sentiSet)
        if (dataclass == 'emotion'):
            self.labelList = self.emoList
        else:
            self.labelList = self.sentiList
        self.speakerNum.append(len(temp_speakerList))

    def __len__(self):
        """Number of utterance-level samples."""
        return len(self.dialogs)

    def __getitem__(self, idx):
        """Return (dialog sample, active label list, sentiment-to-emotions map)."""
        return (self.dialogs[idx], self.labelList, self.sentidict)
|
class DD_loader(Dataset):
    """DailyDialog dataset: one sample per utterance with cumulative dialogue context."""

    def __init__(self, txt_file, dataclass):
        """Parse a tab-separated DailyDialog transcript.

        Each line is speaker, utterance field(s), emotion, separated by tabs;
        a blank line ends a dialogue.
        dataclass: 'emotion' selects emotion labels; anything else selects sentiment labels.
        """
        self.dialogs = []
        with open(txt_file, 'r') as f:
            dataset = f.readlines()
        temp_speakerList = []
        context = []
        context_speaker = []
        self.speakerNum = []
        self.emoSet = set()
        self.sentiSet = set()
        # emotion-to-sentiment grouping
        pos = ['happiness']
        neg = ['anger', 'disgust', 'fear', 'sadness']
        neu = ['neutral', 'surprise']
        # normalize raw emotion names ('happiness' -> 'happy', 'sadness' -> 'sad')
        emodict = {'anger': 'anger', 'disgust': 'disgust', 'fear': 'fear', 'happiness': 'happy', 'neutral': 'neutral', 'sadness': 'sad', 'surprise': 'surprise'}
        self.sentidict = {'positive': pos, 'negative': neg, 'neutral': neu}
        for (i, data) in enumerate(dataset):
            if ((data == '\n') and (len(self.dialogs) > 0)):
                # blank line: dialogue boundary -- reset the running context
                self.speakerNum.append(len(temp_speakerList))
                temp_speakerList = []
                context = []
                context_speaker = []
                continue
            fields = data.strip().split('\t')
            speaker = fields[0]
            utt = ' '.join(fields[1:(-1)])
            emo = fields[(-1)]
            if (emo in pos):
                senti = 'positive'
            elif (emo in neg):
                senti = 'negative'
            elif (emo in neu):
                senti = 'neutral'
            else:
                # unknown emotion: senti keeps its previous value (or is unbound on the first line)
                print('ERROR emotion&sentiment')
            context.append(utt)
            if (speaker not in temp_speakerList):
                temp_speakerList.append(speaker)
            speakerCLS = temp_speakerList.index(speaker)
            context_speaker.append(speakerCLS)
            # one sample per utterance: (speaker ids so far, utterances so far, emotion, sentiment)
            self.dialogs.append([context_speaker[:], context[:], emodict[emo], senti])
            self.emoSet.add(emodict[emo])
            # BUG FIX: sentiSet was never populated (unlike MELD_loader/Emory_loader),
            # so self.sentiList stayed empty and sentiment-class training crashed on
            # label_list.index(sentiment) in the collate functions.
            self.sentiSet.add(senti)
        self.emoList = sorted(self.emoSet)
        self.sentiList = sorted(self.sentiSet)
        if (dataclass == 'emotion'):
            self.labelList = self.emoList
        else:
            self.labelList = self.sentiList
        self.speakerNum.append(len(temp_speakerList))

    def __len__(self):
        """Number of utterance-level samples."""
        return len(self.dialogs)

    def __getitem__(self, idx):
        """Return (dialog sample, active label list, sentiment-to-emotions map)."""
        return (self.dialogs[idx], self.labelList, self.sentidict)
|
def CELoss(pred_outs, labels):
    """Standard cross-entropy loss over class logits.

    pred_outs: [batch, clsNum] unnormalized logits.
    labels: [batch] gold class indices.
    """
    criterion = nn.CrossEntropyLoss()
    return criterion(pred_outs, labels)
|
def main():
    """Train/evaluate the two-encoder (context + speaker) ERC model.

    Hyper-parameters come from the module-level ``args``.  Builds the dataset
    loaders and the matching collate_fn for the chosen encoder pair, trains
    with AdamW + linear warmup, evaluates on dev each epoch, and saves the
    model whenever the dev F-score improves.
    """
    'Dataset Loading'
    batch_size = args.batch
    dataset = args.dataset
    dataclass = args.cls
    sample = args.sample
    context_type = args.context_type
    speaker_type = args.speaker_type
    freeze = args.freeze
    dataType = 'multi'
    # pick dataset directory and loader class
    if (dataset == 'MELD'):
        if args.dyadic:
            dataType = 'dyadic'
        else:
            dataType = 'multi'
        data_path = (('../dataset/MELD/' + dataType) + '/')
        DATA_loader = MELD_loader
    elif (dataset == 'EMORY'):
        data_path = '../dataset/EMORY/'
        DATA_loader = Emory_loader
    elif (dataset == 'iemocap'):
        data_path = '../dataset/iemocap/'
        DATA_loader = IEMOCAP_loader
    elif (dataset == 'dailydialog'):
        data_path = '../dataset/dailydialog/'
        DATA_loader = DD_loader
    # collate_fn depends on the (context encoder, speaker encoder) pair
    if ((context_type == 'roberta-large') and (speaker_type == 'bert-large-uncased')):
        make_batch = make_batch_roberta_bert
    elif ((context_type == 'roberta-large') and ('gpt' in speaker_type)):
        make_batch = make_batch_roberta_gpt
    elif ((context_type == 'bert-large-uncased') and (speaker_type == 'roberta-large')):
        make_batch = make_batch_bert_roberta
    else:
        # NOTE(review): make_batch stays unbound here, so the DataLoader
        # construction below raises NameError for unsupported pairs.
        print('batch error')
    if freeze:
        freeze_type = 'freeze'
    else:
        freeze_type = 'no_freeze'
    train_path = ((data_path + dataset) + '_train.txt')
    dev_path = ((data_path + dataset) + '_dev.txt')
    test_path = ((data_path + dataset) + '_test.txt')
    train_dataset = DATA_loader(train_path, dataclass)
    train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=4, collate_fn=make_batch)
    train_sample_num = int((len(train_dataset) * sample))
    # dev/test use batch_size=1: _CalACC relies on .item() per batch
    dev_dataset = DATA_loader(dev_path, dataclass)
    dev_dataloader = DataLoader(dev_dataset, batch_size=1, shuffle=False, num_workers=4, collate_fn=make_batch)
    test_dataset = DATA_loader(test_path, dataclass)
    test_dataloader = DataLoader(test_dataset, batch_size=1, shuffle=False, num_workers=4, collate_fn=make_batch)
    'logging and path'
    save_path = os.path.join((dataset + '_models'), ((context_type + '_') + speaker_type), freeze_type, dataclass)
    print('###Save Path### ', save_path)
    log_path = os.path.join(save_path, 'train.log')
    if (not os.path.exists(save_path)):
        os.makedirs(save_path)
    # NOTE(review): handlers accumulate on the module-level logger; calling
    # main() twice in one process would duplicate every log line.
    fileHandler = logging.FileHandler(log_path)
    logger.addHandler(streamHandler)
    logger.addHandler(fileHandler)
    logger.setLevel(level=logging.DEBUG)
    'Model Loading'
    print('DataClass: ', dataclass, '!!!')
    clsNum = len(train_dataset.labelList)
    model = ERC_model(context_type, speaker_type, clsNum, freeze)
    model = model.cuda()
    model.train()
    'Training Setting'
    training_epochs = args.epoch
    save_term = int((training_epochs / 5))
    max_grad_norm = args.norm
    lr = args.lr
    num_training_steps = (len(train_dataset) * training_epochs)
    num_warmup_steps = len(train_dataset)
    optimizer = torch.optim.AdamW(model.train_params, lr=lr)
    scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps)
    'Input & Label Setting'
    (best_dev_fscore, best_test_fscore) = (0, 0)
    (best_dev_fscore_macro, best_dev_fscore_micro, best_test_fscore_macro, best_test_fscore_micro) = (0, 0, 0, 0)
    best_epoch = 0
    for epoch in tqdm(range(training_epochs)):
        model.train()
        for (i_batch, data) in enumerate(train_dataloader):
            # NOTE(review): i_batch counts *batches* but train_sample_num is a
            # *sample* count; with batch_size > 1 this trains on more samples
            # than args.sample suggests -- confirm intended.
            if (i_batch > train_sample_num):
                print(i_batch, train_sample_num)
                break
            'Prediction'
            (batch_input_tokens, batch_labels, batch_speaker_tokens) = data
            (batch_input_tokens, batch_labels) = (batch_input_tokens.cuda(), batch_labels.cuda())
            pred_logits = model(batch_input_tokens, batch_speaker_tokens)
            'Loss calculation & training'
            loss_val = CELoss(pred_logits, batch_labels)
            loss_val.backward()
            torch.nn.utils.clip_grad_norm_(model.parameters(), max_grad_norm)
            optimizer.step()
            scheduler.step()
            optimizer.zero_grad()
        'Dev & Test evaluation'
        model.eval()
        if (dataset == 'dailydialog'):
            (dev_acc, dev_pred_list, dev_label_list) = _CalACC(model, dev_dataloader)
            (dev_pre_macro, dev_rec_macro, dev_fbeta_macro, _) = precision_recall_fscore_support(dev_label_list, dev_pred_list, average='macro')
            # label 4 is excluded from micro-F1 -- presumably the majority
            # 'neutral' class for dailydialog; confirm against the label order.
            (dev_pre_micro, dev_rec_micro, dev_fbeta_micro, _) = precision_recall_fscore_support(dev_label_list, dev_pred_list, labels=[0, 1, 2, 3, 5, 6], average='micro')
            dev_fscore = (dev_fbeta_macro + dev_fbeta_micro)
            'Best Score & Model Save'
            if (dev_fscore > (best_dev_fscore_macro + best_dev_fscore_micro)):
                best_dev_fscore_macro = dev_fbeta_macro
                best_dev_fscore_micro = dev_fbeta_micro
                (test_acc, test_pred_list, test_label_list) = _CalACC(model, test_dataloader)
                (test_pre_macro, test_rec_macro, test_fbeta_macro, _) = precision_recall_fscore_support(test_label_list, test_pred_list, average='macro')
                (test_pre_micro, test_rec_micro, test_fbeta_micro, _) = precision_recall_fscore_support(test_label_list, test_pred_list, labels=[0, 1, 2, 3, 5, 6], average='micro')
                best_epoch = epoch
                _SaveModel(model, save_path)
        else:
            (dev_acc, dev_pred_list, dev_label_list) = _CalACC(model, dev_dataloader)
            (dev_pre, dev_rec, dev_fbeta, _) = precision_recall_fscore_support(dev_label_list, dev_pred_list, average='weighted')
            'Best Score & Model Save'
            if (dev_fbeta > best_dev_fscore):
                best_dev_fscore = dev_fbeta
                (test_acc, test_pred_list, test_label_list) = _CalACC(model, test_dataloader)
                (test_pre, test_rec, test_fbeta, _) = precision_recall_fscore_support(test_label_list, test_pred_list, average='weighted')
                best_epoch = epoch
                _SaveModel(model, save_path)
        logger.info('Epoch: {}'.format(epoch))
        if (dataset == 'dailydialog'):
            logger.info('Devleopment ## accuracy: {}, macro-fscore: {}, micro-fscore: {}'.format(dev_acc, dev_fbeta_macro, dev_fbeta_micro))
            logger.info('')
        else:
            logger.info('Devleopment ## accuracy: {}, precision: {}, recall: {}, fscore: {}'.format(dev_acc, dev_pre, dev_rec, dev_fbeta))
            logger.info('')
    # NOTE(review): test_* are unbound (NameError) if the dev score never
    # exceeded its initial 0 -- e.g. training_epochs == 0.
    if (dataset == 'dailydialog'):
        logger.info('Final Fscore ## test-accuracy: {}, test-macro: {}, test-micro: {}, test_epoch: {}'.format(test_acc, test_fbeta_macro, test_fbeta_micro, best_epoch))
    else:
        logger.info('Final Fscore ## test-accuracy: {}, test-fscore: {}, test_epoch: {}'.format(test_acc, test_fbeta, best_epoch))
|
def _CalACC(model, dataloader):
    """Evaluate *model* on *dataloader* (batch size must be 1).

    Returns (accuracy, predicted label list, gold label list); accuracy is
    hits / number of batches, which equals per-sample accuracy only because
    each batch holds a single sample.
    """
    model.eval()
    pred_list = []
    label_list = []
    hits = 0
    with torch.no_grad():
        for data in dataloader:
            batch_input_tokens, batch_labels, batch_speaker_tokens = data
            batch_input_tokens = batch_input_tokens.cuda()
            batch_labels = batch_labels.cuda()
            logits = model(batch_input_tokens, batch_speaker_tokens)
            pred = logits.argmax(1).item()
            gold = batch_labels.item()
            pred_list.append(pred)
            label_list.append(gold)
            hits += int(pred == gold)
    return (hits / len(dataloader), pred_list, label_list)
|
def _SaveModel(model, path):
    """Persist *model*'s state_dict to ``<path>/model.bin``, creating *path* if needed."""
    # exist_ok avoids the race between the former exists() check and makedirs().
    os.makedirs(path, exist_ok=True)
    torch.save(model.state_dict(), os.path.join(path, 'model.bin'))
|
def encode_right_truncated(text, tokenizer, max_length=511):
    """Tokenize *text*, keep only its last *max_length* tokens, and prepend
    the tokenizer's CLS id (BERT/RoBERTa-style: classification token first)."""
    tokens = tokenizer.tokenize(text)
    ids = tokenizer.convert_tokens_to_ids(tokens[-max_length:])
    return [tokenizer.cls_token_id] + ids
|
def padding(ids_list, tokenizer):
    """Right-pad every id sequence to the batch maximum with the pad token id
    and return the result as a 2-D LongTensor."""
    longest = max((len(ids) for ids in ids_list), default=0)
    padded = [ids + [tokenizer.pad_token_id] * (longest - len(ids)) for ids in ids_list]
    return torch.tensor(padded)
|
def encode_right_truncated_gpt(text, tokenizer, max_length=511):
    """Tokenize *text*, keep only its last *max_length* tokens, and append
    the CLS id (GPT-style: summary token last, since GPT attends left-to-right)."""
    tokens = tokenizer.tokenize(text)
    ids = tokenizer.convert_tokens_to_ids(tokens[-max_length:])
    return ids + [tokenizer.cls_token_id]
|
def padding_gpt(ids_list, tokenizer):
    """Left-pad every id sequence to the batch maximum with the pad token id
    (left padding keeps the final token position meaningful for GPT)."""
    longest = max((len(ids) for ids in ids_list), default=0)
    padded = [[tokenizer.pad_token_id] * (longest - len(ids)) + ids for ids in ids_list]
    return torch.tensor(padded)
|
def make_batch_roberta(sessions):
    """Collate fn (dialogue-level, RoBERTa for both context and speaker).

    Builds a speaker-tagged concatenation of each dialogue history as the
    context input, and pads the tokenizations of the current speaker's
    *earlier* utterances for the speaker-memory module.
    """
    batch_input = []
    batch_labels = []
    batch_speaker_tokens = []
    for data, label_list, _sentidict in sessions:
        context_speaker, context, emotion, sentiment = data
        now_speaker = context_speaker[-1]
        speaker_utt_list = []
        input_str = ''
        for turn, (speaker, utt) in enumerate(zip(context_speaker, context)):
            input_str += '<s' + str(speaker + 1) + '> '
            input_str += utt + ' '
            # collect earlier utterances spoken by the current (last) speaker
            if turn < len(context_speaker) - 1 and speaker == now_speaker:
                speaker_utt_list.append(encode_right_truncated(utt, roberta_tokenizer))
        batch_input.append(encode_right_truncated(input_str.strip(), roberta_tokenizer))
        # > 3 classes means the emotion label set, otherwise 3-way sentiment
        target = emotion if len(label_list) > 3 else sentiment
        batch_labels.append(label_list.index(target))
        batch_speaker_tokens.append(padding(speaker_utt_list, roberta_tokenizer))
    return (padding(batch_input, roberta_tokenizer), torch.tensor(batch_labels), batch_speaker_tokens)
|
def make_batch_bert(sessions):
    """Collate fn (dialogue-level, BERT for both context and speaker).

    Same construction as make_batch_roberta but tokenized with BERT.
    """
    batch_input = []
    batch_labels = []
    batch_speaker_tokens = []
    for data, label_list, _sentidict in sessions:
        context_speaker, context, emotion, sentiment = data
        now_speaker = context_speaker[-1]
        speaker_utt_list = []
        input_str = ''
        for turn, (speaker, utt) in enumerate(zip(context_speaker, context)):
            input_str += '<s' + str(speaker + 1) + '> '
            input_str += utt + ' '
            # collect earlier utterances spoken by the current (last) speaker
            if turn < len(context_speaker) - 1 and speaker == now_speaker:
                speaker_utt_list.append(encode_right_truncated(utt, bert_tokenizer))
        batch_input.append(encode_right_truncated(input_str.strip(), bert_tokenizer))
        target = emotion if len(label_list) > 3 else sentiment
        batch_labels.append(label_list.index(target))
        batch_speaker_tokens.append(padding(speaker_utt_list, bert_tokenizer))
    return (padding(batch_input, bert_tokenizer), torch.tensor(batch_labels), batch_speaker_tokens)
|
def make_batch_gpt(sessions):
    """Collate fn (dialogue-level, GPT for both context and speaker).

    GPT variants append CLS at the end and left-pad, so the summary token
    stays in the final position.
    """
    batch_input = []
    batch_labels = []
    batch_speaker_tokens = []
    for data, label_list, _sentidict in sessions:
        context_speaker, context, emotion, sentiment = data
        now_speaker = context_speaker[-1]
        speaker_utt_list = []
        input_str = ''
        for turn, (speaker, utt) in enumerate(zip(context_speaker, context)):
            input_str += '<s' + str(speaker + 1) + '> '
            input_str += utt + ' '
            # collect earlier utterances spoken by the current (last) speaker
            if turn < len(context_speaker) - 1 and speaker == now_speaker:
                speaker_utt_list.append(encode_right_truncated_gpt(utt, gpt_tokenizer, max_length=511))
        batch_input.append(encode_right_truncated_gpt(input_str.strip(), gpt_tokenizer, max_length=511))
        target = emotion if len(label_list) > 3 else sentiment
        batch_labels.append(label_list.index(target))
        batch_speaker_tokens.append(padding_gpt(speaker_utt_list, gpt_tokenizer))
    return (padding_gpt(batch_input, gpt_tokenizer), torch.tensor(batch_labels), batch_speaker_tokens)
|
def make_batch_roberta_bert(sessions):
    """Collate fn: RoBERTa encodes the dialogue context, BERT encodes the
    current speaker's earlier utterances."""
    batch_input = []
    batch_labels = []
    batch_speaker_tokens = []
    for data, label_list, _sentidict in sessions:
        context_speaker, context, emotion, sentiment = data
        now_speaker = context_speaker[-1]
        speaker_utt_list = []
        input_str = ''
        for turn, (speaker, utt) in enumerate(zip(context_speaker, context)):
            input_str += '<s' + str(speaker + 1) + '> '
            input_str += utt + ' '
            # collect earlier utterances spoken by the current (last) speaker
            if turn < len(context_speaker) - 1 and speaker == now_speaker:
                speaker_utt_list.append(encode_right_truncated(utt, bert_tokenizer, max_length=511))
        batch_input.append(encode_right_truncated(input_str.strip(), roberta_tokenizer, max_length=511))
        target = emotion if len(label_list) > 3 else sentiment
        batch_labels.append(label_list.index(target))
        batch_speaker_tokens.append(padding(speaker_utt_list, bert_tokenizer))
    return (padding(batch_input, roberta_tokenizer), torch.tensor(batch_labels), batch_speaker_tokens)
|
def make_batch_roberta_gpt(sessions):
    """Collate fn: RoBERTa encodes the dialogue context, GPT encodes the
    current speaker's earlier utterances (CLS appended, left-padded)."""
    batch_input = []
    batch_labels = []
    batch_speaker_tokens = []
    for data, label_list, _sentidict in sessions:
        context_speaker, context, emotion, sentiment = data
        now_speaker = context_speaker[-1]
        speaker_utt_list = []
        input_str = ''
        for turn, (speaker, utt) in enumerate(zip(context_speaker, context)):
            input_str += '<s' + str(speaker + 1) + '> '
            input_str += utt + ' '
            # collect earlier utterances spoken by the current (last) speaker
            if turn < len(context_speaker) - 1 and speaker == now_speaker:
                speaker_utt_list.append(encode_right_truncated_gpt(utt, gpt_tokenizer, max_length=511))
        batch_input.append(encode_right_truncated(input_str.strip(), roberta_tokenizer, max_length=511))
        target = emotion if len(label_list) > 3 else sentiment
        batch_labels.append(label_list.index(target))
        batch_speaker_tokens.append(padding_gpt(speaker_utt_list, gpt_tokenizer))
    return (padding(batch_input, roberta_tokenizer), torch.tensor(batch_labels), batch_speaker_tokens)
|
def make_batch_bert_roberta(sessions):
    """Collate fn: BERT encodes the dialogue context, RoBERTa encodes the
    current speaker's earlier utterances."""
    batch_input = []
    batch_labels = []
    batch_speaker_tokens = []
    for data, label_list, _sentidict in sessions:
        context_speaker, context, emotion, sentiment = data
        now_speaker = context_speaker[-1]
        speaker_utt_list = []
        input_str = ''
        for turn, (speaker, utt) in enumerate(zip(context_speaker, context)):
            input_str += '<s' + str(speaker + 1) + '> '
            input_str += utt + ' '
            # collect earlier utterances spoken by the current (last) speaker
            if turn < len(context_speaker) - 1 and speaker == now_speaker:
                speaker_utt_list.append(encode_right_truncated(utt, roberta_tokenizer, max_length=511))
        batch_input.append(encode_right_truncated(input_str.strip(), bert_tokenizer, max_length=511))
        target = emotion if len(label_list) > 3 else sentiment
        batch_labels.append(label_list.index(target))
        batch_speaker_tokens.append(padding(speaker_utt_list, roberta_tokenizer))
    return (padding(batch_input, bert_tokenizer), torch.tensor(batch_labels), batch_speaker_tokens)
|
class MELD_loader(Dataset):
    """Dialogue-level MELD loader.

    After a two-line header, each line is
    ``speaker\\tutterance\\temotion\\tsentiment``; blank lines separate
    dialogues.  Every utterance becomes a sample carrying the dialogue
    history up to and including that utterance.
    """

    def __init__(self, txt_file, dataclass):
        self.dialogs = []
        with open(txt_file, 'r') as fp:
            lines = fp.readlines()
        emodict = {'anger': 'anger', 'disgust': 'disgust', 'fear': 'fear', 'joy': 'joy', 'neutral': 'neutral', 'sadness': 'sad', 'surprise': 'surprise'}
        self.sentidict = {'positive': ['joy'], 'negative': ['anger', 'disgust', 'fear', 'sadness'], 'neutral': ['neutral', 'surprise']}
        self.speakerNum = []
        self.emoSet = set()
        self.sentiSet = set()
        speakers = []
        context = []
        context_speaker = []
        for idx, line in enumerate(lines):
            if idx < 2:
                continue  # two-line file header
            if line == '\n' and self.dialogs:
                # dialogue boundary: record speaker count, reset the history
                self.speakerNum.append(len(speakers))
                speakers = []
                context = []
                context_speaker = []
                continue
            speaker, utt, emo, senti = line.strip().split('\t')
            context.append(utt)
            if speaker not in speakers:
                speakers.append(speaker)
            context_speaker.append(speakers.index(speaker))
            self.dialogs.append([context_speaker[:], context[:], emodict[emo], senti])
            self.emoSet.add(emodict[emo])
            self.sentiSet.add(senti)
        self.emoList = sorted(self.emoSet)
        self.sentiList = sorted(self.sentiSet)
        self.labelList = self.emoList if dataclass == 'emotion' else self.sentiList
        self.speakerNum.append(len(speakers))

    def __len__(self):
        return len(self.dialogs)

    def __getitem__(self, idx):
        return (self.dialogs[idx], self.labelList, self.sentidict)
|
class Emory_loader(Dataset):
    """Dialogue-level EmoryNLP loader.

    Lines are ``speaker\\tutterance\\temotion``; blank lines separate scenes.
    Every utterance becomes one sample carrying the dialogue history so far.
    """

    def __init__(self, txt_file, dataclass):
        self.dialogs = []
        with open(txt_file, 'r') as fp:
            lines = fp.readlines()
        # sentiment groupings over the EmoryNLP emotion inventory
        pos = ['Joyful', 'Peaceful', 'Powerful']
        neg = ['Mad', 'Sad', 'Scared']
        neu = ['Neutral']
        emodict = {'Joyful': 'joy', 'Mad': 'mad', 'Peaceful': 'peaceful', 'Powerful': 'powerful', 'Neutral': 'neutral', 'Sad': 'sad', 'Scared': 'scared'}
        self.sentidict = {'positive': pos, 'negative': neg, 'neutral': neu}
        speakers = []
        context = []
        context_speaker = []
        self.speakerNum = []
        self.emoSet = set()
        self.sentiSet = set()
        for line in lines:
            if line == '\n' and self.dialogs:
                # scene boundary: record speaker count, reset the history
                self.speakerNum.append(len(speakers))
                speakers = []
                context = []
                context_speaker = []
                continue
            speaker, utt, emo = line.strip().split('\t')
            context.append(utt)
            if emo in pos:
                senti = 'positive'
            elif emo in neg:
                senti = 'negative'
            elif emo in neu:
                senti = 'neutral'
            else:
                print('ERROR emotion&sentiment')
            if speaker not in speakers:
                speakers.append(speaker)
            context_speaker.append(speakers.index(speaker))
            self.dialogs.append([context_speaker[:], context[:], emodict[emo], senti])
            self.emoSet.add(emodict[emo])
            self.sentiSet.add(senti)
        self.emoList = sorted(self.emoSet)
        self.sentiList = sorted(self.sentiSet)
        self.labelList = self.emoList if dataclass == 'emotion' else self.sentiList
        self.speakerNum.append(len(speakers))

    def __len__(self):
        return len(self.dialogs)

    def __getitem__(self, idx):
        return (self.dialogs[idx], self.labelList, self.sentidict)
|
class IEMOCAP_loader(Dataset):
    """Dialogue-level IEMOCAP loader.

    Lines are ``speaker\\tutterance...\\temotion``; blank lines separate
    dialogues.  Every utterance becomes one sample with its full history.
    """

    def __init__(self, txt_file, dataclass):
        self.dialogs = []
        with open(txt_file, 'r') as f:
            dataset = f.readlines()
        temp_speakerList = []
        context = []
        context_speaker = []
        self.speakerNum = []
        # BUGFIX: 'ang' (anger) was listed as positive; moved to negative to
        # match the utterance-level IEMOCAP loader defined later in this file.
        pos = ['exc', 'hap']
        neg = ['ang', 'fru', 'sad']
        neu = ['neu']
        emodict = {'ang': 'angry', 'exc': 'excited', 'fru': 'frustrated', 'hap': 'happy', 'neu': 'neutral', 'sad': 'sad'}
        self.sentidict = {'positive': pos, 'negative': neg, 'neutral': neu}
        self.emoSet = set()
        self.sentiSet = set()
        for (i, data) in enumerate(dataset):
            if ((data == '\n') and (len(self.dialogs) > 0)):
                # dialogue boundary: record #speakers and reset the history
                self.speakerNum.append(len(temp_speakerList))
                temp_speakerList = []
                context = []
                context_speaker = []
                continue
            parts = data.strip().split('\t')
            speaker = parts[0]
            utt = ' '.join(parts[1:(- 1)])
            emo = parts[(- 1)]
            context.append(utt)
            if (emo in pos):
                senti = 'positive'
            elif (emo in neg):
                senti = 'negative'
            elif (emo in neu):
                senti = 'neutral'
            else:
                print('ERROR emotion&sentiment')
            if (speaker not in temp_speakerList):
                temp_speakerList.append(speaker)
            speakerCLS = temp_speakerList.index(speaker)
            context_speaker.append(speakerCLS)
            self.dialogs.append([context_speaker[:], context[:], emodict[emo], senti])
            self.emoSet.add(emodict[emo])
            # BUGFIX: sentiSet was never populated, leaving labelList empty
            # when dataclass != 'emotion'.
            self.sentiSet.add(senti)
        self.emoList = sorted(self.emoSet)
        self.sentiList = sorted(self.sentiSet)
        if (dataclass == 'emotion'):
            self.labelList = self.emoList
        else:
            self.labelList = self.sentiList
        self.speakerNum.append(len(temp_speakerList))

    def __len__(self):
        return len(self.dialogs)

    def __getitem__(self, idx):
        # (sample, labelList, sentidict) consumed by the collate_fn
        return (self.dialogs[idx], self.labelList, self.sentidict)
|
class DD_loader(Dataset):
    """Dialogue-level DailyDialog loader (duplicate definition in this file).

    Each non-blank line of *txt_file* is ``speaker\\tutterance...\\temotion``;
    blank lines separate dialogues.  Every utterance yields one sample whose
    context is the dialogue history up to and including that utterance.
    """

    def __init__(self, txt_file, dataclass):
        self.dialogs = []
        with open(txt_file, 'r') as f:
            dataset = f.readlines()
        temp_speakerList = []
        context = []
        context_speaker = []
        self.speakerNum = []
        self.emoSet = set()
        self.sentiSet = set()
        pos = ['happiness']
        neg = ['anger', 'disgust', 'fear', 'sadness']
        neu = ['neutral', 'surprise']
        emodict = {'anger': 'anger', 'disgust': 'disgust', 'fear': 'fear', 'happiness': 'happy', 'neutral': 'neutral', 'sadness': 'sad', 'surprise': 'surprise'}
        self.sentidict = {'positive': pos, 'negative': neg, 'neutral': neu}
        for (i, data) in enumerate(dataset):
            if ((data == '\n') and (len(self.dialogs) > 0)):
                # dialogue boundary: record #speakers and reset the history
                self.speakerNum.append(len(temp_speakerList))
                temp_speakerList = []
                context = []
                context_speaker = []
                continue
            parts = data.strip().split('\t')
            speaker = parts[0]
            utt = ' '.join(parts[1:(- 1)])
            emo = parts[(- 1)]
            if (emo in pos):
                senti = 'positive'
            elif (emo in neg):
                senti = 'negative'
            elif (emo in neu):
                senti = 'neutral'
            else:
                print('ERROR emotion&sentiment')
            context.append(utt)
            if (speaker not in temp_speakerList):
                temp_speakerList.append(speaker)
            speakerCLS = temp_speakerList.index(speaker)
            context_speaker.append(speakerCLS)
            self.dialogs.append([context_speaker[:], context[:], emodict[emo], senti])
            self.emoSet.add(emodict[emo])
            # BUGFIX: sentiSet was never populated, leaving labelList empty
            # when dataclass != 'emotion'.
            self.sentiSet.add(senti)
        self.emoList = sorted(self.emoSet)
        self.sentiList = sorted(self.sentiSet)
        if (dataclass == 'emotion'):
            self.labelList = self.emoList
        else:
            self.labelList = self.sentiList
        self.speakerNum.append(len(temp_speakerList))

    def __len__(self):
        return len(self.dialogs)

    def __getitem__(self, idx):
        # (sample, labelList, sentidict) consumed by the collate_fn
        return (self.dialogs[idx], self.labelList, self.sentidict)
|
class MELD_loader(Dataset):
    """Utterance-level MELD loader: one independent sample per utterance,
    with no dialogue context.  After a two-line header, each line is
    ``speaker\\tutterance\\temotion\\tsentiment``.
    """

    def __init__(self, txt_file, dataclass):
        self.dialogs = []
        with open(txt_file, 'r') as fp:
            lines = fp.readlines()
        temp_speakerList = []
        self.speakerNum = []
        emodict = {'anger': 'anger', 'disgust': 'disgust', 'fear': 'fear', 'joy': 'joy', 'neutral': 'neutral', 'sadness': 'sad', 'surprise': 'surprise'}
        self.sentidict = {'positive': ['joy'], 'negative': ['anger', 'disgust', 'fear', 'sadness'], 'neutral': ['neutral', 'surprise']}
        self.emoSet = set()
        self.sentiSet = set()
        for idx, line in enumerate(lines):
            if idx < 2:
                continue  # two-line file header
            if line == '\n' and self.dialogs:
                continue  # dialogue separators carry no sample here
            speaker, utt, emo, senti = line.strip().split('\t')
            self.dialogs.append([utt, emodict[emo], senti])
            self.emoSet.add(emodict[emo])
            self.sentiSet.add(senti)
        self.emoList = sorted(self.emoSet)
        self.sentiList = sorted(self.sentiSet)
        self.labelList = self.emoList if dataclass == 'emotion' else self.sentiList
        self.speakerNum.append(len(temp_speakerList))

    def __len__(self):
        return len(self.dialogs)

    def __getitem__(self, idx):
        return (self.dialogs[idx], self.labelList, self.sentidict)
|
class Emory_loader(Dataset):
    """Utterance-level EmoryNLP loader: one independent sample per utterance.
    Lines are ``speaker\\tutterance\\temotion``; blank lines are skipped.
    """

    def __init__(self, txt_file, dataclass):
        self.dialogs = []
        with open(txt_file, 'r') as fp:
            lines = fp.readlines()
        # sentiment groupings over the EmoryNLP emotion inventory
        pos = ['Joyful', 'Peaceful', 'Powerful']
        neg = ['Mad', 'Sad', 'Scared']
        neu = ['Neutral']
        emodict = {'Joyful': 'joy', 'Mad': 'mad', 'Peaceful': 'peaceful', 'Powerful': 'powerful', 'Neutral': 'neutral', 'Sad': 'sad', 'Scared': 'scared'}
        self.sentidict = {'positive': pos, 'negative': neg, 'neutral': neu}
        self.emoSet = set()
        self.sentiSet = set()
        for line in lines:
            if line == '\n' and self.dialogs:
                continue  # scene separators carry no sample here
            speaker, utt, emo = line.strip().split('\t')
            if emo in pos:
                senti = 'positive'
            elif emo in neg:
                senti = 'negative'
            elif emo in neu:
                senti = 'neutral'
            else:
                print('ERROR emotion&sentiment')
            self.dialogs.append([utt, emodict[emo], senti])
            self.emoSet.add(emodict[emo])
            self.sentiSet.add(senti)
        self.emoList = sorted(self.emoSet)
        self.sentiList = sorted(self.sentiSet)
        self.labelList = self.emoList if dataclass == 'emotion' else self.sentiList

    def __len__(self):
        return len(self.dialogs)

    def __getitem__(self, idx):
        return (self.dialogs[idx], self.labelList, self.sentidict)
|
class IEMOCAP_loader(Dataset):
    """Utterance-level IEMOCAP loader: one independent sample per utterance.
    Lines are ``speaker\\tutterance...\\temotion``; blank lines are skipped.
    """

    def __init__(self, txt_file, dataclass):
        self.dialogs = []
        with open(txt_file, 'r') as f:
            dataset = f.readlines()
        self.speakerNum = []
        pos = ['exc', 'hap']
        neg = ['ang', 'fru', 'sad']
        neu = ['neu']
        emodict = {'ang': 'angry', 'exc': 'excited', 'fru': 'frustrated', 'hap': 'happy', 'neu': 'neutral', 'sad': 'sad'}
        self.sentidict = {'positive': pos, 'negative': neg, 'neutral': neu}
        self.emoSet = set()
        self.sentiSet = set()
        for (i, data) in enumerate(dataset):
            if ((data == '\n') and (len(self.dialogs) > 0)):
                continue
            parts = data.strip().split('\t')
            speaker = parts[0]
            utt = ' '.join(parts[1:(- 1)])
            emo = parts[(- 1)]
            if (emo in pos):
                senti = 'positive'
            elif (emo in neg):
                senti = 'negative'
            elif (emo in neu):
                senti = 'neutral'
            else:
                print('ERROR emotion&sentiment')
            self.dialogs.append([utt, emodict[emo], senti])
            self.emoSet.add(emodict[emo])
            # BUGFIX: sentiSet was never populated, leaving labelList empty
            # when dataclass != 'emotion'.
            self.sentiSet.add(senti)
        self.emoList = sorted(self.emoSet)
        self.sentiList = sorted(self.sentiSet)
        if (dataclass == 'emotion'):
            self.labelList = self.emoList
        else:
            self.labelList = self.sentiList

    def __len__(self):
        return len(self.dialogs)

    def __getitem__(self, idx):
        # (sample, labelList, sentidict) consumed by the collate_fn
        return (self.dialogs[idx], self.labelList, self.sentidict)
|
class DD_loader(Dataset):
    """Utterance-level DailyDialog loader: one independent sample per
    utterance.  Lines are ``speaker\\tutterance...\\temotion``; blank lines
    are skipped.
    """

    def __init__(self, txt_file, dataclass):
        self.dialogs = []
        with open(txt_file, 'r') as f:
            dataset = f.readlines()
        self.speakerNum = []
        self.emoSet = set()
        self.sentiSet = set()
        pos = ['happiness']
        neg = ['anger', 'disgust', 'fear', 'sadness']
        neu = ['neutral', 'surprise']
        emodict = {'anger': 'anger', 'disgust': 'disgust', 'fear': 'fear', 'happiness': 'happy', 'neutral': 'neutral', 'sadness': 'sad', 'surprise': 'surprise'}
        self.sentidict = {'positive': pos, 'negative': neg, 'neutral': neu}
        for (i, data) in enumerate(dataset):
            if ((data == '\n') and (len(self.dialogs) > 0)):
                continue
            parts = data.strip().split('\t')
            speaker = parts[0]
            utt = ' '.join(parts[1:(- 1)])
            emo = parts[(- 1)]
            if (emo in pos):
                senti = 'positive'
            elif (emo in neg):
                senti = 'negative'
            elif (emo in neu):
                senti = 'neutral'
            else:
                print('ERROR emotion&sentiment')
            self.dialogs.append([utt, emodict[emo], senti])
            self.emoSet.add(emodict[emo])
            # BUGFIX: sentiSet was never populated, leaving labelList empty
            # when dataclass != 'emotion'.
            self.sentiSet.add(senti)
        self.emoList = sorted(self.emoSet)
        self.sentiList = sorted(self.sentiSet)
        if (dataclass == 'emotion'):
            self.labelList = self.emoList
        else:
            self.labelList = self.sentiList

    def __len__(self):
        return len(self.dialogs)

    def __getitem__(self, idx):
        # (sample, labelList, sentidict) consumed by the collate_fn
        return (self.dialogs[idx], self.labelList, self.sentidict)
|
def CELoss(pred_outs, labels):
    """Standard cross-entropy loss over class logits.

    pred_outs: [batch, clsNum] unnormalized logits.
    labels: [batch] gold class indices.
    """
    criterion = nn.CrossEntropyLoss()
    return criterion(pred_outs, labels)
|
def main():
    """Train/evaluate the single-encoder (utterance-level) ERC baseline.

    Hyper-parameters come from the module-level ``args``.  Picks a dataset
    loader and a collate_fn for the chosen pretrained model, trains with
    AdamW + linear warmup, evaluates on dev each epoch, and saves the model
    whenever the dev F-score improves.
    """
    'Dataset Loading'
    batch_size = args.batch
    dataset = args.dataset
    dataclass = args.cls
    sample = args.sample
    model_type = args.pretrained
    dataType = 'multi'
    # pick dataset directory and loader class
    if (dataset == 'MELD'):
        if args.dyadic:
            dataType = 'dyadic'
        else:
            dataType = 'multi'
        data_path = (('../dataset/MELD/' + dataType) + '/')
        DATA_loader = MELD_loader
    elif (dataset == 'EMORY'):
        data_path = '../dataset/EMORY/'
        DATA_loader = Emory_loader
    elif (dataset == 'iemocap'):
        data_path = '../dataset/iemocap/'
        DATA_loader = IEMOCAP_loader
    elif (dataset == 'dailydialog'):
        data_path = '../dataset/dailydialog/'
        DATA_loader = DD_loader
    # collate_fn matches the pretrained tokenizer family
    if (model_type == 'roberta-large'):
        make_batch = make_batch_roberta
    elif (model_type == 'bert-large-uncased'):
        make_batch = make_batch_bert
    else:
        make_batch = make_batch_gpt
    train_path = ((data_path + dataset) + '_train.txt')
    dev_path = ((data_path + dataset) + '_dev.txt')
    test_path = ((data_path + dataset) + '_test.txt')
    train_dataset = DATA_loader(train_path, dataclass)
    train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=4, collate_fn=make_batch)
    train_sample_num = int((len(train_dataset) * sample))
    # dev/test use batch_size=1: _CalACC relies on .item() per batch
    dev_dataset = DATA_loader(dev_path, dataclass)
    dev_dataloader = DataLoader(dev_dataset, batch_size=1, shuffle=False, num_workers=4, collate_fn=make_batch)
    test_dataset = DATA_loader(test_path, dataclass)
    test_dataloader = DataLoader(test_dataset, batch_size=1, shuffle=False, num_workers=4, collate_fn=make_batch)
    'logging and path'
    save_path = os.path.join((dataset + '_models'), model_type, dataclass)
    print('###Save Path### ', save_path)
    log_path = os.path.join(save_path, 'train.log')
    if (not os.path.exists(save_path)):
        os.makedirs(save_path)
    # NOTE(review): handlers accumulate on the module-level logger; calling
    # main() twice in one process would duplicate every log line.
    fileHandler = logging.FileHandler(log_path)
    logger.addHandler(streamHandler)
    logger.addHandler(fileHandler)
    logger.setLevel(level=logging.DEBUG)
    'Model Loading'
    # GPT-2 summarizes at the *last* token (see encode_right_truncated_gpt)
    if ('gpt2' in model_type):
        last = True
    else:
        last = False
    print('DataClass: ', dataclass, '!!!')
    clsNum = len(train_dataset.labelList)
    model = ERC_model(model_type, clsNum, last)
    model = model.cuda()
    model.train()
    'Training Setting'
    training_epochs = args.epoch
    save_term = int((training_epochs / 5))
    max_grad_norm = args.norm
    lr = args.lr
    num_training_steps = (len(train_dataset) * training_epochs)
    num_warmup_steps = len(train_dataset)
    optimizer = torch.optim.AdamW(model.parameters(), lr=lr)
    scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps)
    'Input & Label Setting'
    (best_dev_fscore, best_test_fscore) = (0, 0)
    (best_dev_fscore_macro, best_dev_fscore_micro, best_test_fscore_macro, best_test_fscore_micro) = (0, 0, 0, 0)
    best_epoch = 0
    for epoch in tqdm(range(training_epochs)):
        model.train()
        for (i_batch, data) in enumerate(train_dataloader):
            # NOTE(review): i_batch counts *batches* but train_sample_num is a
            # *sample* count; with batch_size > 1 this trains on more samples
            # than args.sample suggests -- confirm intended.
            if (i_batch > train_sample_num):
                print(i_batch, train_sample_num)
                break
            'Prediction'
            (batch_input_tokens, batch_labels) = data
            (batch_input_tokens, batch_labels) = (batch_input_tokens.cuda(), batch_labels.cuda())
            pred_logits = model(batch_input_tokens)
            'Loss calculation & training'
            loss_val = CELoss(pred_logits, batch_labels)
            loss_val.backward()
            torch.nn.utils.clip_grad_norm_(model.parameters(), max_grad_norm)
            optimizer.step()
            scheduler.step()
            optimizer.zero_grad()
        'Dev & Test evaluation'
        model.eval()
        if (dataset == 'dailydialog'):
            (dev_acc, dev_pred_list, dev_label_list) = _CalACC(model, dev_dataloader)
            (dev_pre_macro, dev_rec_macro, dev_fbeta_macro, _) = precision_recall_fscore_support(dev_label_list, dev_pred_list, average='macro')
            # label 4 is excluded from micro-F1 -- presumably the majority
            # 'neutral' class for dailydialog; confirm against the label order.
            (dev_pre_micro, dev_rec_micro, dev_fbeta_micro, _) = precision_recall_fscore_support(dev_label_list, dev_pred_list, labels=[0, 1, 2, 3, 5, 6], average='micro')
            dev_fscore = (dev_fbeta_macro + dev_fbeta_micro)
            'Best Score & Model Save'
            if (dev_fscore > (best_dev_fscore_macro + best_dev_fscore_micro)):
                best_dev_fscore_macro = dev_fbeta_macro
                best_dev_fscore_micro = dev_fbeta_micro
                (test_acc, test_pred_list, test_label_list) = _CalACC(model, test_dataloader)
                (test_pre_macro, test_rec_macro, test_fbeta_macro, _) = precision_recall_fscore_support(test_label_list, test_pred_list, average='macro')
                (test_pre_micro, test_rec_micro, test_fbeta_micro, _) = precision_recall_fscore_support(test_label_list, test_pred_list, labels=[0, 1, 2, 3, 5, 6], average='micro')
                best_epoch = epoch
                _SaveModel(model, save_path)
        else:
            (dev_acc, dev_pred_list, dev_label_list) = _CalACC(model, dev_dataloader)
            (dev_pre, dev_rec, dev_fbeta, _) = precision_recall_fscore_support(dev_label_list, dev_pred_list, average='weighted')
            'Best Score & Model Save'
            if (dev_fbeta > best_dev_fscore):
                best_dev_fscore = dev_fbeta
                (test_acc, test_pred_list, test_label_list) = _CalACC(model, test_dataloader)
                (test_pre, test_rec, test_fbeta, _) = precision_recall_fscore_support(test_label_list, test_pred_list, average='weighted')
                best_epoch = epoch
                _SaveModel(model, save_path)
        logger.info('Epoch: {}'.format(epoch))
        if (dataset == 'dailydialog'):
            logger.info('Devleopment ## accuracy: {}, macro-fscore: {}, micro-fscore: {}'.format(dev_acc, dev_fbeta_macro, dev_fbeta_micro))
            logger.info('')
        else:
            logger.info('Devleopment ## accuracy: {}, precision: {}, recall: {}, fscore: {}'.format(dev_acc, dev_pre, dev_rec, dev_fbeta))
            logger.info('')
    # NOTE(review): test_* are unbound (NameError) if the dev score never
    # exceeded its initial 0 -- e.g. training_epochs == 0.
    if (dataset == 'dailydialog'):
        logger.info('Final Fscore ## test-accuracy: {}, test-macro: {}, test-micro: {}, test_epoch: {}'.format(test_acc, test_fbeta_macro, test_fbeta_micro, best_epoch))
    else:
        logger.info('Final Fscore ## test-accuracy: {}, test-fscore: {}, test_epoch: {}'.format(test_acc, test_fbeta, best_epoch))
|
def _CalACC(model, dataloader):
    """Evaluate *model* on *dataloader* (batch size must be 1).

    Returns (accuracy, predicted label list, gold label list); accuracy is
    hits / number of batches, which equals per-sample accuracy only because
    each batch holds a single sample.
    """
    model.eval()
    pred_list = []
    label_list = []
    hits = 0
    with torch.no_grad():
        for data in dataloader:
            batch_input_tokens, batch_labels = data
            batch_input_tokens = batch_input_tokens.cuda()
            batch_labels = batch_labels.cuda()
            logits = model(batch_input_tokens)
            pred = logits.argmax(1).item()
            gold = batch_labels.item()
            pred_list.append(pred)
            label_list.append(gold)
            hits += int(pred == gold)
    return (hits / len(dataloader), pred_list, label_list)
|
def _SaveModel(model, path):
if (not os.path.exists(path)):
os.makedirs(path)
torch.save(model.state_dict(), os.path.join(path, 'model.bin'))
|
def encode_right_truncated(text, tokenizer, max_length=511):
    """Tokenize *text*, keep only the last *max_length* tokens, prepend CLS."""
    tokens = tokenizer.tokenize(text)
    kept_ids = tokenizer.convert_tokens_to_ids(tokens[-max_length:])
    return [tokenizer.cls_token_id] + kept_ids
|
def padding(ids_list, tokenizer):
    """Right-pad every id sequence to the longest one and stack into a tensor."""
    longest = max((len(ids) for ids in ids_list), default=0)
    padded = [ids + [tokenizer.pad_token_id] * (longest - len(ids)) for ids in ids_list]
    return torch.tensor(padded)
|
def encode_right_truncated_gpt(text, tokenizer, max_length=511):
    """Tokenize *text*, keep only the last *max_length* tokens, append CLS.

    GPT-style variant: the classification token goes at the END so a
    last-token readout sees it.
    """
    tokens = tokenizer.tokenize(text)
    kept_ids = tokenizer.convert_tokens_to_ids(tokens[-max_length:])
    return kept_ids + [tokenizer.cls_token_id]
|
def padding_gpt(ids_list, tokenizer):
    """Left-pad every id sequence to the longest one and stack into a tensor.

    Padding goes on the LEFT so the trailing CLS token (see
    encode_right_truncated_gpt) stays in the final position.
    """
    longest = max((len(ids) for ids in ids_list), default=0)
    padded = [[tokenizer.pad_token_id] * (longest - len(ids)) + ids for ids in ids_list]
    return torch.tensor(padded)
|
def make_batch_roberta(sessions):
    """DataLoader collate function for the RoBERTa backbone.

    Each session is (data, label_list, sentidict) with data = (utt, emotion,
    sentiment). Returns (padded_token_tensor, label_index_tensor).
    """
    token_ids, label_ids = [], []
    for session in sessions:
        (utt, emotion, sentiment) = session[0]
        label_list = session[1]
        token_ids.append(encode_right_truncated(utt.strip(), roberta_tokenizer))
        # More than three candidate labels means emotion classification;
        # three or fewer means sentiment classification.
        target = emotion if len(label_list) > 3 else sentiment
        label_ids.append(label_list.index(target))
    batch_input_tokens = padding(token_ids, roberta_tokenizer)
    return (batch_input_tokens, torch.tensor(label_ids))
|
def make_batch_bert(sessions):
    """DataLoader collate function for the BERT backbone.

    Each session is (data, label_list, sentidict) with data = (utt, emotion,
    sentiment). Returns (padded_token_tensor, label_index_tensor).
    """
    (batch_input, batch_labels) = ([], [])
    for session in sessions:
        data = session[0]
        label_list = session[1]
        (utt, emotion, sentiment) = data
        batch_input.append(encode_right_truncated(utt.strip(), bert_tokenizer))
        # More than three candidate labels => emotion task, else sentiment task.
        if (len(label_list) > 3):
            label_ind = label_list.index(emotion)
        else:
            label_ind = label_list.index(sentiment)
        batch_labels.append(label_ind)
    # Fix: pad with bert_tokenizer. The original padded with roberta_tokenizer
    # (copy-paste from make_batch_roberta); BERT's pad id differs from
    # RoBERTa's, so the model saw a foreign token id as padding.
    batch_input_tokens = padding(batch_input, bert_tokenizer)
    batch_labels = torch.tensor(batch_labels)
    return (batch_input_tokens, batch_labels)
|
def make_batch_gpt(sessions):
    """DataLoader collate function for the GPT-2 backbone.

    Utterances are encoded with the CLS token appended LAST
    (encode_right_truncated_gpt), so sequences must be LEFT-padded to keep
    that token in the final position for a last-token readout.
    Returns (padded_token_tensor, label_index_tensor).
    """
    (batch_input, batch_labels) = ([], [])
    for session in sessions:
        data = session[0]
        label_list = session[1]
        (utt, emotion, sentiment) = data
        batch_input.append(encode_right_truncated_gpt(utt.strip(), gpt_tokenizer, max_length=511))
        # More than three candidate labels => emotion task, else sentiment task.
        if (len(label_list) > 3):
            label_ind = label_list.index(emotion)
        else:
            label_ind = label_list.index(sentiment)
        batch_labels.append(label_ind)
    # Fix: use padding_gpt with gpt_tokenizer. The original called
    # padding(..., roberta_tokenizer), which right-pads with RoBERTa's pad id,
    # pushing pad tokens AFTER the trailing CLS so the last-token readout saw
    # padding instead of CLS. (padding_gpt was defined but never used.)
    # NOTE(review): assumes gpt_tokenizer has a pad_token_id configured
    # elsewhere in this file — confirm.
    batch_input_tokens = padding_gpt(batch_input, gpt_tokenizer)
    batch_labels = torch.tensor(batch_labels)
    return (batch_input_tokens, batch_labels)
|
class MELD_loader(Dataset):
    """Utterance-level MELD dataset.

    Each item is (dialog, labelList, sentidict) where dialog is
    [utterance, emotion, sentiment]. labelList is the sorted emotion list
    when dataclass == 'emotion', else the sorted sentiment list.
    """

    def __init__(self, txt_file, dataclass):
        self.dialogs = []
        with open(txt_file, 'r') as f:
            dataset = f.readlines()
        temp_speakerList = []
        self.speakerNum = []  # vestigial: only ever holds a single 0
        emodict = {'anger': 'anger', 'disgust': 'disgust', 'fear': 'fear', 'joy': 'joy', 'neutral': 'neutral', 'sadness': 'sad', 'surprise': 'surprise'}
        self.sentidict = {'positive': ['joy'], 'negative': ['anger', 'disgust', 'fear', 'sadness'], 'neutral': ['neutral', 'surprise']}
        self.emoSet = set()
        self.sentiSet = set()
        for line_no, line in enumerate(dataset):
            if line_no < 2:
                continue  # first two lines are header rows
            if line == '\n' and self.dialogs:
                continue  # blank separator between dialogues
            speaker, utt, emo, senti = line.strip().split('\t')
            self.dialogs.append([utt, emodict[emo], senti])
            self.emoSet.add(emodict[emo])
            self.sentiSet.add(senti)
        self.emoList = sorted(self.emoSet)
        self.sentiList = sorted(self.sentiSet)
        self.labelList = self.emoList if dataclass == 'emotion' else self.sentiList
        self.speakerNum.append(len(temp_speakerList))

    def __len__(self):
        return len(self.dialogs)

    def __getitem__(self, idx):
        return (self.dialogs[idx], self.labelList, self.sentidict)
|
class Emory_loader(Dataset):
    """Utterance-level EmoryNLP dataset.

    Each item is (dialog, labelList, sentidict) where dialog is
    [utterance, emotion, sentiment]. labelList is the sorted emotion list
    when dataclass == 'emotion', else the sorted sentiment list.
    """

    def __init__(self, txt_file, dataclass):
        self.dialogs = []
        with open(txt_file, 'r') as f:
            dataset = f.readlines()
        # Emotion-to-sentiment grouping.
        pos = ['Joyful', 'Peaceful', 'Powerful']
        neg = ['Mad', 'Sad', 'Scared']
        neu = ['Neutral']
        emodict = {'Joyful': 'joy', 'Mad': 'mad', 'Peaceful': 'peaceful', 'Powerful': 'powerful', 'Neutral': 'neutral', 'Sad': 'sad', 'Scared': 'scared'}
        self.sentidict = {'positive': pos, 'negative': neg, 'neutral': neu}
        self.emoSet = set()
        self.sentiSet = set()
        for line in dataset:
            if line == '\n' and self.dialogs:
                continue  # blank separator between dialogues
            speaker, utt, emo = line.strip().split('\t')
            if emo in pos:
                senti = 'positive'
            elif emo in neg:
                senti = 'negative'
            elif emo in neu:
                senti = 'neutral'
            else:
                print('ERROR emotion&sentiment')
            self.dialogs.append([utt, emodict[emo], senti])
            self.emoSet.add(emodict[emo])
            self.sentiSet.add(senti)
        self.emoList = sorted(self.emoSet)
        self.sentiList = sorted(self.sentiSet)
        self.labelList = self.emoList if dataclass == 'emotion' else self.sentiList

    def __len__(self):
        return len(self.dialogs)

    def __getitem__(self, idx):
        return (self.dialogs[idx], self.labelList, self.sentidict)
|
class IEMOCAP_loader(Dataset):
    """Utterance-level IEMOCAP dataset.

    Each item is (dialog, labelList, sentidict) where dialog is
    [utterance, emotion, sentiment]. labelList is the sorted emotion list
    when dataclass == 'emotion', else the sorted sentiment list.
    """

    def __init__(self, txt_file, dataclass):
        self.dialogs = []
        with open(txt_file, 'r') as f:
            dataset = f.readlines()
        self.speakerNum = []  # kept for attribute parity with MELD_loader (never filled)
        # Emotion-to-sentiment grouping.
        pos = ['exc', 'hap']
        neg = ['ang', 'fru', 'sad']
        neu = ['neu']
        emodict = {'ang': 'angry', 'exc': 'excited', 'fru': 'frustrated', 'hap': 'happy', 'neu': 'neutral', 'sad': 'sad'}
        self.sentidict = {'positive': pos, 'negative': neg, 'neutral': neu}
        self.emoSet = set()
        self.sentiSet = set()
        for (i, data) in enumerate(dataset):
            if ((data == '\n') and (len(self.dialogs) > 0)):
                continue  # blank separator between dialogues
            # Line format: speaker \t utterance (may contain tabs) \t emotion
            speaker = data.strip().split('\t')[0]
            utt = ' '.join(data.strip().split('\t')[1:(- 1)])
            emo = data.strip().split('\t')[(- 1)]
            if (emo in pos):
                senti = 'positive'
            elif (emo in neg):
                senti = 'negative'
            elif (emo in neu):
                senti = 'neutral'
            else:
                print('ERROR emotion&sentiment')
            self.dialogs.append([utt, emodict[emo], senti])
            self.emoSet.add(emodict[emo])
            # Fix: track sentiments like the MELD/Emory loaders do. Without
            # this, sentiList stayed empty and the sentiment dataclass got an
            # empty labelList (crashing label lookup in the collate functions).
            self.sentiSet.add(senti)
        self.emoList = sorted(self.emoSet)
        self.sentiList = sorted(self.sentiSet)
        if (dataclass == 'emotion'):
            self.labelList = self.emoList
        else:
            self.labelList = self.sentiList

    def __len__(self):
        return len(self.dialogs)

    def __getitem__(self, idx):
        return (self.dialogs[idx], self.labelList, self.sentidict)
|
class DD_loader(Dataset):
    """Utterance-level DailyDialog dataset.

    Each item is (dialog, labelList, sentidict) where dialog is
    [utterance, emotion, sentiment]. labelList is the sorted emotion list
    when dataclass == 'emotion', else the sorted sentiment list.
    """

    def __init__(self, txt_file, dataclass):
        self.dialogs = []
        with open(txt_file, 'r') as f:
            dataset = f.readlines()
        self.speakerNum = []  # kept for attribute parity with MELD_loader (never filled)
        self.emoSet = set()
        self.sentiSet = set()
        # Emotion-to-sentiment grouping.
        pos = ['happiness']
        neg = ['anger', 'disgust', 'fear', 'sadness']
        neu = ['neutral', 'surprise']
        emodict = {'anger': 'anger', 'disgust': 'disgust', 'fear': 'fear', 'happiness': 'happy', 'neutral': 'neutral', 'sadness': 'sad', 'surprise': 'surprise'}
        self.sentidict = {'positive': pos, 'negative': neg, 'neutral': neu}
        for (i, data) in enumerate(dataset):
            if ((data == '\n') and (len(self.dialogs) > 0)):
                continue  # blank separator between dialogues
            # Line format: speaker \t utterance (may contain tabs) \t emotion
            speaker = data.strip().split('\t')[0]
            utt = ' '.join(data.strip().split('\t')[1:(- 1)])
            emo = data.strip().split('\t')[(- 1)]
            if (emo in pos):
                senti = 'positive'
            elif (emo in neg):
                senti = 'negative'
            elif (emo in neu):
                senti = 'neutral'
            else:
                print('ERROR emotion&sentiment')
            self.dialogs.append([utt, emodict[emo], senti])
            self.emoSet.add(emodict[emo])
            # Fix: track sentiments like the MELD/Emory loaders do. Without
            # this, sentiList stayed empty and the sentiment dataclass got an
            # empty labelList (crashing label lookup in the collate functions).
            self.sentiSet.add(senti)
        self.emoList = sorted(self.emoSet)
        self.sentiList = sorted(self.sentiSet)
        if (dataclass == 'emotion'):
            self.labelList = self.emoList
        else:
            self.labelList = self.sentiList

    def __len__(self):
        return len(self.dialogs)

    def __getitem__(self, idx):
        return (self.dialogs[idx], self.labelList, self.sentidict)
|
def CELoss(pred_outs, labels):
    """Mean cross-entropy over a batch of raw logits.

    pred_outs: [batch, clsNum] unnormalized class scores
    labels:    [batch] integer class indices
    """
    criterion = nn.CrossEntropyLoss()
    return criterion(pred_outs, labels)
|
def main():
    """Train an utterance-level ERC classifier and log dev/test scores.

    Reads hyper-parameters from the module-level ``args``, builds the dataset
    loaders and collate functions for the chosen corpus/backbone, trains
    ``ERC_model`` with AdamW + linear warmup, and saves the checkpoint with
    the best dev score via ``_SaveModel``.

    Relies on module-level globals: ``args``, ``logger``, ``streamHandler``,
    ``ERC_model``, the ``make_batch_*`` collators, the loader classes, and
    the tokenizers those collators use.
    """
    'Dataset Loading'
    batch_size = args.batch
    dataset = args.dataset  # one of: MELD / EMORY / iemocap / dailydialog
    dataclass = args.cls  # 'emotion' or sentiment-level classification
    sample = args.sample  # fraction of training data used per epoch
    model_type = args.pretrained
    dataType = 'multi'
    if (dataset == 'MELD'):
        if args.dyadic:
            dataType = 'dyadic'
        else:
            dataType = 'multi'
        data_path = (('../dataset/MELD/' + dataType) + '/')
        DATA_loader = MELD_loader
    elif (dataset == 'EMORY'):
        data_path = '../dataset/EMORY/'
        DATA_loader = Emory_loader
    elif (dataset == 'iemocap'):
        data_path = '../dataset/iemocap/'
        DATA_loader = IEMOCAP_loader
    elif (dataset == 'dailydialog'):
        data_path = '../dataset/dailydialog/'
        DATA_loader = DD_loader
    # The collate function must match the pretrained backbone's tokenizer.
    if (model_type == 'roberta-large'):
        make_batch = make_batch_roberta
    elif (model_type == 'bert-large-uncased'):
        make_batch = make_batch_bert
    else:
        make_batch = make_batch_gpt
    train_path = ((data_path + dataset) + '_train.txt')
    dev_path = ((data_path + dataset) + '_dev.txt')
    test_path = ((data_path + dataset) + '_test.txt')
    train_dataset = DATA_loader(train_path, dataclass)
    train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=4, collate_fn=make_batch)
    train_sample_num = int((len(train_dataset) * sample))
    # Dev/test use batch_size=1 because _CalACC calls .item() per batch.
    dev_dataset = DATA_loader(dev_path, dataclass)
    dev_dataloader = DataLoader(dev_dataset, batch_size=1, shuffle=False, num_workers=4, collate_fn=make_batch)
    test_dataset = DATA_loader(test_path, dataclass)
    test_dataloader = DataLoader(test_dataset, batch_size=1, shuffle=False, num_workers=4, collate_fn=make_batch)
    'logging and path'
    save_path = os.path.join((dataset + '_models'), model_type, dataclass)
    print('###Save Path### ', save_path)
    log_path = os.path.join(save_path, 'train.log')
    if (not os.path.exists(save_path)):
        os.makedirs(save_path)
    fileHandler = logging.FileHandler(log_path)
    logger.addHandler(streamHandler)  # NOTE(review): streamHandler is a module-level global — confirm it is defined at import time
    logger.addHandler(fileHandler)
    logger.setLevel(level=logging.DEBUG)
    'Model Loading'
    if ('gpt2' in model_type):
        # GPT-2 encodes the CLS token LAST, so the model reads the last position.
        last = True
    else:
        last = False
    print('DataClass: ', dataclass, '!!!')
    clsNum = len(train_dataset.labelList)
    model = ERC_model(model_type, clsNum, last)
    model = model.cuda()
    model.train()
    'Training Setting'
    training_epochs = args.epoch
    save_term = int((training_epochs / 5))  # NOTE(review): computed but never used below
    max_grad_norm = args.norm
    lr = args.lr
    num_training_steps = (len(train_dataset) * training_epochs)
    num_warmup_steps = len(train_dataset)  # roughly one epoch's worth of warmup steps
    optimizer = torch.optim.AdamW(model.parameters(), lr=lr)
    scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps)
    'Input & Label Setting'
    (best_dev_fscore, best_test_fscore) = (0, 0)
    (best_dev_fscore_macro, best_dev_fscore_micro, best_test_fscore_macro, best_test_fscore_micro) = (0, 0, 0, 0)
    best_epoch = 0
    for epoch in tqdm(range(training_epochs)):
        model.train()
        for (i_batch, data) in enumerate(train_dataloader):
            # Stop this epoch after train_sample_num batches (training subsampling).
            # NOTE(review): compares a batch index against an example count;
            # with batch_size > 1 this trains on more than `sample` of the data.
            if (i_batch > train_sample_num):
                print(i_batch, train_sample_num)
                break
            'Prediction'
            (batch_input_tokens, batch_labels) = data
            (batch_input_tokens, batch_labels) = (batch_input_tokens.cuda(), batch_labels.cuda())
            pred_logits = model(batch_input_tokens)
            'Loss calculation & training'
            loss_val = CELoss(pred_logits, batch_labels)
            loss_val.backward()
            torch.nn.utils.clip_grad_norm_(model.parameters(), max_grad_norm)
            optimizer.step()
            scheduler.step()
            optimizer.zero_grad()
        'Dev & Test evaluation'
        model.eval()
        if (dataset == 'dailydialog'):
            # DailyDialog convention: macro F1 plus micro F1 computed over
            # labels=[0,1,2,3,5,6] — one class index (4) is excluded,
            # presumably the majority 'neutral' class; verify against labelList order.
            (dev_acc, dev_pred_list, dev_label_list) = _CalACC(model, dev_dataloader)
            (dev_pre_macro, dev_rec_macro, dev_fbeta_macro, _) = precision_recall_fscore_support(dev_label_list, dev_pred_list, average='macro')
            (dev_pre_micro, dev_rec_micro, dev_fbeta_micro, _) = precision_recall_fscore_support(dev_label_list, dev_pred_list, labels=[0, 1, 2, 3, 5, 6], average='micro')
            dev_fscore = (dev_fbeta_macro + dev_fbeta_micro)
            'Best Score & Model Save'
            # Checkpoint selection: sum of macro and micro dev F1 must improve.
            if (dev_fscore > (best_dev_fscore_macro + best_dev_fscore_micro)):
                best_dev_fscore_macro = dev_fbeta_macro
                best_dev_fscore_micro = dev_fbeta_micro
                (test_acc, test_pred_list, test_label_list) = _CalACC(model, test_dataloader)
                (test_pre_macro, test_rec_macro, test_fbeta_macro, _) = precision_recall_fscore_support(test_label_list, test_pred_list, average='macro')
                (test_pre_micro, test_rec_micro, test_fbeta_micro, _) = precision_recall_fscore_support(test_label_list, test_pred_list, labels=[0, 1, 2, 3, 5, 6], average='micro')
                best_epoch = epoch
                _SaveModel(model, save_path)
        else:
            # Other corpora: weighted dev F1 selects the checkpoint.
            (dev_acc, dev_pred_list, dev_label_list) = _CalACC(model, dev_dataloader)
            (dev_pre, dev_rec, dev_fbeta, _) = precision_recall_fscore_support(dev_label_list, dev_pred_list, average='weighted')
            'Best Score & Model Save'
            if (dev_fbeta > best_dev_fscore):
                best_dev_fscore = dev_fbeta
                (test_acc, test_pred_list, test_label_list) = _CalACC(model, test_dataloader)
                (test_pre, test_rec, test_fbeta, _) = precision_recall_fscore_support(test_label_list, test_pred_list, average='weighted')
                best_epoch = epoch
                _SaveModel(model, save_path)
        logger.info('Epoch: {}'.format(epoch))
        if (dataset == 'dailydialog'):
            logger.info('Devleopment ## accuracy: {}, macro-fscore: {}, micro-fscore: {}'.format(dev_acc, dev_fbeta_macro, dev_fbeta_micro))
            logger.info('')
        else:
            logger.info('Devleopment ## accuracy: {}, precision: {}, recall: {}, fscore: {}'.format(dev_acc, dev_pre, dev_rec, dev_fbeta))
            logger.info('')
    # NOTE(review): the test_* variables are bound only when the dev score
    # improved at least once; if it never does, these lines raise NameError.
    if (dataset == 'dailydialog'):
        logger.info('Final Fscore ## test-accuracy: {}, test-macro: {}, test-micro: {}, test_epoch: {}'.format(test_acc, test_fbeta_macro, test_fbeta_micro, best_epoch))
    else:
        logger.info('Final Fscore ## test-accuracy: {}, test-fscore: {}, test_epoch: {}'.format(test_acc, test_fbeta, best_epoch))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.