code stringlengths 17 6.64M |
|---|
def main():
    """Generator training loop for Amazon sentiment style transfer.

    Loads the Amazon train+dev corpora for both sentiments, then alternates
    negative/positive sentences through a delete-and-reconstruct pipeline:
    the discriminator (`dismodel`) removes attribute tokens until its
    sentiment confidence drops below 0.7, and the generator (`genmodel`) is
    trained to reconstruct the original sentence from the deleted sequence,
    plus a classification loss on the generated distribution.

    Depends on module-level globals not defined in this block: `genmodel`,
    `dismodel`, `gpt_tokenizer`, `summary`, `save_model`, `json`, `torch`,
    `np`, `F`, `optim`, `random`, `tqdm`.
    """
    # Build id -> token map by inverting the saved vocabulary (token -> id).
    f = open('amazon_vocab.json')
    token2num = json.load(f)
    num2token = {}
    for (key, value) in token2num.items():
        num2token[value] = key
    f.close()
    data_path = '/DATA/joosung/sentiment_data/Sentiment-and-Style-Transfer-master/data'
    # Negative-sentiment pool: train + dev concatenated.
    train_amazon_neg_path = (data_path + '/amazon/sentiment.train.0')
    train_amazon_neg_open = open(train_amazon_neg_path, 'r')
    train_amazon_neg_dataset = train_amazon_neg_open.readlines()
    dev_amazon_neg_path = (data_path + '/amazon/sentiment.dev.0')
    dev_amazon_neg_open = open(dev_amazon_neg_path, 'r')
    dev_amazon_neg_dataset = dev_amazon_neg_open.readlines()
    amazon_neg_dataset = (train_amazon_neg_dataset + dev_amazon_neg_dataset)
    neg_len = len(amazon_neg_dataset)
    train_amazon_neg_open.close()
    dev_amazon_neg_open.close()
    # Positive-sentiment pool: train + dev concatenated.
    train_amazon_pos_path = (data_path + '/amazon/sentiment.train.1')
    train_amazon_pos_open = open(train_amazon_pos_path, 'r')
    train_amazon_pos_dataset = train_amazon_pos_open.readlines()
    dev_amazon_pos_path = (data_path + '/amazon/sentiment.dev.1')
    dev_amazon_pos_open = open(dev_amazon_pos_path, 'r')
    dev_amazon_pos_dataset = dev_amazon_pos_open.readlines()
    amazon_pos_dataset = (train_amazon_pos_dataset + dev_amazon_pos_dataset)
    pos_len = len(amazon_pos_dataset)
    train_amazon_pos_open.close()
    dev_amazon_pos_open.close()
    'training parameter'
    aed_initial_lr = 1e-05
    gen_initial_lr = 0.001
    aed_trainer = optim.Adamax(genmodel.aed_params, lr=aed_initial_lr)
    # NOTE(review): gen_trainer optimizes the SAME parameter set
    # (genmodel.aed_params) as aed_trainer, only at a different lr --
    # confirm this is intended and not meant to be a separate generator
    # parameter group.
    gen_trainer = optim.Adamax(genmodel.aed_params, lr=gen_initial_lr)
    max_grad_norm = 10
    batch = 1  # one sentence per polarity per step
    epoch = 6
    # One epoch covers the larger of the two pools; the smaller one wraps.
    epoch_len = max(pos_len, neg_len)
    stop_point = (epoch_len * epoch)
    pre_epoch = 0
    for start in tqdm(range(0, stop_point)):
        # NOTE(review): divides by pos_len although one epoch is epoch_len =
        # max(pos_len, neg_len); the value is unused below.
        now_epoch = ((start + 1) // pos_len)
        'data start point'
        # Wrap around each dataset independently.
        neg_start = (start % neg_len)
        pos_start = (start % pos_len)
        'data setting'
        neg_sentence = amazon_neg_dataset[neg_start].strip()
        pos_sentence = amazon_pos_dataset[pos_start].strip()
        # One-hot attribute labels: [1, 0] = negative, [0, 1] = positive.
        neg_labels = []
        neg_labels.append([1, 0])
        neg_attribute = torch.from_numpy(np.asarray(neg_labels)).type(torch.FloatTensor).cuda()
        pos_labels = []
        pos_labels.append([0, 1])
        pos_attribute = torch.from_numpy(np.asarray(pos_labels)).type(torch.FloatTensor).cuda()
        sentences = [neg_sentence, pos_sentence]
        attributes = [neg_attribute, pos_attribute]
        sentiments = [0, 1]
        'data input'
        for i in range(2):  # i = 0: negative sample, i = 1: positive sample
            sentence = sentences[i]
            attribute = attributes[i]
            fake_attribute = attributes[abs((1 - i))]  # opposite style (unused below)
            token_idx = torch.tensor(gpt_tokenizer.encode(sentence)).unsqueeze(0).cuda()
            # Delete at most half of the sentence's tokens.
            max_len = int((token_idx.shape[1] / 2))
            dis_out = dismodel.discriminator(token_idx)
            sentiment = dis_out.argmax(1).cpu().item()
            del_idx = token_idx
            # Iteratively remove attribute-bearing tokens until the
            # discriminator's confidence in the sentiment drops below 0.7.
            for k in range(max_len):
                del_idx = dismodel.att_prob(del_idx, sentiment)
                dis_out = dismodel.discriminator(del_idx)
                sent_porb = F.softmax(dis_out, 1).squeeze(0)[sentiment].cpu().detach().numpy().item()
                if (sent_porb < 0.7):
                    break
            'auto-encoder loss & traning'
            # Reconstruct the original sentence from the deleted sequence.
            enc_out = genmodel.encoder(del_idx)
            (dec_out, vocab_out) = genmodel.decoder(enc_out, token_idx, attribute)
            recon_loss = genmodel.recon_loss(token_idx, vocab_out)
            summary.add_scalar('reconstruction loss', recon_loss.item(), start)
            aed_trainer.zero_grad()
            # retain_graph: the same graph is reused by the classification
            # backward pass below.
            recon_loss.backward(retain_graph=True)
            grad_norm = torch.nn.utils.clip_grad_norm_(genmodel.aed_params, max_grad_norm)
            aed_trainer.step()
            'decoder classification loss & training'
            # Push the generated token distribution toward the target attribute.
            gen_cls_out = dismodel.gen_discriminator(vocab_out)
            gen_cls_loss = genmodel.cls_loss(attribute, gen_cls_out)
            summary.add_scalar('generated sentence loss', gen_cls_loss.item(), start)
            gen_trainer.zero_grad()
            gen_cls_loss.backward()
            grad_norm = torch.nn.utils.clip_grad_norm_(genmodel.aed_params, max_grad_norm)
            gen_trainer.step()
        'savining point'
        # End of epoch: reshuffle both pools and checkpoint.
        if (((start + 1) % epoch_len) == 0):
            random.shuffle(amazon_neg_dataset)
            random.shuffle(amazon_pos_dataset)
            save_model(((start + 1) // pos_len))
    save_model('final')
|
def save_model(iter):
    """Persist the generator weights to ``models/gen_model_<iter>``.

    Args:
        iter: checkpoint tag -- an epoch number or the string ``'final'``.
            (Name shadows the ``iter`` builtin; kept for caller compatibility.)
    """
    # exist_ok avoids the check-then-create race of an exists() guard and
    # is a no-op when the directory is already there.
    os.makedirs('models/', exist_ok=True)
    torch.save(genmodel.state_dict(), 'models/gen_model_{}'.format(iter))
|
def main():
    """Sentiment-classifier (discriminator) training loop on the Amazon
    corpus (train + dev splits, both polarities).

    Each step feeds one negative and one positive sentence through the
    discriminator and minimizes the classification loss.

    Depends on module-level globals not defined in this block: `dismodel`,
    `gpt_tokenizer`, `summary`, `save_model`, `json`, `torch`, `np`,
    `optim`, `random`, `tqdm`.
    """
    # Build id -> token map by inverting the saved vocabulary (token -> id).
    f = open('amazon_vocab.json')
    token2num = json.load(f)
    num2token = {}
    for (key, value) in token2num.items():
        num2token[value] = key
    f.close()
    data_path = '/DATA/joosung/sentiment_data/Sentiment-and-Style-Transfer-master/data'
    # Negative-sentiment pool: train + dev concatenated.
    train_amazon_neg_path = (data_path + '/amazon/sentiment.train.0')
    train_amazon_neg_open = open(train_amazon_neg_path, 'r')
    train_amazon_neg_dataset = train_amazon_neg_open.readlines()
    dev_amazon_neg_path = (data_path + '/amazon/sentiment.dev.0')
    dev_amazon_neg_open = open(dev_amazon_neg_path, 'r')
    dev_amazon_neg_dataset = dev_amazon_neg_open.readlines()
    amazon_neg_dataset = (train_amazon_neg_dataset + dev_amazon_neg_dataset)
    neg_len = len(amazon_neg_dataset)
    train_amazon_neg_open.close()
    dev_amazon_neg_open.close()
    # Positive-sentiment pool: train + dev concatenated.
    train_amazon_pos_path = (data_path + '/amazon/sentiment.train.1')
    train_amazon_pos_open = open(train_amazon_pos_path, 'r')
    train_amazon_pos_dataset = train_amazon_pos_open.readlines()
    dev_amazon_pos_path = (data_path + '/amazon/sentiment.dev.1')
    dev_amazon_pos_open = open(dev_amazon_pos_path, 'r')
    dev_amazon_pos_dataset = dev_amazon_pos_open.readlines()
    amazon_pos_dataset = (train_amazon_pos_dataset + dev_amazon_pos_dataset)
    pos_len = len(amazon_pos_dataset)
    train_amazon_pos_open.close()
    dev_amazon_pos_open.close()
    'training parameter'
    cls_initial_lr = 0.001
    cls_trainer = optim.Adamax(dismodel.cls_params, lr=cls_initial_lr)
    max_grad_norm = 25
    batch = 1  # one sentence per polarity per step
    epoch = 6
    # One "epoch" here is pos_len steps; the negative pool wraps independently.
    stop_point = (pos_len * epoch)
    pre_epoch = 0
    for start in tqdm(range(0, stop_point)):
        'data start point'
        # Wrap around each dataset independently.
        neg_start = (start % neg_len)
        pos_start = (start % pos_len)
        'data setting'
        neg_sentence = amazon_neg_dataset[neg_start].strip()
        pos_sentence = amazon_pos_dataset[pos_start].strip()
        # One-hot attribute labels: [1, 0] = negative, [0, 1] = positive.
        neg_labels = []
        neg_labels.append([1, 0])
        neg_attribute = torch.from_numpy(np.asarray(neg_labels)).type(torch.FloatTensor).cuda()
        pos_labels = []
        pos_labels.append([0, 1])
        pos_attribute = torch.from_numpy(np.asarray(pos_labels)).type(torch.FloatTensor).cuda()
        sentences = [neg_sentence, pos_sentence]
        attributes = [neg_attribute, pos_attribute]
        'data input'
        for i in range(2):  # i = 0: negative sample, i = 1: positive sample
            sentence = sentences[i]
            attribute = attributes[i]
            token_idx = torch.tensor(gpt_tokenizer.encode(sentence)).unsqueeze(0).cuda()
            dis_out = dismodel.discriminator(token_idx)
            'calculation loss & traning'
            cls_loss = dismodel.cls_loss(attribute, dis_out)
            summary.add_scalar('discriminator loss', cls_loss.item(), start)
            cls_trainer.zero_grad()
            cls_loss.backward()
            grad_norm = torch.nn.utils.clip_grad_norm_(dismodel.cls_params, max_grad_norm)
            cls_trainer.step()
        'savining point'
        # End of epoch: reshuffle both pools and checkpoint.
        if (((start + 1) % pos_len) == 0):
            random.shuffle(amazon_neg_dataset)
            random.shuffle(amazon_pos_dataset)
            save_model(((start + 1) // pos_len))
    save_model('final')
|
def save_model(iter):
    """Persist the discriminator weights to ``models/cls_model_<iter>``.

    Args:
        iter: checkpoint tag -- an epoch number or the string ``'final'``.
            (Name shadows the ``iter`` builtin; kept for caller compatibility.)
    """
    # exist_ok avoids the check-then-create race of an exists() guard and
    # is a no-op when the directory is already there.
    os.makedirs('models/', exist_ok=True)
    torch.save(dismodel.state_dict(), 'models/cls_model_{}'.format(iter))
|
def main():
    """Generator training loop for Amazon sentiment style transfer
    (duplicate of the earlier Amazon generator script in this file).

    Loads the Amazon train+dev corpora for both sentiments, then alternates
    negative/positive sentences through a delete-and-reconstruct pipeline:
    the discriminator (`dismodel`) removes attribute tokens until its
    sentiment confidence drops below 0.7, and the generator (`genmodel`) is
    trained to reconstruct the original sentence from the deleted sequence,
    plus a classification loss on the generated distribution.

    Depends on module-level globals not defined in this block: `genmodel`,
    `dismodel`, `gpt_tokenizer`, `summary`, `save_model`, `json`, `torch`,
    `np`, `F`, `optim`, `random`, `tqdm`.
    """
    # Build id -> token map by inverting the saved vocabulary (token -> id).
    f = open('amazon_vocab.json')
    token2num = json.load(f)
    num2token = {}
    for (key, value) in token2num.items():
        num2token[value] = key
    f.close()
    data_path = '/DATA/joosung/sentiment_data/Sentiment-and-Style-Transfer-master/data'
    # Negative-sentiment pool: train + dev concatenated.
    train_amazon_neg_path = (data_path + '/amazon/sentiment.train.0')
    train_amazon_neg_open = open(train_amazon_neg_path, 'r')
    train_amazon_neg_dataset = train_amazon_neg_open.readlines()
    dev_amazon_neg_path = (data_path + '/amazon/sentiment.dev.0')
    dev_amazon_neg_open = open(dev_amazon_neg_path, 'r')
    dev_amazon_neg_dataset = dev_amazon_neg_open.readlines()
    amazon_neg_dataset = (train_amazon_neg_dataset + dev_amazon_neg_dataset)
    neg_len = len(amazon_neg_dataset)
    train_amazon_neg_open.close()
    dev_amazon_neg_open.close()
    # Positive-sentiment pool: train + dev concatenated.
    train_amazon_pos_path = (data_path + '/amazon/sentiment.train.1')
    train_amazon_pos_open = open(train_amazon_pos_path, 'r')
    train_amazon_pos_dataset = train_amazon_pos_open.readlines()
    dev_amazon_pos_path = (data_path + '/amazon/sentiment.dev.1')
    dev_amazon_pos_open = open(dev_amazon_pos_path, 'r')
    dev_amazon_pos_dataset = dev_amazon_pos_open.readlines()
    amazon_pos_dataset = (train_amazon_pos_dataset + dev_amazon_pos_dataset)
    pos_len = len(amazon_pos_dataset)
    train_amazon_pos_open.close()
    dev_amazon_pos_open.close()
    'training parameter'
    aed_initial_lr = 1e-05
    gen_initial_lr = 0.001
    aed_trainer = optim.Adamax(genmodel.aed_params, lr=aed_initial_lr)
    # NOTE(review): gen_trainer optimizes the SAME parameter set
    # (genmodel.aed_params) as aed_trainer, only at a different lr --
    # confirm this is intended and not meant to be a separate generator
    # parameter group.
    gen_trainer = optim.Adamax(genmodel.aed_params, lr=gen_initial_lr)
    max_grad_norm = 10
    batch = 1  # one sentence per polarity per step
    epoch = 6
    # One epoch covers the larger of the two pools; the smaller one wraps.
    epoch_len = max(pos_len, neg_len)
    stop_point = (epoch_len * epoch)
    pre_epoch = 0
    for start in tqdm(range(0, stop_point)):
        # NOTE(review): divides by pos_len although one epoch is epoch_len =
        # max(pos_len, neg_len); the value is unused below.
        now_epoch = ((start + 1) // pos_len)
        'data start point'
        # Wrap around each dataset independently.
        neg_start = (start % neg_len)
        pos_start = (start % pos_len)
        'data setting'
        neg_sentence = amazon_neg_dataset[neg_start].strip()
        pos_sentence = amazon_pos_dataset[pos_start].strip()
        # One-hot attribute labels: [1, 0] = negative, [0, 1] = positive.
        neg_labels = []
        neg_labels.append([1, 0])
        neg_attribute = torch.from_numpy(np.asarray(neg_labels)).type(torch.FloatTensor).cuda()
        pos_labels = []
        pos_labels.append([0, 1])
        pos_attribute = torch.from_numpy(np.asarray(pos_labels)).type(torch.FloatTensor).cuda()
        sentences = [neg_sentence, pos_sentence]
        attributes = [neg_attribute, pos_attribute]
        sentiments = [0, 1]
        'data input'
        for i in range(2):  # i = 0: negative sample, i = 1: positive sample
            sentence = sentences[i]
            attribute = attributes[i]
            fake_attribute = attributes[abs((1 - i))]  # opposite style (unused below)
            token_idx = torch.tensor(gpt_tokenizer.encode(sentence)).unsqueeze(0).cuda()
            # Delete at most half of the sentence's tokens.
            max_len = int((token_idx.shape[1] / 2))
            dis_out = dismodel.discriminator(token_idx)
            sentiment = dis_out.argmax(1).cpu().item()
            del_idx = token_idx
            # Iteratively remove attribute-bearing tokens until the
            # discriminator's confidence in the sentiment drops below 0.7.
            for k in range(max_len):
                del_idx = dismodel.att_prob(del_idx, sentiment)
                dis_out = dismodel.discriminator(del_idx)
                sent_porb = F.softmax(dis_out, 1).squeeze(0)[sentiment].cpu().detach().numpy().item()
                if (sent_porb < 0.7):
                    break
            'auto-encoder loss & traning'
            # Reconstruct the original sentence from the deleted sequence.
            enc_out = genmodel.encoder(del_idx)
            (dec_out, vocab_out) = genmodel.decoder(enc_out, token_idx, attribute)
            recon_loss = genmodel.recon_loss(token_idx, vocab_out)
            summary.add_scalar('reconstruction loss', recon_loss.item(), start)
            aed_trainer.zero_grad()
            # retain_graph: the same graph is reused by the classification
            # backward pass below.
            recon_loss.backward(retain_graph=True)
            grad_norm = torch.nn.utils.clip_grad_norm_(genmodel.aed_params, max_grad_norm)
            aed_trainer.step()
            'decoder classification loss & training'
            # Push the generated token distribution toward the target attribute.
            gen_cls_out = dismodel.gen_discriminator(vocab_out)
            gen_cls_loss = genmodel.cls_loss(attribute, gen_cls_out)
            summary.add_scalar('generated sentence loss', gen_cls_loss.item(), start)
            gen_trainer.zero_grad()
            gen_cls_loss.backward()
            grad_norm = torch.nn.utils.clip_grad_norm_(genmodel.aed_params, max_grad_norm)
            gen_trainer.step()
        'savining point'
        # End of epoch: reshuffle both pools and checkpoint.
        if (((start + 1) % epoch_len) == 0):
            random.shuffle(amazon_neg_dataset)
            random.shuffle(amazon_pos_dataset)
            save_model(((start + 1) // pos_len))
    save_model('final')
|
def save_model(iter):
    """Persist the generator weights to ``models/gen_model_<iter>``.

    Args:
        iter: checkpoint tag -- an epoch number or the string ``'final'``.
            (Name shadows the ``iter`` builtin; kept for caller compatibility.)
    """
    # exist_ok avoids the check-then-create race of an exists() guard and
    # is a no-op when the directory is already there.
    os.makedirs('models/', exist_ok=True)
    torch.save(genmodel.state_dict(), 'models/gen_model_{}'.format(iter))
|
def main():
    """Generator training loop for Yelp sentiment style transfer
    (train split only).

    Same delete-and-reconstruct pipeline as the Amazon generator scripts in
    this file: the discriminator (`dismodel`) removes attribute tokens until
    its sentiment confidence drops below 0.7, and the generator (`genmodel`)
    is trained to reconstruct the original sentence from the deleted
    sequence, plus a classification loss on the generated distribution.

    Depends on module-level globals not defined in this block: `genmodel`,
    `dismodel`, `gpt_tokenizer`, `summary`, `save_model`, `json`, `torch`,
    `np`, `F`, `optim`, `random`, `tqdm`.
    """
    # Build id -> token map by inverting the saved vocabulary (token -> id).
    f = open('gpt_yelp_vocab.json')
    token2num = json.load(f)
    num2token = {}
    for (key, value) in token2num.items():
        num2token[value] = key
    f.close()
    data_path = '/DATA/joosung/sentiment_data/Sentiment-and-Style-Transfer-master/data'
    # Negative-sentiment training sentences.
    train_yelp_neg_path = (data_path + '/yelp/sentiment.train.0')
    train_yelp_neg_open = open(train_yelp_neg_path, 'r')
    train_yelp_neg_dataset = train_yelp_neg_open.readlines()
    yelp_neg_dataset = train_yelp_neg_dataset
    neg_len = len(yelp_neg_dataset)
    train_yelp_neg_open.close()
    # Positive-sentiment training sentences.
    train_yelp_pos_path = (data_path + '/yelp/sentiment.train.1')
    train_yelp_pos_open = open(train_yelp_pos_path, 'r')
    train_yelp_pos_dataset = train_yelp_pos_open.readlines()
    yelp_pos_dataset = train_yelp_pos_dataset
    pos_len = len(yelp_pos_dataset)
    train_yelp_pos_open.close()
    'training parameter'
    aed_initial_lr = 1e-05
    gen_initial_lr = 0.001
    aed_trainer = optim.Adamax(genmodel.aed_params, lr=aed_initial_lr)
    # NOTE(review): gen_trainer optimizes the SAME parameter set
    # (genmodel.aed_params) as aed_trainer, only at a different lr --
    # confirm this is intended.
    gen_trainer = optim.Adamax(genmodel.aed_params, lr=gen_initial_lr)
    max_grad_norm = 20
    batch = 1  # one sentence per polarity per step
    epoch = 6
    # One "epoch" here is pos_len steps; the negative pool wraps independently.
    stop_point = (pos_len * epoch)
    pre_epoch = 0
    for start in tqdm(range(0, stop_point)):
        now_epoch = ((start + 1) // pos_len)  # unused below
        'data start point'
        # Wrap around each dataset independently.
        neg_start = (start % neg_len)
        pos_start = (start % pos_len)
        'data setting'
        neg_sentence = yelp_neg_dataset[neg_start].strip()
        pos_sentence = yelp_pos_dataset[pos_start].strip()
        # One-hot attribute labels: [1, 0] = negative, [0, 1] = positive.
        neg_labels = []
        neg_labels.append([1, 0])
        neg_attribute = torch.from_numpy(np.asarray(neg_labels)).type(torch.FloatTensor).cuda()
        pos_labels = []
        pos_labels.append([0, 1])
        pos_attribute = torch.from_numpy(np.asarray(pos_labels)).type(torch.FloatTensor).cuda()
        sentences = [neg_sentence, pos_sentence]
        attributes = [neg_attribute, pos_attribute]
        sentiments = [0, 1]
        'data input'
        for i in range(2):  # i = 0: negative sample, i = 1: positive sample
            sentence = sentences[i]
            attribute = attributes[i]
            fake_attribute = attributes[abs((1 - i))]  # opposite style (unused below)
            token_idx = torch.tensor(gpt_tokenizer.encode(sentence)).unsqueeze(0).cuda()
            # Delete at most half of the sentence's tokens.
            max_len = int((token_idx.shape[1] / 2))
            dis_out = dismodel.discriminator(token_idx)
            sentiment = dis_out.argmax(1).cpu().item()
            del_idx = token_idx
            # Iteratively remove attribute-bearing tokens until the
            # discriminator's confidence in the sentiment drops below 0.7.
            for k in range(max_len):
                del_idx = dismodel.att_prob(del_idx, sentiment)
                dis_out = dismodel.discriminator(del_idx)
                sent_porb = F.softmax(dis_out, 1).squeeze(0)[sentiment].cpu().detach().numpy().item()
                if (sent_porb < 0.7):
                    break
            'auto-encoder loss & traning'
            # Reconstruct the original sentence from the deleted sequence.
            enc_out = genmodel.encoder(del_idx)
            (dec_out, vocab_out) = genmodel.decoder(enc_out, token_idx, attribute)
            recon_loss = genmodel.recon_loss(token_idx, vocab_out)
            summary.add_scalar('reconstruction loss', recon_loss.item(), start)
            aed_trainer.zero_grad()
            # retain_graph: the same graph is reused by the classification
            # backward pass below.
            recon_loss.backward(retain_graph=True)
            grad_norm = torch.nn.utils.clip_grad_norm_(genmodel.aed_params, max_grad_norm)
            aed_trainer.step()
            'decoder classification loss & training'
            # Push the generated token distribution toward the target attribute.
            gen_cls_out = dismodel.gen_discriminator(vocab_out)
            gen_cls_loss = genmodel.cls_loss(attribute, gen_cls_out)
            summary.add_scalar('generated sentence loss', gen_cls_loss.item(), start)
            gen_trainer.zero_grad()
            gen_cls_loss.backward()
            grad_norm = torch.nn.utils.clip_grad_norm_(genmodel.aed_params, max_grad_norm)
            gen_trainer.step()
        'savining point'
        # End of epoch: reshuffle both pools and checkpoint.
        if (((start + 1) % pos_len) == 0):
            random.shuffle(yelp_neg_dataset)
            random.shuffle(yelp_pos_dataset)
            save_model(((start + 1) // pos_len))
    save_model('final')
|
def save_model(iter):
    """Persist the generator weights to ``models/gen_model_<iter>``.

    Args:
        iter: checkpoint tag -- an epoch number or the string ``'final'``.
            (Name shadows the ``iter`` builtin; kept for caller compatibility.)
    """
    # exist_ok avoids the check-then-create race of an exists() guard and
    # is a no-op when the directory is already there.
    os.makedirs('models/', exist_ok=True)
    torch.save(genmodel.state_dict(), 'models/gen_model_{}'.format(iter))
|
def main():
    """Sentiment-classifier (discriminator) training loop on the Yelp
    train split.

    Each step feeds one negative and one positive sentence through the
    discriminator and minimizes the classification loss, with a learning
    rate reduction in the final epoch.

    Depends on module-level globals not defined in this block: `dismodel`,
    `gpt_tokenizer`, `summary`, `save_model`, `json`, `torch`, `np`,
    `optim`, `random`, `tqdm`.
    """
    # Build id -> token map by inverting the saved vocabulary (token -> id).
    f = open('../gpt_yelp_vocab.json')
    token2num = json.load(f)
    num2token = {}
    for (key, value) in token2num.items():
        num2token[value] = key
    f.close()
    data_path = '/DATA/joosung/sentiment_data/Sentiment-and-Style-Transfer-master/data'
    # Negative-sentiment training sentences.
    yelp_neg_path = (data_path + '/yelp/sentiment.train.0')
    yelp_neg_open = open(yelp_neg_path, 'r')
    yelp_neg_dataset = yelp_neg_open.readlines()
    neg_len = len(yelp_neg_dataset)
    yelp_neg_open.close()
    # Positive-sentiment training sentences.
    yelp_pos_path = (data_path + '/yelp/sentiment.train.1')
    yelp_pos_open = open(yelp_pos_path, 'r')
    yelp_pos_dataset = yelp_pos_open.readlines()
    pos_len = len(yelp_pos_dataset)
    yelp_pos_open.close()
    'training parameter'
    cls_initial_lr = 0.001
    cls_trainer = optim.Adamax(dismodel.cls_params, lr=cls_initial_lr)
    max_grad_norm = 25
    batch = 1  # one sentence per polarity per step
    epoch = 5
    # One "epoch" here is pos_len steps; the negative pool wraps independently.
    stop_point = (pos_len * epoch)
    pre_epoch = 0
    for start in tqdm(range(0, stop_point)):
        now_epoch = ((start + 1) // pos_len)
        # NOTE(review): this condition holds on EVERY step of epoch 4, so the
        # learning rate is halved once per step (not once per epoch) and the
        # optimizer is re-created (losing its moment estimates) each time --
        # confirm this is the intended schedule.
        if (now_epoch == 4):
            cls_initial_lr = (cls_initial_lr / 2)
            cls_trainer = optim.Adamax(dismodel.cls_params, lr=cls_initial_lr)
        'data start point'
        # Wrap around each dataset independently.
        neg_start = (start % neg_len)
        pos_start = (start % pos_len)
        'data setting'
        neg_sentence = yelp_neg_dataset[neg_start].strip()
        pos_sentence = yelp_pos_dataset[pos_start].strip()
        # One-hot attribute labels: [1, 0] = negative, [0, 1] = positive.
        neg_labels = []
        neg_labels.append([1, 0])
        neg_attribute = torch.from_numpy(np.asarray(neg_labels)).type(torch.FloatTensor).cuda()
        pos_labels = []
        pos_labels.append([0, 1])
        pos_attribute = torch.from_numpy(np.asarray(pos_labels)).type(torch.FloatTensor).cuda()
        sentences = [neg_sentence, pos_sentence]
        attributes = [neg_attribute, pos_attribute]
        'data input'
        for i in range(2):  # i = 0: negative sample, i = 1: positive sample
            sentence = sentences[i]
            attribute = attributes[i]
            token_idx = torch.tensor(gpt_tokenizer.encode(sentence)).unsqueeze(0).cuda()
            dis_out = dismodel.discriminator(token_idx)
            'calculation loss & traning'
            cls_loss = dismodel.cls_loss(attribute, dis_out)
            summary.add_scalar('discriminator loss', cls_loss.item(), start)
            cls_trainer.zero_grad()
            cls_loss.backward()
            grad_norm = torch.nn.utils.clip_grad_norm_(dismodel.cls_params, max_grad_norm)
            cls_trainer.step()
        'savining point'
        # End of epoch: reshuffle both pools and checkpoint.
        if (((start + 1) % pos_len) == 0):
            random.shuffle(yelp_neg_dataset)
            random.shuffle(yelp_pos_dataset)
            save_model(((start + 1) // pos_len))
    save_model('final')
|
def save_model(iter):
    """Persist the discriminator weights to ``models/cls_model_<iter>``.

    Args:
        iter: checkpoint tag -- an epoch number or the string ``'final'``.
            (Name shadows the ``iter`` builtin; kept for caller compatibility.)
    """
    # exist_ok avoids the check-then-create race of an exists() guard and
    # is a no-op when the directory is already there.
    os.makedirs('models/', exist_ok=True)
    torch.save(dismodel.state_dict(), 'models/cls_model_{}'.format(iter))
|
def main():
    """Sentiment-classifier (discriminator) training loop on the Yelp
    train split (duplicate of the previous classifier script in this file).

    Each step feeds one negative and one positive sentence through the
    discriminator and minimizes the classification loss, with a learning
    rate reduction in the final epoch.

    Depends on module-level globals not defined in this block: `dismodel`,
    `gpt_tokenizer`, `summary`, `save_model`, `json`, `torch`, `np`,
    `optim`, `random`, `tqdm`.
    """
    # Build id -> token map by inverting the saved vocabulary (token -> id).
    f = open('../gpt_yelp_vocab.json')
    token2num = json.load(f)
    num2token = {}
    for (key, value) in token2num.items():
        num2token[value] = key
    f.close()
    data_path = '/DATA/joosung/sentiment_data/Sentiment-and-Style-Transfer-master/data'
    # Negative-sentiment training sentences.
    yelp_neg_path = (data_path + '/yelp/sentiment.train.0')
    yelp_neg_open = open(yelp_neg_path, 'r')
    yelp_neg_dataset = yelp_neg_open.readlines()
    neg_len = len(yelp_neg_dataset)
    yelp_neg_open.close()
    # Positive-sentiment training sentences.
    yelp_pos_path = (data_path + '/yelp/sentiment.train.1')
    yelp_pos_open = open(yelp_pos_path, 'r')
    yelp_pos_dataset = yelp_pos_open.readlines()
    pos_len = len(yelp_pos_dataset)
    yelp_pos_open.close()
    'training parameter'
    cls_initial_lr = 0.001
    cls_trainer = optim.Adamax(dismodel.cls_params, lr=cls_initial_lr)
    max_grad_norm = 25
    batch = 1  # one sentence per polarity per step
    epoch = 5
    # One "epoch" here is pos_len steps; the negative pool wraps independently.
    stop_point = (pos_len * epoch)
    pre_epoch = 0
    for start in tqdm(range(0, stop_point)):
        now_epoch = ((start + 1) // pos_len)
        # NOTE(review): this condition holds on EVERY step of epoch 4, so the
        # learning rate is halved once per step (not once per epoch) and the
        # optimizer is re-created (losing its moment estimates) each time --
        # confirm this is the intended schedule.
        if (now_epoch == 4):
            cls_initial_lr = (cls_initial_lr / 2)
            cls_trainer = optim.Adamax(dismodel.cls_params, lr=cls_initial_lr)
        'data start point'
        # Wrap around each dataset independently.
        neg_start = (start % neg_len)
        pos_start = (start % pos_len)
        'data setting'
        neg_sentence = yelp_neg_dataset[neg_start].strip()
        pos_sentence = yelp_pos_dataset[pos_start].strip()
        # One-hot attribute labels: [1, 0] = negative, [0, 1] = positive.
        neg_labels = []
        neg_labels.append([1, 0])
        neg_attribute = torch.from_numpy(np.asarray(neg_labels)).type(torch.FloatTensor).cuda()
        pos_labels = []
        pos_labels.append([0, 1])
        pos_attribute = torch.from_numpy(np.asarray(pos_labels)).type(torch.FloatTensor).cuda()
        sentences = [neg_sentence, pos_sentence]
        attributes = [neg_attribute, pos_attribute]
        'data input'
        for i in range(2):  # i = 0: negative sample, i = 1: positive sample
            sentence = sentences[i]
            attribute = attributes[i]
            token_idx = torch.tensor(gpt_tokenizer.encode(sentence)).unsqueeze(0).cuda()
            dis_out = dismodel.discriminator(token_idx)
            'calculation loss & traning'
            cls_loss = dismodel.cls_loss(attribute, dis_out)
            summary.add_scalar('discriminator loss', cls_loss.item(), start)
            cls_trainer.zero_grad()
            cls_loss.backward()
            grad_norm = torch.nn.utils.clip_grad_norm_(dismodel.cls_params, max_grad_norm)
            cls_trainer.step()
        'savining point'
        # End of epoch: reshuffle both pools and checkpoint.
        if (((start + 1) % pos_len) == 0):
            random.shuffle(yelp_neg_dataset)
            random.shuffle(yelp_pos_dataset)
            save_model(((start + 1) // pos_len))
    save_model('final')
|
def save_model(iter):
    """Persist the discriminator weights to ``models/cls_model_<iter>``.

    Args:
        iter: checkpoint tag -- an epoch number or the string ``'final'``.
            (Name shadows the ``iter`` builtin; kept for caller compatibility.)
    """
    # exist_ok avoids the check-then-create race of an exists() guard and
    # is a no-op when the directory is already there.
    os.makedirs('models/', exist_ok=True)
    torch.save(dismodel.state_dict(), 'models/cls_model_{}'.format(iter))
|
def main():
    """Generator training loop for Yelp sentiment style transfer
    (train split only; duplicate of the earlier Yelp generator script).

    Same delete-and-reconstruct pipeline as the other generator scripts in
    this file: the discriminator (`dismodel`) removes attribute tokens until
    its sentiment confidence drops below 0.7, and the generator (`genmodel`)
    is trained to reconstruct the original sentence from the deleted
    sequence, plus a classification loss on the generated distribution.

    Depends on module-level globals not defined in this block: `genmodel`,
    `dismodel`, `gpt_tokenizer`, `summary`, `save_model`, `json`, `torch`,
    `np`, `F`, `optim`, `random`, `tqdm`.
    """
    # Build id -> token map by inverting the saved vocabulary (token -> id).
    f = open('gpt_yelp_vocab.json')
    token2num = json.load(f)
    num2token = {}
    for (key, value) in token2num.items():
        num2token[value] = key
    f.close()
    data_path = '/DATA/joosung/sentiment_data/Sentiment-and-Style-Transfer-master/data'
    # Negative-sentiment training sentences.
    train_yelp_neg_path = (data_path + '/yelp/sentiment.train.0')
    train_yelp_neg_open = open(train_yelp_neg_path, 'r')
    train_yelp_neg_dataset = train_yelp_neg_open.readlines()
    yelp_neg_dataset = train_yelp_neg_dataset
    neg_len = len(yelp_neg_dataset)
    train_yelp_neg_open.close()
    # Positive-sentiment training sentences.
    train_yelp_pos_path = (data_path + '/yelp/sentiment.train.1')
    train_yelp_pos_open = open(train_yelp_pos_path, 'r')
    train_yelp_pos_dataset = train_yelp_pos_open.readlines()
    yelp_pos_dataset = train_yelp_pos_dataset
    pos_len = len(yelp_pos_dataset)
    train_yelp_pos_open.close()
    'training parameter'
    aed_initial_lr = 1e-05
    gen_initial_lr = 0.001
    aed_trainer = optim.Adamax(genmodel.aed_params, lr=aed_initial_lr)
    # NOTE(review): gen_trainer optimizes the SAME parameter set
    # (genmodel.aed_params) as aed_trainer, only at a different lr --
    # confirm this is intended.
    gen_trainer = optim.Adamax(genmodel.aed_params, lr=gen_initial_lr)
    max_grad_norm = 20
    batch = 1  # one sentence per polarity per step
    epoch = 6
    # One "epoch" here is pos_len steps; the negative pool wraps independently.
    stop_point = (pos_len * epoch)
    pre_epoch = 0
    for start in tqdm(range(0, stop_point)):
        now_epoch = ((start + 1) // pos_len)  # unused below
        'data start point'
        # Wrap around each dataset independently.
        neg_start = (start % neg_len)
        pos_start = (start % pos_len)
        'data setting'
        neg_sentence = yelp_neg_dataset[neg_start].strip()
        pos_sentence = yelp_pos_dataset[pos_start].strip()
        # One-hot attribute labels: [1, 0] = negative, [0, 1] = positive.
        neg_labels = []
        neg_labels.append([1, 0])
        neg_attribute = torch.from_numpy(np.asarray(neg_labels)).type(torch.FloatTensor).cuda()
        pos_labels = []
        pos_labels.append([0, 1])
        pos_attribute = torch.from_numpy(np.asarray(pos_labels)).type(torch.FloatTensor).cuda()
        sentences = [neg_sentence, pos_sentence]
        attributes = [neg_attribute, pos_attribute]
        sentiments = [0, 1]
        'data input'
        for i in range(2):  # i = 0: negative sample, i = 1: positive sample
            sentence = sentences[i]
            attribute = attributes[i]
            fake_attribute = attributes[abs((1 - i))]  # opposite style (unused below)
            token_idx = torch.tensor(gpt_tokenizer.encode(sentence)).unsqueeze(0).cuda()
            # Delete at most half of the sentence's tokens.
            max_len = int((token_idx.shape[1] / 2))
            dis_out = dismodel.discriminator(token_idx)
            sentiment = dis_out.argmax(1).cpu().item()
            del_idx = token_idx
            # Iteratively remove attribute-bearing tokens until the
            # discriminator's confidence in the sentiment drops below 0.7.
            for k in range(max_len):
                del_idx = dismodel.att_prob(del_idx, sentiment)
                dis_out = dismodel.discriminator(del_idx)
                sent_porb = F.softmax(dis_out, 1).squeeze(0)[sentiment].cpu().detach().numpy().item()
                if (sent_porb < 0.7):
                    break
            'auto-encoder loss & traning'
            # Reconstruct the original sentence from the deleted sequence.
            enc_out = genmodel.encoder(del_idx)
            (dec_out, vocab_out) = genmodel.decoder(enc_out, token_idx, attribute)
            recon_loss = genmodel.recon_loss(token_idx, vocab_out)
            summary.add_scalar('reconstruction loss', recon_loss.item(), start)
            aed_trainer.zero_grad()
            # retain_graph: the same graph is reused by the classification
            # backward pass below.
            recon_loss.backward(retain_graph=True)
            grad_norm = torch.nn.utils.clip_grad_norm_(genmodel.aed_params, max_grad_norm)
            aed_trainer.step()
            'decoder classification loss & training'
            # Push the generated token distribution toward the target attribute.
            gen_cls_out = dismodel.gen_discriminator(vocab_out)
            gen_cls_loss = genmodel.cls_loss(attribute, gen_cls_out)
            summary.add_scalar('generated sentence loss', gen_cls_loss.item(), start)
            gen_trainer.zero_grad()
            gen_cls_loss.backward()
            grad_norm = torch.nn.utils.clip_grad_norm_(genmodel.aed_params, max_grad_norm)
            gen_trainer.step()
        'savining point'
        # End of epoch: reshuffle both pools and checkpoint.
        if (((start + 1) % pos_len) == 0):
            random.shuffle(yelp_neg_dataset)
            random.shuffle(yelp_pos_dataset)
            save_model(((start + 1) // pos_len))
    save_model('final')
|
def save_model(iter):
    """Persist the generator weights to ``models/gen_model_<iter>``.

    Args:
        iter: checkpoint tag -- an epoch number or the string ``'final'``.
            (Name shadows the ``iter`` builtin; kept for caller compatibility.)
    """
    # exist_ok avoids the check-then-create race of an exists() guard and
    # is a no-op when the directory is already there.
    os.makedirs('models/', exist_ok=True)
    torch.save(genmodel.state_dict(), 'models/gen_model_{}'.format(iter))
|
class MultiHeadedDotAttention(nn.Module):
    """Multi-head dot-product attention with optional "attention on
    attention" (AoA) output gating.

    Args:
        h: number of attention heads.
        d_model: model width; per-head width is (d_model * scale) / h.
        dropout: dropout applied to the attention weights.
        scale: widening factor for the projected q/k/v.
        project_k_v: if 1, learn linear projections for k and v; if 0, the
            caller supplies already-projected k/v tensors.
        use_output_layer: if truthy, apply a final linear projection
            (ignored when AoA is enabled).
        do_aoa: if truthy, gate the attended vector with a Linear+GLU over
            [attended; query].
        norm_q: if truthy, LayerNorm the query before projection.
        dropout_aoa: dropout on the AoA gate input (0 disables it).
    """

    def __init__(self, h, d_model, dropout=0.1, scale=1, project_k_v=1, use_output_layer=1, do_aoa=0, norm_q=0, dropout_aoa=0.3):
        super(MultiHeadedDotAttention, self).__init__()
        # The (scaled) model width must split evenly across heads.
        assert (((d_model * scale) % h) == 0)
        self.d_k = ((d_model * scale) // h)
        self.h = h
        self.project_k_v = project_k_v
        if norm_q:
            self.norm = LayerNorm(d_model)
        else:
            self.norm = (lambda x: x)  # identity when query norm is off
        # One projection for q, plus two more for k/v when project_k_v == 1.
        self.linears = clones(nn.Linear(d_model, (d_model * scale)), (1 + (2 * project_k_v)))
        self.output_layer = nn.Linear((d_model * scale), d_model)
        self.use_aoa = do_aoa
        if self.use_aoa:
            # AoA gate: Linear + GLU over [attended; query] -> d_model.
            self.aoa_layer = nn.Sequential(nn.Linear(((1 + scale) * d_model), (2 * d_model)), nn.GLU())
            if (dropout_aoa > 0):
                self.dropout_aoa = nn.Dropout(p=dropout_aoa)
            else:
                self.dropout_aoa = (lambda x: x)
        if (self.use_aoa or (not use_output_layer)):
            # AoA replaces the output projection (or the caller disabled it);
            # swap in an identity so forward() stays uniform.
            del self.output_layer
            self.output_layer = (lambda x: x)
        self.attn = None  # most recent attention map, stored by forward()
        self.dropout = nn.Dropout(p=dropout)

    def forward(self, query, value, key, mask=None):
        """Attend over (key, value) with query.

        Note the argument order is (query, value, key), not the usual
        (query, key, value).
        """
        if (mask is not None):
            if (len(mask.size()) == 2):
                mask = mask.unsqueeze((- 2))
            # Add a head axis so the mask broadcasts over all heads.
            mask = mask.unsqueeze(1)
        # Accept a 2-D (batch, d_model) query; remember to squeeze back.
        single_query = 0
        if (len(query.size()) == 2):
            single_query = 1
            query = query.unsqueeze(1)
        nbatches = query.size(0)
        query = self.norm(query)
        if (self.project_k_v == 0):
            # k/v arrive pre-projected; only reshape them into heads.
            query_ = self.linears[0](query).view(nbatches, (- 1), self.h, self.d_k).transpose(1, 2)
            key_ = key.view(nbatches, (- 1), self.h, self.d_k).transpose(1, 2)
            value_ = value.view(nbatches, (- 1), self.h, self.d_k).transpose(1, 2)
        else:
            # Project q/k/v and split into heads: (B, h, seq, d_k).
            (query_, key_, value_) = [l(x).view(nbatches, (- 1), self.h, self.d_k).transpose(1, 2) for (l, x) in zip(self.linears, (query, key, value))]
        (x, self.attn) = attention(query_, key_, value_, mask=mask, dropout=self.dropout)
        # Merge heads back: (B, seq, h * d_k).
        x = x.transpose(1, 2).contiguous().view(nbatches, (- 1), (self.h * self.d_k))
        if self.use_aoa:
            x = self.aoa_layer(self.dropout_aoa(torch.cat([x, query], (- 1))))
        x = self.output_layer(x)
        if single_query:
            query = query.squeeze(1)
            x = x.squeeze(1)
        return x
|
class AoA_Refiner_Layer(nn.Module):
    """One refiner block: a self-attention sublayer, optionally followed by
    a feed-forward sublayer (only when ``feed_forward`` is not None)."""

    def __init__(self, size, self_attn, feed_forward, dropout):
        super(AoA_Refiner_Layer, self).__init__()
        self.self_attn = self_attn
        self.feed_forward = feed_forward
        # use_ff doubles as the number of extra sublayers (0 or 1).
        self.use_ff = 1 if self.feed_forward is not None else 0
        self.sublayer = clones(SublayerConnection(size, dropout), 1 + self.use_ff)
        self.size = size

    def forward(self, x, mask):
        attended = self.sublayer[0](x, lambda t: self.self_attn(t, t, t, mask))
        if not self.use_ff:
            return attended
        return self.sublayer[-1](attended, self.feed_forward)
|
class AoA_Refiner_Core(nn.Module):
    """Stack of six AoA refiner layers over the attended features, finished
    with a LayerNorm."""

    def __init__(self, opt):
        super(AoA_Refiner_Core, self).__init__()
        attn = MultiHeadedDotAttention(
            opt.num_heads,
            opt.rnn_size,
            project_k_v=1,
            scale=opt.multi_head_scale,
            do_aoa=opt.refine_aoa,
            norm_q=0,
            dropout_aoa=getattr(opt, 'dropout_aoa', 0.3),
        )
        # Feed-forward sublayer is optional (opt.use_ff).
        ff = PositionwiseFeedForward(opt.rnn_size, 2048, 0.1) if opt.use_ff else None
        prototype = AoA_Refiner_Layer(opt.rnn_size, attn, ff, 0.1)
        self.layers = clones(prototype, 6)
        self.norm = LayerNorm(prototype.size)

    def forward(self, x, mask):
        out = x
        for block in self.layers:
            out = block(out, mask)
        return self.norm(out)
|
class AoA_Decoder_Core(nn.Module):
    """AoA caption-decoder cell: an attention LSTM followed by a context
    refiner chosen by ``opt.decoder_type`` ('AoA', 'LSTM', or plain
    linear+ReLU)."""

    def __init__(self, opt):
        super(AoA_Decoder_Core, self).__init__()
        self.drop_prob_lm = opt.drop_prob_lm
        self.d_model = opt.rnn_size
        self.use_multi_head = opt.use_multi_head
        self.multi_head_scale = opt.multi_head_scale
        self.use_ctx_drop = getattr(opt, 'ctx_drop', 0)
        self.out_res = getattr(opt, 'out_res', 0)  # residual h_att connection
        self.decoder_type = getattr(opt, 'decoder_type', 'AoA')
        # Attention LSTM consumes [word embedding; mean feats + prev context].
        self.att_lstm = nn.LSTMCell((opt.input_encoding_size + opt.rnn_size), opt.rnn_size)
        self.out_drop = nn.Dropout(self.drop_prob_lm)
        if (self.decoder_type == 'AoA'):
            # AoA refinement: Linear + GLU over [attended; h_att].
            self.att2ctx = nn.Sequential(nn.Linear(((self.d_model * opt.multi_head_scale) + opt.rnn_size), (2 * opt.rnn_size)), nn.GLU())
        elif (self.decoder_type == 'LSTM'):
            self.att2ctx = nn.LSTMCell(((self.d_model * opt.multi_head_scale) + opt.rnn_size), opt.rnn_size)
        else:
            # Base variant: plain linear + ReLU.
            self.att2ctx = nn.Sequential(nn.Linear(((self.d_model * opt.multi_head_scale) + opt.rnn_size), opt.rnn_size), nn.ReLU())
        if (opt.use_multi_head == 2):
            # Multi-head attention consuming pre-projected k/v (project_k_v=0).
            self.attention = MultiHeadedDotAttention(opt.num_heads, opt.rnn_size, project_k_v=0, scale=opt.multi_head_scale, use_output_layer=0, do_aoa=0, norm_q=1)
        else:
            self.attention = Attention(opt)
        if self.use_ctx_drop:
            self.ctx_drop = nn.Dropout(self.drop_prob_lm)
        else:
            self.ctx_drop = (lambda x: x)  # identity when ctx dropout is off

    def forward(self, xt, mean_feats, att_feats, p_att_feats, state, att_masks=None):
        """One decoding step.

        `state` is a pair of stacked tensors (h-stack, c-stack): index 0 of
        each stack holds the attention-LSTM state, index 1 the context state.
        """
        (h_att, c_att) = self.att_lstm(torch.cat([xt, (mean_feats + self.ctx_drop(state[0][1]))], 1), (state[0][0], state[1][0]))
        if (self.use_multi_head == 2):
            # p_att_feats packs the two projected halves along dim 2; they are
            # passed per MultiHeadedDotAttention.forward(query, value, key).
            att = self.attention(h_att, p_att_feats.narrow(2, 0, (self.multi_head_scale * self.d_model)), p_att_feats.narrow(2, (self.multi_head_scale * self.d_model), (self.multi_head_scale * self.d_model)), att_masks)
        else:
            att = self.attention(h_att, att_feats, p_att_feats, att_masks)
        ctx_input = torch.cat([att, h_att], 1)
        if (self.decoder_type == 'LSTM'):
            (output, c_logic) = self.att2ctx(ctx_input, (state[0][1], state[1][1]))
            state = (torch.stack((h_att, output)), torch.stack((c_att, c_logic)))
        else:
            output = self.att2ctx(ctx_input)
            # Non-LSTM refiners have no cell state; carry the old one through.
            state = (torch.stack((h_att, output)), torch.stack((c_att, state[1][1])))
        if self.out_res:
            # Residual connection from the attention-LSTM output.
            output = (output + h_att)
        output = self.out_drop(output)
        return (output, state)
|
class AoAModel(AttModel):
    """Attention-on-Attention captioning model: an optional AoA refiner
    over the region features plus the AoA decoder core."""

    def __init__(self, opt):
        super(AoAModel, self).__init__(opt)
        self.num_layers = 2  # two stacked (h, c) pairs in the decoder state
        # mean_feats: use the mean of region features instead of fc features.
        self.use_mean_feats = getattr(opt, 'mean_feats', 1)
        if (opt.use_multi_head == 2):
            # Re-create ctx2att to emit concatenated pre-projected k/v for
            # the multi-head attention (see AoA_Decoder_Core.forward).
            del self.ctx2att
            self.ctx2att = nn.Linear(opt.rnn_size, ((2 * opt.multi_head_scale) * opt.rnn_size))
        if self.use_mean_feats:
            del self.fc_embed  # unused when mean features replace fc features
        if opt.refine:
            self.refiner = AoA_Refiner_Core(opt)
        else:
            self.refiner = (lambda x, y: x)  # identity refiner
        self.core = AoA_Decoder_Core(opt)

    def _prepare_feature(self, fc_feats, att_feats, att_masks):
        """Embed, optionally refine, and pre-project the visual features.

        Returns (mean_feats, att_feats, p_att_feats, att_masks).
        """
        (att_feats, att_masks) = self.clip_att(att_feats, att_masks)
        att_feats = pack_wrapper(self.att_embed, att_feats, att_masks)
        att_feats = self.refiner(att_feats, att_masks)
        if self.use_mean_feats:
            if (att_masks is None):
                mean_feats = torch.mean(att_feats, dim=1)
            else:
                # Masked mean over the valid regions only.
                mean_feats = (torch.sum((att_feats * att_masks.unsqueeze((- 1))), 1) / torch.sum(att_masks.unsqueeze((- 1)), 1))
        else:
            mean_feats = self.fc_embed(fc_feats)
        p_att_feats = self.ctx2att(att_feats)
        return (mean_feats, att_feats, p_att_feats, att_masks)
|
def setup(opt):
if (opt.caption_model in ['fc', 'show_tell']):
print(('Warning: %s model is mostly deprecated; many new features are not supported.' % opt.caption_model))
if (opt.caption_model == 'fc'):
print('Use newfc instead of fc')
if (opt.caption_model == 'fc'):
model = FCModel(opt)
elif (opt.caption_model == 'language_model'):
model = LMModel(opt)
elif (opt.caption_model == 'newfc'):
model = NewFCModel(opt)
elif (opt.caption_model == 'show_tell'):
model = ShowTellModel(opt)
elif (opt.caption_model == 'att2in'):
model = Att2inModel(opt)
elif (opt.caption_model == 'att2in2'):
model = Att2in2Model(opt)
elif (opt.caption_model == 'att2all2'):
print('Warning: this is not a correct implementation of the att2all model in the original paper.')
model = Att2all2Model(opt)
elif (opt.caption_model == 'adaatt'):
model = AdaAttModel(opt)
elif (opt.caption_model == 'adaattmo'):
model = AdaAttMOModel(opt)
elif (opt.caption_model in ['topdown', 'updown']):
model = UpDownModel(opt)
elif (opt.caption_model == 'stackatt'):
model = StackAttModel(opt)
elif (opt.caption_model == 'denseatt'):
model = DenseAttModel(opt)
elif (opt.caption_model == 'transformer'):
if getattr(opt, 'cached_transformer', False):
model = cachedTransformer(opt)
else:
model = TransformerModel(opt)
elif (opt.caption_model == 'aoa'):
model = AoAModel(opt)
elif (opt.caption_model == 'bert'):
model = BertCapModel(opt)
elif (opt.caption_model == 'm2transformer'):
model = M2TransformerModel(opt)
else:
raise Exception('Caption model not supported: {}'.format(opt.caption_model))
return model
|
def repeat_tensors(n, x):
    """Repeat each leading-dim entry of *x* n times: Bx... becomes Bnx...

    Lists and tuples are handled element-wise (always returning a list);
    any other value is returned unchanged.
    """
    if torch.is_tensor(x):
        # Row i of the result is input row i // n -- identical to the
        # unsqueeze/expand/reshape sequence this replaces.
        return x.repeat_interleave(n, dim=0)
    if type(x) in (list, tuple):
        return [repeat_tensors(n, item) for item in x]
    return x
|
def split_tensors(n, x):
    """Undo repeat_tensors: de-interleave the leading dim into n pieces.

    A tensor of shape (B, ...) with B divisible by n yields a tuple of n
    tensors of shape (B/n, ...). Lists/tuples recurse element-wise
    (returning a list); None becomes a list of n Nones; anything else is
    returned unchanged.
    """
    if torch.is_tensor(x):
        assert (x.shape[0] % n) == 0
        regrouped = x.reshape(x.shape[0] // n, n, *x.shape[1:])
        return regrouped.unbind(1)
    if type(x) in (list, tuple):
        return [split_tensors(n, part) for part in x]
    if x is None:
        return [None] * n
    return x
|
class CfgNode(_CfgNode):
    """Extended version of :class:`yacs.config.CfgNode` with extra features:

    1. :meth:`merge_from_file` supports the "_BASE_" key, which lets a new
       CfgNode inherit all attributes from a base configuration file.
    2. Keys that start with "COMPUTED_" are treated as insertion-only
       "computed" attributes: they can be inserted regardless of whether the
       CfgNode is frozen, but never overwritten with a different value.
    3. With ``allow_unsafe=True``, pyyaml tags that evaluate expressions in
       the config are supported (see
       https://pyyaml.org/wiki/PyYAMLDocumentation#yaml-tags-and-python-types).
       This may lead to arbitrary code execution: never load a config file
       from an untrusted source without inspecting it first.
    """
    @staticmethod
    def load_yaml_with_base(filename, allow_unsafe=False):
        """Like ``yaml.load(open(filename))``, but resolves "_BASE_" inheritance.

        Args:
            filename (str): the current config file; a relative "_BASE_" path
                is resolved against this file's directory.
            allow_unsafe (bool): whether to fall back to ``yaml.unsafe_load``
                when safe loading fails on non-standard tags.

        Returns:
            (dict): the loaded (and base-merged) yaml content.
        """
        with PathManager.open(filename, 'r') as f:
            try:
                cfg = yaml.safe_load(f)
            except yaml.constructor.ConstructorError:
                # Non-standard yaml tags require unsafe_load; only do so when
                # the caller explicitly opted in.
                if (not allow_unsafe):
                    raise
                logger = logging.getLogger(__name__)
                logger.warning('Loading config {} with yaml.unsafe_load. Your machine may be at risk if the file contains malicious content.'.format(filename))
                # NOTE(review): this close() is redundant -- the enclosing
                # `with` block closes f anyway.
                f.close()
                with open(filename, 'r') as f:
                    cfg = yaml.unsafe_load(f)
        def merge_a_into_b(a, b):
            # Recursively merge dict a into dict b (a's leaf values win).
            for (k, v) in a.items():
                if (isinstance(v, dict) and (k in b)):
                    assert isinstance(b[k], dict), "Cannot inherit key '{}' from base!".format(k)
                    merge_a_into_b(v, b[k])
                else:
                    b[k] = v
        if (BASE_KEY in cfg):
            base_cfg_file = cfg[BASE_KEY]
            if base_cfg_file.startswith('~'):
                base_cfg_file = os.path.expanduser(base_cfg_file)
            # Relative base paths are resolved against the current file's dir;
            # absolute paths and http(s) URLs are used as-is.
            if (not any(map(base_cfg_file.startswith, ['/', 'https://', 'http://']))):
                base_cfg_file = os.path.join(os.path.dirname(filename), base_cfg_file)
            base_cfg = CfgNode.load_yaml_with_base(base_cfg_file, allow_unsafe=allow_unsafe)
            del cfg[BASE_KEY]
            merge_a_into_b(cfg, base_cfg)
            return base_cfg
        return cfg
    def merge_from_file(self, cfg_filename, allow_unsafe=False):
        """Merge configs from a given yaml file (with "_BASE_" support).

        Args:
            cfg_filename: the file name of the yaml config.
            allow_unsafe: whether to allow loading with ``yaml.unsafe_load``.
        """
        loaded_cfg = CfgNode.load_yaml_with_base(cfg_filename, allow_unsafe=allow_unsafe)
        loaded_cfg = type(self)(loaded_cfg)
        self.merge_from_other_cfg(loaded_cfg)
    def merge_from_other_cfg(self, cfg_other):
        """Merge from another CfgNode; "_BASE_" is only legal inside files.

        Args:
            cfg_other (CfgNode): configs to merge from.
        """
        assert (BASE_KEY not in cfg_other), "The reserved key '{}' can only be used in files!".format(BASE_KEY)
        return super().merge_from_other_cfg(cfg_other)
    def merge_from_list(self, cfg_list):
        """Merge from a flat [key, value, key, value, ...] list.

        Args:
            cfg_list (list): list of configs to merge from.
        """
        # Every even-indexed element is a key; reject the reserved base key.
        keys = set(cfg_list[0::2])
        assert (BASE_KEY not in keys), "The reserved key '{}' can only be used in files!".format(BASE_KEY)
        return super().merge_from_list(cfg_list)
    def __setattr__(self, name, val):
        # "COMPUTED_" attributes are write-once (allowed even when frozen);
        # everything else defers to the yacs base class behavior.
        if name.startswith('COMPUTED_'):
            if (name in self):
                old_val = self[name]
                if (old_val == val):
                    return
                raise KeyError("Computed attributed '{}' already exists with a different value! old={}, new={}.".format(name, old_val, val))
            self[name] = val
        else:
            super().__setattr__(name, val)
|
def find_ngrams(input_list, n):
    """Return an iterator over all length-n sliding windows of input_list,
    each window as a tuple (zip over n progressively shifted views)."""
    shifted_views = (input_list[offset:] for offset in range(n))
    return zip(*shifted_views)
|
def compute_div_n(caps, n=1):
    """Per-image distinct-n-gram ratio.

    Args:
        caps: dict mapping an image key to a list of caption strings.
        n: n-gram order.

    Returns:
        (mean ratio, per-image ratio array): for each image, the number of
        distinct n-grams across its captions divided by the total token
        count (epsilon-smoothed to avoid division by zero).
    """
    ratios = []
    for key in caps:
        distinct_ngrams = set()
        total_tokens = 0.0
        for caption in caps[key]:
            tokens = caption.split()
            total_tokens += len(tokens)
            distinct_ngrams.update(find_ngrams(tokens, n))
        ratios.append(float(len(distinct_ngrams)) / (1e-06 + float(total_tokens)))
    scores = np.array(ratios)
    return (scores.mean(), scores)
|
def compute_global_div_n(caps, n=1):
    """Corpus-level distinct-n-gram diversity over every caption of every image.

    For n == 1 the raw count of distinct unigrams is reported; for larger n
    the count is normalized by the total token count (epsilon-smoothed).
    Returns (score, score repeated once per image) to mirror the shape of
    compute_div_n's output.
    """
    distinct_ngrams = set()
    total_tokens = 0.0
    for key in caps:
        for caption in caps[key]:
            tokens = caption.split()
            total_tokens += len(tokens)
            distinct_ngrams.update(find_ngrams(tokens, n))
    if n == 1:
        score = float(len(distinct_ngrams))
    else:
        score = float(len(distinct_ngrams)) / (1e-06 + float(total_tokens))
    return (score, np.repeat(np.array([score]), len(caps)))
|
def pickle_load(f):
    """Load a pickle from file-like object `f`.

    On Python 3, py2-era pickles are decoded with latin-1 so byte strings
    round-trip; on Python 2 the default loader is used.
    """
    if not six.PY3:
        return cPickle.load(f)
    return cPickle.load(f, encoding='latin-1')
|
def pickle_dump(obj, f):
    """Dump `obj` as a pickle to file-like object `f`.

    On Python 3, protocol 2 is forced so the result stays readable from
    Python 2; on Python 2 the default protocol is used.
    """
    if not six.PY3:
        return cPickle.dump(obj, f)
    return cPickle.dump(obj, f, protocol=2)
|
def serialize_to_tensor(data):
    """Pickle `data` and return its raw bytes wrapped in a CPU uint8 tensor
    (useful for shipping arbitrary objects through tensor-only channels)."""
    payload = cPickle.dumps(data)
    storage = torch.ByteStorage.from_buffer(payload)
    return torch.ByteTensor(storage).to(device=torch.device('cpu'))
|
def deserialize(tensor):
    """Inverse of serialize_to_tensor: unpickle the bytes held in a uint8 tensor."""
    raw_bytes = tensor.cpu().numpy().tobytes()
    return cPickle.loads(raw_bytes)
|
def decode_sequence(ix_to_word, seq):
    """Convert an (N, D) tensor of token indices into N space-joined strings.

    Index 0 (or any non-positive index) terminates a row; remaining positions
    are ignored. When the REMOVE_BAD_ENDINGS env var is truthy, trailing words
    found in the module-level `bad_endings` list are stripped. BPE '@@ '
    continuation markers are removed from the final text.
    """
    num_rows, max_len = seq.size()
    sentences = []
    for row in range(num_rows):
        words = []
        for col in range(max_len):
            token = seq[row, col]
            if token <= 0:
                break
            words.append(ix_to_word[str(token.item())])
        txt = ' '.join(words)
        if int(os.getenv('REMOVE_BAD_ENDINGS', '0')):
            # Walk backwards to find the first word that is NOT a bad ending,
            # then drop everything after it.
            trim = 0
            parts = txt.split(' ')
            for j in range(len(parts)):
                if parts[-j - 1] not in bad_endings:
                    trim = -j
                    break
            txt = ' '.join(parts[:len(parts) + trim])
        sentences.append(txt.replace('@@ ', ''))
    return sentences
|
def save_checkpoint(opt, model, infos, optimizer, histories=None, append=''):
    """Persist training state into opt.checkpoint_path.

    Writes model and optimizer state dicts as .pth files, the `infos` dict
    (and optionally `histories`) as pickles. A non-empty `append` becomes a
    '-<append>' suffix on every file name.
    """
    suffix = ('-' + append) if len(append) > 0 else ''
    if not os.path.isdir(opt.checkpoint_path):
        os.makedirs(opt.checkpoint_path)
    checkpoint_path = os.path.join(opt.checkpoint_path, 'model%s.pth' % suffix)
    torch.save(model.state_dict(), checkpoint_path)
    print('model saved to {}'.format(checkpoint_path))
    optimizer_path = os.path.join(opt.checkpoint_path, 'optimizer%s.pth' % suffix)
    torch.save(optimizer.state_dict(), optimizer_path)
    infos_path = os.path.join(opt.checkpoint_path, 'infos_' + opt.id + ('%s.pkl' % suffix))
    with open(infos_path, 'wb') as f:
        pickle_dump(infos, f)
    if histories:
        histories_path = os.path.join(opt.checkpoint_path, 'histories_' + opt.id + ('%s.pkl' % suffix))
        with open(histories_path, 'wb') as f:
            pickle_dump(histories, f)
|
def set_lr(optimizer, lr):
    """Set the learning rate of every param group of `optimizer` to `lr`."""
    for param_group in optimizer.param_groups:
        param_group.update(lr=lr)
|
def get_lr(optimizer):
    """Return the learning rate of the first param group (None if there are none)."""
    for param_group in optimizer.param_groups:
        return param_group['lr']
    return None
|
def build_optimizer(params, opt):
    """Construct a torch optimizer over `params` according to opt.optim.

    Supported names: rmsprop, adagrad, sgd, sgdm (SGD+momentum),
    sgdmom (SGD+Nesterov momentum), adam, adamw. Hyperparameters are read
    from opt (learning_rate, optim_alpha, optim_beta, optim_epsilon,
    weight_decay). Raises for any other name.
    """
    name = opt.optim
    if name == 'rmsprop':
        return optim.RMSprop(params, opt.learning_rate, opt.optim_alpha, opt.optim_epsilon, weight_decay=opt.weight_decay)
    if name == 'adagrad':
        return optim.Adagrad(params, opt.learning_rate, weight_decay=opt.weight_decay)
    if name == 'sgd':
        return optim.SGD(params, opt.learning_rate, weight_decay=opt.weight_decay)
    if name == 'sgdm':
        return optim.SGD(params, opt.learning_rate, opt.optim_alpha, weight_decay=opt.weight_decay)
    if name == 'sgdmom':
        return optim.SGD(params, opt.learning_rate, opt.optim_alpha, weight_decay=opt.weight_decay, nesterov=True)
    if name == 'adam':
        return optim.Adam(params, opt.learning_rate, (opt.optim_alpha, opt.optim_beta), opt.optim_epsilon, weight_decay=opt.weight_decay)
    if name == 'adamw':
        return optim.AdamW(params, opt.learning_rate, (opt.optim_alpha, opt.optim_beta), opt.optim_epsilon, weight_decay=opt.weight_decay)
    raise Exception('bad option opt.optim: {}'.format(opt.optim))
|
def penalty_builder(penalty_config):
    """Build a length-penalty function f(length, logprobs) from a config string.

    '' yields the identity (logprobs returned untouched); 'wu_X' and 'avg_X'
    yield length_wu / length_average with alpha=X. Any other prefix falls
    through and returns None (matching the original behavior).
    """
    if penalty_config == '':
        return lambda length, logprobs: logprobs
    pen_type, alpha_text = penalty_config.split('_')
    alpha = float(alpha_text)
    if pen_type == 'wu':
        return lambda length, logprobs: length_wu(length, logprobs, alpha)
    if pen_type == 'avg':
        return lambda length, logprobs: length_average(length, logprobs, alpha)
|
def length_wu(length, logprobs, alpha=0.0):
    """NMT length re-ranking score from "Google's Neural Machine Translation
    System" (Wu et al., 2016): divide logprobs by ((5+length)/6)**alpha.
    alpha=0 leaves logprobs unchanged."""
    modifier = ((5 + length) ** alpha) / ((5 + 1) ** alpha)
    return logprobs / modifier
|
def length_average(length, logprobs, alpha=0.0):
    """Return the average log-probability per token.

    `alpha` is accepted only for interface parity with length_wu; it is unused.
    """
    return logprobs / length
|
class NoamOpt(torch.optim.Optimizer):
    """Wrapper implementing the Noam (Transformer) learning-rate schedule:

        lr = factor * model_size**-0.5 * min(step**-0.5, step * warmup**-1.5)

    NOTE(review): this subclasses torch.optim.Optimizer but never calls
    super().__init__; all real work is delegated to the wrapped optimizer
    (see __getattr__), so it behaves as a delegating wrapper rather than a
    true Optimizer subclass.
    """
    def __init__(self, model_size, factor, warmup, optimizer):
        self.optimizer = optimizer      # wrapped torch optimizer
        self._step = 0                  # number of step() calls so far
        self.warmup = warmup            # warmup steps before decay begins
        self.factor = factor            # overall LR scale
        self.model_size = model_size    # transformer d_model
        self._rate = 0                  # last LR applied to the param groups
    def step(self, *args, **kwargs):
        'Recompute the LR for the current step, apply it to every param group, then step the wrapped optimizer.'
        self._step += 1
        rate = self.rate()
        for p in self.optimizer.param_groups:
            p['lr'] = rate
        self._rate = rate
        self.optimizer.step(*args, **kwargs)
    def rate(self, step=None):
        'Return the Noam LR for `step` (defaults to the current step count).'
        if (step is None):
            step = self._step
        return (self.factor * ((self.model_size ** (- 0.5)) * min((step ** (- 0.5)), (step * (self.warmup ** (- 1.5))))))
    def __getattr__(self, name):
        # Delegate any attribute not found here to the wrapped optimizer.
        return getattr(self.optimizer, name)
    def state_dict(self):
        # Persist the wrapped optimizer state plus our step counter.
        state_dict = self.optimizer.state_dict()
        state_dict['_step'] = self._step
        return state_dict
    def load_state_dict(self, state_dict):
        # '_step' may be missing from checkpoints saved by a bare optimizer;
        # in that case only the wrapped optimizer state is restored.
        if ('_step' in state_dict):
            self._step = state_dict['_step']
            del state_dict['_step']
        self.optimizer.load_state_dict(state_dict)
|
class ReduceLROnPlateau(torch.optim.Optimizer):
    """Optimizer wrapper around torch's ReduceLROnPlateau scheduler.

    step() steps the wrapped optimizer; callers must invoke
    scheduler_step(val) with the monitored metric so the scheduler can
    reduce the learning rate on plateaus.

    NOTE(review): like NoamOpt, this subclasses torch.optim.Optimizer but
    never calls super().__init__ -- it is a delegating wrapper.
    """
    def __init__(self, optimizer, mode='min', factor=0.1, patience=10, threshold=0.0001, threshold_mode='rel', cooldown=0, min_lr=0, eps=1e-08, verbose=False):
        self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode, factor, patience, threshold, threshold_mode, cooldown, min_lr, eps, verbose)
        self.optimizer = optimizer
        self.current_lr = get_lr(optimizer)  # LR of the first param group
    def step(self, *args, **kwargs):
        'Step the wrapped optimizer (the LR itself is managed by scheduler_step).'
        self.optimizer.step(*args, **kwargs)
    def scheduler_step(self, val):
        # Feed the monitored value to the scheduler, then cache the (possibly
        # reduced) learning rate.
        self.scheduler.step(val)
        self.current_lr = get_lr(self.optimizer)
    def state_dict(self):
        # Bundle our cached LR with both the scheduler and optimizer states.
        return {'current_lr': self.current_lr, 'scheduler_state_dict': self.scheduler.state_dict(), 'optimizer_state_dict': self.optimizer.state_dict()}
    def load_state_dict(self, state_dict):
        if ('current_lr' not in state_dict):
            # Backward compatibility: old checkpoints stored the bare optimizer
            # state dict; restore it and re-apply our cached LR.
            self.optimizer.load_state_dict(state_dict)
            set_lr(self.optimizer, self.current_lr)
        else:
            self.current_lr = state_dict['current_lr']
            self.scheduler.load_state_dict(state_dict['scheduler_state_dict'])
            self.optimizer.load_state_dict(state_dict['optimizer_state_dict'])
    def rate(self, step=None):
        # NOTE(review): broken dead code apparently copied from NoamOpt --
        # self._step, self.factor, self.model_size and self.warmup are never
        # set on this class, so calling rate() fails (the lookups fall through
        # __getattr__ to the wrapped optimizer, which lacks them too).
        if (step is None):
            step = self._step
        return (self.factor * ((self.model_size ** (- 0.5)) * min((step ** (- 0.5)), (step * (self.warmup ** (- 1.5))))))
    def __getattr__(self, name):
        # Delegate any attribute not found here to the wrapped optimizer.
        return getattr(self.optimizer, name)
|
def get_std_opt(model, optim_func='adam', factor=1, warmup=2000):
    """Build a NoamOpt wrapping Adam or AdamW with the standard transformer
    settings (lr=0 placeholder, betas=(0.9, 0.98), eps=1e-9); the schedule
    is driven by model.d_model, `factor` and `warmup`."""
    opt_cls = dict(adam=torch.optim.Adam, adamw=torch.optim.AdamW)[optim_func]
    base_optimizer = opt_cls(model.parameters(), lr=0, betas=(0.9, 0.98), eps=1e-09)
    return NoamOpt(model.d_model, factor, warmup, base_optimizer)
|
def if_use_feat(caption_model):
    """Return the (use_fc, use_att) feature flags for a caption model name.

    fc-only models use the global fc feature; language_model uses neither;
    updown/topdown use both; every other model uses attention features only.
    """
    if caption_model in ('show_tell', 'all_img', 'fc', 'newfc'):
        return (True, False)
    if caption_model == 'language_model':
        return (False, False)
    if caption_model in ('updown', 'topdown'):
        return (True, True)
    return (False, True)
|
def parse_opt():
    """Build and parse the training command-line options.

    Returns an argparse.Namespace, optionally overridden by a detectron-style
    yaml config (--cfg) and/or key-value pairs (--set_cfgs), validated with
    sanity asserts, and augmented with derived fields (checkpoint_path,
    start_from, use_fc/use_att, att_feat_size when boxes are used).
    """
    parser = argparse.ArgumentParser()
    # Data input settings
    parser.add_argument('--input_json', type=str, default='data/coco.json', help='path to the json file containing additional info and vocab')
    parser.add_argument('--input_fc_dir', type=str, default='data/cocotalk_fc', help='path to the directory containing the preprocessed fc feats')
    parser.add_argument('--input_att_dir', type=str, default='data/cocotalk_att', help='path to the directory containing the preprocessed att feats')
    parser.add_argument('--input_box_dir', type=str, default='data/cocotalk_box', help='path to the directory containing the boxes of att feats')
    parser.add_argument('--input_label_h5', type=str, default='data/coco_label.h5', help='path to the h5file containing the preprocessed dataset')
    parser.add_argument('--data_in_memory', action='store_true', help='True if we want to save the features in memory')
    parser.add_argument('--start_from', type=str, default=None, help="continue training from saved model at this path. Path must contain files saved by previous training process: \n 'infos.pkl' : configuration;\n 'model.pth' : weights\n ")
    parser.add_argument('--cached_tokens', type=str, default='coco-train-idxs', help='Cached token file for calculating cider score during self critical training.')
    # Model settings
    parser.add_argument('--caption_model', type=str, default='show_tell', help='show_tell, show_attend_tell, all_img, fc, att2in, att2in2, att2all2, adaatt, adaattmo, updown, stackatt, denseatt, transformer')
    parser.add_argument('--rnn_size', type=int, default=512, help='size of the rnn in number of hidden nodes in each layer')
    parser.add_argument('--num_layers', type=int, default=1, help='number of layers in the RNN')
    parser.add_argument('--rnn_type', type=str, default='lstm', help='rnn, gru, or lstm')
    parser.add_argument('--input_encoding_size', type=int, default=512, help='the encoding size of each token in the vocabulary, and the image.')
    parser.add_argument('--att_hid_size', type=int, default=512, help='the hidden size of the attention MLP; only useful in show_attend_tell; 0 if not using hidden layer')
    parser.add_argument('--fc_feat_size', type=int, default=2048, help='2048 for resnet, 4096 for vgg')
    parser.add_argument('--att_feat_size', type=int, default=2048, help='2048 for resnet, 512 for vgg')
    parser.add_argument('--logit_layers', type=int, default=1, help='number of layers in the RNN')
    parser.add_argument('--use_bn', type=int, default=0, help='If 1, then do batch_normalization first in att_embed, if 2 then do bn both in the beginning and the end of att_embed')
    parser.add_argument('--norm_att_feat', type=int, default=0, help='If normalize attention features')
    parser.add_argument('--use_box', type=int, default=0, help='If use box features')
    parser.add_argument('--norm_box_feat', type=int, default=0, help='If use box, do we normalize box feature')
    # Optimization: general
    parser.add_argument('--max_epochs', type=int, default=(- 1), help='number of epochs')
    parser.add_argument('--batch_size', type=int, default=16, help='minibatch size')
    parser.add_argument('--grad_clip_mode', type=str, default='value', help='value or norm')
    parser.add_argument('--grad_clip_value', type=float, default=0.1, help='clip gradients at this value/max_norm, 0 means no clipping')
    parser.add_argument('--drop_prob_lm', type=float, default=0.5, help='strength of dropout in the Language Model RNN')
    parser.add_argument('--self_critical_after', type=int, default=(- 1), help='After what epoch do we start finetuning the CNN? (-1 = disable; never finetune, 0 = finetune from start)')
    parser.add_argument('--seq_per_img', type=int, default=5, help='number of captions to sample for each image during training. Done for efficiency since CNN forward pass is expensive. E.g. coco has 5 sents/image')
    add_eval_sample_opts(parser)
    # Optimization: for the Language Model
    parser.add_argument('--optim', type=str, default='adam', help='what update to use? rmsprop|sgd|sgdmom|adagrad|adam|adamw')
    parser.add_argument('--learning_rate', type=float, default=0.0004, help='learning rate')
    parser.add_argument('--learning_rate_decay_start', type=int, default=(- 1), help='at what iteration to start decaying learning rate? (-1 = dont) (in epoch)')
    parser.add_argument('--learning_rate_decay_every', type=int, default=3, help='every how many iterations thereafter to drop LR?(in epoch)')
    parser.add_argument('--learning_rate_decay_rate', type=float, default=0.8, help='every how many iterations thereafter to drop LR?(in epoch)')
    parser.add_argument('--optim_alpha', type=float, default=0.9, help='alpha for adam')
    parser.add_argument('--optim_beta', type=float, default=0.999, help='beta used for adam')
    parser.add_argument('--optim_epsilon', type=float, default=1e-08, help='epsilon that goes into denominator for smoothing')
    parser.add_argument('--weight_decay', type=float, default=0, help='weight_decay')
    parser.add_argument('--label_smoothing', type=float, default=0, help='')
    parser.add_argument('--noamopt', action='store_true', help='')
    parser.add_argument('--noamopt_warmup', type=int, default=2000, help='')
    parser.add_argument('--noamopt_factor', type=float, default=1, help='')
    parser.add_argument('--reduce_on_plateau', action='store_true', help='')
    parser.add_argument('--reduce_on_plateau_factor', type=float, default=0.5, help='')
    parser.add_argument('--reduce_on_plateau_patience', type=int, default=3, help='')
    parser.add_argument('--cached_transformer', action='store_true', help='')
    parser.add_argument('--use_warmup', action='store_true', help='warm up the learing rate?')
    parser.add_argument('--scheduled_sampling_start', type=int, default=(- 1), help='at what iteration to start decay gt probability')
    parser.add_argument('--scheduled_sampling_increase_every', type=int, default=5, help='every how many iterations thereafter to gt probability')
    parser.add_argument('--scheduled_sampling_increase_prob', type=float, default=0.05, help='How much to update the prob')
    parser.add_argument('--scheduled_sampling_max_prob', type=float, default=0.25, help='Maximum scheduled sampling prob.')
    # Evaluation/checkpointing
    parser.add_argument('--val_images_use', type=int, default=3200, help='how many images to use when periodically evaluating the validation loss? (-1 = all)')
    parser.add_argument('--save_checkpoint_every', type=int, default=2500, help='how often to save a model checkpoint (in iterations)?')
    parser.add_argument('--save_every_epoch', action='store_true', help='Save checkpoint every epoch, will overwrite save_checkpoint_every')
    parser.add_argument('--save_history_ckpt', type=int, default=0, help='If save checkpoints at every save point')
    parser.add_argument('--checkpoint_path', type=str, default=None, help='directory to store checkpointed models')
    parser.add_argument('--language_eval', type=int, default=0, help='Evaluate language as well (1 = yes, 0 = no)? BLEU/CIDEr/METEOR/ROUGE_L? requires coco-caption code from Github.')
    parser.add_argument('--losses_log_every', type=int, default=25, help='How often do we snapshot losses, for inclusion in the progress dump? (0 = disable)')
    parser.add_argument('--load_best_score', type=int, default=1, help='Do we load previous best score when resuming training.')
    # Misc
    parser.add_argument('--id', type=str, default='', help='an id identifying this run/job. used in cross-val and appended when writing progress files')
    parser.add_argument('--train_only', type=int, default=0, help='if true then use 80k, else use 110k')
    # Reward / structure-loss settings for self-critical training
    parser.add_argument('--cider_reward_weight', type=float, default=1, help='The reward weight from cider')
    parser.add_argument('--bleu_reward_weight', type=float, default=0, help='The reward weight from bleu4')
    parser.add_argument('--structure_loss_weight', type=float, default=1, help='')
    parser.add_argument('--structure_after', type=int, default=(- 1), help='')
    parser.add_argument('--structure_loss_type', type=str, default='seqnll', help='')
    parser.add_argument('--struc_use_logsoftmax', action='store_true', help='')
    parser.add_argument('--entropy_reward_weight', type=float, default=0, help='Entropy reward, seems very interesting')
    parser.add_argument('--self_cider_reward_weight', type=float, default=0, help='self cider reward')
    parser.add_argument('--use_ppo', type=int, default=0, help='if use ppo. when using ppo, we reuse things like structure_loss_weight and structure_after.')
    parser.add_argument('--ppo_old_model_path', type=str, default=None, help='The old model used to calculate PPO loss.')
    parser.add_argument('--ppo_cliprange', type=float, default=0.2, help='cliprange for PPO. 0.2 is used by InstructGPT')
    parser.add_argument('--ppo_kl_coef', type=float, default=0.02, help='kl reward cooef for PPO. 0.02 is used by InstructGPT')
    parser.add_argument('--train_sample_n', type=int, default=16, help='The reward weight from cider')
    parser.add_argument('--train_sample_method', type=str, default='sample', help='')
    parser.add_argument('--train_beam_size', type=int, default=1, help='')
    parser.add_argument('--sc_sample_method', type=str, default='greedy', help='')
    parser.add_argument('--sc_beam_size', type=int, default=1, help='')
    parser.add_argument('--drop_worst_after', type=float, default=(- 1), help='')
    parser.add_argument('--drop_worst_rate', type=float, default=0, help='')
    add_diversity_opts(parser)
    # Config-file overrides
    parser.add_argument('--cfg', type=str, default=None, help='configuration; similar to what is used in detectron')
    parser.add_argument('--set_cfgs', dest='set_cfgs', help='Set config keys. Key value sequence seperate by whitespace.e.g. [key] [value] [key] [value]\n This has higher prioritythan cfg file but lower than other args. (You can only overwritearguments that have alerady been defined in config file.)', default=[], nargs='+')
    args = parser.parse_args()
    # Merge options from a yaml config / key-value overrides.
    # NOTE(review): --set_cfgs defaults to [] (never None), so this branch is
    # always entered; kept as-is to preserve existing behavior.
    if ((args.cfg is not None) or (args.set_cfgs is not None)):
        from .config import CfgNode
        if (args.cfg is not None):
            cn = CfgNode(CfgNode.load_yaml_with_base(args.cfg))
        else:
            cn = CfgNode()
        if (args.set_cfgs is not None):
            cn.merge_from_list(args.set_cfgs)
        for (k, v) in cn.items():
            if (not hasattr(args, k)):
                print(('Warning: key %s not in args' % k))
            setattr(args, k, v)
        # Re-parse so explicit command-line flags win over config values.
        args = parser.parse_args(namespace=args)
    # Sanity-check the parsed options.
    assert (args.rnn_size > 0), 'rnn_size should be greater than 0'
    assert (args.num_layers > 0), 'num_layers should be greater than 0'
    assert (args.input_encoding_size > 0), 'input_encoding_size should be greater than 0'
    assert (args.batch_size > 0), 'batch_size should be greater than 0'
    assert ((args.drop_prob_lm >= 0) and (args.drop_prob_lm < 1)), 'drop_prob_lm should be between 0 and 1'
    assert (args.seq_per_img > 0), 'seq_per_img should be greater than 0'
    assert (args.beam_size > 0), 'beam_size should be greater than 0'
    assert (args.save_checkpoint_every > 0), 'save_checkpoint_every should be greater than 0'
    assert (args.losses_log_every > 0), 'losses_log_every should be greater than 0'
    assert ((args.language_eval == 0) or (args.language_eval == 1)), 'language_eval should be 0 or 1'
    # BUG FIX: the two messages below previously said 'language_eval'.
    assert ((args.load_best_score == 0) or (args.load_best_score == 1)), 'load_best_score should be 0 or 1'
    assert ((args.train_only == 0) or (args.train_only == 1)), 'train_only should be 0 or 1'
    # Derived defaults.
    args.checkpoint_path = (args.checkpoint_path or ('./log_%s' % args.id))
    args.start_from = (args.start_from or args.checkpoint_path)
    (args.use_fc, args.use_att) = if_use_feat(args.caption_model)
    if args.use_box:
        # Box features append 5 geometry dims to each attention feature.
        args.att_feat_size = (args.att_feat_size + 5)
    return args
|
def add_eval_options(parser):
    """Register evaluation-time options (basic settings, sampling options,
    and input path overrides) on the given argparse parser."""
    # Basic options
    parser.add_argument('--batch_size', type=int, default=0, help='if > 0 then overrule, otherwise load from checkpoint.')
    parser.add_argument('--num_images', type=int, default=(- 1), help='how many images to use when periodically evaluating the loss? (-1 = all)')
    parser.add_argument('--language_eval', type=int, default=0, help='Evaluate language as well (1 = yes, 0 = no)? BLEU/CIDEr/METEOR/ROUGE_L? requires coco-caption code from Github.')
    parser.add_argument('--dump_images', type=int, default=1, help='Dump images into vis/imgs folder for vis? (1=yes,0=no)')
    parser.add_argument('--dump_json', type=int, default=1, help='Dump json with predictions into vis folder? (1=yes,0=no)')
    parser.add_argument('--dump_path', type=int, default=0, help='Write image paths along with predictions into vis json? (1=yes,0=no)')
    # Sampling options (shared with training)
    add_eval_sample_opts(parser)
    # Input paths: when empty, values are taken from the model checkpoint
    parser.add_argument('--image_folder', type=str, default='', help='If this is nonempty then will predict on the images in this folder path')
    parser.add_argument('--image_root', type=str, default='', help='In case the image paths have to be preprended with a root path to an image folder')
    parser.add_argument('--input_fc_dir', type=str, default='', help='path to the h5file containing the preprocessed dataset')
    parser.add_argument('--input_att_dir', type=str, default='', help='path to the h5file containing the preprocessed dataset')
    parser.add_argument('--input_box_dir', type=str, default='', help='path to the h5file containing the preprocessed dataset')
    parser.add_argument('--input_label_h5', type=str, default='', help='path to the h5file containing the preprocessed dataset')
    parser.add_argument('--input_json', type=str, default='', help='path to the json file containing additional info and vocab. empty = fetch from model checkpoint.')
    parser.add_argument('--split', type=str, default='test', help='if running on MSCOCO images, which split to use: val|test|train')
    parser.add_argument('--coco_json', type=str, default='', help='if nonempty then use this file in DataLoaderRaw (see docs there). Used only in MSCOCO test evaluation, where we have a specific json file of only test set images.')
    # Misc
    parser.add_argument('--id', type=str, default='', help='an id identifying this run/job. used only if language_eval = 1 for appending to intermediate files')
    parser.add_argument('--verbose_beam', type=int, default=1, help='if we need to print out all beam search beams.')
    parser.add_argument('--verbose_loss', type=int, default=0, help='If calculate loss using ground truth during evaluation')
|
def add_diversity_opts(parser):
    """Register diverse-sampling options on the given argparse parser."""
    parser.add_argument('--sample_n', type=int, default=1, help='Diverse sampling')
    parser.add_argument('--sample_n_method', type=str, default='sample', help='sample, bs, dbs, gumbel, topk, dgreedy, dsample, dtopk, dtopp')
    parser.add_argument('--eval_oracle', type=int, default=1, help='if we need to calculate loss.')
|
def add_eval_sample_opts(parser):
    """Register caption-sampling options (decoding strategy, beam search,
    length control, repetition constraints) on the given argparse parser."""
    parser.add_argument('--sample_method', type=str, default='greedy', help='greedy; sample; gumbel; top<int>, top<0-1>')
    parser.add_argument('--beam_size', type=int, default=1, help='used when sample_method = greedy, indicates number of beams in beam search. Usually 2 or 3 works well. More is not better. Set this to 1 for faster runtime but a bit worse performance.')
    parser.add_argument('--max_length', type=int, default=20, help='Maximum length during sampling')
    parser.add_argument('--length_penalty', type=str, default='', help='wu_X or avg_X, X is the alpha')
    parser.add_argument('--group_size', type=int, default=1, help="used for diverse beam search. if group_size is 1, then it's normal beam search")
    parser.add_argument('--diversity_lambda', type=float, default=0.5, help='used for diverse beam search. Usually from 0.2 to 0.8. Higher value of lambda produces a more diverse list')
    parser.add_argument('--temperature', type=float, default=1.0, help='temperature when sampling from distributions (i.e. when sample_method = sample). Lower = "safer" predictions.')
    parser.add_argument('--decoding_constraint', type=int, default=0, help='If 1, not allowing same word in a row')
    parser.add_argument('--block_trigrams', type=int, default=0, help='block repeated trigram.')
    parser.add_argument('--remove_bad_endings', type=int, default=0, help='Remove bad endings')
    parser.add_argument('--suppress_UNK', type=int, default=1, help='Not predicting UNK')
|
class ResNet(torchvision.models.resnet.ResNet):
    # Feature-extraction variant of the torchvision ResNet: the stem max-pool
    # uses padding=0 with ceil_mode, and the stride placement inside the first
    # block of layers 2-4 is altered below.
    def __init__(self, block, layers, num_classes=1000):
        super(ResNet, self).__init__(block, layers, num_classes)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=0, ceil_mode=True)
        for i in range(2, 5):
            # Move the stride-2 downsampling of each stage's first block from
            # conv2 to conv1. NOTE(review): assumes the torchvision block
            # layout where conv2 normally carries the stride -- confirm against
            # the installed torchvision version.
            getattr(self, ('layer%d' % i))[0].conv1.stride = (2, 2)
            getattr(self, ('layer%d' % i))[0].conv2.stride = (1, 1)
|
def resnet18(pretrained=False):
    """Construct a ResNet-18 model.

    Args:
        pretrained (bool): If True, load ImageNet-pretrained weights.
    """
    net = ResNet(BasicBlock, [2, 2, 2, 2])
    if pretrained:
        state = model_zoo.load_url(model_urls['resnet18'])
        net.load_state_dict(state)
    return net
|
def resnet34(pretrained=False):
    """Construct a ResNet-34 model.

    Args:
        pretrained (bool): If True, load ImageNet-pretrained weights.
    """
    net = ResNet(BasicBlock, [3, 4, 6, 3])
    if pretrained:
        state = model_zoo.load_url(model_urls['resnet34'])
        net.load_state_dict(state)
    return net
|
def resnet50(pretrained=False):
    """Construct a ResNet-50 model.

    Args:
        pretrained (bool): If True, load ImageNet-pretrained weights.
    """
    net = ResNet(Bottleneck, [3, 4, 6, 3])
    if pretrained:
        state = model_zoo.load_url(model_urls['resnet50'])
        net.load_state_dict(state)
    return net
|
def resnet101(pretrained=False):
    """Construct a ResNet-101 model.

    Args:
        pretrained (bool): If True, load ImageNet-pretrained weights.
    """
    net = ResNet(Bottleneck, [3, 4, 23, 3])
    if pretrained:
        state = model_zoo.load_url(model_urls['resnet101'])
        net.load_state_dict(state)
    return net
|
def resnet152(pretrained=False):
    """Construct a ResNet-152 model.

    Args:
        pretrained (bool): If True, load ImageNet-pretrained weights.
    """
    net = ResNet(Bottleneck, [3, 8, 36, 3])
    if pretrained:
        state = model_zoo.load_url(model_urls['resnet152'])
        net.load_state_dict(state)
    return net
|
class myResnet(nn.Module):
    """Wrap a ResNet backbone to emit (fc, att) features for a single image."""

    def __init__(self, resnet):
        super(myResnet, self).__init__()
        self.resnet = resnet

    def forward(self, img, att_size=14):
        # Add a batch dimension, then run the stem and the four residual stages
        # of the backbone (the classifier head is bypassed).
        feat = self.resnet.conv1(img.unsqueeze(0))
        feat = self.resnet.bn1(feat)
        feat = self.resnet.relu(feat)
        feat = self.resnet.maxpool(feat)
        for stage in (self.resnet.layer1, self.resnet.layer2, self.resnet.layer3, self.resnet.layer4):
            feat = stage(feat)
        # Global spatial average -> fc feature vector;
        # adaptive pooling -> (att_size, att_size, C) attention grid.
        fc = feat.mean(3).mean(2).squeeze()
        att = F.adaptive_avg_pool2d(feat, [att_size, att_size]).squeeze().permute(1, 2, 0)
        return (fc, att)
|
def build_vocab(imgs, params):
    """Learn a BPE vocabulary from all captions and apply it in place.

    Writes every caption to a temp file, learns params['symbol_count'] BPE
    merges with `learn_bpe`, segments each caption (stored back into
    img['final_captions']), and collects the resulting subword vocabulary
    sorted by frequency, with a trailing 'UNK' token appended.

    Returns:
        (vocab, bpe): the token list and the raw text of the learned BPE codes.
    """
    captions = []
    for img in imgs:
        for sent in img['sentences']:
            captions.append(' '.join(sent['tokens']))
    captions = '\n'.join(captions)
    # Dump all captions to a named temp file so learn_bpe can read them back.
    all_captions = tempfile.NamedTemporaryFile(delete=False)
    all_captions.close()
    with open(all_captions.name, 'w') as txt_file:
        txt_file.write(captions)
    # Learn the BPE merge codes into a second temp file.
    codecs_output = tempfile.NamedTemporaryFile(delete=False)
    codecs_output.close()
    with codecs.open(codecs_output.name, 'w', encoding='UTF-8') as output:
        learn_bpe.learn_bpe(codecs.open(all_captions.name, encoding='UTF-8'), output, params['symbol_count'])
    with codecs.open(codecs_output.name, encoding='UTF-8') as codes:
        bpe = apply_bpe.BPE(codes)
        # Segment every caption with the learned codes, writing the segmented
        # text to a third temp file for vocabulary counting.
        tmp = tempfile.NamedTemporaryFile(delete=False)
        tmp.close()
        tmpout = codecs.open(tmp.name, 'w', encoding='UTF-8')
        for (_, img) in enumerate(imgs):
            img['final_captions'] = []
            for sent in img['sentences']:
                txt = ' '.join(sent['tokens'])
                txt = bpe.segment(txt).strip()
                img['final_captions'].append(txt.split(' '))
                tmpout.write(txt)
                tmpout.write('\n')
                # Preview the first few segmented captions.
                if (_ < 20):
                    print(txt)
        tmpout.close()
        tmpin = codecs.open(tmp.name, encoding='UTF-8')
        vocab = learn_bpe.get_vocabulary(tmpin)
        # Sort subwords by descending frequency.
        vocab = sorted(vocab.keys(), key=(lambda x: vocab[x]), reverse=True)
    print('inserting the special UNK token')
    vocab.append('UNK')
    print('Vocab size:', len(vocab))
    # Re-read the codes as plain text (returned to the caller), then clean up
    # all three temp files.
    os.remove(all_captions.name)
    with open(codecs_output.name, 'r') as codes:
        bpe = codes.read()
    os.remove(codecs_output.name)
    os.remove(tmp.name)
    return (vocab, bpe)
|
def encode_captions(imgs, params, wtoi):
    """Encode all captions into one flat uint32 array of 1-indexed tokens.

    Args:
        imgs: list of dicts, each with a 'final_captions' list of token lists.
        params: dict providing 'max_length' (captions are truncated to it).
        wtoi: word -> 1-indexed id mapping.

    Returns:
        L: (M, max_length) uint32 array, zero-padded / truncated.
        label_start_ix, label_end_ix: 1-indexed, inclusive caption ranges
            per image (Lua-style pointers).
        label_length: true (capped) length of each caption.
    """
    max_length = params['max_length']
    num_images = len(imgs)
    num_captions = sum(len(img['final_captions']) for img in imgs)
    chunks = []
    label_start_ix = np.zeros(num_images, dtype='uint32')
    label_end_ix = np.zeros(num_images, dtype='uint32')
    label_length = np.zeros(num_captions, dtype='uint32')
    cap_idx = 0   # running index into label_length
    cursor = 1    # 1-indexed position of the next caption within L
    for i, img in enumerate(imgs):
        caps = img['final_captions']
        assert len(caps) > 0, 'error: some image has no captions'
        block = np.zeros((len(caps), max_length), dtype='uint32')
        for j, sent in enumerate(caps):
            label_length[cap_idx] = min(max_length, len(sent))
            cap_idx += 1
            for k, word in enumerate(sent[:max_length]):
                block[j, k] = wtoi[word]
        chunks.append(block)
        label_start_ix[i] = cursor
        label_end_ix[i] = cursor + len(caps) - 1
        cursor += len(caps)
    L = np.concatenate(chunks, axis=0)
    assert L.shape[0] == num_captions, "lengths don't match? that's weird"
    assert np.all(label_length > 0), 'error: some caption had no words?'
    print('encoded captions to array of size ', L.shape)
    return (L, label_start_ix, label_end_ix, label_length)
|
def main(params):
    """Preprocess a captioning dataset: build a BPE vocabulary, encode all
    captions into an h5 label file, and write the companion json with the
    ix_to_word mapping, bpe codes, and per-image metadata.

    NOTE(review): a second function named `main` is defined later in this
    file and shadows this one at import time.
    """
    imgs = json.load(open(params['input_json'], 'r'))
    imgs = imgs['images']
    # Fixed seed so vocabulary construction is reproducible.
    seed(123)
    (vocab, bpe) = build_vocab(imgs, params)
    # 1-indexed vocabulary; index 0 is left for padding / sequence end.
    itow = {(i + 1): w for (i, w) in enumerate(vocab)}
    wtoi = {w: (i + 1) for (i, w) in enumerate(vocab)}
    (L, label_start_ix, label_end_ix, label_length) = encode_captions(imgs, params, wtoi)
    N = len(imgs)
    # Write the encoded label arrays to <output_h5>_label.h5.
    f_lb = h5py.File((params['output_h5'] + '_label.h5'), 'w')
    f_lb.create_dataset('labels', dtype='uint32', data=L)
    f_lb.create_dataset('label_start_ix', dtype='uint32', data=label_start_ix)
    f_lb.create_dataset('label_end_ix', dtype='uint32', data=label_end_ix)
    f_lb.create_dataset('label_length', dtype='uint32', data=label_length)
    f_lb.close()
    # Assemble the companion json: vocab mapping, bpe codes, image metadata.
    out = {}
    out['ix_to_word'] = itow
    out['images'] = []
    out['bpe'] = bpe
    for (i, img) in enumerate(imgs):
        jimg = {}
        jimg['split'] = img['split']
        if ('filename' in img):
            jimg['file_path'] = os.path.join(img['filepath'], img['filename'])
        if ('cocoid' in img):
            jimg['id'] = img['cocoid']
        if (params['images_root'] != ''):
            # Record image dimensions when the raw images are available.
            with Image.open(os.path.join(params['images_root'], img['filepath'], img['filename'])) as _img:
                (jimg['width'], jimg['height']) = _img.size
        out['images'].append(jimg)
    json.dump(out, open(params['output_json'], 'w'))
    print('wrote ', params['output_json'])
|
def main(params):
    """Pack per-image feature files into h5 files: one dataset per cocoid.

    Reads <cocoid>.npy fc vectors and <cocoid>.npz ('feat') att maps from
    the given input dirs (when not None) and writes them into the
    corresponding output h5 files.
    """
    with open(params['input_json'], 'r') as f:
        imgs = json.load(f)['images']
    if (params['fc_input_dir'] is not None):
        print('processing fc')
        # explicit 'a' mode: create-if-missing/append (the old implicit h5py
        # default; modern h5py would otherwise open read-only and fail)
        with h5py.File(params['fc_output'], 'a') as file_fc:
            for img in tqdm(imgs):
                npy_fc_path = os.path.join(params['fc_input_dir'], (str(img['cocoid']) + '.npy'))
                file_fc.create_dataset(str(img['cocoid']), data=np.load(npy_fc_path))
    if (params['att_input_dir'] is not None):
        print('processing att')
        with h5py.File(params['att_output'], 'a') as file_att:
            for img in tqdm(imgs):
                npz_att_path = os.path.join(params['att_input_dir'], (str(img['cocoid']) + '.npz'))
                file_att.create_dataset(str(img['cocoid']), data=np.load(npz_att_path)['feat'])
|
class FolderLMDB(data.Dataset):
    """Read-only dataset over an lmdbdict database of serialized numpy
    feature blobs, keyed by file id (ascii keys, raw-bytes values).
    """

    def __init__(self, db_path, fn_list=None):
        self.db_path = db_path
        self.lmdb = lmdbdict(db_path, unsafe=True)
        # keys are plain ascii strings; values stay raw bytes
        self.lmdb._key_dumps = DUMPS_FUNC['ascii']
        self.lmdb._value_loads = LOADS_FUNC['identity']
        if (fn_list is None):
            # BUGFIX: original raised the undefined name `Error`, which would
            # surface as a confusing NameError instead of a real error.
            raise ValueError('FolderLMDB requires an explicit fn_list of keys')
        self.length = len(fn_list)
        self.keys = fn_list

    def __getitem__(self, index):
        byteflow = self.lmdb[self.keys[index]]
        buf = six.BytesIO()
        buf.write(byteflow)
        buf.seek(0)
        try:
            # NOTE(review): relies on the module-level `args.extension` to
            # decide the payload format — confirm this global is always set.
            if (args.extension == '.npz'):
                feat = np.load(buf)['feat']
            else:
                feat = np.load(buf)
        except Exception as e:
            # best-effort: report the bad key and signal failure with None
            print(self.keys[index], e)
            return None
        return feat

    def __len__(self):
        return self.length

    def __repr__(self):
        return (((self.__class__.__name__ + ' (') + self.db_path) + ')')
|
def make_dataset(dir, extension):
    """Walk *dir* recursively and return the (sorted-walk) paths of every
    file whose name passes has_file_allowed_extension for *extension*."""
    expanded = os.path.expanduser(dir)
    found = []
    for (base, _subdirs, fnames) in sorted(os.walk(expanded)):
        found.extend(os.path.join(base, fname)
                     for fname in sorted(fnames)
                     if has_file_allowed_extension(fname, [extension]))
    return found
|
def raw_reader(path):
    """Return the raw bytes of the file at *path*."""
    with open(path, 'rb') as fh:
        return fh.read()
|
def raw_npz_reader(path):
    """Read *path* and return (raw_bytes, feat).

    feat is the 'feat' array parsed from the npz bytes, or None when
    parsing fails (the path and error are printed, best-effort).
    """
    import io  # stdlib replacement for the py2-compat six.BytesIO shim
    with open(path, 'rb') as fh:
        bin_data = fh.read()
    try:
        npz_data = np.load(io.BytesIO(bin_data))['feat']
    except Exception as e:
        print(path)
        npz_data = None
        print(e)
    return (bin_data, npz_data)
|
def raw_npy_reader(path):
    """Read *path* and return (raw_bytes, array).

    array is the numpy array parsed from the npy bytes, or None when
    parsing fails (the path and error are printed, best-effort).
    """
    import io  # stdlib replacement for the py2-compat six.BytesIO shim
    with open(path, 'rb') as fh:
        bin_data = fh.read()
    try:
        npy_data = np.load(io.BytesIO(bin_data))
    except Exception as e:
        print(path)
        npy_data = None
        print(e)
    return (bin_data, npy_data)
|
class Folder(data.Dataset):
    """Dataset over feature files in a directory (or an explicit id list).

    Each item is the tuple (file_id,) + loader(path), where file_id is the
    file name stripped of directory and extension. There are no targets —
    the original docstring claiming a (sample, target) pair was wrong
    (copy-pasted from torchvision).
    """

    def __init__(self, root, loader, extension, fn_list=None):
        super(Folder, self).__init__()
        self.root = root
        if fn_list:
            # explicit list of ids: each sample lives at root/<id><extension>
            samples = [os.path.join(root, (str(_) + extension)) for _ in fn_list]
        else:
            samples = make_dataset(self.root, extension)
        self.loader = loader
        self.extension = extension
        self.samples = samples

    def __getitem__(self, index):
        """Return (file_id,) + loader(path) for the index-th sample."""
        path = self.samples[index]
        sample = self.loader(path)
        # file id = basename without extension (POSIX-style path split)
        return ((path.split('/')[(- 1)].split('.')[0],) + sample)

    def __len__(self):
        return len(self.samples)
|
def folder2lmdb(dpath, fn_list, write_frequency=5000):
    """Dump every feature file under *dpath* into '<dpath>.lmdb'.

    Only files whose payload parses (npz 'feat' / npy) are stored; every
    file's status is appended to args.output_file as tsv rows. The db and
    tsv are flushed every *write_frequency* items.
    """
    directory = osp.expanduser(osp.join(dpath))
    print(('Loading dataset from %s' % directory))
    # choose the reader matching the configured feature extension
    if (args.extension == '.npz'):
        dataset = Folder(directory, loader=raw_npz_reader, extension='.npz', fn_list=fn_list)
    else:
        dataset = Folder(directory, loader=raw_npy_reader, extension='.npy', fn_list=fn_list)
    data_loader = DataLoader(dataset, num_workers=16, collate_fn=(lambda x: x))
    lmdb_path = ('%s.lmdb' % directory)
    print(('Generate LMDB to %s' % lmdb_path))
    db = lmdbdict(lmdb_path, mode='w', key_method='ascii', value_method='identity')
    tsvfile = open(args.output_file, 'a')
    writer = csv.DictWriter(tsvfile, delimiter='\t', fieldnames=FIELDNAMES)
    names = []
    all_keys = []
    for (idx, data) in enumerate(tqdm.tqdm(data_loader)):
        (name, byte, npz) = data[0]
        if (npz is not None):
            # store only entries whose payload parsed successfully
            db[name] = byte
            all_keys.append(name)
        names.append({'image_id': name, 'status': str((npz is not None))})
        if ((idx % write_frequency) == 0):
            print(('[%d/%d]' % (idx, len(data_loader))))
            print('writing')
            db.flush()
            for name in names:
                writer.writerow(name)
            names = []
            tsvfile.flush()
            print('writing finished')
    # flush whatever remains after the last periodic write
    for name in names:
        writer.writerow(name)
    tsvfile.flush()
    tsvfile.close()
    print('Flushing database ...')
    db.flush()
    del db
|
def parse_args():
    """Parse the command-line arguments for this script."""
    # NOTE(review): description string looks copy-pasted from another script.
    ap = argparse.ArgumentParser(description='Generate bbox output from a Fast R-CNN network')
    ap.add_argument('--input_json', default='./data/dataset_coco.json', type=str)
    ap.add_argument('--output_file', default='.dump_cache.tsv', type=str)
    ap.add_argument('--folder', default='./data/cocobu_att', type=str)
    ap.add_argument('--extension', default='.npz', type=str)
    return ap.parse_args()
|
def build_vocab(imgs, params):
    """Build the word vocabulary from all caption tokens.

    Words occurring strictly more than params['word_count_threshold'] times
    are kept; when any word is dropped, a special 'UNK' token is appended.
    Side effect: attaches img['final_captions'] (rare words replaced by
    'UNK') to every image. Prints corpus statistics along the way.
    """
    count_thr = params['word_count_threshold']
    # tally every token across all reference sentences
    word_counts = {}
    for img in imgs:
        for sent in img['sentences']:
            for tok in sent['tokens']:
                word_counts[tok] = (word_counts.get(tok, 0) + 1)
    ranked = sorted(((c, w) for (w, c) in word_counts.items()), reverse=True)
    print('top words and their counts:')
    print('\n'.join(map(str, ranked[:20])))
    total_words = sum(word_counts.values())
    print('total words:', total_words)
    bad_words = [w for (w, c) in word_counts.items() if (c <= count_thr)]
    vocab = [w for (w, c) in word_counts.items() if (c > count_thr)]
    bad_count = sum((word_counts[w] for w in bad_words))
    print(('number of bad words: %d/%d = %.2f%%' % (len(bad_words), len(word_counts), ((len(bad_words) * 100.0) / len(word_counts)))))
    print(('number of words in vocab would be %d' % (len(vocab),)))
    print(('number of UNKs: %d/%d = %.2f%%' % (bad_count, total_words, ((bad_count * 100.0) / total_words))))
    # distribution of raw sentence lengths
    sent_lengths = {}
    for img in imgs:
        for sent in img['sentences']:
            nw = len(sent['tokens'])
            sent_lengths[nw] = (sent_lengths.get(nw, 0) + 1)
    max_len = max(sent_lengths.keys())
    print('max length sentence in raw data: ', max_len)
    print('sentence length distribution (count, number of words):')
    sum_len = sum(sent_lengths.values())
    for n in range((max_len + 1)):
        print(('%2d: %10d %f%%' % (n, sent_lengths.get(n, 0), ((sent_lengths.get(n, 0) * 100.0) / sum_len))))
    if (bad_count > 0):
        # rare words exist, so reserve a special UNK token in the vocab
        print('inserting the special UNK token')
        vocab.append('UNK')
    for img in imgs:
        img['final_captions'] = [[(tok if (word_counts.get(tok, 0) > count_thr) else 'UNK') for tok in sent['tokens']]
                                 for sent in img['sentences']]
    return vocab
|
def encode_captions(imgs, params, wtoi):
    """Encode all captions into one large uint32 array, 1-indexed.

    Returns (L, label_start_ix, label_end_ix, label_length) where
    label_start_ix/label_end_ix hold 1-indexed, inclusive (Lua-style)
    pointers to the first and last caption row of each image, and
    label_length holds each caption's (truncated) token count.
    """
    max_length = params['max_length']
    num_images = len(imgs)
    num_captions = sum((len(img['final_captions']) for img in imgs))
    label_arrays = []
    label_start_ix = np.zeros(num_images, dtype='uint32')
    label_end_ix = np.zeros(num_images, dtype='uint32')
    label_length = np.zeros(num_captions, dtype='uint32')
    cap_idx = 0    # running 0-based caption counter across all images
    first_idx = 1  # 1-based pointer to the current image's first caption
    for (img_i, img) in enumerate(imgs):
        caps = img['final_captions']
        assert (len(caps) > 0), 'error: some image has no captions'
        encoded = np.zeros((len(caps), max_length), dtype='uint32')
        for (row, tokens) in enumerate(caps):
            label_length[cap_idx] = min(max_length, len(tokens))
            cap_idx += 1
            # truncate to max_length; rows stay zero-padded past the caption
            for (col, word) in enumerate(tokens[:max_length]):
                encoded[(row, col)] = wtoi[word]
        label_arrays.append(encoded)
        label_start_ix[img_i] = first_idx
        label_end_ix[img_i] = ((first_idx + len(caps)) - 1)
        first_idx += len(caps)
    L = np.concatenate(label_arrays, axis=0)
    assert (L.shape[0] == num_captions), "lengths don't match? that's weird"
    assert np.all((label_length > 0)), 'error: some caption had no words?'
    print('encoded captions to array of size ', L.shape)
    return (L, label_start_ix, label_end_ix, label_length)
|
def main(params):
    """Preprocess a captions dataset (no BPE): build the vocabulary, encode
    the captions into an h5 label file, and write image metadata plus the
    ix->word table to an output json.
    """
    with open(params['input_json'], 'r') as f:
        imgs = json.load(f)['images']
    seed(123)  # fixed RNG seed -> reproducible preprocessing
    vocab = build_vocab(imgs, params)
    # vocab indices are 1-based; index 0 is left free (padding / end token)
    itow = {(i + 1): w for (i, w) in enumerate(vocab)}
    wtoi = {w: (i + 1) for (i, w) in enumerate(vocab)}
    (L, label_start_ix, label_end_ix, label_length) = encode_captions(imgs, params, wtoi)
    f_lb = h5py.File((params['output_h5'] + '_label.h5'), 'w')
    try:
        f_lb.create_dataset('labels', dtype='uint32', data=L)
        f_lb.create_dataset('label_start_ix', dtype='uint32', data=label_start_ix)
        f_lb.create_dataset('label_end_ix', dtype='uint32', data=label_end_ix)
        f_lb.create_dataset('label_length', dtype='uint32', data=label_length)
    finally:
        f_lb.close()
    out = {'ix_to_word': itow, 'images': []}
    for img in imgs:
        jimg = {}
        jimg['split'] = img['split']
        if ('filename' in img):
            jimg['file_path'] = os.path.join(img.get('filepath', ''), img['filename'])
        if ('cocoid' in img):
            jimg['id'] = img['cocoid']
        elif ('imgid' in img):
            jimg['id'] = img['imgid']
        if (params['images_root'] != ''):
            # record the true image dimensions for downstream consumers
            with Image.open(os.path.join(params['images_root'], img['filepath'], img['filename'])) as _img:
                (jimg['width'], jimg['height']) = _img.size
        out['images'].append(jimg)
    with open(params['output_json'], 'w') as f:
        json.dump(out, f)
    print('wrote ', params['output_json'])
|
def get_doc_freq(refs, params):
    """Compute n-gram document frequencies over *refs* with CiderScorer.

    Returns (document_frequency, number_of_reference_sets). *params* is
    unused but kept for interface compatibility with the callers.
    """
    scorer = CiderScorer(df_mode='corpus')
    for ref in refs:
        scorer.cook_append(None, ref)
    scorer.compute_doc_freq()
    return (scorer.document_frequency, len(scorer.crefs))
|
def build_dict(imgs, wtoi, params):
    """Collect reference captions (as word strings and as vocab-index
    strings) for the requested split and compute their n-gram document
    frequencies.

    Side effect: registers '<eos>' -> 0 in *wtoi*. Returns
    (ngram_words, ngram_idxs, count_refs).
    """
    wtoi['<eos>'] = 0
    count_imgs = 0
    refs_words = []
    refs_idxs = []
    for img in imgs:
        in_split = ((params['split'] == img['split'])
                    or ((params['split'] == 'train') and (img['split'] == 'restval'))
                    or (params['split'] == 'all'))
        if not in_split:
            continue
        ref_words = []
        ref_idxs = []
        for sent in img['sentences']:
            # NOTE(review): hasattr only succeeds when params is a
            # namespace-like object carrying a .bpe attribute, not a plain
            # dict — confirm what the caller actually passes.
            if hasattr(params, 'bpe'):
                sent['tokens'] = params.bpe.segment(' '.join(sent['tokens'])).strip().split(' ')
            toks = (sent['tokens'] + ['<eos>'])
            toks = [(t if (t in wtoi) else 'UNK') for t in toks]
            ref_words.append(' '.join(toks))
            ref_idxs.append(' '.join((str(wtoi[t]) for t in toks)))
        refs_words.append(ref_words)
        refs_idxs.append(ref_idxs)
        count_imgs += 1
    print('total imgs:', count_imgs)
    (ngram_words, count_refs) = get_doc_freq(refs_words, params)
    (ngram_idxs, count_refs) = get_doc_freq(refs_idxs, params)
    print('count_refs:', count_refs)
    return (ngram_words, ngram_idxs, count_refs)
|
def main(params):
    """Precompute n-gram document frequencies (for CIDEr) and pickle them.

    Loads the dataset json and the preprocessed dict json, optionally
    reconstructs the BPE segmenter from the stored codes, then dumps
    word-level and index-level document frequencies to
    params['output_pkl']-words.p / -idxs.p.
    """
    with open(params['input_json'], 'r') as f:
        imgs = json.load(f)
    with open(params['dict_json'], 'r') as f:
        dict_json = json.load(f)
    itow = dict_json['ix_to_word']
    wtoi = {w: i for (i, w) in itow.items()}
    if ('bpe' in dict_json):
        import tempfile
        import codecs
        # write the stored BPE codes to a temp file so apply_bpe can load them
        codes_f = tempfile.NamedTemporaryFile(delete=False)
        codes_f.close()
        with open(codes_f.name, 'w') as f:
            f.write(dict_json['bpe'])
        with codecs.open(codes_f.name, encoding='UTF-8') as codes:
            bpe = apply_bpe.BPE(codes)
        # NOTE(review): attribute assignment assumes params supports
        # attributes (e.g. an argparse.Namespace); a plain dict would raise
        # AttributeError here — confirm against the caller.
        params.bpe = bpe
    imgs = imgs['images']
    (ngram_words, ngram_idxs, ref_len) = build_dict(imgs, wtoi, params)
    with open((params['output_pkl'] + '-words.p'), 'wb') as f:
        utils.pickle_dump({'document_frequency': ngram_words, 'ref_len': ref_len}, f)
    with open((params['output_pkl'] + '-idxs.p'), 'wb') as f:
        utils.pickle_dump({'document_frequency': ngram_idxs, 'ref_len': ref_len}, f)
|
def main(params):
    """Convert a Karpathy-split dataset json into COCO annotation format
    for all non-train images and write it to params['output_json'].

    params['input_json'] is a list; only its first element is read.
    """
    with open(params['input_json'][0], 'r') as f:
        imgs = json.load(f)['images']
    out = {'info': {'description': 'This is stable 1.0 version of the 2014 MS COCO dataset.', 'url': 'http://mscoco.org', 'version': '1.0', 'year': 2014, 'contributor': 'Microsoft COCO group', 'date_created': '2015-01-27 09:11:52.357475'}, 'licenses': [{'url': 'http://creativecommons.org/licenses/by-nc-sa/2.0/', 'id': 1, 'name': 'Attribution-NonCommercial-ShareAlike License'}, {'url': 'http://creativecommons.org/licenses/by-nc/2.0/', 'id': 2, 'name': 'Attribution-NonCommercial License'}, {'url': 'http://creativecommons.org/licenses/by-nc-nd/2.0/', 'id': 3, 'name': 'Attribution-NonCommercial-NoDerivs License'}, {'url': 'http://creativecommons.org/licenses/by/2.0/', 'id': 4, 'name': 'Attribution License'}, {'url': 'http://creativecommons.org/licenses/by-sa/2.0/', 'id': 5, 'name': 'Attribution-ShareAlike License'}, {'url': 'http://creativecommons.org/licenses/by-nd/2.0/', 'id': 6, 'name': 'Attribution-NoDerivs License'}, {'url': 'http://flickr.com/commons/usage/', 'id': 7, 'name': 'No known copyright restrictions'}, {'url': 'http://www.usa.gov/copyright.shtml', 'id': 8, 'name': 'United States Government Work'}], 'type': 'captions'}
    out.update({'images': [], 'annotations': []})
    cnt = 0
    for img in imgs:
        if (img['split'] == 'train'):
            continue
        out['images'].append({'id': img.get('cocoid', img['imgid'])})
        for sent in img['sentences']:
            # NOTE(review): len(sent) counts dict keys, not tokens — this
            # probably meant to skip empty token lists; confirm intent.
            if (len(sent) == 0):
                continue
            caption = ' '.join(sent['tokens'])
            out['annotations'].append({'image_id': out['images'][(- 1)]['id'], 'caption': caption, 'id': cnt})
            cnt += 1
    with open(params['output_json'], 'w') as f:
        json.dump(out, f)
    print('wrote ', params['output_json'])
|
def test_folder():
    """Smoke test: build CaptionDataset from the saved training opt and
    fetch the first training sample from the directory-backed features."""
    infos = pickle_load(open('log_trans/infos_trans.pkl', 'rb'))
    dataset = CaptionDataset(infos['opt'])
    train_split = torch.utils.data.Subset(dataset, dataset.split_ix['train'])
    train_split[0]
|
def test_lmdb():
    """Smoke test: same as test_folder but with att features served from
    an lmdb database instead of a directory."""
    infos = pickle_load(open('log_trans/infos_trans.pkl', 'rb'))
    infos['opt'].input_att_dir = 'data/vilbert_att.lmdb'
    dataset = CaptionDataset(infos['opt'])
    train_split = torch.utils.data.Subset(dataset, dataset.split_ix['train'])
    train_split[0]
|
def add_summary_value(writer, key, value, iteration):
    """Log a scalar to the tensorboard writer; no-op when writer is falsy."""
    if not writer:
        return
    writer.add_scalar(key, value, iteration)
|
def train(opt):
    """Full training loop for a captioning model.

    Handles checkpoint resumption, learning-rate decay / scheduled-sampling
    schedules, optional self-critical and structure losses, drop-worst loss
    filtering, periodic validation with optional language-metric evaluation,
    and checkpointing (including best-model tracking). On RuntimeError or
    KeyboardInterrupt a best-effort checkpoint is saved before exiting.
    """
    loader = DataLoader(opt)
    opt.vocab_size = loader.vocab_size
    opt.seq_length = loader.seq_length
    infos = {'iter': 0, 'epoch': 0, 'loader_state_dict': None, 'vocab': loader.get_vocab()}
    # resume bookkeeping (infos) from opt.start_from, if a checkpoint exists
    if ((opt.start_from is not None) and os.path.isfile(os.path.join(opt.start_from, (('infos_' + opt.id) + '.pkl')))):
        with open(os.path.join(opt.start_from, (('infos_' + opt.id) + '.pkl')), 'rb') as f:
            infos = utils.pickle_load(f)
            saved_model_opt = infos['opt']
            # these options define the architecture and must match the checkpoint
            need_be_same = ['caption_model', 'rnn_type', 'rnn_size', 'num_layers']
            for checkme in need_be_same:
                assert (getattr(saved_model_opt, checkme) == getattr(opt, checkme)), ("Command line argument and saved model disagree on '%s' " % checkme)
    infos['opt'] = opt
    histories = defaultdict(dict)
    if ((opt.start_from is not None) and os.path.isfile(os.path.join(opt.start_from, (('histories_' + opt.id) + '.pkl')))):
        with open(os.path.join(opt.start_from, (('histories_' + opt.id) + '.pkl')), 'rb') as f:
            histories.update(utils.pickle_load(f))
    tb_summary_writer = SummaryWriter(opt.checkpoint_path)
    opt.vocab = loader.get_vocab()
    model = models.setup(opt).cuda()
    # drop the vocab from opt again so the checkpointed opt stays small
    del opt.vocab
    if ((opt.start_from is not None) and os.path.isfile(os.path.join(opt.start_from, 'model.pth'))):
        model.load_state_dict(torch.load(os.path.join(opt.start_from, 'model.pth')))
    # wrap the model with its loss, and both in DataParallel for multi-GPU
    lw_model = LossWrapper(model, opt)
    dp_model = torch.nn.DataParallel(model)
    dp_model.vocab = getattr(model, 'vocab', None)
    dp_lw_model = torch.nn.DataParallel(lw_model)
    if opt.noamopt:
        assert (opt.caption_model in ['transformer', 'bert', 'm2transformer']), 'noamopt can only work with transformer'
        optimizer = utils.get_std_opt(model, optim_func=opt.optim, factor=opt.noamopt_factor, warmup=opt.noamopt_warmup)
    elif opt.reduce_on_plateau:
        optimizer = utils.build_optimizer(model.parameters(), opt)
        optimizer = utils.ReduceLROnPlateau(optimizer, factor=opt.reduce_on_plateau_factor, patience=opt.reduce_on_plateau_patience)
    else:
        optimizer = utils.build_optimizer(model.parameters(), opt)
    if ((opt.start_from is not None) and os.path.isfile(os.path.join(opt.start_from, 'optimizer.pth'))):
        optimizer.load_state_dict(torch.load(os.path.join(opt.start_from, 'optimizer.pth')))
    iteration = infos['iter']
    epoch = infos['epoch']
    # backward compatibility: rebuild loader_state_dict from old-style infos
    if ('iterators' in infos):
        infos['loader_state_dict'] = {split: {'index_list': infos['split_ix'][split], 'iter_counter': infos['iterators'][split]} for split in ['train', 'val', 'test']}
    loader.load_state_dict(infos['loader_state_dict'])
    if (opt.load_best_score == 1):
        best_val_score = infos.get('best_val_score', None)
    if opt.noamopt:
        optimizer._step = iteration
    epoch_done = True
    dp_lw_model.train()
    try:
        while True:
            if ((epoch >= opt.max_epochs) and (opt.max_epochs != (- 1))):
                break
            if epoch_done:
                # once per epoch: refresh lr decay, scheduled sampling, and the
                # self-critical / structure-loss / drop-worst flags
                if ((not opt.noamopt) and (not opt.reduce_on_plateau)):
                    if ((epoch > opt.learning_rate_decay_start) and (opt.learning_rate_decay_start >= 0)):
                        frac = ((epoch - opt.learning_rate_decay_start) // opt.learning_rate_decay_every)
                        decay_factor = (opt.learning_rate_decay_rate ** frac)
                        opt.current_lr = (opt.learning_rate * decay_factor)
                    else:
                        opt.current_lr = opt.learning_rate
                    utils.set_lr(optimizer, opt.current_lr)
                if ((epoch > opt.scheduled_sampling_start) and (opt.scheduled_sampling_start >= 0)):
                    frac = ((epoch - opt.scheduled_sampling_start) // opt.scheduled_sampling_increase_every)
                    opt.ss_prob = min((opt.scheduled_sampling_increase_prob * frac), opt.scheduled_sampling_max_prob)
                    model.ss_prob = opt.ss_prob
                if ((opt.self_critical_after != (- 1)) and (epoch >= opt.self_critical_after)):
                    sc_flag = True
                    init_scorer(opt.cached_tokens)
                else:
                    sc_flag = False
                if ((opt.structure_after != (- 1)) and (epoch >= opt.structure_after)):
                    struc_flag = True
                    init_scorer(opt.cached_tokens)
                else:
                    struc_flag = False
                if ((opt.drop_worst_after != (- 1)) and (epoch >= opt.drop_worst_after)):
                    drop_worst_flag = True
                else:
                    drop_worst_flag = False
                epoch_done = False
            start = time.time()
            # linear warmup overrides the lr for the first noamopt_warmup iters
            if (opt.use_warmup and (iteration < opt.noamopt_warmup)):
                opt.current_lr = ((opt.learning_rate * (iteration + 1)) / opt.noamopt_warmup)
                utils.set_lr(optimizer, opt.current_lr)
            data = loader.get_batch('train')
            print('Read data:', (time.time() - start))
            torch.cuda.synchronize()
            start = time.time()
            tmp = [data['fc_feats'], data['att_feats'], data['labels'], data['masks'], data['att_masks']]
            # move every present tensor to GPU (att feats/masks may be None)
            tmp = [(_ if (_ is None) else _.cuda()) for _ in tmp]
            (fc_feats, att_feats, labels, masks, att_masks) = tmp
            optimizer.zero_grad()
            model_out = dp_lw_model(fc_feats, att_feats, labels, masks, att_masks, data['gts'], torch.arange(0, len(data['gts'])), sc_flag, struc_flag, drop_worst_flag)
            if (not drop_worst_flag):
                loss = model_out['loss'].mean()
            else:
                # keep only the (1 - drop_worst_rate) fraction with smallest loss
                loss = model_out['loss']
                loss = torch.topk(loss, k=int((loss.shape[0] * (1 - opt.drop_worst_rate))), largest=False)[0].mean()
            loss.backward()
            if (opt.grad_clip_value != 0):
                # dispatches to clip_grad_norm_ / clip_grad_value_ by name
                getattr(torch.nn.utils, ('clip_grad_%s_' % opt.grad_clip_mode))(model.parameters(), opt.grad_clip_value)
            optimizer.step()
            train_loss = loss.item()
            torch.cuda.synchronize()
            end = time.time()
            if struc_flag:
                print('iter {} (epoch {}), train_loss = {:.3f}, lm_loss = {:.3f}, struc_loss = {:.3f}, time/batch = {:.3f}'.format(iteration, epoch, train_loss, model_out['lm_loss'].mean().item(), model_out['struc_loss'].mean().item(), (end - start)))
            elif (not sc_flag):
                print('iter {} (epoch {}), train_loss = {:.3f}, time/batch = {:.3f}'.format(iteration, epoch, train_loss, (end - start)))
            else:
                print('iter {} (epoch {}), avg_reward = {:.3f}, time/batch = {:.3f}'.format(iteration, epoch, model_out['reward'].mean(), (end - start)))
            iteration += 1
            # the loader signals end-of-epoch by wrapping around
            if data['bounds']['wrapped']:
                epoch += 1
                epoch_done = True
            # periodic scalar logging to tensorboard + in-memory histories
            if ((iteration % opt.losses_log_every) == 0):
                tb_summary_writer.add_scalar('train_loss', train_loss, iteration)
                if opt.noamopt:
                    opt.current_lr = optimizer.rate()
                elif opt.reduce_on_plateau:
                    opt.current_lr = optimizer.current_lr
                tb_summary_writer.add_scalar('learning_rate', opt.current_lr, iteration)
                tb_summary_writer.add_scalar('scheduled_sampling_prob', model.ss_prob, iteration)
                if sc_flag:
                    tb_summary_writer.add_scalar('avg_reward', model_out['reward'].mean(), iteration)
                elif struc_flag:
                    tb_summary_writer.add_scalar('lm_loss', model_out['lm_loss'].mean().item(), iteration)
                    tb_summary_writer.add_scalar('struc_loss', model_out['struc_loss'].mean().item(), iteration)
                    tb_summary_writer.add_scalar('reward', model_out['reward'].mean().item(), iteration)
                    tb_summary_writer.add_scalar('reward_var', model_out['reward'].var(1).mean(), iteration)
                histories['loss_history'][iteration] = (train_loss if (not sc_flag) else model_out['reward'].mean())
                histories['lr_history'][iteration] = opt.current_lr
                histories['ss_prob_history'][iteration] = model.ss_prob
            # keep infos current so any checkpoint captures the latest state
            infos['iter'] = iteration
            infos['epoch'] = epoch
            infos['loader_state_dict'] = loader.state_dict()
            # periodic (or per-epoch) validation + checkpointing
            if ((((iteration % opt.save_checkpoint_every) == 0) and (not opt.save_every_epoch)) or (epoch_done and opt.save_every_epoch)):
                eval_kwargs = {'split': 'val', 'dataset': opt.input_json}
                eval_kwargs.update(vars(opt))
                (val_loss, predictions, lang_stats) = eval_utils.eval_split(dp_model, lw_model.crit, loader, eval_kwargs)
                if opt.reduce_on_plateau:
                    # negate CIDEr so the plateau scheduler can minimize it
                    if ('CIDEr' in lang_stats):
                        optimizer.scheduler_step((- lang_stats['CIDEr']))
                    else:
                        optimizer.scheduler_step(val_loss)
                tb_summary_writer.add_scalar('validation loss', val_loss, iteration)
                if (lang_stats is not None):
                    for (k, v) in lang_stats.items():
                        tb_summary_writer.add_scalar(k, v, iteration)
                histories['val_result_history'][iteration] = {'loss': val_loss, 'lang_stats': lang_stats, 'predictions': predictions}
                # model-selection score: CIDEr when language eval is on, else -val_loss
                if (opt.language_eval == 1):
                    current_score = lang_stats['CIDEr']
                else:
                    current_score = (- val_loss)
                best_flag = False
                if ((best_val_score is None) or (current_score > best_val_score)):
                    best_val_score = current_score
                    best_flag = True
                infos['best_val_score'] = best_val_score
                utils.save_checkpoint(opt, model, infos, optimizer, histories)
                if opt.save_history_ckpt:
                    utils.save_checkpoint(opt, model, infos, optimizer, append=(str(epoch) if opt.save_every_epoch else str(iteration)))
                if best_flag:
                    utils.save_checkpoint(opt, model, infos, optimizer, append='best')
    except (RuntimeError, KeyboardInterrupt):
        # best-effort save so training can resume after a crash or Ctrl-C
        print('Save ckpt on exception ...')
        utils.save_checkpoint(opt, model, infos, optimizer)
        print('Save ckpt done.')
        stack_trace = traceback.format_exc()
        print(stack_trace)
|
class ResNet(torchvision.models.resnet.ResNet):
    """torchvision ResNet variant for feature extraction: ceil-mode max
    pooling, and in stages 2-4 the first block gets conv1.stride = 2 with
    conv2.stride reset to 1."""

    def __init__(self, block, layers, num_classes=1000):
        super(ResNet, self).__init__(block, layers, num_classes)
        # ceil_mode keeps odd spatial sizes from being truncated
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=0, ceil_mode=True)
        for stage_name in ('layer2', 'layer3', 'layer4'):
            stage = getattr(self, stage_name)
            stage[0].conv1.stride = (2, 2)
            stage[0].conv2.stride = (1, 1)
|
def resnet18(pretrained=False):
    """Construct a ResNet-18 model.

    Args:
        pretrained (bool): if True, load ImageNet-pretrained weights.
    """
    model = ResNet(BasicBlock, [2, 2, 2, 2])
    if not pretrained:
        return model
    model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))
    return model
|
def resnet34(pretrained=False):
    """Construct a ResNet-34 model.

    Args:
        pretrained (bool): if True, load ImageNet-pretrained weights.
    """
    model = ResNet(BasicBlock, [3, 4, 6, 3])
    if not pretrained:
        return model
    model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))
    return model
|
def resnet50(pretrained=False):
    """Construct a ResNet-50 model.

    Args:
        pretrained (bool): if True, load ImageNet-pretrained weights.
    """
    model = ResNet(Bottleneck, [3, 4, 6, 3])
    if not pretrained:
        return model
    model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))
    return model
|
def resnet101(pretrained=False):
    """Construct a ResNet-101 model.

    Args:
        pretrained (bool): if True, load ImageNet-pretrained weights.
    """
    model = ResNet(Bottleneck, [3, 4, 23, 3])
    if not pretrained:
        return model
    model.load_state_dict(model_zoo.load_url(model_urls['resnet101']))
    return model
|
def resnet152(pretrained=False):
    """Construct a ResNet-152 model.

    Args:
        pretrained (bool): if True, load ImageNet-pretrained weights.
    """
    model = ResNet(Bottleneck, [3, 8, 36, 3])
    if not pretrained:
        return model
    model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))
    return model
|
class myResnet(nn.Module):
    """Wrap a ResNet backbone to emit (fc, att) features for one image.

    fc: globally average-pooled feature vector; att: att_size x att_size
    spatial feature grid in (H, W, C) layout.
    """

    def __init__(self, resnet):
        super(myResnet, self).__init__()
        self.resnet = resnet

    def forward(self, img, att_size=14):
        # add a batch dimension for the conv stack
        x = img.unsqueeze(0)
        net = self.resnet
        x = net.maxpool(net.relu(net.bn1(net.conv1(x))))
        for stage in (net.layer1, net.layer2, net.layer3, net.layer4):
            x = stage(x)
        # global average pool over the spatial dims -> fc vector
        fc = x.mean(3).mean(2).squeeze()
        # fixed-size spatial grid, channels-last
        att = F.adaptive_avg_pool2d(x, [att_size, att_size]).squeeze().permute(1, 2, 0)
        return (fc, att)
|
def setup(opt):
if (opt.caption_model == 'fc'):
model = FCModel(opt)
if (opt.caption_model == 'show_tell'):
model = ShowTellModel(opt)
elif (opt.caption_model == 'att2in'):
model = Att2inModel(opt)
elif (opt.caption_model == 'att2in2'):
model = Att2in2Model(opt)
elif (opt.caption_model == 'att2all2'):
model = Att2all2Model(opt)
elif (opt.caption_model == 'adaatt'):
model = AdaAttModel(opt)
elif (opt.caption_model == 'adaattmo'):
model = AdaAttMOModel(opt)
elif (opt.caption_model == 'topdown'):
model = TopDownModel(opt)
elif (opt.caption_model == 'stackatt'):
model = StackAttModel(opt)
elif (opt.caption_model == 'denseatt'):
model = DenseAttModel(opt)
elif (opt.caption_model == 'transformer'):
model = TransformerModel(opt)
else:
raise Exception('Caption model not supported: {}'.format(opt.caption_model))
if (vars(opt).get('start_from', None) is not None):
assert os.path.isdir(opt.start_from), (' %s must be a a path' % opt.start_from)
assert os.path.isfile(os.path.join(opt.start_from, (('infos_' + opt.id) + '.pkl'))), ('infos.pkl file does not exist in path %s' % opt.start_from)
model.load_state_dict(torch.load(os.path.join(opt.start_from, 'model.pth')))
return model
|
def parse_opt():
    """Parse and validate all training options.

    Returns the argparse namespace. Fixes: the asserts for load_best_score
    and train_only previously carried the copy-pasted message
    'language_eval should be 0 or 1'.
    """
    parser = argparse.ArgumentParser()
    # data input settings
    parser.add_argument('--input_json', type=str, default='data/coco.json', help='path to the json file containing additional info and vocab')
    parser.add_argument('--input_fc_dir', type=str, default='data/cocotalk_fc', help='path to the directory containing the preprocessed fc feats')
    parser.add_argument('--input_att_dir', type=str, default='data/cocotalk_att', help='path to the directory containing the preprocessed att feats')
    parser.add_argument('--input_box_dir', type=str, default='data/cocotalk_box', help='path to the directory containing the boxes of att feats')
    parser.add_argument('--input_label_h5', type=str, default='data/coco_label.h5', help='path to the h5file containing the preprocessed dataset')
    parser.add_argument('--start_from', type=str, default=None, help="continue training from saved model at this path. Path must contain files saved by previous training process: \n 'infos.pkl' : configuration;\n 'checkpoint' : paths to model file(s) (created by tf).\n Note: this file contains absolute paths, be careful when moving files around;\n 'model.ckpt-*' : file(s) with model definition (created by tf)\n ")
    parser.add_argument('--cached_tokens', type=str, default='coco-train-idxs', help='Cached token file for calculating cider score during self critical training.')
    # model settings
    parser.add_argument('--caption_model', type=str, default='show_tell', help='show_tell, show_attend_tell, all_img, fc, att2in, att2in2, att2all2, adaatt, adaattmo, topdown, stackatt, denseatt, transformer')
    parser.add_argument('--rnn_size', type=int, default=512, help='size of the rnn in number of hidden nodes in each layer')
    parser.add_argument('--num_layers', type=int, default=1, help='number of layers in the RNN')
    parser.add_argument('--rnn_type', type=str, default='lstm', help='rnn, gru, or lstm')
    parser.add_argument('--input_encoding_size', type=int, default=512, help='the encoding size of each token in the vocabulary, and the image.')
    parser.add_argument('--att_hid_size', type=int, default=512, help='the hidden size of the attention MLP; only useful in show_attend_tell; 0 if not using hidden layer')
    parser.add_argument('--fc_feat_size', type=int, default=2048, help='2048 for resnet, 4096 for vgg')
    parser.add_argument('--att_feat_size', type=int, default=2048, help='2048 for resnet, 512 for vgg')
    parser.add_argument('--logit_layers', type=int, default=1, help='number of layers in the RNN')
    parser.add_argument('--use_bn', type=int, default=0, help='If 1, then do batch_normalization first in att_embed, if 2 then do bn both in the beginning and the end of att_embed')
    parser.add_argument('--norm_att_feat', type=int, default=0, help='If normalize attention features')
    parser.add_argument('--use_box', type=int, default=0, help='If use box features')
    parser.add_argument('--norm_box_feat', type=int, default=0, help='If use box, do we normalize box feature')
    # optimization: general
    parser.add_argument('--max_epochs', type=int, default=(- 1), help='number of epochs')
    parser.add_argument('--batch_size', type=int, default=16, help='minibatch size')
    parser.add_argument('--grad_clip', type=float, default=0.1, help='clip gradients at this value')
    parser.add_argument('--drop_prob_lm', type=float, default=0.5, help='strength of dropout in the Language Model RNN')
    parser.add_argument('--self_critical_after', type=int, default=(- 1), help='After what epoch do we start finetuning the CNN? (-1 = disable; never finetune, 0 = finetune from start)')
    parser.add_argument('--seq_per_img', type=int, default=5, help='number of captions to sample for each image during training. Done for efficiency since CNN forward pass is expensive. E.g. coco has 5 sents/image')
    parser.add_argument('--beam_size', type=int, default=1, help='used when sample_max = 1, indicates number of beams in beam search. Usually 2 or 3 works well. More is not better. Set this to 1 for faster runtime but a bit worse performance.')
    # optimization: optimizer and schedules
    parser.add_argument('--optim', type=str, default='adam', help='what update to use? rmsprop|sgd|sgdmom|adagrad|adam')
    parser.add_argument('--learning_rate', type=float, default=0.0004, help='learning rate')
    parser.add_argument('--learning_rate_decay_start', type=int, default=(- 1), help='at what iteration to start decaying learning rate? (-1 = dont) (in epoch)')
    parser.add_argument('--learning_rate_decay_every', type=int, default=3, help='every how many iterations thereafter to drop LR?(in epoch)')
    parser.add_argument('--learning_rate_decay_rate', type=float, default=0.8, help='every how many iterations thereafter to drop LR?(in epoch)')
    parser.add_argument('--optim_alpha', type=float, default=0.9, help='alpha for adam')
    parser.add_argument('--optim_beta', type=float, default=0.999, help='beta used for adam')
    parser.add_argument('--optim_epsilon', type=float, default=1e-08, help='epsilon that goes into denominator for smoothing')
    parser.add_argument('--weight_decay', type=float, default=0, help='weight_decay')
    parser.add_argument('--scheduled_sampling_start', type=int, default=(- 1), help='at what iteration to start decay gt probability')
    parser.add_argument('--scheduled_sampling_increase_every', type=int, default=5, help='every how many iterations thereafter to gt probability')
    parser.add_argument('--scheduled_sampling_increase_prob', type=float, default=0.05, help='How much to update the prob')
    parser.add_argument('--scheduled_sampling_max_prob', type=float, default=0.25, help='Maximum scheduled sampling prob.')
    # evaluation / checkpointing
    parser.add_argument('--val_images_use', type=int, default=3200, help='how many images to use when periodically evaluating the validation loss? (-1 = all)')
    parser.add_argument('--save_checkpoint_every', type=int, default=2500, help='how often to save a model checkpoint (in iterations)?')
    parser.add_argument('--checkpoint_path', type=str, default='save', help='directory to store checkpointed models')
    parser.add_argument('--language_eval', type=int, default=0, help='Evaluate language as well (1 = yes, 0 = no)? BLEU/CIDEr/METEOR/ROUGE_L? requires coco-caption code from Github.')
    parser.add_argument('--losses_log_every', type=int, default=25, help='How often do we snapshot losses, for inclusion in the progress dump? (0 = disable)')
    parser.add_argument('--load_best_score', type=int, default=1, help='Do we load previous best score when resuming training.')
    # misc
    parser.add_argument('--id', type=str, default='', help='an id identifying this run/job. used in cross-val and appended when writing progress files')
    parser.add_argument('--train_only', type=int, default=0, help='if true then use 80k, else use 110k')
    # reward / loss shaping
    parser.add_argument('--cider_reward_weight', type=float, default=1, help='The reward weight from cider')
    parser.add_argument('--bleu_reward_weight', type=float, default=0, help='The reward weight from bleu4')
    parser.add_argument('--label_smoothing', type=float, default=0, help='')
    parser.add_argument('--noamopt', action='store_true', help='')
    parser.add_argument('--noamopt_warmup', type=int, default=2000, help='')
    parser.add_argument('--noamopt_factor', type=float, default=1, help='')
    parser.add_argument('--reduce_on_plateau', action='store_true', help='')
    args = parser.parse_args()
    # sanity-check the parsed options
    assert (args.rnn_size > 0), 'rnn_size should be greater than 0'
    assert (args.num_layers > 0), 'num_layers should be greater than 0'
    assert (args.input_encoding_size > 0), 'input_encoding_size should be greater than 0'
    assert (args.batch_size > 0), 'batch_size should be greater than 0'
    assert ((args.drop_prob_lm >= 0) and (args.drop_prob_lm < 1)), 'drop_prob_lm should be between 0 and 1'
    assert (args.seq_per_img > 0), 'seq_per_img should be greater than 0'
    assert (args.beam_size > 0), 'beam_size should be greater than 0'
    assert (args.save_checkpoint_every > 0), 'save_checkpoint_every should be greater than 0'
    assert (args.losses_log_every > 0), 'losses_log_every should be greater than 0'
    assert ((args.language_eval == 0) or (args.language_eval == 1)), 'language_eval should be 0 or 1'
    assert ((args.load_best_score == 0) or (args.load_best_score == 1)), 'load_best_score should be 0 or 1'
    assert ((args.train_only == 0) or (args.train_only == 1)), 'train_only should be 0 or 1'
    return args
|
def build_vocab(imgs, params):
count_thr = params['word_count_threshold']
counts = {}
for img in imgs:
for sent in img['sentences']:
for w in sent['tokens']:
counts[w] = (counts.get(w, 0) + 1)
cw = sorted([(count, w) for (w, count) in counts.items()], reverse=True)
print('top words and their counts:')
print('\n'.join(map(str, cw[:20])))
total_words = sum(counts.values())
print('total words:', total_words)
bad_words = [w for (w, n) in counts.items() if (n <= count_thr)]
vocab = [w for (w, n) in counts.items() if (n > count_thr)]
bad_count = sum((counts[w] for w in bad_words))
print(('number of bad words: %d/%d = %.2f%%' % (len(bad_words), len(counts), ((len(bad_words) * 100.0) / len(counts)))))
print(('number of words in vocab would be %d' % (len(vocab),)))
print(('number of UNKs: %d/%d = %.2f%%' % (bad_count, total_words, ((bad_count * 100.0) / total_words))))
sent_lengths = {}
for img in imgs:
for sent in img['sentences']:
txt = sent['tokens']
nw = len(txt)
sent_lengths[nw] = (sent_lengths.get(nw, 0) + 1)
max_len = max(sent_lengths.keys())
print('max length sentence in raw data: ', max_len)
print('sentence length distribution (count, number of words):')
sum_len = sum(sent_lengths.values())
for i in range((max_len + 1)):
print(('%2d: %10d %f%%' % (i, sent_lengths.get(i, 0), ((sent_lengths.get(i, 0) * 100.0) / sum_len))))
if (bad_count > 0):
print('inserting the special UNK token')
vocab.append('UNK')
for img in imgs:
img['final_captions'] = []
for sent in img['sentences']:
txt = sent['tokens']
caption = [(w if (counts.get(w, 0) > count_thr) else 'UNK') for w in txt]
img['final_captions'].append(caption)
return vocab
|
def encode_captions(imgs, params, wtoi):
    """Encode all final captions into one large uint32 array (1-indexed words).

    Returns (L, label_start_ix, label_end_ix, label_length) where L is an
    (M, max_length) array of word indices (0-padded rows), label_start_ix /
    label_end_ix are 1-indexed inclusive (Lua-style) pointers into L marking
    each image's caption span, and label_length holds each caption's
    (truncated) token count.
    """
    max_length = params['max_length']
    num_images = len(imgs)
    num_captions = sum(len(img['final_captions']) for img in imgs)
    label_arrays = []
    label_start_ix = np.zeros(num_images, dtype='uint32')
    label_end_ix = np.zeros(num_images, dtype='uint32')
    label_length = np.zeros(num_captions, dtype='uint32')
    caption_counter = 0
    counter = 1  # 1-indexed running pointer into the concatenated array
    for i, img in enumerate(imgs):
        n = len(img['final_captions'])
        assert n > 0, 'error: some image has no captions'
        Li = np.zeros((n, max_length), dtype='uint32')
        for j, caption in enumerate(img['final_captions']):
            label_length[caption_counter] = min(max_length, len(caption))
            caption_counter += 1
            # Words beyond max_length are truncated; shorter rows stay 0-padded.
            for k, word in enumerate(caption[:max_length]):
                Li[j, k] = wtoi[word]
        label_arrays.append(Li)
        label_start_ix[i] = counter
        label_end_ix[i] = counter + n - 1
        counter += n
    L = np.concatenate(label_arrays, axis=0)
    assert L.shape[0] == num_captions, "lengths don't match? that's weird"
    assert np.all(label_length > 0), 'error: some caption had no words?'
    print('encoded captions to array of size ', L.shape)
    return (L, label_start_ix, label_end_ix, label_length)
|
def main(params):
    """Preprocess a Karpathy-style captions json.

    Builds the vocabulary, encodes all captions into an h5 label file, and
    writes an output json containing the ix_to_word table plus per-image
    metadata (file path, coco id, and — when images_root is set — the actual
    image dimensions).
    """
    imgs = json.load(open(params['input_json'], 'r'))['images']
    seed(123)  # fixed seed so preprocessing is reproducible
    vocab = build_vocab(imgs, params)
    # 1-indexed vocab tables (index 0 is reserved for padding)
    itow = {i + 1: w for i, w in enumerate(vocab)}
    wtoi = {w: i + 1 for i, w in enumerate(vocab)}
    L, label_start_ix, label_end_ix, label_length = encode_captions(imgs, params, wtoi)
    # Write the encoded labels and their index/length tables to h5.
    f_lb = h5py.File(params['output_h5'] + '_label.h5', 'w')
    f_lb.create_dataset('labels', dtype='uint32', data=L)
    f_lb.create_dataset('label_start_ix', dtype='uint32', data=label_start_ix)
    f_lb.create_dataset('label_end_ix', dtype='uint32', data=label_end_ix)
    f_lb.create_dataset('label_length', dtype='uint32', data=label_length)
    f_lb.close()
    out = {'ix_to_word': itow, 'images': []}
    for img in imgs:
        jimg = {'split': img['split']}
        if 'filename' in img:
            jimg['file_path'] = os.path.join(img['filepath'], img['filename'])
        if 'cocoid' in img:
            jimg['id'] = img['cocoid']
        if params['images_root'] != '':
            # Record true image dimensions when the image files are available.
            with Image.open(os.path.join(params['images_root'], img['filepath'], img['filename'])) as _img:
                jimg['width'], jimg['height'] = _img.size
        out['images'].append(jimg)
    json.dump(out, open(params['output_json'], 'w'))
    print('wrote ', params['output_json'])
|
def precook(s, n=4, out=False):
    """Turn a sentence string into an n-gram term-frequency dict.

    :param s: string : sentence to be converted into ngrams
    :param n: int : maximum n-gram length counted (1..n)
    :param out: unused; kept for interface compatibility
    :return: dict mapping ngram tuple -> occurrence count
    """
    words = s.split()
    counts = defaultdict(int)
    # range() replaces the Python-2-only xrange(); behavior is identical.
    for k in range(1, n + 1):
        for i in range(len(words) - k + 1):
            ngram = tuple(words[i:i + k])
            counts[ngram] += 1
    return counts
|
def cook_refs(refs, n=4):
    """Precompute n-gram count dicts for a list of reference sentences.

    :param refs: list of string : reference sentences for some image
    :param n: int : maximum n-gram length
    :return: list of n-gram count dicts, one per reference
    """
    cooked = []
    for ref in refs:
        cooked.append(precook(ref, n))
    return cooked
|
def create_crefs(refs):
    """Cook every image's reference list.

    :param refs: list (per image) of lists of reference sentences
    :return: list of cook_refs results, one per image
    """
    return [cook_refs(ref) for ref in refs]
|
def compute_doc_freq(crefs):
    """Compute document frequency for every n-gram over the reference corpus.

    An n-gram counts once per image no matter how many of that image's
    references contain it (hence the set).  Used later to derive idf.

    :param crefs: list (per image) of lists of n-gram count dicts
    :return: defaultdict mapping ngram tuple -> number of images containing it
    """
    document_frequency = defaultdict(float)
    for refs in crefs:
        # .items() replaces the Python-2-only .iteritems(); behavior is identical.
        for ngram in set(ngram for ref in refs for (ngram, count) in ref.items()):
            document_frequency[ngram] += 1
    return document_frequency
|
def build_dict(imgs, wtoi, params):
    """Build n-gram document frequencies (word- and index-based) for the
    images in the requested split.

    Note: mutates the caller's wtoi by adding '<eos>' -> 0.

    :return: (ngram_words, ngram_idxs, count_imgs)
    """
    wtoi['<eos>'] = 0
    count_imgs = 0
    refs_words = []
    refs_idxs = []
    for img in imgs:
        # 'restval' images count as training data; 'all' takes everything.
        in_split = (
            params['split'] == img['split']
            or (params['split'] == 'train' and img['split'] == 'restval')
            or params['split'] == 'all'
        )
        if not in_split:
            continue
        ref_words = []
        ref_idxs = []
        for sent in img['sentences']:
            tokens = sent['tokens'] + ['<eos>']
            # out-of-vocab tokens collapse to 'UNK'
            tokens = [t if t in wtoi else 'UNK' for t in tokens]
            ref_words.append(' '.join(tokens))
            ref_idxs.append(' '.join(str(wtoi[t]) for t in tokens))
        refs_words.append(ref_words)
        refs_idxs.append(ref_idxs)
        count_imgs += 1
    print('total imgs:', count_imgs)
    ngram_words = compute_doc_freq(create_crefs(refs_words))
    ngram_idxs = compute_doc_freq(create_crefs(refs_idxs))
    return (ngram_words, ngram_idxs, count_imgs)
|
def main(params):
    """Precompute CIDEr document frequencies for the chosen split.

    Loads the captions json and a previously-built vocab (dict_json), builds
    n-gram document frequencies over words and over word indices, and pickles
    each together with the reference image count.

    NOTE(review): this shadows the earlier ``main(params)`` defined above if
    both end up in one module — presumably they belong to separate scripts;
    confirm.
    """
    imgs = json.load(open(params['input_json'], 'r'))
    itow = json.load(open(params['dict_json'], 'r'))['ix_to_word']
    # Invert ix_to_word (json keys are string indices).
    wtoi = {w: i for (i, w) in itow.items()}
    imgs = imgs['images']
    # build_dict's third return value is the image count, stored as 'ref_len'
    # (the idf denominator used by CIDEr).
    (ngram_words, ngram_idxs, ref_len) = build_dict(imgs, wtoi, params)
    # NOTE(review): cPickle + text-mode 'w' is Python-2 style; Python 3 would
    # need the pickle module and 'wb' mode here.
    cPickle.dump({'document_frequency': ngram_words, 'ref_len': ref_len}, open((params['output_pkl'] + '-words.p'), 'w'), protocol=cPickle.HIGHEST_PROTOCOL)
    cPickle.dump({'document_frequency': ngram_idxs, 'ref_len': ref_len}, open((params['output_pkl'] + '-idxs.p'), 'w'), protocol=cPickle.HIGHEST_PROTOCOL)
|
class MELD_loader(Dataset):
    """Dialog dataset reader for the MELD corpus.

    Each data line is 'speaker<TAB>utterance<TAB>emotion<TAB>sentiment';
    blank lines separate dialogs, and the first two lines of the file are
    header rows.  Every utterance yields one sample: the speaker ids and
    utterances of the dialog so far plus the current emotion/sentiment label.
    """

    def __init__(self, txt_file, dataclass):
        self.dialogs = []
        with open(txt_file, 'r') as f:
            lines = f.readlines()
        # Normalize MELD's emotion names ('sadness' -> 'sad').
        emodict = {'anger': 'anger', 'disgust': 'disgust', 'fear': 'fear', 'joy': 'joy', 'neutral': 'neutral', 'sadness': 'sad', 'surprise': 'surprise'}
        self.sentidict = {'positive': ['joy'], 'negative': ['anger', 'disgust', 'fear', 'sadness'], 'neutral': ['neutral', 'surprise']}
        self.emoSet = set()
        self.sentiSet = set()
        self.speakerNum = []
        speakers = []
        context = []
        context_speaker = []
        for idx, line in enumerate(lines):
            if idx < 2:
                continue  # first two lines are headers
            if line == '\n' and len(self.dialogs) > 0:
                # Dialog boundary: record how many speakers it had, then reset.
                self.speakerNum.append(len(speakers))
                speakers = []
                context = []
                context_speaker = []
                continue
            speaker, utt, emo, senti = line.strip().split('\t')
            context.append(utt)
            if speaker not in speakers:
                speakers.append(speaker)
            context_speaker.append(speakers.index(speaker))
            # Snapshot copies so later appends don't mutate stored samples.
            self.dialogs.append([context_speaker[:], context[:], emodict[emo], senti])
            self.emoSet.add(emodict[emo])
            self.sentiSet.add(senti)
        self.emoList = sorted(self.emoSet)
        self.sentiList = sorted(self.sentiSet)
        self.labelList = self.emoList if dataclass == 'emotion' else self.sentiList
        self.speakerNum.append(len(speakers))

    def __len__(self):
        return len(self.dialogs)

    def __getitem__(self, idx):
        return (self.dialogs[idx], self.labelList, self.sentidict)
|
class Emory_loader(Dataset):
    """Dialog dataset reader for the EmoryNLP corpus.

    Each data line is 'speaker<TAB>utterance<TAB>emotion'; blank lines
    separate dialogs.  Sentiment is derived from the emotion's polarity
    group.  Every utterance yields one sample: the speaker ids and
    utterances of the dialog so far plus the current labels.
    """

    def __init__(self, txt_file, dataclass):
        self.dialogs = []
        with open(txt_file, 'r') as f:
            lines = f.readlines()
        # Polarity groups used to derive the sentiment label.
        pos = ['Joyful', 'Peaceful', 'Powerful']
        neg = ['Mad', 'Sad', 'Scared']
        neu = ['Neutral']
        emodict = {'Joyful': 'joy', 'Mad': 'mad', 'Peaceful': 'peaceful', 'Powerful': 'powerful', 'Neutral': 'neutral', 'Sad': 'sad', 'Scared': 'scared'}
        self.sentidict = {'positive': pos, 'negative': neg, 'neutral': neu}
        self.speakerNum = []
        self.emoSet = set()
        self.sentiSet = set()
        speakers = []
        context = []
        context_speaker = []
        for line in lines:
            if line == '\n' and len(self.dialogs) > 0:
                # Dialog boundary: record speaker count, then reset.
                self.speakerNum.append(len(speakers))
                speakers = []
                context = []
                context_speaker = []
                continue
            speaker, utt, emo = line.strip().split('\t')
            context.append(utt)
            if emo in pos:
                senti = 'positive'
            elif emo in neg:
                senti = 'negative'
            elif emo in neu:
                senti = 'neutral'
            else:
                # Unknown label: warn only; senti keeps its previous value.
                print('ERROR emotion&sentiment')
            if speaker not in speakers:
                speakers.append(speaker)
            context_speaker.append(speakers.index(speaker))
            # Snapshot copies so later appends don't mutate stored samples.
            self.dialogs.append([context_speaker[:], context[:], emodict[emo], senti])
            self.emoSet.add(emodict[emo])
            self.sentiSet.add(senti)
        self.emoList = sorted(self.emoSet)
        self.sentiList = sorted(self.sentiSet)
        self.labelList = self.emoList if dataclass == 'emotion' else self.sentiList
        self.speakerNum.append(len(speakers))

    def __len__(self):
        return len(self.dialogs)

    def __getitem__(self, idx):
        return (self.dialogs[idx], self.labelList, self.sentidict)
|
class IEMOCAP_loader(Dataset):
    """Dialog dataset reader for the IEMOCAP corpus.

    Each data line is 'speaker<TAB>utterance<TAB>emotion' (the utterance may
    itself contain tabs, so only the first and last fields are structural);
    blank lines separate dialogs.  Sentiment is derived from the emotion's
    polarity group.  Every utterance yields one sample: the speaker ids and
    utterances of the dialog so far plus the current labels.
    """

    def __init__(self, txt_file, dataclass):
        self.dialogs = []
        f = open(txt_file, 'r')
        dataset = f.readlines()
        f.close()
        temp_speakerList = []
        context = []
        context_speaker = []
        self.speakerNum = []
        # Polarity groups used to derive the sentiment label.
        # NOTE(review): 'ang' grouped as positive looks suspicious (MELD treats
        # anger as negative) — confirm the intended polarity grouping.
        pos = ['ang', 'exc', 'hap']
        neg = ['fru', 'sad']
        neu = ['neu']
        emodict = {'ang': 'angry', 'exc': 'excited', 'fru': 'frustrated', 'hap': 'happy', 'neu': 'neutral', 'sad': 'sad'}
        self.sentidict = {'positive': pos, 'negative': neg, 'neutral': neu}
        self.emoSet = set()
        self.sentiSet = set()
        for (i, data) in enumerate(dataset):
            if ((data == '\n') and (len(self.dialogs) > 0)):
                # Dialog boundary: record speaker count, then reset the context.
                self.speakerNum.append(len(temp_speakerList))
                temp_speakerList = []
                context = []
                context_speaker = []
                continue
            fields = data.strip().split('\t')
            speaker = fields[0]
            utt = ' '.join(fields[1:(- 1)])  # utterance may contain tabs
            emo = fields[(- 1)]
            context.append(utt)
            if (emo in pos):
                senti = 'positive'
            elif (emo in neg):
                senti = 'negative'
            elif (emo in neu):
                senti = 'neutral'
            else:
                # Unknown label: warn only; senti keeps its previous value.
                print('ERROR emotion&sentiment')
            if (speaker not in temp_speakerList):
                temp_speakerList.append(speaker)
            speakerCLS = temp_speakerList.index(speaker)
            context_speaker.append(speakerCLS)
            # Snapshot copies so later appends don't mutate stored samples.
            self.dialogs.append([context_speaker[:], context[:], emodict[emo], senti])
            self.emoSet.add(emodict[emo])
            # BUGFIX: sentiSet was never populated, leaving labelList empty for
            # the sentiment dataclass (the MELD/Emory loaders do populate it).
            self.sentiSet.add(senti)
        self.emoList = sorted(self.emoSet)
        self.sentiList = sorted(self.sentiSet)
        if (dataclass == 'emotion'):
            self.labelList = self.emoList
        else:
            self.labelList = self.sentiList
        self.speakerNum.append(len(temp_speakerList))

    def __len__(self):
        return len(self.dialogs)

    def __getitem__(self, idx):
        return (self.dialogs[idx], self.labelList, self.sentidict)
|
class DD_loader(Dataset):
    """Dialog dataset reader for the DailyDialog corpus.

    Each data line is 'speaker<TAB>utterance<TAB>emotion' (the utterance may
    itself contain tabs, so only the first and last fields are structural);
    blank lines separate dialogs.  Sentiment is derived from the emotion's
    polarity group.  Every utterance yields one sample: the speaker ids and
    utterances of the dialog so far plus the current labels.
    """

    def __init__(self, txt_file, dataclass):
        self.dialogs = []
        f = open(txt_file, 'r')
        dataset = f.readlines()
        f.close()
        temp_speakerList = []
        context = []
        context_speaker = []
        self.speakerNum = []
        self.emoSet = set()
        self.sentiSet = set()
        # Polarity groups used to derive the sentiment label.
        pos = ['happiness']
        neg = ['anger', 'disgust', 'fear', 'sadness']
        neu = ['neutral', 'surprise']
        emodict = {'anger': 'anger', 'disgust': 'disgust', 'fear': 'fear', 'happiness': 'happy', 'neutral': 'neutral', 'sadness': 'sad', 'surprise': 'surprise'}
        self.sentidict = {'positive': pos, 'negative': neg, 'neutral': neu}
        for (i, data) in enumerate(dataset):
            if ((data == '\n') and (len(self.dialogs) > 0)):
                # Dialog boundary: record speaker count, then reset the context.
                self.speakerNum.append(len(temp_speakerList))
                temp_speakerList = []
                context = []
                context_speaker = []
                continue
            fields = data.strip().split('\t')
            speaker = fields[0]
            utt = ' '.join(fields[1:(- 1)])  # utterance may contain tabs
            emo = fields[(- 1)]
            if (emo in pos):
                senti = 'positive'
            elif (emo in neg):
                senti = 'negative'
            elif (emo in neu):
                senti = 'neutral'
            else:
                # Unknown label: warn only; senti keeps its previous value.
                print('ERROR emotion&sentiment')
            context.append(utt)
            if (speaker not in temp_speakerList):
                temp_speakerList.append(speaker)
            speakerCLS = temp_speakerList.index(speaker)
            context_speaker.append(speakerCLS)
            # Snapshot copies so later appends don't mutate stored samples.
            self.dialogs.append([context_speaker[:], context[:], emodict[emo], senti])
            self.emoSet.add(emodict[emo])
            # BUGFIX: sentiSet was never populated, leaving labelList empty for
            # the sentiment dataclass (the MELD/Emory loaders do populate it).
            self.sentiSet.add(senti)
        self.emoList = sorted(self.emoSet)
        self.sentiList = sorted(self.sentiSet)
        if (dataclass == 'emotion'):
            self.labelList = self.emoList
        else:
            self.labelList = self.sentiList
        self.speakerNum.append(len(temp_speakerList))

    def __len__(self):
        return len(self.dialogs)

    def __getitem__(self, idx):
        return (self.dialogs[idx], self.labelList, self.sentidict)
|
def CELoss(pred_outs, labels):
    """Cross-entropy loss over class logits.

    pred_outs: [batch, clsNum] raw (unnormalized) logits
    labels: [batch] integer class indices
    """
    return nn.CrossEntropyLoss()(pred_outs, labels)
|
def main():
    """Train an ERC (emotion recognition in conversation) classifier.

    Reads configuration from the module-level ``args`` (dataset name, label
    class, learning rate, epochs, ...), builds the matching dataset loaders,
    fine-tunes an ERC_model on GPU, evaluates on dev after every epoch, and
    saves the model whenever the dev f-score improves.

    Depends on module-level globals not visible here: args, logger,
    streamHandler, make_batch_roberta/bert/gpt, ERC_model, _CalACC, _SaveModel.
    """
    'Dataset Loading'
    batch_size = args.batch
    dataset = args.dataset
    dataclass = args.cls  # 'emotion' or the sentiment label set
    sample = args.sample  # fraction of training batches used per epoch
    model_type = args.pretrained
    freeze = args.freeze
    initial = args.initial
    dataType = 'multi'
    # Select the dataset directory and its matching loader class.
    if (dataset == 'MELD'):
        if args.dyadic:
            dataType = 'dyadic'
        else:
            dataType = 'multi'
        data_path = (('./dataset/MELD/' + dataType) + '/')
        DATA_loader = MELD_loader
    elif (dataset == 'EMORY'):
        data_path = './dataset/EMORY/'
        DATA_loader = Emory_loader
    elif (dataset == 'iemocap'):
        data_path = './dataset/iemocap/'
        DATA_loader = IEMOCAP_loader
    elif (dataset == 'dailydialog'):
        data_path = './dataset/dailydialog/'
        DATA_loader = DD_loader
    # Pick the batch-collation function matching the pretrained backbone.
    if ('roberta' in model_type):
        make_batch = make_batch_roberta
    elif (model_type == 'bert-large-uncased'):
        make_batch = make_batch_bert
    else:
        make_batch = make_batch_gpt
    if freeze:
        freeze_type = 'freeze'
    else:
        freeze_type = 'no_freeze'
    train_path = ((data_path + dataset) + '_train.txt')
    dev_path = ((data_path + dataset) + '_dev.txt')
    test_path = ((data_path + dataset) + '_test.txt')
    train_dataset = DATA_loader(train_path, dataclass)
    if (sample < 1.0):
        # NOTE(review): shuffling is disabled when subsampling, presumably so
        # the same prefix of batches is used every epoch — confirm intended.
        train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=False, num_workers=4, collate_fn=make_batch)
    else:
        train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=4, collate_fn=make_batch)
    # Number of batches actually consumed per epoch (see the break below).
    train_sample_num = int((len(train_dataloader) * sample))
    dev_dataset = DATA_loader(dev_path, dataclass)
    dev_dataloader = DataLoader(dev_dataset, batch_size=1, shuffle=False, num_workers=4, collate_fn=make_batch)
    test_dataset = DATA_loader(test_path, dataclass)
    test_dataloader = DataLoader(test_dataset, batch_size=1, shuffle=False, num_workers=4, collate_fn=make_batch)
    'logging and path'
    save_path = os.path.join((dataset + '_models'), model_type, initial, freeze_type, dataclass, str(sample))
    print('###Save Path### ', save_path)
    log_path = os.path.join(save_path, 'train.log')
    if (not os.path.exists(save_path)):
        os.makedirs(save_path)
    fileHandler = logging.FileHandler(log_path)
    logger.addHandler(streamHandler)
    logger.addHandler(fileHandler)
    logger.setLevel(level=logging.DEBUG)
    'Model Loading'
    # GPT-2-style models summarize an utterance with the LAST token's state.
    if ('gpt2' in model_type):
        last = True
    else:
        last = False
    print('DataClass: ', dataclass, '!!!')
    clsNum = len(train_dataset.labelList)
    model = ERC_model(model_type, clsNum, last, freeze, initial)
    model = model.cuda()
    model.train()
    'Training Setting'
    training_epochs = args.epoch
    save_term = int((training_epochs / 5))  # NOTE(review): computed but never used below
    max_grad_norm = args.norm
    lr = args.lr
    num_training_steps = (len(train_dataset) * training_epochs)
    num_warmup_steps = len(train_dataset)  # warm up for one epoch's worth of samples
    optimizer = torch.optim.AdamW(model.train_params, lr=lr)
    scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps)
    'Input & Label Setting'
    (best_dev_fscore, best_test_fscore) = (0, 0)
    (best_dev_fscore_macro, best_dev_fscore_micro, best_test_fscore_macro, best_test_fscore_micro) = (0, 0, 0, 0)
    best_epoch = 0
    for epoch in tqdm(range(training_epochs)):
        model.train()
        for (i_batch, data) in enumerate(train_dataloader):
            # Stop after the sampled number of batches for this epoch.
            if (i_batch > train_sample_num):
                print(i_batch, train_sample_num)
                break
            'Prediction'
            (batch_input_tokens, batch_labels, batch_speaker_tokens) = data
            (batch_input_tokens, batch_labels) = (batch_input_tokens.cuda(), batch_labels.cuda())
            pred_logits = model(batch_input_tokens, batch_speaker_tokens)
            'Loss calculation & training'
            loss_val = CELoss(pred_logits, batch_labels)
            loss_val.backward()
            # Clip, step optimizer then LR scheduler, and clear grads.
            torch.nn.utils.clip_grad_norm_(model.parameters(), max_grad_norm)
            optimizer.step()
            scheduler.step()
            optimizer.zero_grad()
        'Dev & Test evaluation'
        model.eval()
        if (dataset == 'dailydialog'):
            # DailyDialog reports macro + micro F1; the micro score excludes
            # label 4 from the labels list (presumably the dominant 'neutral'
            # class — confirm against the label ordering).
            (dev_acc, dev_pred_list, dev_label_list) = _CalACC(model, dev_dataloader)
            (dev_pre_macro, dev_rec_macro, dev_fbeta_macro, _) = precision_recall_fscore_support(dev_label_list, dev_pred_list, average='macro')
            (dev_pre_micro, dev_rec_micro, dev_fbeta_micro, _) = precision_recall_fscore_support(dev_label_list, dev_pred_list, labels=[0, 1, 2, 3, 5, 6], average='micro')
            dev_fscore = (dev_fbeta_macro + dev_fbeta_micro)
            'Best Score & Model Save'
            # Model is saved (and test is run) only when dev improves.
            if (dev_fscore > (best_dev_fscore_macro + best_dev_fscore_micro)):
                best_dev_fscore_macro = dev_fbeta_macro
                best_dev_fscore_micro = dev_fbeta_micro
                (test_acc, test_pred_list, test_label_list) = _CalACC(model, test_dataloader)
                (test_pre_macro, test_rec_macro, test_fbeta_macro, _) = precision_recall_fscore_support(test_label_list, test_pred_list, average='macro')
                (test_pre_micro, test_rec_micro, test_fbeta_micro, _) = precision_recall_fscore_support(test_label_list, test_pred_list, labels=[0, 1, 2, 3, 5, 6], average='micro')
                best_epoch = epoch
                _SaveModel(model, save_path)
        else:
            # All other datasets use a single weighted F1 on dev.
            (dev_acc, dev_pred_list, dev_label_list) = _CalACC(model, dev_dataloader)
            (dev_pre, dev_rec, dev_fbeta, _) = precision_recall_fscore_support(dev_label_list, dev_pred_list, average='weighted')
            'Best Score & Model Save'
            if (dev_fbeta > best_dev_fscore):
                best_dev_fscore = dev_fbeta
                (test_acc, test_pred_list, test_label_list) = _CalACC(model, test_dataloader)
                (test_pre, test_rec, test_fbeta, _) = precision_recall_fscore_support(test_label_list, test_pred_list, average='weighted')
                best_epoch = epoch
                _SaveModel(model, save_path)
        if ((epoch % 5) == 0):
            logger.info('Epoch: {}'.format(epoch))
            if (dataset == 'dailydialog'):
                logger.info('Devleopment ## accuracy: {}, macro-fscore: {}, micro-fscore: {}'.format(dev_acc, dev_fbeta_macro, dev_fbeta_micro))
                logger.info('')
            else:
                logger.info('Devleopment ## accuracy: {}, precision: {}, recall: {}, fscore: {}'.format(dev_acc, dev_pre, dev_rec, dev_fbeta))
                logger.info('')
    # NOTE(review): test_acc / test_fbeta* only exist if some epoch improved
    # on dev; a run with no improvement would raise NameError here — confirm.
    if (dataset == 'dailydialog'):
        logger.info('Final Fscore ## test-accuracy: {}, test-macro: {}, test-micro: {}, test_epoch: {}'.format(test_acc, test_fbeta_macro, test_fbeta_micro, best_epoch))
    else:
        logger.info('Final Fscore ## test-accuracy: {}, test-fscore: {}, test_epoch: {}'.format(test_acc, test_fbeta, best_epoch))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.