code stringlengths 17 6.64M |
|---|
def set_template(args):
    """Adjust hyper-parameters in *args* in place according to the chosen template.

    The template string may combine several keywords ('jpeg', 'EDSR_paper',
    'MDSR', 'DDBPN', 'GAN'); every matching keyword overrides its related
    options, so later sections can overwrite earlier ones.

    :param args: argparse.Namespace holding the experiment configuration
    """
    # Substring membership is the idiomatic replacement for find() >= 0.
    if 'jpeg' in args.template:
        args.data_train = 'DIV2K_jpeg'
        args.data_test = 'DIV2K_jpeg'
        args.epochs = 200
        args.lr_decay = 100
    if 'EDSR_paper' in args.template:
        args.model = 'EDSR'
        args.n_resblocks = 32
        args.n_feats = 256
        args.res_scale = 0.1
    if 'MDSR' in args.template:
        args.model = 'MDSR'
        args.patch_size = 48
        args.epochs = 1650
    if 'DDBPN' in args.template:
        args.model = 'DDBPN'
        args.patch_size = 128
        args.scale = '4'
        args.data_test = 'Set5'
        args.batch_size = 20
        args.epochs = 1000
        args.lr_decay = 500
        args.gamma = 0.1
        args.weight_decay = 0.0001
        args.loss = '1*MSE'
    if 'GAN' in args.template:
        args.epochs = 200
        args.lr = 5e-05
        args.lr_decay = 150
|
class Trainer():
    """Orchestrates training and evaluation of a super-resolution model.

    Wires together the data loaders, model, loss, optimizer and LR scheduler,
    and reports progress/results through the checkpoint helper ``ckp``.
    """

    def __init__(self, args, loader, my_model, my_loss, ckp):
        self.args = args
        self.scale = args.scale  # list of scale factors to evaluate
        self.ckp = ckp
        self.loader_train = loader.loader_train
        self.loader_test = loader.loader_test
        self.model = my_model
        self.loss = my_loss
        self.optimizer = utility.make_optimizer(args, self.model)
        self.scheduler = utility.make_scheduler(args, self.optimizer)
        # When resuming ('load' is set), restore the optimizer state and
        # fast-forward the scheduler by the number of already-logged epochs.
        if (self.args.load != '.'):
            self.optimizer.load_state_dict(torch.load(os.path.join(ckp.dir, 'optimizer.pt')))
            for _ in range(len(ckp.log)):
                self.scheduler.step()
        # Sentinel "previous epoch loss"; used by the batch-skip heuristic below.
        self.error_last = 100000000.0

    def train(self):
        """Run one training epoch over ``loader_train``."""
        self.scheduler.step()
        self.loss.step()
        epoch = (self.scheduler.last_epoch + 1)
        lr = self.scheduler.get_lr()[0]
        self.ckp.write_log('[Epoch {}]\tLearning rate: {:.2e}'.format(epoch, Decimal(lr)))
        self.loss.start_log()
        self.model.train()
        (timer_data, timer_model) = (utility.timer(), utility.timer())
        # NOTE: `lr` is rebound below from learning-rate to the low-res batch.
        for (batch, (lr, hr, _, idx_scale)) in enumerate(self.loader_train):
            (lr, hr) = self.prepare([lr, hr])
            timer_data.hold()
            timer_model.tic()
            self.optimizer.zero_grad()
            sr = self.model(lr, idx_scale)
            loss = self.loss(sr, hr)
            # Skip obviously diverged batches: only back-propagate when the
            # loss stays below skip_threshold times the last epoch's loss.
            if (loss.item() < (self.args.skip_threshold * self.error_last)):
                loss.backward()
                self.optimizer.step()
            else:
                print('Skip this batch {}! (Loss: {})'.format((batch + 1), loss.item()))
            timer_model.hold()
            if (((batch + 1) % self.args.print_every) == 0):
                self.ckp.write_log('[{}/{}]\t{}\t{:.1f}+{:.1f}s'.format(((batch + 1) * self.args.batch_size), len(self.loader_train.dataset), self.loss.display_loss(batch), timer_model.release(), timer_data.release()))
            timer_data.tic()
        self.loss.end_log(len(self.loader_train))
        # Remember the most recent logged loss for the skip heuristic.
        self.error_last = self.loss.log[((- 1), (- 1))]

    def test(self):
        """Evaluate on ``loader_test`` for every scale; log PSNR, optionally
        save result images and a checkpoint of the best model."""
        epoch = (self.scheduler.last_epoch + 1)
        self.ckp.write_log('\nEvaluation:')
        self.ckp.add_log(torch.zeros(1, len(self.scale)))
        self.model.eval()
        timer_test = utility.timer()
        with torch.no_grad():
            for (idx_scale, scale) in enumerate(self.scale):
                eval_acc = 0
                self.loader_test.dataset.set_scale(idx_scale)
                tqdm_test = tqdm(self.loader_test, ncols=80)
                for (idx_img, (lr, hr, filename, _)) in enumerate(tqdm_test):
                    filename = filename[0]
                    # A 1-element HR tensor marks "no ground truth available".
                    no_eval = (hr.nelement() == 1)
                    if (not no_eval):
                        (lr, hr) = self.prepare([lr, hr])
                    else:
                        lr = self.prepare([lr])[0]
                    sr = self.model(lr, idx_scale)
                    sr = utility.quantize(sr, self.args.rgb_range)
                    save_list = [sr]
                    if (not no_eval):
                        eval_acc += utility.calc_psnr(sr, hr, scale, self.args.rgb_range, benchmark=self.loader_test.dataset.benchmark)
                        save_list.extend([lr, hr])
                    if self.args.save_results:
                        self.ckp.save_results_nopostfix(filename, save_list, scale)
                # Average PSNR for this scale; best[1] holds best-epoch indices.
                self.ckp.log[((- 1), idx_scale)] = (eval_acc / len(self.loader_test))
                best = self.ckp.log.max(0)
                self.ckp.write_log('[{} x{}]\tPSNR: {:.3f} (Best: {:.3f} @epoch {})'.format(self.args.data_test, scale, self.ckp.log[((- 1), idx_scale)], best[0][idx_scale], (best[1][idx_scale] + 1)))
        self.ckp.write_log('Total time: {:.2f}s, ave time: {:.2f}s\n'.format(timer_test.toc(), (timer_test.toc() / len(self.loader_test))), refresh=True)
        if (not self.args.test_only):
            self.ckp.save(self, epoch, is_best=((best[1][0] + 1) == epoch))

    def prepare(self, l, volatile=False):
        """Move a list of tensors to the configured device, optionally
        casting to half precision.  `volatile` is unused (legacy parameter)."""
        device = torch.device(('cpu' if self.args.cpu else 'cuda'))
        def _prepare(tensor):
            if (self.args.precision == 'half'):
                tensor = tensor.half()
            return tensor.to(device)
        return [_prepare(_l) for _l in l]

    def terminate(self):
        """Return True when training should stop (test-only mode runs one
        evaluation first; otherwise stop after args.epochs epochs)."""
        if self.args.test_only:
            self.test()
            return True
        else:
            epoch = (self.scheduler.last_epoch + 1)
            return (epoch >= self.args.epochs)
|
class BaseDataClass(object):
    """Base class for objects dealing with data preprocessing
    and preparing data for training and evaluating the models.
    """

    def __init__(self, config):
        self.config = (config or dict())
        self.max_src_len = self.config['max_src_len']
        self.max_tgt_len = self.config['max_tgt_len']
        self.num_mr_attr = len(MR_FIELDS)
        self.vocab = None
        # Per-split containers, populated by setup().
        self.uni_mr = {'train': None, 'dev': None, 'test': None}
        self.lexicalizations = {'train': None, 'dev': None, 'test': None}
        self.fnames = {}

    def setup(self):
        """Read the train/dev/test CSV files named in the config, build the
        vocabulary, and convert every available split to numerical ids.

        :raises Exception: when train data is configured without dev data
        """
        logger.info('Setting up the data')
        train_x_raw = train_y_raw = None
        dev_x_raw = dev_y_raw = None
        test_x_raw = test_y_raw = None
        train_data_fname = self.config.get('train_data', None)
        dev_data_fname = self.config.get('dev_data', None)
        test_data_fname = self.config.get('test_data', None)
        # The vocabulary file lives next to the training data.
        vocab_path = ('%s.vocab' % train_data_fname)
        if (train_data_fname is not None):
            logger.debug('Reading train data')
            (train_x_raw, train_y_raw, train_lex, train_mr) = self.read_csv_train(train_data_fname, group_ref=True)
            self.lexicalizations['train'] = train_lex
            self.uni_mr['train'] = train_mr
            if (dev_data_fname is None):
                raise Exception('No dev data in the config file!')
            logger.debug('Reading dev data')
            (dev_x_raw, dev_y_raw, dev_lex, dev_mr) = self.read_csv_train(dev_data_fname, group_ref=True)
            self.lexicalizations['dev'] = dev_lex
            self.uni_mr['dev'] = dev_mr
        if (test_data_fname is not None):
            logger.debug('Reading test data')
            # NOTE(review): the test split is read with read_csv_train(), which
            # requires an 'mr,ref' header; read_csv_test() exists but is unused
            # here -- confirm the test CSV really contains references.
            (test_x_raw, test_y_raw, test_lex, test_mr) = self.read_csv_train(test_data_fname)
            self.lexicalizations['test'] = test_lex
            self.uni_mr['test'] = test_mr
        self.setup_vocab(vocab_path, train_x_raw, train_y_raw)
        if (train_x_raw is not None):
            self.train = self.data_to_token_ids_train(train_x_raw, train_y_raw)
            self.fnames['train'] = train_data_fname
        if (dev_x_raw is not None):
            self.dev = self.data_to_token_ids_train(dev_x_raw, dev_y_raw)
            self.fnames['dev'] = dev_data_fname
        if (test_x_raw is not None):
            self.test = self.data_to_token_ids_test(test_x_raw)
            self.fnames['test'] = test_data_fname

    def read_csv_train(self, fname, group_ref=False):
        """
        Read a CSV file ('mr,ref' header) containing training data.

        :param fname: path to the CSV file
        :param group_ref: group multiple references, if possible (dev set)
        :return: 4 lists:
            - MR instances (delexicalized attribute lists)
            - corresponding tokenized textual descriptions
            - lexicalizations of ['name', 'near'], one per unique MR group
            - the raw MR strings, one per row
        """
        raw_data_x = []
        raw_data_y = []
        lexicalizations = []
        uni_mrs = []
        orig = []
        with open(fname, 'r') as csv_file:
            reader = csv.reader(csv_file, delimiter=',', quotechar='"')
            header = next(reader)
            assert (header == ['mr', 'ref']), 'The file does not contain a header!'
            # Seed the grouping loop with the first data row.
            first_row = next(reader)
            curr_mr = first_row[0]
            curr_snt = first_row[1]
            orig.append((curr_mr, curr_snt))
            (curr_src, curr_lex) = self.process_e2e_mr(curr_mr)
            curr_text = self.tokenize(curr_snt, curr_lex)
            raw_data_x.append(curr_src)
            raw_data_y.append(curr_text)
            lexicalizations.append(curr_lex)
            uni_mrs.append(curr_mr)
            for row in reader:
                mr = row[0]
                text = row[1]
                orig.append((mr, text))
                (this_src, this_lex) = self.process_e2e_mr(mr)
                this_text = self.tokenize(text, this_lex)
                raw_data_x.append(this_src)
                raw_data_y.append(this_text)
                uni_mrs.append(mr)
                # NOTE(review): a lexicalization is recorded only for the first
                # row of each MR group, so `lexicalizations` is NOT parallel to
                # raw_data_x -- verify downstream indexing.
                if (mr == curr_mr):
                    continue
                else:
                    lexicalizations.append(this_lex)
                    curr_mr = mr
        if group_ref:
            gen_multi_ref_dev(orig, fname=('%s.multi-ref' % fname))
        # Size report via the module logger (was a stray print statement).
        logger.debug('%d %d %d %d' % (len(raw_data_x), len(raw_data_y), len(lexicalizations), len(uni_mrs)))
        return (raw_data_x, raw_data_y, lexicalizations, uni_mrs)

    def read_csv_test(self, fname):
        """Read a test CSV (MRs only); mirrors read_csv_train's return shape
        with None in place of the references."""
        raw_data_x = []
        lexicalizations = []
        uni_mrs = []
        with open(fname, 'r') as csv_file:
            reader = csv.reader(csv_file, delimiter=',', quotechar='"')
            header = next(reader)
            for row in reader:
                mr = row[0]
                (this_src, this_lex) = self.process_e2e_mr(mr)
                raw_data_x.append(this_src)
                lexicalizations.append(this_lex)
                uni_mrs.append(mr)
        return (raw_data_x, None, lexicalizations, uni_mrs)

    def tokenize_normalize(self, s, lex_list=None):
        """
        Experimenting with various ways to normalize the data.
        As it turned out, normalization did not improve the results.
        Reason: all data, including the dev set, follows the same noisy
        distribution, so normalization enlarges the distance between the
        train and dev data distributions.

        :param s: target string
        :param lex_list: list containing lexicalizations for the string s
        :return: list of normalized tokens
        """
        words = []
        # Delexicalize: replace name/near surface forms with placeholder tokens.
        if lex_list:
            for (l, t) in zip(lex_list, (NAME_TOKEN, NEAR_TOKEN)):
                if l:
                    s = s.replace(l, t)
        (s_r_toks, s_toks) = ([], s.strip().split())
        # Split tokens that have a placeholder glued to surrounding text.
        for tok in s_toks:
            if (NEAR_TOKEN in tok):
                tok = tok.split(NEAR_TOKEN)
                tok = [(NEAR_TOKEN if (x == '') else x) for x in tok]
                s_r_toks.extend(tok)
            elif (NAME_TOKEN in tok):
                tok = tok.split(NAME_TOKEN)
                tok = [(NAME_TOKEN if (x == '') else x) for x in tok]
                s_r_toks.extend(tok)
            else:
                s_r_toks.append(tok)
        # (Removed a leftover debug print that referenced the undefined name
        # `s_tpks` and raised NameError whenever this method was called.)
        for fragment in s_r_toks:
            match = _FRAC_NUM_PAT.match(fragment)
            if match:
                fragment_tokens = []
                pound = match.group(1)
                price = match.group(2)
                price = re.sub('.00', '', price)
                if (not pound.isdigit()):
                    fragment_tokens.append(pound)
                fragment_tokens.append(price)
            match = _ZERO_PAT.match(fragment)
            if match:
                fragment_tokens = [re.sub('.00', '', fragment)]
            else:
                fragment_tokens = _WORD_SPLIT.split(fragment)
            words.extend(fragment_tokens)
        tokens = [w for w in words if w]
        return tokens

    def tokenize(self, s, lex_list=None):
        """
        A simple procedure to tokenize a string.

        :param s: string to be tokenized
        :param lex_list: list of lexicalizations ([name, near] surface forms)
        :return: list of tokens from string s
        """
        words = []
        # Delexicalize: replace name/near surface forms with placeholder tokens.
        if lex_list:
            for (l, t) in zip(lex_list, (NAME_TOKEN, NEAR_TOKEN)):
                if l:
                    s = s.replace(l, t)
        (s_r_toks, s_toks) = ([], s.strip().split())
        # Split tokens that have a placeholder glued to surrounding text.
        for tok in s_toks:
            if ((NEAR_TOKEN != tok) and (NEAR_TOKEN in tok)):
                tok = tok.split(NEAR_TOKEN)
                tok = [(NEAR_TOKEN if (x == '') else x) for x in tok]
                s_r_toks.extend(tok)
            elif ((NAME_TOKEN != tok) and (NAME_TOKEN in tok)):
                tok = tok.split(NAME_TOKEN)
                tok = [(NAME_TOKEN if (x == '') else x) for x in tok]
                s_r_toks.extend(tok)
            else:
                s_r_toks.append(tok)
        for fragment in s_r_toks:
            fragment_tokens = _WORD_SPLIT.split(fragment)
            words.extend(fragment_tokens)
        tokens = [w for w in words if w]
        return tokens

    def setup_vocab(self, vocab_path, train_x_raw, train_y_raw):
        """Create or load the vocabulary; implemented by subclasses."""
        raise NotImplementedError()

    def process_e2e_mr(self, s):
        """Parse one raw MR string; implemented by subclasses."""
        raise NotImplementedError()

    def data_to_token_ids_train(self, *args, **kwargs):
        """Convert raw train/dev tokens to ids; implemented by subclasses."""
        raise NotImplementedError()

    def data_to_token_ids_test(self, *args, **kwargs):
        """Convert raw test tokens to ids; implemented by subclasses."""
        raise NotImplementedError()

    def prepare_training_data(self, *args, **kwargs):
        """Batch the numerical data; implemented by subclasses."""
        raise NotImplementedError()
|
def gen_multi_ref_dev(dev_xy, fname):
    """
    A utility function for generating a multi-reference file
    from the data provided by the E2E NLG organizers.

    Consecutive rows sharing the same MR are treated as alternative references
    and written as one blank-line-separated group to *fname*; each unique MR is
    written once to '<fname>.src'.

    :param dev_xy: a list of M tuples, where M is the number of data instances.
        Each tuple contains a src string and a tgt string.
        For example:
        ('name[Taste of Cambridge], eatType[restaurant], priceRange[£20-25], customer rating[3 out of 5]',
         'Taste of Cambridge is a restaurant with a customer rating of 3 out of 5 and and a price range of £20-£25')
    :param fname: the name of the target file for the multi-reference data
    :return: None
    """
    logger.debug('Generating a multi-ref file')  # fixed typo: was 'Generaing'
    multi_ref_src_fn = ('%s.src' % fname)
    with open(fname, 'w') as fout, open(multi_ref_src_fn, 'w') as fout_src:
        (curr_mr, curr_txt) = dev_xy[0]
        fout.write(('%s\n' % curr_txt))
        fout_src.write(('%s\n' % curr_mr))
        for (mr, txt) in dev_xy[1:]:
            if (mr == curr_mr):
                fout.write(('%s\n' % txt))
            else:
                # New MR group: blank separator line, then its first reference.
                fout.write('\n')
                fout.write(('%s\n' % txt))
                curr_mr = mr
                fout_src.write(('%s\n' % mr))
    logger.debug('Done!')
|
def truncate_pad_sentence(snt_ids, max_len, add_start=True, add_end=True):
    """Truncate *snt_ids* to *max_len* items (optionally adding BOS/EOS first)
    and pad the result so it contains exactly *max_len* items.

    :param snt_ids: sequence of token ids
    :param max_len: target length
    :param add_start: prepend the BOS symbol before truncating
    :param add_end: append the EOS symbol before truncating
    :return: a list of exactly max_len token ids
    """
    truncated = truncate_sentence(snt_ids, max_len, add_start, add_end)
    return pad_snt(truncated, max_len)
|
def truncate_sentence(snt_ids, max_len, add_start=True, add_end=True):
    """Keep only the first *max_len* items of a token-id sequence.

    BOS/EOS markers (when requested) are attached *before* truncation, so a
    long sentence may lose its EOS to the cut-off.

    :param snt_ids: sequence of token ids
    :param max_len: cut-off value for truncation
    :param add_start: prepend the BOS symbol (bool)
    :param add_end: append the EOS symbol (bool)
    :return: truncated list of token ids
    """
    prefix = [BOS_ID] if add_start else []
    suffix = [EOS_ID] if add_end else []
    return (prefix + list(snt_ids) + suffix)[:max_len]
|
def pad_snt(snt_ids_trunc, max_len):
    """Right-pad a (possibly truncated) id sequence with PAD_ID so that it
    contains exactly *max_len* items.

    :param snt_ids_trunc: sequence of token ids, at most max_len long
    :param max_len: target length after padding
    :return: padded list of token ids
    """
    missing = max_len - len(snt_ids_trunc)
    return snt_ids_trunc + ([PAD_ID] * missing)
|
def generator_wrapper(iterable):
    """Wrap an indexable sequence into a generator that yields its items.

    :param iterable: any object supporting len() and integer indexing
    :return: generator over the items of *iterable*
    """
    total = len(iterable)
    pos = 0
    while pos < total:
        yield iterable[pos]
        pos += 1
|
def cuda_if_gpu(T):
    """Move tensor *T* to the GPU when the module-level `use_cuda` flag is set;
    otherwise return it unchanged.

    :param T: tensor (or Variable) to relocate
    :return: the tensor, on GPU if available
    """
    if use_cuda:
        return T.cuda()
    return T
|
def cudify(fn):
    """Decorator that moves the wrapped function's returned tensor to the GPU,
    if one is available.

    :param fn: function returning a tensor
    :return: wrapped function
    """
    @functools.wraps(fn)
    def moved(*args, **kwargs):
        return cuda_if_gpu(fn(*args, **kwargs))
    return moved
|
@cudify
def ids2var(snt_ids, *dims, addEOS=True):
    """Convert a sequence of token ids into a LongTensor Variable of the
    requested shape.

    :param snt_ids: list of token ids (left untouched; a deep copy is used)
    :param dims: target view shape, e.g. (-1, 1)
    :param addEOS: append the EOS id before reshaping
    :return: Variable of shape *dims*
    """
    ids = copy.deepcopy(snt_ids)
    if addEOS:
        ids.append(EOS_ID)
    return Variable(torch.LongTensor(ids).view(dims))
|
@cudify
def cat2onehot_var(snt_ids, vocab_size, batch_size):
    """Convert a sequence of categorical ids into a one-hot Variable of shape
    (-1, batch_size, vocab_size).

    Based on: https://stackoverflow.com/questions/38592324/one-hot-encoding-using-numpy#38592416
    """
    flat_targets = np.array([snt_ids]).reshape((- 1))
    # Row-select from the identity matrix: one row per target id.
    one_hot = np.eye(vocab_size)[flat_targets]
    return Variable(torch.FloatTensor(one_hot).view((- 1), batch_size, vocab_size))
|
def test_ids2var():
    """Smoke test: ids2var must reshape 5 ids + EOS into the requested (1,2,3)."""
    sample = [1] * 5
    reshaped = ids2var(sample, 1, 2, 3, addEOS=True)
    assert (reshaped.data.size() == torch.Size([1, 2, 3]))
    print('Test (ids2var): passed')
|
class E2EMLPData(BaseDataClass):
    """Concrete data class: fixed-slot MR representation for the E2E task."""

    def process_e2e_mr(self, mr_string):
        """
        Process an E2E NLG Challenge meaning representation.
        Represent each MR as a list of MR_KEY_NUM attribute values, positioned
        according to MR_KEYMAP; 'name' and 'near' values are replaced by
        placeholder tokens and returned separately for re-lexicalization.

        :param mr_string: e.g. "name[Wildwood], eatType[pub], ..."
        :return: (mr_data, [name_value_or_None, near_value_or_None])
        """
        items = mr_string.split(', ')
        mr_data = ([PAD_ID] * MR_KEY_NUM)
        lex = [None, None]
        for (idx, item) in enumerate(items):
            (key, raw_val) = item.split('[')
            key_idx = MR_KEYMAP[key]
            # raw_val[:-1] strips the trailing ']'.
            if (key == 'name'):
                mr_val = NAME_TOKEN
                lex[0] = raw_val[:(- 1)]
            elif (key == 'near'):
                mr_val = NEAR_TOKEN
                lex[1] = raw_val[:(- 1)]
            else:
                mr_val = raw_val[:(- 1)]
            mr_data[key_idx] = mr_val
        return (mr_data, lex)

    def data_to_token_ids_train(self, raw_x, raw_y):
        """
        Convert raw lists of tokens to numerical representations.

        :param raw_x: a list of N instances, each a list of MR values
        :param raw_y: a list of N instances, each a list of tokens comprising
            the textual description of the restaurant in raw_x
        :return: (list of src id sequences, list of tgt id sequences)
        """
        assert (self.max_src_len is not None)
        assert (self.max_tgt_len is not None)
        data_split_x = []
        data_split_y = []
        skipped_cnt = 0
        for (idx, x) in enumerate(raw_x):
            src_ids = [self.vocab.get_word(tok) for tok in x]
            src_len = len(src_ids)
            y = raw_y[idx]
            tgt_ids = [self.vocab.get_word(tok) for tok in y]
            tgt_len = len(tgt_ids)
            # NOTE: src uses a strict '>' while tgt uses '>=' -- the target
            # limit reserves one position (presumably for EOS).
            if ((src_len > self.max_src_len) or (tgt_len >= self.max_tgt_len)):
                logger.info(('Skipping long snt: %d (src) / %d (tgt)' % (src_len, tgt_len)))
                skipped_cnt += 1
                continue
            data_split_x.append(src_ids)
            data_split_y.append(tgt_ids)
        logger.debug(('Skipped %d long sentences' % skipped_cnt))
        return (data_split_x, data_split_y)

    def data_to_token_ids_test(self, raw_x):
        """Convert raw test-side tokens to ids, skipping over-long instances.

        :param raw_x: list of MR value lists
        :return: (list of src id sequences, None)
        """
        assert (self.max_src_len is not None)
        data_split_x = []
        for (idx, x) in enumerate(raw_x):
            src_ids = [self.vocab.get_word(tok) for tok in x]
            src_size = len(src_ids)
            if (src_size > self.max_src_len):
                # The instance is skipped, not truncated (message fixed: it
                # previously claimed 'Truncating').
                logger.debug(('Skipping long snt: %d' % idx))
                continue
            data_split_x.append(src_ids)
        return (data_split_x, None)

    def index_data(self, data_size, mode='no_shuffling'):
        """
        Aux function for indexing instances in the dataset.

        :param data_size: number of instances
        :param mode: 'random' for a shuffled permutation, 'no_shuffling' for
            the identity ordering
        :return: numpy array of indices
        """
        if (mode == 'random'):
            indices = np.random.choice(np.arange(data_size), data_size, replace=False)
        elif (mode == 'no_shuffling'):
            indices = np.arange(data_size)
        else:
            raise NotImplementedError()
        return indices

    def prepare_training_data(self, xy_ids, batch_size):
        """
        Cut the dataset into batches.

        :param xy_ids: a tuple of 2 lists:
            - a list of MR instances, each itself a list of numerical ids
            - a list of tokenized texts (same format)
        :param batch_size: number of instances per batch
        :return: list of (padded_src_batch, padded_tgt_batch) pairs
        """
        # Sort by source length (desc) so batches are roughly length-uniform;
        # instances beyond num_batches * batch_size are dropped.
        sorted_data = sorted(zip(*xy_ids), key=(lambda p: len(p[0])), reverse=True)
        data_size = len(sorted_data)
        num_batches = (data_size // batch_size)
        data_indices = self.index_data(data_size, mode='no_shuffling')
        batch_pairs = []
        for bi in range(num_batches):
            batch_x = []
            batch_y = []
            curr_batch_indices = data_indices[(bi * batch_size):((bi + 1) * batch_size)]
            for idx in curr_batch_indices:
                (x_ids, y_ids) = sorted_data[idx]
                # Deep-copy before appending EOS so the source lists stay intact.
                x_ids_copy = copy.deepcopy(x_ids)
                x_ids_copy.append(EOS_ID)
                batch_x.append(x_ids_copy)
                y_ids_copy = copy.deepcopy(y_ids)
                y_ids_copy.append(EOS_ID)
                batch_y.append(y_ids_copy)
            batch_x_lens = [len(s) for s in batch_x]
            batch_y_lens = [len(s) for s in batch_y]
            max_src_len = max(batch_x_lens)
            max_tgt_len = max(batch_y_lens)
            batch_x_padded = [pad_snt(x, max_src_len) for x in batch_x]
            batch_y_padded = [pad_snt(y, max_tgt_len) for y in batch_y]
            batch_pairs.append((batch_x_padded, batch_y_padded))
        return batch_pairs

    def setup_vocab(self, vocab_path, train_x_raw, train_y_raw):
        """Create/load a vocabulary shared between source and target sides."""
        self.vocab = VocabularyShared(vocab_path, train_x_raw, train_y_raw, lower=False)
|
class VocabularyBase(object):
    """Common methods for all vocabulary classes."""

    def load_vocabulary(self, vocabulary_path):
        """Load a vocabulary (one token per line) from *vocabulary_path*,
        populating ``self.id2tok`` / ``self.tok2id``.

        :raises ValueError: when the file does not exist
        """
        if check_file_exists([vocabulary_path]):
            logger.debug(('Loading vocabulary from %s' % vocabulary_path))
            vocablist = []
            with open(vocabulary_path, 'r') as f:
                for line in f:
                    vocablist.append(line.strip())
            for (idx, tok) in enumerate(vocablist):
                self.id2tok[idx] = tok
                self.tok2id[tok] = idx
        else:
            raise ValueError(('Vocabulary file not found: %s' % vocabulary_path))

    def process_raw_data(self, raw_data):
        """Collect the unique tokens of *raw_data* (a list of token lists) in
        first-seen order, lower-casing when ``self.lower`` is set."""
        seen = set()
        vocablist = []
        for x in raw_data:
            for tok in x:
                if self.lower:
                    tok = tok.lower()
                # Set membership keeps this O(n); a list scan was quadratic.
                if (tok not in seen):
                    seen.add(tok)
                    vocablist.append(tok)
        return vocablist

    def get_word(self, key, default=constants.UNK_ID):
        """
        Retrieve the (numerical) id of a token, or *default* when absent.

        :param key: query token
        :param default: default numerical id
        :return: numerical id for the query token (int)
        """
        key = (key.lower() if self.lower else key)
        val = self.tok2id.get(key, default)
        return val

    def get_label(self, idx, default=None):
        """
        Retrieve the token corresponding to the query (numerical) id.

        :param idx: (numerical) query id
        :param default: token to return when idx is not in the vocabulary
        :return: the token, or *default*
        """
        # Honor the documented default instead of raising KeyError.
        return self.id2tok.get(idx, default)

    def __len__(self):
        return self.size

    @property
    def size(self):
        """Number of entries in the vocabulary."""
        return len(self.id2tok)
|
class VocabularyOneSide(VocabularyBase):
    def __init__(self, vocab_path, data_raw=None, lower=True):
        """
        Initialize a vocabulary class. Either you specify a vocabulary path to
        load the vocabulary from a file, or you provide training data to
        create one.

        :param vocab_path: path to a saved vocabulary
        :param data_raw: training data
        :param lower: lower-case tokens before indexing
        """
        self.lower = lower
        self.id2tok = {}
        self.tok2id = {}
        if (not check_file_exists(vocab_path)):
            assert (data_raw is not None), 'You need to process train data ** before ** creating a vocabulary!'
            self.create_vocabulary(raw_data=data_raw, vocab_path=vocab_path)
        else:
            self.load_vocabulary(vocab_path)

    def create_vocabulary(self, raw_data, vocab_path):
        """
        A simple way to create a vocabulary.

        :param raw_data: data in the form of a list of token lists
        :param vocab_path: filename to save the vocabulary
        :return: None
        """
        logger.info('Creating vocabulary')
        assert (type(raw_data) == list)
        # Copy the reserved tokens: extending constants.START_VOCAB directly
        # would mutate the shared module-level constant.
        vocablist = list(constants.START_VOCAB)
        vocablist.extend(self.process_raw_data(raw_data))
        # Order-preserving dedup: list(set(...)) scrambled the ids (including
        # the reserved tokens') nondeterministically between runs.
        vocablist = list(dict.fromkeys(vocablist))
        with open(vocab_path, 'w') as vocab_file:
            for w in vocablist:
                vocab_file.write(('%s\n' % w))
        for (idx, tok) in enumerate(vocablist):
            self.id2tok[idx] = tok
            self.tok2id[tok] = idx
        logger.info(('Created vocabulary of size %d' % self.size))
|
class VocabularyShared(VocabularyBase):
    def __init__(self, vocab_path, data_raw_src=None, data_raw_tgt=None, lower=True):
        """
        Initialize a vocabulary class. Either you specify a vocabulary path to
        load the vocabulary from a file, or you provide training data to
        create one.

        :param vocab_path: path to a saved vocabulary
        :param data_raw_src: training data, source side
        :param data_raw_tgt: training data, target side
        :param lower: lower-case tokens before indexing
        """
        self.lower = lower
        self.id2tok = {}
        self.tok2id = {}
        if (not check_file_exists(vocab_path)):
            assert ((data_raw_src is not None) and (data_raw_tgt is not None)), 'You need to process train data ** before ** creating a vocabulary!'
            self.create_vocabulary(raw_data_src=data_raw_src, raw_data_tgt=data_raw_tgt, vocab_path=vocab_path)
        else:
            self.load_vocabulary(vocab_path)

    def create_vocabulary(self, raw_data_src, raw_data_tgt, vocab_path):
        """
        Create a vocabulary shared between the source and target sides.

        :param raw_data_src: source-side data (list of token lists)
        :param raw_data_tgt: target-side data (list of token lists)
        :param vocab_path: filename to save the vocabulary
        :return: None
        """
        logger.info('Creating vocabulary')
        assert (type(raw_data_src) == type(raw_data_tgt) == list)
        # Copy the reserved tokens: extending constants.START_VOCAB directly
        # would mutate the shared module-level constant.
        vocablist = list(constants.START_VOCAB)
        vocablist.extend(self.process_raw_data(raw_data_src))
        vocablist.extend(self.process_raw_data(raw_data_tgt))
        # Tokens occurring on both sides (or in START_VOCAB) would otherwise
        # be written twice and receive two conflicting ids; dedup keeps the
        # first occurrence and preserves order.
        vocablist = list(dict.fromkeys(vocablist))
        with open(vocab_path, 'w') as vocab_file:
            for w in vocablist:
                vocab_file.write(('%s\n' % w))
        for (idx, tok) in enumerate(vocablist):
            self.id2tok[idx] = tok
            self.tok2id[tok] = idx
        logger.info(('Created vocabulary of size %d' % self.size))
|
class BaseEvaluator(object):
    """Base class containing methods for evaluation of E2E NLG models."""

    def __init__(self, config):
        self.config = (config or dict())

    def label2snt(self, id2word, ids):
        # Map numeric ids back to tokens; returns both the token list and the
        # space-joined sentence string.
        tokens = [id2word[t] for t in ids]
        return (tokens, ' '.join(tokens))

    def predict_one(self, model, src_snt_ids, beam_size=None):
        """Decode a single source instance, with beam search when *beam_size*
        is truthy, greedy decoding otherwise."""
        input_var = ids2var(src_snt_ids, (- 1), 1, addEOS=True)
        if beam_size:
            (output_ids, attention_weights) = model.predict_beam(input_var, beam_size=beam_size)
        else:
            (output_ids, attention_weights) = model.predict(input_var)
        return (output_ids, attention_weights)

    def predict_dis(self, model, src_snt_ids, dis_sns, alpha=1.0):
        """Decode one instance conditioned on a list of 'distractor'
        instances *dis_sns*; *alpha* weights their influence."""
        input_var = ids2var(src_snt_ids, (- 1), 1, addEOS=True)
        input_dis = [ids2var(x, (- 1), 1, addEOS=True) for x in dis_sns]
        (output_ids, attention_weights) = model.predict_dis(input_var, input_dis, alpha)
        return (output_ids, attention_weights)

    def evaluate_model(self, model, dev_data, dev_mr=None, beam_size=None, alpha=1.0, dis=True):
        """Evaluate *model* on multi-ref data.

        Only the first row of each run of identical MRs in *dev_mr* is decoded
        (subsequent rows are alternative references).  Three mutually exclusive
        modes: beam search (*beam_size* set), distractor-based decoding
        (*dis*), or plain greedy decoding.

        :param model: model exposing predict / predict_beam / predict_dis
        :param dev_data: list of source id sequences
        :param dev_mr: parallel list of raw MRs, used to detect duplicates
        :param beam_size: beam width, or None
        :param alpha: distractor weight passed to predict_dis
        :param dis: use distractor-based decoding when beam_size is None
        :return: (decoded_ids, decoded_attn_weights)
        """
        if (beam_size is not None):
            # One result list per beam position.
            (decoded_ids, decoded_attn_weights) = ([[] for _ in range(beam_size)], [[] for _ in range(beam_size)])
            curr_x_ids = dev_mr[0]
            (out_ids, scores) = self.predict_one(model, dev_data[0], beam_size)
            for _ in range(beam_size):
                decoded_ids[_].append(out_ids[_])
                decoded_attn_weights[_].append(scores[_])
            for (cur_id, snt_ids) in enumerate(dev_data[1:]):
                if (dev_mr[(cur_id + 1)] == curr_x_ids):
                    continue
                else:
                    (out_ids, scores) = self.predict_one(model, snt_ids, beam_size)
                    for _ in range(beam_size):
                        decoded_ids[_].append(out_ids[_])
                        decoded_attn_weights[_].append(scores[_])
                    # NOTE(review): the other branches track dev_mr[cur_id + 1]
                    # here; off-by-one suspected -- confirm before relying on it.
                    curr_x_ids = dev_mr[cur_id]
        elif dis:
            batch_size = 64
            # dis_x collects the index of the first row of each MR group.
            (dis_x, decoded_ids) = ([0], [])
            decoded_attn_weights = []
            (cur_start, cur_tmp, curr_x_ids) = (0, dev_data[0], dev_mr[0])
            for (cur_id, snt_ids) in enumerate(dev_data[1:]):
                if (dev_mr[(cur_id + 1)] == cur_tmp):
                    continue
                else:
                    cur_tmp = dev_mr[(cur_id + 1)]
                    dis_x.append((cur_id + 1))
            # Distractors: up to batch_size neighbouring group starts around
            # the current position, excluding the instance itself.
            dis_ids = (dis_x[(cur_start - int((batch_size / 2))):cur_start] + dis_x[(cur_start + 1):(cur_start + int((batch_size / 2)))])
            if (cur_start in dis_ids):
                dis_ids.remove(cur_start)
            def mask_(snt_ids, dis_ids, dev_data):
                # NOTE(review): the literal 5 appears to act as a placeholder /
                # "empty slot" id -- confirm.  For each position holding 5,
                # vote in the most common non-5 value among the distractors.
                outs = [5 for _ in range(len(snt_ids))]
                for (cur_f, cur_k) in enumerate(snt_ids):
                    if ((cur_k == 5) and (outs[cur_f] == 5)):
                        pp = []
                        for dis_id in dis_ids:
                            dis_cur = dev_data[dis_id]
                            if (dis_cur[cur_f] != 5):
                                pp.append(dis_cur[cur_f])
                        outs[cur_f] = (Counter(pp).most_common()[0][0] if len(Counter(pp).most_common()) else 5)
                return outs
            # NOTE(review): snt_ids here is the leftover loop variable from the
            # scan above (the LAST instance), while prediction below uses
            # dev_data[0] -- verify this mismatch is intended.
            p = mask_(snt_ids, dis_ids, dev_data)
            dis_sns = [dev_data[x] for x in dis_ids]
            dis_sns = ([p] + dis_sns)
            (out_ids, attn_weights) = self.predict_dis(model, dev_data[0], dis_sns, alpha)
            decoded_ids.append(out_ids)
            decoded_attn_weights.append(attn_weights[1])
            for (cur_id, snt_ids) in enumerate(dev_data[1:]):
                real_id = (cur_id + 1)
                if (dev_mr[real_id] == curr_x_ids):
                    continue
                else:
                    cur_start += 1
                    dis_ids = (dis_x[(cur_start - int((batch_size / 2))):cur_start] + dis_x[(cur_start + 1):(cur_start + int((batch_size / 2)))])
                    if (real_id in dis_ids):
                        dis_ids.remove(real_id)
                    dis_sns = [dev_data[x] for x in dis_ids]
                    # Drop any distractor identical to the current instance.
                    while (snt_ids in dis_sns):
                        dis_sns.remove(snt_ids)
                    (out_ids, attn_weights) = self.predict_dis(model, snt_ids, dis_sns, alpha)
                    decoded_ids.append(out_ids)
                    decoded_attn_weights.append(attn_weights[1])
                    curr_x_ids = dev_mr[real_id]
        else:
            decoded_ids = []
            decoded_attn_weights = []
            # NOTE(review): this branch detects duplicates by comparing
            # dev_data entries rather than dev_mr -- confirm equivalence.
            curr_x_ids = dev_data[0]
            (out_ids, attn_weights) = self.predict_one(model, dev_data[0])
            decoded_ids.append(out_ids)
            decoded_attn_weights.append(attn_weights[1])
            for (cur_id, snt_ids) in enumerate(dev_data[1:]):
                real_id = (cur_id + 1)
                if (snt_ids == curr_x_ids):
                    continue
                else:
                    (out_ids, attn_weights) = self.predict_one(model, snt_ids)
                    decoded_ids.append(out_ids)
                    decoded_attn_weights.append(attn_weights[1])
                    curr_x_ids = snt_ids
        return (decoded_ids, decoded_attn_weights)

    def lexicalize_predictions(self, all_tokids, data_lexicalizations, id2word):
        """
        Given model predictions, convert numerical ids to tokens,
        substituting placeholder items (NEAR and NAME) with the values in
        *data_lexicalizations*, which were recorded during preprocessing.

        :param all_tokids: list of predicted id sequences (tensor elements)
        :param data_lexicalizations: per-instance [name, near] surface forms
        :param id2word: id -> token mapping
        :return: list of token lists
        """
        all_tokens = []
        for (idx, snt_ids) in enumerate(all_tokids):
            this_snt_toks = []
            this_snt_lex = data_lexicalizations[idx]
            # Drop the final id (EOS) of every prediction.
            for t in snt_ids[:(- 1)]:
                t = t.data.item()
                tok = id2word[t]
                if (tok == NAME_TOKEN):
                    l = this_snt_lex[0]
                    # Placeholder without a recorded value is silently dropped.
                    if (not (l is None)):
                        this_snt_toks.append(l)
                elif (tok == NEAR_TOKEN):
                    l = this_snt_lex[1]
                    if (not (l is None)):
                        this_snt_toks.append(l)
                else:
                    this_snt_toks.append(tok)
            all_tokens.append(this_snt_toks)
        return all_tokens
|
def eval_output(ref_fn, pred_fn):
    """Run the external evaluation script (COCO/MTeval, measure_scores.py) and
    parse the five metric scores out of its output.

    :param ref_fn: reference file name
    :param pred_fn: prediction file name
    :return: [BLEU, NIST, METEOR, ROUGE_L, CIDEr] as floats
    """
    pat = '==============\\nBLEU: (\\d+\\.?\\d*)\\nNIST: (\\d+\\.?\\d*)\\nMETEOR: (\\d+\\.?\\d*)\\nROUGE_L: (\\d+\\.?\\d*)\\nCIDEr: (\\d+\\.?\\d*)\\n'
    raw_output = _sh_eval(pred_fn, ref_fn).decode('utf-8')
    match = re.search(pat, raw_output)
    return [float(score) for score in match.group(1, 2, 3, 4, 5)]
|
def _sh_eval(pred_fn, ref_fn):
    """Invoke eval_scripts/run_eval.sh (located next to this module) on the
    reference and prediction files.

    :param pred_fn: prediction file name
    :param ref_fn: reference file name
    :return: the script's raw stdout (bytes)
    """
    module_dir = os.path.dirname(os.path.abspath(__file__))
    script_fname = os.path.join(module_dir, 'eval_scripts/run_eval.sh')
    return subprocess.check_output([script_fname, ref_fn, pred_fn])
|
class Sequence(object):
    """Represents a complete or partial sequence (beam-search hypothesis),
    ordered by score."""

    def __init__(self, output, state, logprob, score, attention=None):
        """Initializes the Sequence.

        Args:
            output: List of word ids in the sequence.
            state: Model state after generating the previous word.
            logprob: Log-probability of the sequence.
            score: Score of the sequence.
            attention: Optional attention weights for the sequence.
        """
        self.output = output
        self.state = state
        self.logprob = logprob
        self.score = score
        self.attention = attention

    def __cmp__(self, other):
        """Compares Sequences by score (Python 2 relic; kept for callers that
        invoke it explicitly)."""
        assert isinstance(other, Sequence)
        if (self.score == other.score):
            return 0
        elif (self.score < other.score):
            return (- 1)
        else:
            return 1

    def __lt__(self, other):
        assert isinstance(other, Sequence)
        return (self.score < other.score)

    def __eq__(self, other):
        assert isinstance(other, Sequence)
        return (self.score == other.score)

    # Defining __eq__ implicitly set __hash__ to None (unhashable in
    # Python 3); restore hashing, consistent with the score-based equality.
    def __hash__(self):
        return hash(self.score)
|
class TopN(object):
    """Maintains the top n elements of an incrementally provided set."""

    def __init__(self, n):
        self._n = n
        self._data = []  # min-heap holding at most n elements

    def size(self):
        """Number of elements currently stored."""
        assert (self._data is not None)
        return len(self._data)

    def push(self, x):
        """Pushes a new element, evicting the smallest one once the capacity
        of n elements is reached."""
        assert (self._data is not None)
        if len(self._data) >= self._n:
            heapq.heappushpop(self._data, x)
        else:
            heapq.heappush(self._data, x)

    def extract(self, sort=False):
        """Extracts all elements from the TopN. This is a destructive
        operation: the only method callable afterwards is reset().

        Args:
            sort: Whether to return the elements in descending sorted order.
        Returns:
            A list of data; the top n elements provided to the set.
        """
        assert (self._data is not None)
        extracted, self._data = self._data, None
        if sort:
            extracted.sort(reverse=True)
        return extracted

    def reset(self):
        """Returns the TopN to an empty state."""
        self._data = []
|
class BaseModel(nn.Module):
    """Common ancestor of all models: stores the configuration dict and a
    CUDA-availability flag."""

    def __init__(self, config):
        super(BaseModel, self).__init__()
        self.config = config
        # Queried once at construction time so forward passes need not
        # re-check the runtime.
        self.use_cuda = torch.cuda.is_available()
|
class Seq2SeqModel(BaseModel):
    """Adds vocabulary-size and sequence-length bookkeeping on top of
    BaseModel.

    Values are injected via the ``set_*`` methods and read back through the
    matching read-only properties.
    """

    def set_src_vocab_size(self, vocab_size):
        self._src_vocab_size = vocab_size

    @property
    def src_vocab_size(self):
        return self._src_vocab_size

    def set_tgt_vocab_size(self, vocab_size):
        self._tgt_vocab_size = vocab_size

    @property
    def tgt_vocab_size(self):
        return self._tgt_vocab_size

    def set_max_src_len(self, l):
        self._max_src_len = l

    @property
    def max_src_len(self):
        return self._max_src_len

    def set_max_tgt_len(self, l):
        self._max_tgt_len = l

    @property
    def max_tgt_len(self):
        return self._max_tgt_len
|
class E2ESeq2SeqModel(Seq2SeqModel):
    """Seq2seq skeleton for the E2E NLG task; concrete subclasses supply the
    encoder and decoder."""

    def setup(self, data):
        """Build all sub-modules; *data* supplies the vocabulary and the
        maximum sequence lengths."""
        self.set_flags()
        self.set_data_dependent_params(data)
        self.set_embeddings()
        self.set_encoder()
        self.set_decoder()

    def set_data_dependent_params(self, data):
        """Copy vocabulary size (shared src/tgt) and length limits from the
        data object."""
        shared_vocab_size = len(data.vocab)
        self.set_src_vocab_size(shared_vocab_size)
        self.set_tgt_vocab_size(shared_vocab_size)
        self.set_max_src_len(data.max_src_len)
        self.set_max_tgt_len(data.max_tgt_len)

    def set_embeddings(self):
        """Create the embedding table and its dropout layer from the config."""
        self.embedding_dim = self.config['embedding_dim']
        self.embedding_mat = get_embed_matrix(self.src_vocab_size, self.embedding_dim)
        dropout_p = self.config.get('embedding_dropout', 0.0)
        self.embedding_dropout_layer = nn.Dropout(dropout_p)

    def embedding_lookup(self, ids, *args, **kwargs):
        """Map id tensors to their embedding vectors."""
        return self.embedding_mat(ids)

    def set_flags(self):
        """Read training flags from the config."""
        self.teacher_forcing_ratio = self.config.get('teacher_forcing_ratio', 1.0)

    def set_encoder(self):
        """Build the encoder; implemented by subclasses."""
        raise NotImplementedError()

    def set_decoder(self):
        """Build the decoder; implemented by subclasses."""
        raise NotImplementedError()
|
def get_GRU_unit(gru_config):
    """Build an nn.GRU from a config dict.

    :param gru_config: dict with 'input_size', 'hidden_size', 'dropout' and an
        optional 'bidirectional' flag (defaults to False)
    :return: a torch.nn.GRU instance
    """
    bidirectional = gru_config.get('bidirectional', False)
    return nn.GRU(
        input_size=gru_config['input_size'],
        hidden_size=gru_config['hidden_size'],
        dropout=gru_config['dropout'],
        bidirectional=bidirectional,
    )
|
def get_embed_matrix(vocab_size, embedding_dim):
    """Embedding table of shape (vocab_size, embedding_dim); the PAD_ID row is
    treated as padding (zero vector, no gradient updates)."""
    return nn.Embedding(vocab_size, embedding_dim, padding_idx=PAD_ID)
|
class AttnBahd(nn.Module):
    def __init__(self, enc_dim, dec_dim, num_directions, attn_dim=None):
        """Bahdanau-style additive attention.

        :param enc_dim: dimension of the encoder hidden states h_j
        :param dec_dim: dimension of the decoder hidden states s_{i-1}
        :param num_directions: number of encoder directions (1 or 2)
        :param attn_dim: internal attention dimension (default: same as decoder)
        """
        super(AttnBahd, self).__init__()
        self.num_directions = num_directions
        self.h_dim = enc_dim
        self.s_dim = dec_dim
        self.a_dim = (self.s_dim if (attn_dim is None) else attn_dim)
        self.build()

    def build(self):
        """Create the attention parameters: score(h, s) = v^T tanh(U h + W s)."""
        self.U = nn.Linear((self.h_dim * self.num_directions), self.a_dim)
        self.W = nn.Linear(self.s_dim, self.a_dim)
        self.v = nn.Linear(self.a_dim, 1)
        self.tanh = nn.Tanh()
        # Fix: make the softmax dimension explicit -- the implicit default is
        # deprecated. Scores are B x SL, so normalize over the SL dimension,
        # which matches the old implicit behavior for 2-D input.
        self.softmax = nn.Softmax(dim=1)

    def precmp_U(self, enc_outputs):
        """Precompute U * enc_outputs for computational efficiency.

        :param enc_outputs: SL x B x (num_directions * enc_dim)
        :return: SL x B x a_dim projection of the encoder outputs
        """
        (src_seq_len, batch_size, enc_dim) = enc_outputs.size()
        enc_outputs_reshaped = enc_outputs.view((- 1), self.h_dim)
        proj = self.U(enc_outputs_reshaped)
        proj_reshaped = proj.view(src_seq_len, batch_size, self.a_dim)
        return proj_reshaped

    def forward(self, prev_h_batch, enc_outputs):
        """Compute attention weights over the source positions.

        :param prev_h_batch: 1 x B x dec_dim
        :param enc_outputs: SL x B x (num_directions * enc_dim)
        :return: attention weights, B x SL
        """
        (src_seq_len, batch_size, enc_dim) = enc_outputs.size()
        uh = self.U(enc_outputs.view((- 1), self.h_dim)).view(src_seq_len, batch_size, self.a_dim)
        wq = self.W(prev_h_batch.view((- 1), self.s_dim)).unsqueeze(0)
        wq3d = wq.expand_as(uh)
        wquh = self.tanh((wq3d + uh))
        # NOTE(review): the flattened scores are laid out SL-major but viewed
        # as (B, SL); for batch_size > 1 this interleaves positions across
        # examples. Preserved as-is to keep behavior identical -- verify.
        attn_unnorm_scores = self.v(wquh.view((- 1), self.a_dim)).view(batch_size, src_seq_len)
        attn_weights = self.softmax(attn_unnorm_scores)
        return attn_weights
|
class DecoderRNNAttnBase(nn.Module):
    """Skeleton for attention-based RNN decoders.

    Subclasses must provide:
        self.rnn
        self.attn_module
        self.combine_context_run_rnn_step
        self.compute_output
    """

    def forward(self, prev_y_batch, prev_h_batch, encoder_outputs_batch):
        """Run one decoding step over a single slice of the target sequence.

        :param prev_y_batch: embedded previous predictions, B x E
        :param prev_h_batch: current decoder state, 1 x B x dec_dim
        :param encoder_outputs_batch: SL x B x H
        :return: (step output, new hidden state, attention weights)
        """
        # B x SL attention distribution over source positions.
        alpha = self.attn_module(prev_h_batch, encoder_outputs_batch)
        # Weighted sum of encoder states: (B x 1 x SL) @ (B x SL x H) -> B x 1 x H.
        ctx = torch.bmm(alpha.unsqueeze(1), encoder_outputs_batch.transpose(0, 1))
        rnn_out, new_hidden = self.combine_context_run_rnn_step(prev_y_batch, prev_h_batch, ctx)
        step_output = self.compute_output(rnn_out)
        return (step_output, new_hidden, alpha)

    @property
    def num_directions(self):
        """1 for a unidirectional RNN, 2 for a bidirectional one."""
        return 2 if self.rnn.bidirectional else 1

    def combine_context_run_rnn_step(self, *args, **kwargs):
        raise NotImplementedError()

    def compute_output(self, *args, **kwargs):
        raise NotImplementedError()
|
class DecoderRNNAttnBahd(DecoderRNNAttnBase):
    def __init__(self, rnn_config, output_size, prev_y_dim, enc_dim, enc_num_directions):
        """GRU decoder with Bahdanau attention.

        :param rnn_config: config dict passed to get_GRU_unit
        :param output_size: target vocabulary size
        :param prev_y_dim: dimension of the embedded previous prediction
        :param enc_dim: encoder hidden dimension
        :param enc_num_directions: number of encoder directions (1 or 2)
        """
        super(DecoderRNNAttnBahd, self).__init__()
        self.rnn = get_GRU_unit(rnn_config)
        dec_dim = rnn_config['hidden_size']
        self.attn_module = AttnBahd(enc_dim, dec_dim, enc_num_directions)
        # Projects [y_{t-1}; context] down to the RNN input dimension.
        self.W_combine = nn.Linear((prev_y_dim + (enc_dim * enc_num_directions)), dec_dim)
        self.W_out = nn.Linear(dec_dim, output_size)
        # Fix: make the softmax dimension explicit (logits are B x output_size);
        # the implicit default is deprecated. dim=1 matches the old implicit
        # behavior for 2-D input.
        self.log_softmax = nn.LogSoftmax(dim=1)

    def combine_context_run_rnn_step(self, prev_y_batch, prev_h_batch, context):
        """Combine the previous prediction with the context and run one RNN step.

        :param prev_y_batch: B x prev_y_dim
        :param prev_h_batch: 1 x B x dec_dim
        :param context: B x 1 x (enc_dim * num_directions)
        :return: (rnn output 1 x B x dec_dim, new hidden state 1 x B x dec_dim)
        """
        y_ctx = torch.cat((prev_y_batch, context.squeeze(1)), 1)
        rnn_input = self.W_combine(y_ctx)
        (output, decoder_hidden) = self.rnn(rnn_input.unsqueeze(0), prev_h_batch)
        return (output, decoder_hidden)

    def compute_output(self, rnn_output):
        """Map an RNN output slice (1 x B x H) to log-probabilities (B x output_size)."""
        unnormalized_logits = self.W_out(rnn_output[0])
        logits = self.log_softmax(unnormalized_logits)
        return logits
|
class EncoderMLP(nn.Module):
    """Bag-of-embeddings encoder: a per-token linear + ReLU layer whose outputs
    are summed over the sequence to form the initial decoder hidden state."""

    def __init__(self, config):
        super(EncoderMLP, self).__init__()
        self.config = config
        self.input_size = self.config['input_size']
        self.hidden_size = self.config['hidden_size']
        self.W = nn.Linear(self.input_size, self.hidden_size)
        self.relu = nn.ReLU()

    def forward(self, input_embedded):
        """Encode an embedded batch.

        :param input_embedded: SL x B x E
        :return: (per-token states SL x B x H, pooled state 1 x B x H)
        """
        seq_len, batch_size, emb_dim = input_embedded.size()
        flat = input_embedded.view(-1, emb_dim)
        hidden = self.relu(self.W(flat)).view(seq_len, batch_size, -1)
        # Sum over time; unsqueeze so it looks like a 1-layer RNN hidden state.
        pooled = torch.sum(hidden, 0)
        return (hidden, pooled.unsqueeze(0))

    @property
    def num_directions(self):
        return 1
|
class EncoderGRU(EncoderRNN):
    # GRU-based encoder; the shared encoding logic lives in the EncoderRNN
    # base class (defined elsewhere in the project).
    def __init__(self, config):
        """Build the GRU encoder.

        :param config: dict with the GRU hyper-parameters; must contain
            'input_size', 'hidden_size' and 'dropout' (see get_GRU_unit).
        """
        super(EncoderGRU, self).__init__()
        self.config = config
        # The recurrent unit itself; bidirectional if the config says so.
        self.rnn = get_GRU_unit(config)
|
def process_e2e_mr(s):
    """Extract key-value pairs from an E2E MR string into a dictionary.

    :param s: src string containing key-value pairs like "name[The Vaults], eatType[pub]"
    :return: dict mapping each known MR key to its value (None when absent)
    """
    result = {'name': None, 'familyFriendly': None, 'eatType': None, 'food': None,
              'priceRange': None, 'near': None, 'area': None, 'customer rating': None}
    for chunk in s.split(', '):
        key, bracketed = chunk.split('[')
        # Drop the trailing ']' of the bracketed value.
        result[key] = bracketed[:-1]
    return result
|
def _get_price_str(mr_val):
"\n Handle the price prediction part.\n :param mr_val: value of the 'price' field.\n :return: refined sentence string.\n "
if (not mr_val):
s = '.'
return s
if ('£' in mr_val):
s = (' in the price range of %s.' % mr_val)
else:
mr_val = ('low' if (mr_val == 'cheap') else mr_val)
s = (' in the %s price range.' % mr_val)
return s
|
def _get_rating(mr_val, snt):
"\n Handle the rating part.\n :param mr_val: value of the 'customerRating' field.\n :param snt: sentence string built so far.\n :return: refined sentence string.\n "
if (snt[(- 1)] != '.'):
beginning = ' with'
else:
beginning = ' It has'
if mr_val.isalpha():
s = ('%s a %s customer rating' % (beginning, mr_val))
else:
s = ('%s a customer rating of %s' % (beginning, mr_val))
return (snt + s)
|
def _get_loc(area_val, near_val, snt):
"\n Handle location string, variant 1.\n :param area_val: value of the 'area' field.\n :param near_val: value of the 'near' field.\n :param snt: incomplete sentence string (string built so far)\n :return:\n "
tokens = snt.split()
if ('It' in tokens):
beginning = ' and'
else:
beginning = '. It'
if area_val:
s = ('%s is located in the %s area' % (beginning, area_val))
if near_val:
s += (', near %s.' % near_val)
else:
s += '.'
elif near_val:
s = ('%s is located near %s.' % (beginning, near_val))
else:
raise NotImplementedError()
return (snt + s)
|
def _get_loc2(area_val, near_val):
"\n Handle location string, variant 2.\n :param area_val: value of the 'area' field.\n :param near_val: value of the 'near' field.\n :return:\n "
if area_val:
s = (' located in the %s area' % area_val)
if near_val:
s += (', near %s.' % near_val)
else:
s += '.'
elif near_val:
s = (' located near %s.' % near_val)
else:
raise NotImplementedError()
return s
|
def postprocess(snt):
'\n Fix some spelling and punctuation.\n :param snt: sentence string before post-processing\n :return: sentence string after post-processing\n '
tokens = snt.split()
for (idx, t) in enumerate(tokens):
if (t.lower() == 'a'):
if (tokens[(idx + 1)][0] in ['a', 'A']):
tokens[idx] = ('%sn' % t)
elif (t.lower() == 'an'):
if (tokens[(idx + 1)][0] not in ['a', 'A']):
tokens[idx] = ('%s' % t[0])
last_token = tokens[(- 1)]
if (last_token[(- 1)] != '.'):
tokens[(- 1)] = (last_token + '.')
return ' '.join(tokens)
|
def make_prediction(xd):
    """Main function to make a prediction.

    Our template has a generic part and a field-specific part (SUBTEMPLATE):

        [SUBTEMPLATE-1] which serves [food] in the [price] price range.
        It has a [customerRating] customer rating.
        It is located in [area] area, near [near].
        [SUBTEMPLATE-2]

    If the familyFriendly field's value is "yes":
        - SUBTEMPLATE-1: [name] is a family-friendly [eatType]
        - SUBTEMPLATE-2: None
    Otherwise:
        - SUBTEMPLATE-1: [name] is a [eatType]
        - SUBTEMPLATE-2: It is not family friendly.

    Ordering variations are resolved through if-else statements; a final
    post-processing step fixes punctuation and article choice (a/an).

    :param xd: dict with keys ["name", "familyFriendly", "eatType", "food",
        "priceRange", "near", "area", "customer rating"]
    :return: a string describing input xd
    """
    name = xd['name']
    # The name field is mandatory in the E2E data.
    assert (name is not None)
    # familyFriendly is tri-state: True / False / unknown (None).
    if (xd['familyFriendly'] == 'yes'):
        friendly = True
    elif (xd['familyFriendly'] == 'no'):
        friendly = False
    else:
        friendly = None
    # Fall back to a generic noun when eatType is missing.
    restaurant_type = (xd['eatType'] or 'dining place')
    food_type = xd['food']
    price_range = xd['priceRange']
    rating = xd['customer rating']
    near = xd['near']
    area = xd['area']
    # SUBTEMPLATE-1: opening clause.
    if (friendly is True):
        snt = ('%s is a family-friendly %s' % (name, restaurant_type))
    else:
        snt = ('%s is a %s' % (name, restaurant_type))
    if (food_type or price_range):
        if (food_type == 'Fast food'):
            # 'Fast food food' would read badly; use the adjective form.
            food_type = 'fast'
        elif (food_type is None):
            food_type = ''
        food = ('%s food' % food_type)
        snt += (' which serves %s' % food)
        price_range = _get_price_str(price_range)
        snt += price_range
        if rating:
            snt = _get_rating(rating, snt)
        if (near or area):
            snt = _get_loc(area, near, snt)
        else:
            snt += '.'
        # NOTE(review): this branch falls through to the generic tail below,
        # which re-checks (near or area) and rating -- confirm against the
        # original indentation that clauses are not applied twice.
    elif (near or area):
        # No food/price info: trim the missing period, attach location and
        # the family-friendliness clause, then return early.
        snt = snt[:(- 1)]
        snt = _get_loc(area, near, snt)
        if (friendly is False):
            snt += ' It is not family friendly.'
        snt = postprocess(snt)
        return snt
    # Generic tail: attach location (variant 2), rating and friendliness.
    if (near or area):
        snt += _get_loc2(area, near)
    if rating:
        snt = _get_rating(rating, snt)
    if (friendly is False):
        tokens_so_far = snt.split()
        # Start a new sentence if the current one is not yet terminated.
        if (tokens_so_far[(- 1)][(- 1)] != '.'):
            snt += '. It is not family friendly.'
        else:
            snt += ' It is not family friendly.'
    snt = postprocess(snt)
    return snt
|
def run(fname, mode='dev'):
    """Main function.

    :param fname: filename with the input.
    :param mode: operation mode, either dev (input has both MR and TEXT) or test (only MR given).
    """
    input_data = []
    predictions = []
    with open(fname, 'r') as csv_file:
        reader = csv.reader(csv_file, delimiter=',', quotechar='"')
        # The first row is a header, which also sanity-checks the chosen mode.
        header = next(reader)
        if (header == ['mr', 'ref']):
            assert (mode == 'dev')
            logger.info('Predicting on DEV data')
        elif (header == ['MR']):
            assert (mode == 'test')
            logger.info('Predicting on TEST data')
        else:
            logger.error('The file does not contain a header!')
        # Handle the first data row explicitly so the duplicate check in the
        # loop below always has a previous MR to compare against.
        first_row = next(reader)
        curr_x = first_row[0]
        input_data.append(curr_x)
        xd = process_e2e_mr(curr_x)
        p = make_prediction(xd)
        predictions.append(p)
        for row in list(reader):
            x = row[0]
            input_data.append(x)
            this_xd = process_e2e_mr(x)
            # Consecutive rows with the same MR (multiple references in the
            # dev data) get a single prediction.
            if (x == curr_x):
                continue
            else:
                p = make_prediction(this_xd)
                predictions.append(p)
                curr_x = x
    predictions_fname = ('%s.predicted' % fname)
    with open(predictions_fname, 'w') as out:
        logger.info(('Saving predictions to --> %s' % predictions_fname))
        if (mode == 'test'):
            # NOTE(review): input_data keeps every row (duplicates included)
            # while predictions keeps one entry per distinct consecutive MR,
            # so this pairing assumes test inputs contain no duplicates --
            # verify against the data.
            for (idx, p) in enumerate(predictions):
                out.write(('"%s"\t"%s"\n' % (input_data[idx], p)))
        else:
            for p in predictions:
                out.write(('%s\n' % p))
    logger.info('Done')
|
def test():
    """Run the template baseline on a few hand-written MRs and print the results."""
    logger.info('Running a test: prediction by a template baseline')
    examples = [
        'name[The Vaults]',
        'name[The Vaults], eatType[pub]',
        'name[The Vaults], eatType[pub], priceRange[more than £30]',
        'name[The Vaults], eatType[pub], priceRange[more than £30], customer rating[5 out of 5]',
        'name[The Vaults], eatType[pub], priceRange[more than £30], customer rating[5 out of 5], near[Café Adriatic]',
        'name[The Vaults], eatType[pub], priceRange[more than £30], customer rating[5 out of 5], near[Café Adriatic], food[English]',
        'name[The Vaults], eatType[pub], priceRange[more than £30], customer rating[5 out of 5], near[Café Adriatic], food[English], familyFriendly[yes]',
    ]
    # Each example adds one more MR field than the previous one.
    for mr in examples:
        parsed = process_e2e_mr(mr)
        prediction = make_prediction(parsed)
        print('%s\n%s\n\n' % (mr, prediction))
|
class BaseTrainer(object):
    """Shared training loop: optimization, loss tracking, periodic evaluation
    with the official E2E NLG metrics, and plotting/saving of results."""
    def __init__(self, config):
        self.config = config
        self.init_params()
    def init_params(self):
        """Cache frequently used config values and set up score containers."""
        self.n_epochs = self.config['n_epochs']
        self.batch_size = self.config['batch_size']
        self.lr = self.config['learning_rate']
        self.model_dir = self.config['model_dir']
        self.evaluate_prediction = self.config['evaluate_prediction']
        self.save_model = self.config['save_model_each_epoch']
        self.use_cuda = torch.cuda.is_available()
        self.train_losses = []
        self.dev_losses = []
        if self.evaluate_prediction:
            # One entry per epoch for each E2E challenge metric.
            self.nist_scores = []
            self.bleu_scores = []
            self.cider_scores = []
            self.rouge_scores = []
            self.meteor_scores = []
    def run_external_eval(self, ref_fn, pred_fn):
        """Run external evaluation script (provided by the E2E NLG org).

        :param ref_fn: reference filename
        :param pred_fn: prediction filename
        :return:
        """
        (bleu, nist, meteor, rouge, cider) = eval_output(ref_fn, pred_fn)
        self.bleu_scores.append(bleu)
        self.nist_scores.append(nist)
        self.cider_scores.append(cider)
        self.rouge_scores.append(rouge)
        self.meteor_scores.append(meteor)
        score_msg = ('BLEU=%0.5f NIST=%0.5f CIDEr=%0.5f ROUGE=%0.5f METEOR=%0.5f' % (bleu, nist, cider, rouge, meteor))
        logger.info(score_msg)
    def record_loss(self, train_loss, dev_loss):
        # Track per-epoch losses for the learning-curve plots.
        self.train_losses.append(train_loss)
        self.dev_losses.append(dev_loss)
        logger.info(('tloss=%0.5f dloss=%0.5f' % (train_loss, dev_loss)))
    def training_start(self, model, data):
        """Top-level training entry point: trains for n_epochs, evaluating,
        logging and (optionally) checkpointing after every epoch."""
        training_start_time = time.time()
        logger.info('Start training')
        model_summary = torch_summarize(model)
        logger.debug(model_summary)
        evaluator = BaseEvaluator(self.config)
        logger.debug('Preparing training data')
        train_batches = data.prepare_training_data(data.train, self.batch_size)
        dev_batches = data.prepare_training_data(data.dev, self.batch_size)
        id2word = data.vocab.id2tok
        dev_lexicalizations = data.lexicalizations['dev']
        dev_multi_ref_fn = ('%s.multi-ref' % data.fnames['dev'])
        self.set_optimizer(model, self.config['optimizer'])
        self.set_train_criterion(len(id2word), PAD_ID)
        if self.use_cuda:
            model = model.cuda()
        for epoch_idx in range(1, (self.n_epochs + 1)):
            epoch_start = time.time()
            pred_fn = os.path.join(self.model_dir, ('predictions.epoch%d' % epoch_idx))
            train_loss = self.train_epoch(epoch_idx, model, train_batches)
            dev_loss = self.compute_val_loss(model, dev_batches)
            (predicted_ids, attention_weights) = evaluator.evaluate_model(model, data.dev[0], data.uni_mr['dev'], dis=False)
            # Re-insert the delexicalized values before scoring.
            predicted_tokens = evaluator.lexicalize_predictions(predicted_ids, dev_lexicalizations, id2word)
            save_predictions_txt(predicted_tokens, pred_fn)
            self.record_loss(train_loss, dev_loss)
            if self.evaluate_prediction:
                self.run_external_eval(dev_multi_ref_fn, pred_fn)
            if self.save_model:
                save_model(model, os.path.join(self.model_dir, ('weights.epoch%d' % epoch_idx)))
            logger.info(('Epoch %d/%d: time=%s' % (epoch_idx, self.n_epochs, asMinutes((time.time() - epoch_start)))))
        self.plot_lcurve()
        if self.evaluate_prediction:
            score_fname = os.path.join(self.model_dir, 'scores.csv')
            scores = self.get_scores_to_save()
            save_scores(scores, self.score_file_header, score_fname)
        self.plot_training_results()
        logger.info(('End training time=%s' % asMinutes((time.time() - training_start_time))))
    def compute_val_loss(self, model, dev_batches):
        """Average loss over the dev batches.

        NOTE(review): this reuses train_step without disabling autograd, so
        gradients state is still built during validation -- confirm intended.
        """
        total_loss = 0
        running_losses = []
        num_dev_batches = len(dev_batches)
        bar = create_progress_bar('dev_loss')
        for batch_idx in bar(range(num_dev_batches)):
            loss_var = self.train_step(model, dev_batches[batch_idx])
            # .data[0] is legacy PyTorch (< 0.4); .item() on modern versions.
            loss_data = loss_var.data[0]
            # Keep a window of the 20 most recent losses for the progress bar.
            running_losses = ([loss_data] + running_losses)[:20]
            bar.dynamic_messages['dev_loss'] = np.mean(running_losses)
            total_loss += loss_data
        total_loss_avg = (total_loss / num_dev_batches)
        return total_loss_avg
    def train_epoch(self, epoch_idx, model, train_batches):
        """One pass over the (shuffled) training batches; returns the mean loss."""
        np.random.shuffle(train_batches)
        running_losses = []
        epoch_losses = []
        num_train_batches = len(train_batches)
        bar = create_progress_bar('train_loss')
        for pair_idx in bar(range(num_train_batches)):
            self.optimizer.zero_grad()
            loss_var = self.train_step(model, train_batches[pair_idx])
            # .data[0] is legacy PyTorch (< 0.4); .item() on modern versions.
            loss_data = loss_var.data[0]
            loss_var.backward()
            self.optimizer.step()
            running_losses = ([loss_data] + running_losses)[:20]
            bar.dynamic_messages['train_loss'] = np.mean(running_losses)
            epoch_losses.append(loss_data)
        epoch_loss_avg = np.mean(epoch_losses)
        return epoch_loss_avg
    def get_scores_to_save(self):
        # Column order must match score_file_header.
        scores = list(zip(self.bleu_scores, self.nist_scores, self.cider_scores, self.rouge_scores, self.meteor_scores, self.train_losses, self.dev_losses))
        return scores
    def train_step(self, *args, **kwargs):
        # Subclass hook: forward pass on one batch, returning the loss variable.
        raise NotImplementedError()
    def calc_loss(self, *args, **kwargs):
        raise NotImplementedError()
    def set_train_criterion(self, *args, **kwargs):
        raise NotImplementedError()
    def set_optimizer(self, model, opt_name):
        """Instantiate the optimizer named by *opt_name* (SGD / Adam / RMSprop)."""
        logger.debug(('Setting %s as optimizer' % opt_name))
        if (opt_name == 'SGD'):
            self.optimizer = optim.SGD(params=model.parameters(), lr=self.lr)
        elif (opt_name == 'Adam'):
            self.optimizer = optim.Adam(params=model.parameters(), lr=self.lr)
        elif (opt_name == 'RMSprop'):
            self.optimizer = optim.RMSprop(params=model.parameters(), lr=self.lr)
        else:
            raise NotImplementedError()
    def plot_training_results(self, *args, **kwargs):
        raise NotImplementedError()
    def plot_lcurve(self, *args, **kwargs):
        raise NotImplementedError()
    def get_plot_names(self):
        raise NotImplementedError()
    @property
    def score_file_header(self):
        # CSV column names used by save_scores.
        HEADER = ['bleu', 'nist', 'cider', 'rouge', 'meteor', 'train_loss', 'dev_loss']
        return HEADER
|
class E2EMLPTrainer(BaseTrainer):
    # Trainer for the MLP-encoder seq2seq model.
    def set_train_criterion(self, vocab_size, pad_id):
        """NMT Criterion from: https://github.com/OpenNMT/OpenNMT-py/blob/master/onmt/Loss.py

        Builds an NLL loss that ignores the padding token by zeroing its
        class weight.
        """
        weight = torch.ones(vocab_size)
        weight[pad_id] = 0
        # NOTE(review): size_average is legacy PyTorch (deprecated in favor
        # of reduction='mean' in newer versions) -- kept for compatibility
        # with the rest of this codebase.
        self.criterion = nn.NLLLoss(weight, size_average=True)
        if self.use_cuda:
            self.criterion = self.criterion.cuda()
    def train_step(self, model, datum):
        """Run the forward pass on one batch and return the loss variable."""
        # Each tensor is transposed to (seq_len, batch) and moved to the GPU
        # when one is available.
        datum = [cuda_if_gpu(Variable(torch.LongTensor(t)).transpose(0, 1)) for t in datum]
        logits = model.forward(datum)
        loss_var = self.calc_loss(logits, datum)
        return loss_var
    def calc_loss(self, logits, datum):
        """Flatten logits/targets to 2-D/1-D and apply the NLL criterion."""
        # datum[1] holds the target token-id tensor.
        batch_y_var = datum[1]
        vocab_size = logits.size()[(- 1)]
        logits = logits.contiguous().view((- 1), vocab_size)
        targets = batch_y_var.contiguous().view((- 1), 1).squeeze(1)
        loss = self.criterion(logits, targets)
        return loss
    def plot_lcurve(self):
        """Plot train/dev learning curves to lcurve.pdf in the model dir."""
        fig_fname = os.path.join(self.model_dir, 'lcurve.pdf')
        title = self.config['modeltype']
        plot_lcurve(self.train_losses, self.dev_losses, img_title=title, save_path=fig_fname, show=False)
    def plot_training_results(self):
        """Plot losses together with all challenge metrics to lcurve_scores.pdf."""
        losses = np.asarray([self.train_losses, self.dev_losses]).transpose()
        plot_train_progress(scores=(losses, self.bleu_scores, self.nist_scores, self.cider_scores, self.rouge_scores, self.meteor_scores), names=self.get_plot_names(), img_title=self.config['modeltype'], save_path=os.path.join(self.model_dir, 'lcurve_scores.pdf'), show=False)
    def get_plot_names(self):
        # Plot titles, aligned with the scores tuple in plot_training_results.
        return [['TrainLoss', 'DevLoss'], 'BLEU', 'NIST', 'CIDEr', 'ROUGE_L', 'METEOR']
|
def load_config(config_path):
    """Load and read a yaml configuration file.

    :param config_path: path of the configuration file to load
    :type config_path: str
    :return: the configuration dictionary
    :rtype: dict
    """
    with open(config_path, 'r') as user_config_file:
        # Fix: yaml.load without an explicit Loader can instantiate arbitrary
        # Python objects from tagged input; safe_load restricts the input to
        # plain YAML data, which is all a config file should contain.
        return yaml.safe_load(user_config_file.read())
|
def fix_seed(seed):
    """Seed the numpy, random and torch RNGs for reproducibility."""
    logger.debug('Fixing seed: %d' % seed)
    np.random.seed(seed)
    random.seed(seed)
    torch.manual_seed(seed)
|
def process_e2e_text(s):
    """Tokenize a text string: split on whitespace, then on the module-level
    _WORD_SPLIT regex, dropping empty fragments."""
    pieces = []
    for fragment in s.strip().split():
        pieces.extend(_WORD_SPLIT.split(fragment))
    return [tok for tok in pieces if tok]
|
def process_e2e_mr(s):
    """Parse an MR string into a dict keyed in the canonical MR_FIELDS order."""
    values = [None] * MR_KEY_NUM
    for item in s.split(', '):
        key, raw_val = item.split('[')
        # Strip the trailing ']' from the bracketed value.
        values[MR_KEYMAP[key]] = raw_val[:-1]
    return dict(zip(MR_FIELDS, values))
|
def process_e2e_mr_delex(s):
    """Parse an MR string, replacing the 'name' and 'near' values with
    placeholder tokens (delexicalization)."""
    values = [None] * MR_KEY_NUM
    # NOTE(review): lex collects the original name/near surface forms but is
    # never returned -- verify whether callers need the lexicalizations.
    lex = [None, None]
    for item in s.split(', '):
        key, raw_val = item.split('[')
        if key == 'name':
            val = NAME_TOKEN
            lex[0] = raw_val[:-1]
        elif key == 'near':
            val = NEAR_TOKEN
            lex[1] = raw_val[:-1]
        else:
            val = raw_val[:-1]
        values[MR_KEYMAP[key]] = val
    return dict(zip(MR_FIELDS, values))
|
def cnt_bins_and_cnts():
    """Bucket the module-level references_lens into fixed-width length bins.

    :return: (list of (lo, hi] bin tuples, list of per-bin counts)
    """
    edges = [0, 10, 20, 30, 40, 50, 60, 70, 80]
    bins = list(zip(edges[:-1], edges[1:]))
    cnts = [0] * len(bins)
    # NOTE(review): reads the module-level references_lens, not a parameter.
    for l in references_lens:
        for bin_idx, (lo, hi) in enumerate(bins):
            if lo < l <= hi:
                cnts[bin_idx] += 1
                break
    return (bins, cnts)
|
def plot_len_hist(lens, fname):
    """Plot a histogram of sentence lengths and save it to a PDF.

    :param lens: iterable of sentence lengths
    :param fname: output PDF filename
    """
    # Fix: compute the summary statistics from the *lens* argument; the
    # original read the module-level `references_lens` global instead, so the
    # annotated mean/std could disagree with the plotted histogram.
    lens_df = pd.DataFrame(lens)
    mean = float(lens_df.mean())
    std = float(lens_df.std())
    min_len = int(lens_df.min())
    max_len = int(lens_df.max())
    pp = PdfPages(fname)
    (n, bins, patches) = plt.hist(lens, 20, facecolor='b', alpha=0.55)
    plt.xlabel('Sentence Length')
    plt.ylabel('Number of sentences')
    plt.title('Sentence length distribution')
    plt.axis([0, 80, 0, 10000])
    plt.text(40, 7500, '$mean={:.2f},\\ std={:.2f}$'.format(mean, std))
    plt.text(40, 6800, '$min={},\\ max={}$'.format(min_len, max_len))
    plt.grid(True)
    plt.tight_layout()
    plt.show()
    pp.savefig()
    pp.close()
|
def check_file_exists(fname):
    """Return True if *fname* exists; otherwise log a warning and return False.

    :param fname: path to check (made absolute before the check)
    :return: bool
    """
    if (not os.path.exists(os.path.abspath(fname))):
        logger.warning(('%s does not exist!' % fname))
        return False
    # Fix: the original fell off the end here and implicitly returned None,
    # so a truthiness check by the caller would treat an existing file as False.
    return True
|
def check_files_exist(args):
    """Check every filename in *args*; return True only if all of them exist.

    Each missing file still gets its own warning via check_file_exists.
    Fix: the original always returned True regardless of the per-file results.

    :param args: iterable of filenames
    :return: bool -- True iff every file exists
    """
    all_exist = True
    for arg in args:
        if not check_file_exists(arg):
            all_exist = False
    return all_exist
|
def set_logger(stdout_level=logging.INFO, log_fn=None):
    """Set python logger for this experiment.

    Based on:
    https://stackoverflow.com/questions/25187083/python-logging-to-multiple-handlers-at-different-log-levels

    :param stdout_level: console log level, either a numeric level
        (e.g. logging.INFO) or a level name string (e.g. 'INFO')
    :param log_fn: optional file to receive detailed DEBUG-level logs
    :return: the configured 'experiment' logger
    """
    simple_formatter = logging.Formatter('%(name)s:%(levelname)s: %(message)s')
    detailed_formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    logger = logging.getLogger('experiment')
    logger.setLevel(logging.DEBUG)
    logger.propagate = 0
    console_handler = logging.StreamHandler(sys.stdout)
    # Fix: the original always did getattr(logging, stdout_level), which
    # raises TypeError for the integer default logging.INFO. Accept both
    # numeric levels and level-name strings.
    if isinstance(stdout_level, str):
        stdout_level = getattr(logging, stdout_level)
    console_handler.setLevel(stdout_level)
    console_handler.setFormatter(simple_formatter)
    logger.addHandler(console_handler)
    if log_fn:
        log_fn = os.path.abspath(log_fn)
        file_handler = logging.FileHandler(log_fn)
        file_handler.setLevel(logging.DEBUG)
        file_handler.setFormatter(detailed_formatter)
        logger.addHandler(file_handler)
    return logger
|
def main(src_fn, num_samples=100):
    """Sample input data and write it to a side-car file for further analysis.

    :param src_fn: trainset data
    :param num_samples: number of samples to draw
    """
    print('AUX_DATA_ANALYSIS: Sampling input data')
    num_samples = int(num_samples)
    with open(src_fn) as src:
        lines = [ln.strip() for ln in src]
    total = len(lines)
    print('Num instances: ', total)
    chosen = random.sample(population=range(total), k=num_samples)
    print('Sample indices: ', chosen)
    with open('%s.sample-%d' % (src_fn, num_samples), 'w') as sos:
        for i in chosen:
            sos.write('%s\n' % lines[i])
    print('Done!')
|
def main(base_out_fn, model4_out_fn, template_out_fn, src_fn, num_samples=100):
    """Sample predictions by the three models and write them to separate files
    for further analysis. See subsection 5.2 of the paper.

    NOTE(review): this redefines the module-level `main` declared above, so
    only this variant is reachable by name -- confirm which one the scripts
    are meant to call.

    :param base_out_fn: Baseline prediction file
    :param model4_out_fn: MLPModel prediction file
    :param template_out_fn: Template-based system prediction file
    :param src_fn: src side of the dev-multi-ref data (devset.csv.multi-ref.src)
    :param num_samples: number of samples to draw
    :return:
    """
    print('AUX_DATA_ANALYSIS: Sampling predictions')
    num_samples = int(num_samples)
    with open(src_fn) as src, open(base_out_fn) as bo, open(model4_out_fn) as mo, open(template_out_fn) as to:
        srcs = [line.strip() for line in src]
        bsnt = [line.strip() for line in bo]
        mosnt = [line.strip() for line in mo]
        tosnt = [line.strip() for line in to]
    # All four files must be line-aligned (one prediction per source MR).
    num_instances = len(bsnt)
    assert (len(srcs) == len(mosnt) == len(tosnt) == num_instances)
    print('Num instances: ', num_instances)
    sample_indices = random.sample(population=range(num_instances), k=num_samples)
    print('Sample indices: ', sample_indices)
    # The same sampled indices are written in parallel to all four output files.
    with open(('%s.sample-%d' % (base_out_fn, num_samples)), 'w') as bos, open(('%s.sample-%d' % (model4_out_fn, num_samples)), 'w') as mos, open(('%s.sample-%d' % (template_out_fn, num_samples)), 'w') as tos, open(('%s.sample-inputs' % base_out_fn), 'w') as ios:
        for i in sample_indices:
            ios.write(('%s\n' % srcs[i]))
            bos.write(('%s\n' % bsnt[i]))
            mos.write(('%s\n' % mosnt[i]))
            tos.write(('%s\n' % tosnt[i]))
    print('Done!')
|
def save_config(config_dict, fname=None):
    """Save the configuration dictionary in JSON format.

    :param config_dict: configuration dictionary
    :param fname: name of the file to save the dictionary in
    """
    with open(fname, mode='w', encoding='utf-8') as handle:
        handle.write(json.dumps(config_dict))
|
def save_model(model, model_fn):
    """Serialize the trained model's parameters.

    :param model: instance of the model class
    :param model_fn: name of the file where to store the weights
    """
    logger.info(('Saving model to --> %s' % model_fn))
    # Fix: the original passed an open() result inline and never closed it;
    # a context manager releases the handle deterministically.
    with open(model_fn, 'wb') as f:
        torch.save(model.state_dict(), f)
|
def save_predictions_json(predictions, fname):
    """Save predictions done by a trained model in json format.

    :param predictions: a list of strings, each corresponding to one predicted snt.
    :param fname: name of the file to save the predictions in
    """
    with open(fname, mode='w', encoding='utf-8') as out_file:
        logger.info('Saving all predictions to a json file --> %s' % fname)
        json.dump(predictions, out_file)
|
def save_predictions_txt(predictions, fname):
    """Save predictions done by a trained model in txt format.

    :param predictions: a single string, or a list of strings/token-lists
    :param fname: output filename
    :raises NotImplementedError: for unsupported prediction types
    """
    logger.info(('Saving predictions to a txt file --> %s' % fname))
    with open(fname, mode='w', encoding='utf-8') as f:
        # Fix: use isinstance instead of exact type() comparison so that
        # str/list subclasses are handled too.
        if isinstance(predictions, str):
            f.write(predictions)
        elif isinstance(predictions, list):
            for s in predictions:
                if isinstance(s, list):
                    # Token lists are joined into a single sentence line.
                    s = ' '.join(s)
                f.write(('%s\n' % s))
        else:
            raise NotImplementedError()
|
def save_scores(scores, header, fname):
    """Save the per-epoch E2E NLG Challenge scores as a CSV file.

    :param scores: a list of per-epoch score tuples (bleu, nist, cider,
        rouge, meteor, train_loss, dev_loss)
    :param header: column names for the CSV file
    :param fname: output filename
    """
    # Fix: the csv module requires newline='' on the file object; without it
    # the writer emits extra blank lines on Windows.
    with open(fname, 'w', newline='') as csv_out:
        csv_writer = csv.writer(csv_out, delimiter=',')
        csv_writer.writerow(header)
        for epoch_scores in scores:
            csv_writer.writerow(epoch_scores)
    logger.info(('Scores saved to --> %s' % fname))
|
def load_model(model, model_weights_fn):
    """Load serialized weights into *model* (in place).

    :param model: instance of the model class
    :param model_weights_fn: name of the file with the serialized weights
    """
    logger.info(('Loading the model <-- %s' % model_weights_fn))
    # Fix: the original opened the file inline and never closed the handle.
    with open(model_weights_fn, 'rb') as f:
        model.load_state_dict(torch.load(f))
|
def get_experiment_name(config_d):
    """Create a simple unique name for the experiment: model type, selected
    hyper-parameter values and a timestamp.

    :param config_d: configuration dictionary
    :return: name of the current experiment (string)
    """
    parts = (get_model_type(config_d), get_hp_value_name(config_d), get_timestamp())
    return '%s_%s_%s' % parts
|
def get_model_type(config_d):
    """Derive the model type from the 'model-module' config entry.

    The model type is the unqualified module name, e.g.
    'components.model.e2e_model_MLP' -> 'e2e_model_MLP'. As a side effect it
    is also stored under config_d['training_params']['modeltype'].

    :param config_d: configuration dictionary
    :return: model type (string)
    """
    mtype = config_d['model-module'].rsplit('.', 1)[-1]
    config_d['training_params']['modeltype'] = mtype
    return mtype
|
def get_timestamp():
    """Return the current time formatted like '2018-Mar-07_14:05:59'."""
    return datetime.now().strftime('%Y-%b-%d_%H:%M:%S')
|
def get_hp_value_name(config_dict):
    """Concatenate key hyper-parameter values into a short name string.

    Useful for hp-optimization, if you decide to perform one.

    :param config_dict: configuration dictionary retrieved from the .yaml file
    :return: concatenated hp values (string)
    """
    model_params = config_dict['model_params']
    train_params = config_dict['training_params']
    encoder_params = model_params['encoder_params']
    values = (
        config_dict.get('random_seed', 1),
        model_params['embedding_dim'],
        encoder_params['hidden_size'],
        encoder_params['dropout'],
        train_params['batch_size'],
        train_params['learning_rate'],
    )
    return 'seed%s-emb%s-hid%s-drop%s-bs%s-lr%s' % values
|
def make_model_dir(config):
    """Create (if needed) and record the directory for the current experiment.

    :param config: config dictionary
    :return: name of the directory
    """
    mode = config['mode']
    if mode == 'predict':
        # Reuse the directory that already holds the trained model.
        model_dirname = os.path.split(config['model_fn'])[0]
    elif mode == 'train':
        experiments_root = os.path.abspath(config['experiments_dir'])
        model_dirname = os.path.join(experiments_root, get_experiment_name(config))
    else:
        raise NotImplementedError()
    if not os.path.exists(model_dirname):
        os.makedirs(model_dirname)
    # Record the directory so the trainer can find it later.
    config['training_params']['model_dir'] = model_dirname
    return model_dirname
|
def test_save_scores():
    """Smoke test: write a small scores table to 'todelete.csv'."""
    rows = [[(1, 2), 3], [(1, 2), 4], [(1, 2), 5]]
    header = ['loss', 'cider']
    with open('todelete.csv', 'w') as csv_out:
        writer = csv.writer(csv_out, delimiter=',')
        writer.writerow(header)
        writer.writerows(rows)
|
def timeSince(since, percent):
    """Format the time elapsed since *since* and the estimated time remaining.

    :param since: start time (seconds since the epoch)
    :param percent: progress fraction in (0, 1]
    :return: 'elapsed (- remaining)' string
    """
    elapsed = time.time() - since
    # Linear extrapolation: total = elapsed / fraction done.
    estimated_total = elapsed / percent
    remaining = estimated_total - elapsed
    return '%s (- %s)' % (asMinutes(elapsed), asMinutes(remaining))
|
def asMinutes(s):
    """Format a duration in seconds as 'Xm Ys'.

    :param s: duration in seconds
    :return: formatted string
    """
    minutes, seconds = divmod(s, 60)
    return '%dm %ds' % (minutes, seconds)
|
def create_progress_bar(dynamic_msg=None):
    """Build a progressbar.ProgressBar with batch counter, bar and ETA widgets;
    optionally append a DynamicMessage widget named *dynamic_msg*."""
    widgets = [' [batch ', progressbar.SimpleProgress(), '] ',
               progressbar.Bar(), ' (', progressbar.ETA(), ') ']
    if dynamic_msg is not None:
        widgets.append(progressbar.DynamicMessage(dynamic_msg))
    return progressbar.ProgressBar(widgets=widgets)
|
class NGramScore(object):
    """Base class for BLEU & NIST, providing tokenization and some basic
    n-gram matching functions."""

    def __init__(self, max_ngram, case_sensitive):
        """Create the scoring object.

        :param max_ngram: the n-gram level to compute the score for
        :param case_sensitive: use case-sensitive matching?
        """
        self.max_ngram = max_ngram
        self.case_sensitive = case_sensitive

    def reset(self):
        """Reset the object, zero all counters."""
        raise NotImplementedError()

    def append(self, pred_sent, ref_sents):
        """Add a sentence to the statistics.

        :param pred_sent: system output / predicted sentence
        :param ref_sents: reference sentences
        """
        raise NotImplementedError()

    def score(self):
        """Compute the current score based on sentences added so far."""
        raise NotImplementedError()

    def ngrams(self, n, sent):
        """Return n-grams of tokens for the given N, lowercasing everything
        first if the measure should not be case-sensitive.

        :param n: n-gram 'N' (1 for unigrams, 2 for bigrams etc.)
        :param sent: the sentence in question (a token list)
        :return: iterator over n-gram tuples
        """
        if (not self.case_sensitive):
            return zip(*[[tok.lower() for tok in sent[i:]] for i in range(n)])
        return zip(*[sent[i:] for i in range(n)])

    def check_tokenized(self, pred_sent, ref_sents):
        """Tokenize the predicted and reference sentences if they are not
        already token lists.

        :param pred_sent: system output / predicted sentence
        :param ref_sents: a list of corresponding reference sentences
        :return: a tuple (pred_sent, ref_sents) with everything tokenized
        """
        pred_sent = (pred_sent if isinstance(pred_sent, list) else self.tokenize(pred_sent))
        ref_sents = [(ref_sent if isinstance(ref_sent, list) else self.tokenize(ref_sent)) for ref_sent in ref_sents]
        return (pred_sent, ref_sents)

    def get_ngram_counts(self, n, sents):
        """Return a dict of n-gram -> maximum count in any single sentence.

        :param n: the "n" in n-grams (how long the n-grams should be)
        :param sents: list of sentences (token lists) for n-gram counting
        :return: dict (ngram tuple -> count)
        """
        merged_ngrams = {}
        for sent in sents:
            ngrams = defaultdict(int)
            for ngram in self.ngrams(n, sent):
                ngrams[ngram] += 1
            # Fix: dict.iteritems() is Python 2 only; items() works on Python 3.
            for (ngram, cnt) in ngrams.items():
                # Clip to the maximum count seen in any single sentence.
                merged_ngrams[ngram] = max((merged_ngrams.get(ngram, 0), cnt))
        return merged_ngrams

    def tokenize(self, sent):
        """This tries to mimic multi-bleu-detok from Moses, and by extension
        mteval-v13b. Code taken directly from there and attempted rewrite into
        Python."""
        sent = re.sub('<skipped>', '', sent)
        sent = re.sub('-\\n', '', sent)
        sent = re.sub('\\n', ' ', sent)
        # Fix: restore the mteval-v13b HTML-entity unescaping; these
        # substitutions had degenerated into no-ops (e.g. re.sub('&', '&')).
        sent = re.sub('&quot;', '"', sent)
        sent = re.sub('&amp;', '&', sent)
        sent = re.sub('&lt;', '<', sent)
        sent = re.sub('&gt;', '>', sent)
        sent = ((' ' + sent) + ' ')
        sent = re.sub('([\\{-\\~\\[-\\` -\\&\\(-\\+\\:-\\@\\/])', ' \\1 ', sent)
        sent = re.sub('([^0-9])([\\.,])', '\\1 \\2 ', sent)
        sent = re.sub('([\\.,])([^0-9])', ' \\1 \\2', sent)
        sent = re.sub('([0-9])(-)', '\\1 \\2 ', sent)
        sent = re.sub('\\s+', ' ', sent)
        sent = sent.strip()
        return sent.split(' ')
|
class BLEUScore(NGramScore):
    """An accumulator object capable of computing BLEU score using multiple references.

    The BLEU score is always smoothed a bit so that it's never undefined. For sentence-level
    measurements, proper smoothing should be used via the smoothing parameter (set to 1.0 for
    the same behavior as default Moses's MERT sentence BLEU).
    """
    # Floors that keep log() defined when a precision component is zero.
    TINY = 1e-15
    SMALL = 1e-09

    def __init__(self, max_ngram=4, case_sensitive=False, smoothing=0.0):
        """Create the scoring object.
        @param max_ngram: the n-gram level to compute the score for (default: 4)
        @param case_sensitive: use case-sensitive matching (default: no)
        @param smoothing: constant to add for smoothing (defaults to 0.0, sentBLEU uses 1.0)
        """
        super(BLEUScore, self).__init__(max_ngram, case_sensitive)
        self.smoothing = smoothing
        self.reset()

    def reset(self):
        """Reset the object, zero all counters."""
        self.ref_len = 0
        self.cand_lens = [0] * self.max_ngram
        self.hits = [0] * self.max_ngram

    def append(self, pred_sent, ref_sents):
        """Append a sentence for measurements, increase counters.

        @param pred_sent: the system output sentence (string/list of tokens)
        @param ref_sents: the corresponding reference sentences (list of strings/lists of tokens)
        """
        pred_sent, ref_sents = self.check_tokenized(pred_sent, ref_sents)
        # BUG FIX: xrange does not exist in Python 3; use range.
        for i in range(self.max_ngram):
            self.hits[i] += self.compute_hits(i + 1, pred_sent, ref_sents)
            self.cand_lens[i] += len(pred_sent) - i
        # For the brevity penalty, use the reference closest in length
        # (ties broken toward the shorter reference).
        closest_ref = min(ref_sents,
                          key=lambda ref_sent: (abs(len(ref_sent) - len(pred_sent)), len(ref_sent)))
        self.ref_len += len(closest_ref)

    def score(self):
        """Return the current BLEU score, according to the accumulated counts."""
        return self.bleu()

    def compute_hits(self, n, pred_sent, ref_sents):
        """Compute clipped n-gram hits for the given sentences and the given N.

        @param n: n-gram 'N' (1 for unigrams, 2 for bigrams etc.)
        @param pred_sent: the system output sentence (tokens)
        @param ref_sents: the corresponding reference sentences (list/tuple of token lists)
        """
        merged_ref_ngrams = self.get_ngram_counts(n, ref_sents)
        pred_ngrams = self.get_ngram_counts(n, [pred_sent])
        hits = 0
        # BUG FIX: dict.iteritems() does not exist in Python 3; use items().
        for ngram, cnt in pred_ngrams.items():
            hits += min(merged_ref_ngrams.get(ngram, 0), cnt)
        return hits

    def bleu(self):
        """Return the current BLEU score, according to the accumulated counts."""
        # Brevity penalty (tiny floor avoids dividing by a zero candidate length).
        bp = 1.0
        if self.cand_lens[0] <= self.ref_len:
            bp = math.exp(1.0 - self.ref_len /
                          (float(self.cand_lens[0]) if self.cand_lens[0] else 1e-05))
        return bp * self.ngram_precision()

    def ngram_precision(self):
        """Return the current n-gram precision (harmonic mean of n-gram precisions up to max_ngram)
        according to the accumulated counts."""
        prec_log_sum = 0.0
        for n_hits, n_len in zip(self.hits, self.cand_lens):
            n_hits += self.smoothing
            n_len += self.smoothing
            # Floor zero counts so that log() stays defined.
            n_hits = max(n_hits, self.TINY)
            n_len = max(n_len, self.SMALL)
            prec_log_sum += math.log(n_hits / n_len)
        return math.exp((1.0 / self.max_ngram) * prec_log_sum)
|
class NISTScore(NGramScore):
    """An accumulator object capable of computing NIST score using multiple references."""

    # Brevity-penalty shape constant from the NIST definition (mteval).
    BETA = (- math.log(0.5)) / (math.log(1.5) ** 2)

    def __init__(self, max_ngram=5, case_sensitive=False):
        """Create the scoring object.
        @param max_ngram: the n-gram level to compute the score for (default: 5)
        @param case_sensitive: use case-sensitive matching (default: no)
        """
        super(NISTScore, self).__init__(max_ngram, case_sensitive)
        self.reset()

    def reset(self):
        """Reset the object, zero all counters."""
        # BUG FIX throughout this class: xrange -> range (Python 3).
        # ref_ngrams[n] counts reference n-grams; index 0 holds the empty tuple
        # whose count is the total reference length (the "0-gram" used by info()).
        self.ref_ngrams = [defaultdict(int) for _ in range(self.max_ngram + 1)]
        self.hit_ngrams = [[] for _ in range(self.max_ngram)]
        self.cand_lens = [[] for _ in range(self.max_ngram)]
        self.avg_ref_len = 0.0

    def append(self, pred_sent, ref_sents):
        """Append a sentence for measurements, increase counters.

        @param pred_sent: the system output sentence (string/list of tokens)
        @param ref_sents: the corresponding reference sentences (list of strings/lists of tokens)
        """
        pred_sent, ref_sents = self.check_tokenized(pred_sent, ref_sents)
        for n in range(self.max_ngram):
            self.cand_lens[n].append(len(pred_sent) - n)
            merged_ref_ngrams = self.get_ngram_counts(n + 1, ref_sents)
            pred_ngrams = self.get_ngram_counts(n + 1, [pred_sent])
            # Remember clipped hits per sentence for the informativeness sums.
            hit_ngrams = {}
            for ngram in pred_ngrams:
                hits = min(pred_ngrams[ngram], merged_ref_ngrams.get(ngram, 0))
                if hits:
                    hit_ngrams[ngram] = hits
            self.hit_ngrams[n].append(hit_ngrams)
            for ref_sent in ref_sents:
                for ngram in self.ngrams(n + 1, ref_sent):
                    self.ref_ngrams[n + 1][ngram] += 1
        ref_len_sum = sum(len(ref_sent) for ref_sent in ref_sents)
        self.ref_ngrams[0][()] += ref_len_sum
        self.avg_ref_len += ref_len_sum / float(len(ref_sents))

    def score(self):
        """Return the current NIST score, according to the accumulated counts."""
        return self.nist()

    def info(self, ngram):
        """Return the NIST informativeness of an n-gram:
        log2(count(prefix) / count(ngram)) over the reference corpus."""
        if ngram not in self.ref_ngrams[len(ngram)]:
            return 0.0
        return math.log(self.ref_ngrams[len(ngram) - 1][ngram[:-1]] /
                        float(self.ref_ngrams[len(ngram)][ngram]), 2)

    def nist_length_penalty(self, lsys, avg_lref):
        """Compute the NIST length penalty, based on system output length & average reference length.
        @param lsys: total system output length
        @param avg_lref: total average reference length
        @return: NIST length penalty term
        """
        ratio = lsys / float(avg_lref)
        if ratio >= 1:
            return 1
        if ratio <= 0:
            return 0
        return math.exp((- self.BETA) * (math.log(ratio) ** 2))

    def nist(self):
        """Return the current NIST score, according to the accumulated counts."""
        hit_infos = [0.0 for _ in range(self.max_ngram)]
        for n in range(self.max_ngram):
            for hit_ngrams in self.hit_ngrams[n]:
                # BUG FIX: dict.iteritems() does not exist in Python 3; use items().
                hit_infos[n] += sum(self.info(ngram) * hits
                                    for ngram, hits in hit_ngrams.items())
        total_lens = [sum(self.cand_lens[n]) for n in range(self.max_ngram)]
        nist_sum = sum(hit_info / total_len
                       for hit_info, total_len in zip(hit_infos, total_lens))
        bp = self.nist_length_penalty(sum(self.cand_lens[0]), self.avg_ref_len)
        return bp * nist_sum
|
class Bleu():
    """Thin wrapper around BleuScorer for COCO-style (gts, res) dictionaries."""

    def __init__(self, n=4):
        # Compute BLEU up to n-grams (default 4, i.e. BLEU-4).
        self._n = n
        self._hypo_for_image = {}
        self.ref_for_image = {}

    def compute_score(self, gts, res):
        """Compute corpus-level BLEU.

        :param gts: dict image_id -> list of reference sentences
        :param res: dict image_id -> single-element list with the hypothesis
        :return: (score, scores): overall score and per-image scores
        """
        assert gts.keys() == res.keys()
        scorer = BleuScorer(n=self._n)
        for img_id in gts.keys():
            hypo = res[img_id]
            ref = gts[img_id]
            # Expect exactly one hypothesis and at least one reference per image.
            assert type(hypo) is list
            assert len(hypo) == 1
            assert type(ref) is list
            assert len(ref) >= 1
            scorer += (hypo[0], ref)
        return scorer.compute_score(option='closest', verbose=1)

    def method(self):
        return 'Bleu'
|
class Cider():
    """
    Main Class to compute the CIDEr metric

    """

    def __init__(self, test=None, refs=None, n=4, sigma=6.0):
        # n: max n-gram order; sigma: gaussian length-penalty width.
        self._n = n
        self._sigma = sigma

    def compute_score(self, gts, res):
        """
        Main function to compute CIDEr score
        :param gts: dict with key <image> and value <tokenized reference sentences>
        :param res: dict with key <image> and value <tokenized hypothesis / candidate sentence>
        :return: (score, scores): corpus-level CIDEr and per-image scores
        """
        assert gts.keys() == res.keys()
        scorer = CiderScorer(n=self._n, sigma=self._sigma)
        for img_id in gts.keys():
            hypo = res[img_id]
            ref = gts[img_id]
            # Expect exactly one hypothesis and at least one reference per image.
            assert type(hypo) is list
            assert len(hypo) == 1
            assert type(ref) is list
            assert len(ref) > 0
            scorer += (hypo[0], ref)
        return scorer.compute_score()

    def method(self):
        return 'CIDEr'
|
def precook(s, n=4, out=False):
    """
    Takes a string as input and returns an object that can be given to
    either cook_refs or cook_test. This is optional: cook_refs and cook_test
    can take string arguments as well.
    :param s: string : sentence to be converted into ngrams
    :param n: int : number of ngrams for which representation is calculated
    :param out: unused flag, kept for interface compatibility with cook_test
    :return: term frequency vector (dict ngram-tuple -> count) for occurring ngrams
    """
    words = s.split()
    counts = defaultdict(int)
    # BUG FIX: xrange does not exist in Python 3; use range.
    for k in range(1, n + 1):
        for i in range(len(words) - k + 1):
            ngram = tuple(words[i:i + k])
            counts[ngram] += 1
    return counts
|
def cook_refs(refs, n=4):
    """Takes a list of reference sentences for a single segment
    and returns an object that encapsulates everything that BLEU
    needs to know about them.
    :param refs: list of string : reference sentences for some image
    :param n: int : number of ngrams for which (ngram) representation is calculated
    :return: result (list of dict)
    """
    cooked = []
    for ref in refs:
        cooked.append(precook(ref, n))
    return cooked
|
def cook_test(test, n=4):
    """Takes a test sentence and returns an object that
    encapsulates everything that BLEU needs to know about it.
    :param test: string : hypothesis sentence for some image
    :param n: int : number of ngrams for which (ngram) representation is calculated
    :return: result (dict of ngram-tuple -> count)
    """
    return precook(test, n, out=True)
|
class CiderScorer(object):
    """CIDEr scorer: accumulates (hypothesis, references) pairs and computes the
    corpus-level CIDEr score from tf-idf-weighted n-gram cosine similarity."""

    def copy(self):
        """Return a shallow copy holding the same cooked test/ref lists."""
        new = CiderScorer(n=self.n)
        new.ctest = copy.copy(self.ctest)
        new.crefs = copy.copy(self.crefs)
        return new

    def __init__(self, test=None, refs=None, n=4, sigma=6.0):
        """singular instance
        @param n: max n-gram order
        @param sigma: gaussian penalty width for hypothesis/reference length difference
        """
        self.n = n
        self.sigma = sigma
        self.crefs = []
        self.ctest = []
        self.document_frequency = defaultdict(float)
        self.cook_append(test, refs)
        self.ref_len = None

    def cook_append(self, test, refs):
        """called by constructor and __iadd__ to avoid creating new instances."""
        if refs is not None:
            self.crefs.append(cook_refs(refs))
            if test is not None:
                self.ctest.append(cook_test(test))
            else:
                # lens of crefs and ctest have to match
                self.ctest.append(None)

    def size(self):
        assert len(self.crefs) == len(self.ctest), \
            'refs/test mismatch! %d<>%d' % (len(self.crefs), len(self.ctest))
        return len(self.crefs)

    def __iadd__(self, other):
        """add an instance (e.g., from another sentence)."""
        if type(other) is tuple:
            self.cook_append(other[0], other[1])
        else:
            self.ctest.extend(other.ctest)
            self.crefs.extend(other.crefs)
        return self

    def compute_doc_freq(self):
        """
        Compute term frequency for reference data.
        This will be used to compute idf (inverse document frequency later)
        The term frequency is stored in the object
        :return: None
        """
        for refs in self.crefs:
            # Count each n-gram once per image, no matter how many refs contain it.
            # BUG FIX: dict.iteritems() does not exist in Python 3; use items().
            for ngram in set(ngram for ref in refs for (ngram, count) in ref.items()):
                self.document_frequency[ngram] += 1

    def compute_cider(self):
        def counts2vec(cnts):
            """
            Function maps counts of ngram to vector of tfidf weights.
            The function returns vec, an array of dictionary that store mapping of n-gram and tf-idf weights.
            The n-th entry of array denotes length of n-grams.
            :param cnts:
            :return: vec (array of dict), norm (array of float), length (int)
            """
            vec = [defaultdict(float) for _ in range(self.n)]
            length = 0
            norm = [0.0 for _ in range(self.n)]
            # BUG FIX: iteritems() -> items() (Python 3).
            for ngram, term_freq in cnts.items():
                # Give ngram count 1 if it doesn't appear in reference corpus.
                df = np.log(max(1.0, self.document_frequency[ngram]))
                n = len(ngram) - 1
                # tf * idf (ref_len is log(#images), so ref_len - df is the idf term).
                vec[n][ngram] = float(term_freq) * (self.ref_len - df)
                norm[n] += pow(vec[n][ngram], 2)
                if n == 1:
                    # NOTE(review): upstream quirk -- length counts bigrams, not
                    # unigrams; preserved for score compatibility.
                    length += term_freq
            norm = [np.sqrt(x) for x in norm]
            return (vec, norm, length)

        def sim(vec_hyp, vec_ref, norm_hyp, norm_ref, length_hyp, length_ref):
            """
            Compute the cosine similarity of two vectors.
            :param vec_hyp: array of dictionary for vector corresponding to hypothesis
            :param vec_ref: array of dictionary for vector corresponding to reference
            :param norm_hyp: array of float for vector corresponding to hypothesis
            :param norm_ref: array of float for vector corresponding to reference
            :param length_hyp: int containing length of hypothesis
            :param length_ref: int containing length of reference
            :return: array of score for each n-grams cosine similarity
            """
            delta = float(length_hyp - length_ref)
            val = np.array([0.0 for _ in range(self.n)])
            for n in range(self.n):
                # BUG FIX: iteritems() -> items() (Python 3).
                for ngram, count in vec_hyp[n].items():
                    val[n] += min(vec_hyp[n][ngram], vec_ref[n][ngram]) * vec_ref[n][ngram]
                if (norm_hyp[n] != 0) and (norm_ref[n] != 0):
                    val[n] /= (norm_hyp[n] * norm_ref[n])
                assert (not math.isnan(val[n]))
                # Gaussian penalty on the hypothesis/reference length difference.
                val[n] *= np.e ** ((- (delta ** 2)) / (2 * (self.sigma ** 2)))
            return val

        # idf normalizer: log of the number of images in the corpus.
        self.ref_len = np.log(float(len(self.crefs)))
        scores = []
        for test, refs in zip(self.ctest, self.crefs):
            vec, norm, length = counts2vec(test)
            score = np.array([0.0 for _ in range(self.n)])
            for ref in refs:
                vec_ref, norm_ref, length_ref = counts2vec(ref)
                score += sim(vec, vec_ref, norm, norm_ref, length, length_ref)
            # Average over n-gram orders and references, then scale by 10.
            score_avg = np.mean(score)
            score_avg /= len(refs)
            score_avg *= 10.0
            scores.append(score_avg)
        return scores

    def compute_score(self, option=None, verbose=0):
        self.compute_doc_freq()
        # NOTE(review): raises (max of empty / assert) when no pairs were added;
        # callers are expected to append at least one pair first.
        assert len(self.ctest) >= max(self.document_frequency.values())
        score = self.compute_cider()
        return (np.mean(np.array(score)), np.array(score))
|
class COCOEvalCap():
    """Runs the COCO caption metrics (METEOR, ROUGE-L, CIDEr) over a result set
    and stores overall and per-image scores."""

    def __init__(self, coco, cocoRes):
        self.evalImgs = []
        self.eval = {}
        self.imgToEval = {}
        self.coco = coco
        self.cocoRes = cocoRes
        self.params = {'image_id': coco.getImgIds()}

    def evaluate(self):
        """Tokenize ground truths and results, run every scorer, and fill
        self.eval / self.imgToEval / self.evalImgs."""
        imgIds = self.params['image_id']
        gts = {}
        res = {}
        for imgId in imgIds:
            gts[imgId] = self.coco.imgToAnns[imgId]
            res[imgId] = self.cocoRes.imgToAnns[imgId]
        # BUG FIX: the Python 2 "print >> sys.stderr" statements survived a 2to3
        # conversion as bogus tuple expressions that raise TypeError at runtime;
        # replaced with print(..., file=sys.stderr).
        print('tokenization...', file=sys.stderr)
        tokenizer = PTBTokenizer()
        gts = tokenizer.tokenize(gts)
        res = tokenizer.tokenize(res)
        print('setting up scorers...', file=sys.stderr)
        scorers = [(Meteor(), 'METEOR'), (Rouge(), 'ROUGE_L'), (Cider(), 'CIDEr')]
        for scorer, method in scorers:
            print('computing %s score...' % scorer.method(), file=sys.stderr)
            score, scores = scorer.compute_score(gts, res)
            if type(method) == list:
                # Multi-score metrics (e.g. BLEU-1..4) report one entry per sub-method.
                for sc, scs, m in zip(score, scores, method):
                    self.setEval(sc, m)
                    self.setImgToEvalImgs(scs, gts.keys(), m)
                    print('%s: %0.3f' % (m, sc), file=sys.stderr)
            else:
                self.setEval(score, method)
                self.setImgToEvalImgs(scores, gts.keys(), method)
                print('%s: %0.3f' % (method, score), file=sys.stderr)
        self.setEvalImgs()

    def setEval(self, score, method):
        # Overall (corpus-level) score per metric name.
        self.eval[method] = score

    def setImgToEvalImgs(self, scores, imgIds, method):
        # Per-image score per metric name.
        for imgId, score in zip(imgIds, scores):
            if not (imgId in self.imgToEval):
                self.imgToEval[imgId] = {}
                self.imgToEval[imgId]['image_id'] = imgId
            self.imgToEval[imgId][method] = score

    def setEvalImgs(self):
        # Flatten the per-image dicts into a list.
        self.evalImgs = [eval for (imgId, eval) in self.imgToEval.items()]
|
class Meteor():
    """Wrapper around the METEOR jar, driven line-by-line over stdin/stdout in
    its '-stdio' mode. Guarded by a lock because the subprocess is stateful."""

    def __init__(self):
        self.meteor_cmd = ['java', '-jar', '-Xmx2G', METEOR_JAR,
                           '-', '-', '-stdio', '-l', 'en', '-norm']
        # BUG FIX: universal_newlines=True makes the pipes exchange str, not
        # bytes -- required in Python 3 since we write/read formatted strings.
        self.meteor_p = subprocess.Popen(self.meteor_cmd,
                                         cwd=os.path.dirname(os.path.abspath(__file__)),
                                         stdin=subprocess.PIPE,
                                         stdout=subprocess.PIPE,
                                         stderr=subprocess.PIPE,
                                         universal_newlines=True)
        self.lock = threading.Lock()

    def compute_score(self, gts, res):
        """Score every image: one SCORE line per image, then a single EVAL line.
        Returns (overall_score, per_image_scores)."""
        assert gts.keys() == res.keys()
        imgIds = gts.keys()
        scores = []
        eval_line = 'EVAL'
        self.lock.acquire()
        for i in imgIds:
            assert len(res[i]) == 1
            stat = self._stat(res[i][0], gts[i])
            eval_line += ' ||| {}'.format(stat)
        self.meteor_p.stdin.write('{}\n'.format(eval_line))
        # BUG FIX: flush, otherwise the buffered line may never reach the jar.
        self.meteor_p.stdin.flush()
        for i in range(0, len(imgIds)):
            scores.append(float(self.meteor_p.stdout.readline().strip()))
        score = float(self.meteor_p.stdout.readline().strip())
        self.lock.release()
        return (score, scores)

    def method(self):
        return 'METEOR'

    def _stat(self, hypothesis_str, reference_list):
        # SCORE ||| reference 1 words ||| ... ||| hypothesis words
        # BUG FIX: collapse double spaces (the original replace was a no-op).
        hypothesis_str = hypothesis_str.replace('|||', '').replace('  ', ' ')
        score_line = ' ||| '.join(('SCORE', ' ||| '.join(reference_list), hypothesis_str))
        self.meteor_p.stdin.write('{}\n'.format(score_line))
        self.meteor_p.stdin.flush()
        return self.meteor_p.stdout.readline().strip()

    def _score(self, hypothesis_str, reference_list):
        self.lock.acquire()
        # SCORE ||| reference 1 words ||| ... ||| hypothesis words
        hypothesis_str = hypothesis_str.replace('|||', '').replace('  ', ' ')
        score_line = ' ||| '.join(('SCORE', ' ||| '.join(reference_list), hypothesis_str))
        self.meteor_p.stdin.write('{}\n'.format(score_line))
        self.meteor_p.stdin.flush()
        stats = self.meteor_p.stdout.readline().strip()
        eval_line = 'EVAL ||| {}'.format(stats)
        # EVAL ||| stats
        self.meteor_p.stdin.write('{}\n'.format(eval_line))
        self.meteor_p.stdin.flush()
        score = float(self.meteor_p.stdout.readline().strip())
        # NOTE(review): upstream reads a second line here (the jar echoes the
        # final score twice in this mode); preserved as-is -- confirm with the
        # METEOR -stdio protocol before changing.
        score = float(self.meteor_p.stdout.readline().strip())
        self.lock.release()
        return score

    def __del__(self):
        # Shut the jar down cleanly when the wrapper is garbage-collected.
        self.lock.acquire()
        self.meteor_p.stdin.close()
        self.meteor_p.kill()
        self.meteor_p.wait()
        self.lock.release()
|
def my_lcs(string, sub):
    """
    Calculates longest common subsequence for a pair of tokenized strings
    :param string : list of str : tokens from a string split using whitespace
    :param sub : list of str : shorter string, also split using whitespace
    :returns: length (int): length of the longest common subsequence between the two strings

    Note: my_lcs only gives length of the longest common subsequence, not the actual LCS
    """
    # Ensure `sub` is the shorter of the two sequences.
    if len(string) < len(sub):
        string, sub = sub, string
    # table[i][j] = LCS length of string[:i] and sub[:j].
    table = [[0] * (len(sub) + 1) for _ in range(len(string) + 1)]
    for i in range(1, len(string) + 1):
        for j in range(1, len(sub) + 1):
            if string[i - 1] == sub[j - 1]:
                table[i][j] = table[i - 1][j - 1] + 1
            else:
                table[i][j] = max(table[i - 1][j], table[i][j - 1])
    return table[-1][-1]
|
class Rouge():
    """
    Class for computing ROUGE-L score for a set of candidate sentences for the MS COCO test set

    """

    def __init__(self):
        # Recall is weighted beta^2 times as heavily as precision in the F-measure.
        self.beta = 1.2

    def calc_score(self, candidate, refs):
        """
        Compute ROUGE-L score given one candidate and references for an image
        :param candidate: list with a single str : candidate sentence to be evaluated
        :param refs: list of str : COCO reference sentences for the particular image to be evaluated
        :returns score: float (ROUGE-L score for the candidate evaluated against references)
        """
        assert len(candidate) == 1
        assert len(refs) > 0
        prec = []
        rec = []
        token_c = candidate[0].split(' ')
        for reference in refs:
            token_r = reference.split(' ')
            # LCS length against this reference drives both precision and recall.
            lcs = my_lcs(token_r, token_c)
            prec.append(lcs / float(len(token_c)))
            rec.append(lcs / float(len(token_r)))
        prec_max = max(prec)
        rec_max = max(rec)
        if prec_max != 0 and rec_max != 0:
            # Weighted F-measure of the best precision and best recall.
            score = ((1 + self.beta ** 2) * prec_max * rec_max) / \
                float(rec_max + (self.beta ** 2) * prec_max)
        else:
            score = 0.0
        return score

    def compute_score(self, gts, res):
        """
        Computes Rouge-L score given a set of reference and candidate sentences for the dataset
        :param gts: dict : reference sentences with "image name" key and "tokenized sentences" as values
        :param res: dict : candidate / test sentences with "image name" key and "tokenized sentences" as values
        :returns: (average_score, scores): mean ROUGE-L and the per-image score array
        """
        assert gts.keys() == res.keys()
        imgIds = gts.keys()
        score = []
        for id in imgIds:
            hypo = res[id]
            ref = gts[id]
            # BUG FIX: validate the inputs *before* scoring them (the original
            # ran these asserts only after calc_score had already consumed
            # hypo/ref, so malformed input failed with a confusing error).
            assert type(hypo) is list
            assert len(hypo) == 1
            assert type(ref) is list
            assert len(ref) > 0
            score.append(self.calc_score(hypo, ref))
        average_score = np.mean(np.array(score))
        return (average_score, np.array(score))

    def method(self):
        return 'Rouge'
|
class PTBTokenizer():
    """Python wrapper of Stanford PTBTokenizer"""

    def tokenize(self, captions_for_image):
        """Tokenize and lowercase every caption via the Stanford PTBTokenizer jar.

        :param captions_for_image: dict image_id -> list of {'caption': str} dicts
        :return: dict image_id -> list of tokenized caption strings
            (with PUNCTUATIONS tokens removed)
        """
        cmd = ['java', '-cp', STANFORD_CORENLP_3_4_1_JAR,
               'edu.stanford.nlp.process.PTBTokenizer',
               '-preserveLines', '-lowerCase']
        final_tokenized_captions_for_image = {}
        # Flatten the captions to one sentence per line, remembering which
        # image each line belongs to.
        image_id = [k for (k, v) in captions_for_image.items() for _ in range(len(v))]
        sentences = '\n'.join([c['caption'].replace('\n', ' ')
                               for (k, v) in captions_for_image.items() for c in v])
        # The tokenizer reads its input from a temp file passed as an argument.
        path_to_jar_dirname = os.path.dirname(os.path.abspath(__file__))
        tmp_file = tempfile.NamedTemporaryFile(delete=False, dir=path_to_jar_dirname)
        tmp_file.write(sentences.encode('UTF-8'))
        tmp_file.close()
        cmd.append(os.path.basename(tmp_file.name))
        # BUG FIX: universal_newlines=True so stdout is str -- in Python 3 the
        # pipe yields bytes and the later .split('\n') would raise TypeError.
        p_tokenizer = subprocess.Popen(cmd, cwd=path_to_jar_dirname,
                                       stdout=subprocess.PIPE,
                                       universal_newlines=True)
        # NOTE(review): stdin is not a pipe, so communicate() silently ignores
        # the `input=` argument; the jar only reads the temp file given above.
        token_lines = p_tokenizer.communicate(input=sentences.rstrip())[0]
        lines = token_lines.split('\n')
        os.remove(tmp_file.name)
        for k, line in zip(image_id, lines):
            if not (k in final_tokenized_captions_for_image):
                final_tokenized_captions_for_image[k] = []
            tokenized_caption = ' '.join([w for w in line.rstrip().split(' ')
                                          if w not in PUNCTUATIONS])
            final_tokenized_captions_for_image[k].append(tokenized_caption)
        return final_tokenized_captions_for_image
|
class AttentionWeightedAverage(Layer):
    """
    Computes a weighted average of the different channels across timesteps.
    Uses 1 parameter pr. channel to compute the attention value for a single timestep.
    """

    def __init__(self, return_attention=False, **kwargs):
        # return_attention=True makes call() also return the per-timestep weights.
        self.init = initializers.get('uniform')
        self.supports_masking = True
        self.return_attention = return_attention
        super(AttentionWeightedAverage, self).__init__(**kwargs)

    def build(self, input_shape):
        # Expects (batch, timesteps, channels) input.
        self.input_spec = [InputSpec(ndim=3)]
        assert (len(input_shape) == 3)
        # One scalar attention parameter per channel.
        self.W = self.add_weight(shape=(input_shape[2], 1), name='{}_W'.format(self.name), initializer=self.init)
        # NOTE(review): assigning trainable_weights directly only works on older
        # Keras versions; newer Keras manages this list itself -- confirm against
        # the pinned Keras version before upgrading.
        self.trainable_weights = [self.W]
        super(AttentionWeightedAverage, self).build(input_shape)

    def call(self, x, mask=None):
        # Score each timestep: (batch, time, channels) . (channels, 1) -> (batch, time).
        logits = K.dot(x, self.W)
        x_shape = K.shape(x)
        logits = K.reshape(logits, (x_shape[0], x_shape[1]))
        # Numerically-stable softmax over time (max subtracted before exp).
        ai = K.exp((logits - K.max(logits, axis=(- 1), keepdims=True)))
        if (mask is not None):
            # Zero out attention on masked (padding) timesteps.
            mask = K.cast(mask, K.floatx())
            ai = (ai * mask)
        # Normalize; epsilon guards against an all-masked row dividing by zero.
        att_weights = (ai / (K.sum(ai, axis=1, keepdims=True) + K.epsilon()))
        weighted_input = (x * K.expand_dims(att_weights))
        result = K.sum(weighted_input, axis=1)
        if self.return_attention:
            return [result, att_weights]
        return result

    def get_output_shape_for(self, input_shape):
        # Keras 1 API name; delegates to the Keras 2 method below.
        return self.compute_output_shape(input_shape)

    def compute_output_shape(self, input_shape):
        output_len = input_shape[2]
        if self.return_attention:
            # (batch, channels) result plus (batch, timesteps) weights.
            return [(input_shape[0], output_len), (input_shape[0], input_shape[1])]
        return (input_shape[0], output_len)

    def compute_mask(self, input, input_mask=None):
        # The time axis is reduced away, so no mask propagates downstream.
        if isinstance(input_mask, list):
            return ([None] * len(input_mask))
        else:
            return None
|
def finetuning_callbacks(checkpoint_path, patience, verbose):
    """Callbacks for model training.
    # Arguments:
        checkpoint_path: Where weight checkpoints should be saved.
        patience: Number of epochs with no improvement after which
            training will be stopped.
        verbose: callbacks are verbose when this is >= 2.
    # Returns:
        Array with training callbacks that can be passed straight into
        model.fit() or similar.
    """
    verbose_flag = verbose >= 2
    return [
        # Keep only the best weights (by validation loss).
        ModelCheckpoint(monitor='val_loss', filepath=checkpoint_path,
                        save_best_only=True, verbose=verbose_flag),
        # Stop after `patience` epochs without val_loss improvement.
        EarlyStopping(monitor='val_loss', patience=patience, verbose=verbose_flag),
    ]
|
def reconstructor(w2v_dim, pretrain_w2v, nb_tokens, max_src_len, mr_value_num, MR_FIELDS, ra=False, embed_l2=1e-06):
    """Build the MR-reconstruction model: embedding + BiLSTM encoder with one
    attention-pooled softmax head per MR field.

    :param w2v_dim: embedding dimensionality
    :param pretrain_w2v: pretrained embedding matrix, or None for random init
    :param nb_tokens: vocabulary size
    :param max_src_len: input sequence length
    :param mr_value_num: dict field -> number of output classes
    :param MR_FIELDS: ordered list of MR field names (one output head each)
    :param ra: if True, attention layers also return their weights
    :param embed_l2: L2 regularization weight on the embedding matrix (0 disables)
    :return: a Keras Model with one softmax output per MR field
    """
    model_input = Input(shape=(max_src_len,), dtype='int32')
    # Optional L2 regularization on the embedding matrix.
    embed_reg = None if embed_l2 == 0 else L1L2(l2=embed_l2)
    embed_kwargs = dict(input_dim=nb_tokens, output_dim=w2v_dim, mask_zero=True,
                        input_length=max_src_len, embeddings_regularizer=embed_reg)
    if pretrain_w2v is not None:
        # Warm-start from pretrained vectors; they remain trainable.
        embed_kwargs.update(weights=[pretrain_w2v], trainable=True)
    embed_x = Embedding(**embed_kwargs)(model_input)
    context_emb = Bidirectional(LSTM(256, return_sequences=True, dropout=0.25))(embed_x)
    outputs = []
    for field in MR_FIELDS:
        # One attention-pooling + softmax classification head per MR field.
        attended = AttentionWeightedAverage(name='attlayer_%s' % field.split()[0],
                                            return_attention=ra)(context_emb)
        outputs.append(Dense(mr_value_num[field], activation='softmax',
                             name='softmax_%s' % field.split()[0])(attended))
    return Model(inputs=[model_input], outputs=outputs, name='reconstruct')
|
class AttentionWeightedAverage(Layer):
    """
    Computes a weighted average of the different channels across timesteps.
    Uses 1 parameter pr. channel to compute the attention value for a single timestep.

    NOTE(review): exact duplicate of the AttentionWeightedAverage class defined
    earlier in this file; this later definition shadows the earlier one at
    import time. Consider deleting one copy.
    """

    def __init__(self, return_attention=False, **kwargs):
        # return_attention=True makes call() also return the per-timestep weights.
        self.init = initializers.get('uniform')
        self.supports_masking = True
        self.return_attention = return_attention
        super(AttentionWeightedAverage, self).__init__(**kwargs)

    def build(self, input_shape):
        # Expects (batch, timesteps, channels) input.
        self.input_spec = [InputSpec(ndim=3)]
        assert (len(input_shape) == 3)
        # One scalar attention parameter per channel.
        self.W = self.add_weight(shape=(input_shape[2], 1), name='{}_W'.format(self.name), initializer=self.init)
        # NOTE(review): direct trainable_weights assignment only works on older
        # Keras versions -- confirm against the pinned Keras version.
        self.trainable_weights = [self.W]
        super(AttentionWeightedAverage, self).build(input_shape)

    def call(self, x, mask=None):
        # Score each timestep: (batch, time, channels) . (channels, 1) -> (batch, time).
        logits = K.dot(x, self.W)
        x_shape = K.shape(x)
        logits = K.reshape(logits, (x_shape[0], x_shape[1]))
        # Numerically-stable softmax over time (max subtracted before exp).
        ai = K.exp((logits - K.max(logits, axis=(- 1), keepdims=True)))
        if (mask is not None):
            # Zero out attention on masked (padding) timesteps.
            mask = K.cast(mask, K.floatx())
            ai = (ai * mask)
        # Normalize; epsilon guards against an all-masked row dividing by zero.
        att_weights = (ai / (K.sum(ai, axis=1, keepdims=True) + K.epsilon()))
        weighted_input = (x * K.expand_dims(att_weights))
        result = K.sum(weighted_input, axis=1)
        if self.return_attention:
            return [result, att_weights]
        return result

    def get_output_shape_for(self, input_shape):
        # Keras 1 API name; delegates to the Keras 2 method below.
        return self.compute_output_shape(input_shape)

    def compute_output_shape(self, input_shape):
        output_len = input_shape[2]
        if self.return_attention:
            # (batch, channels) result plus (batch, timesteps) weights.
            return [(input_shape[0], output_len), (input_shape[0], input_shape[1])]
        return (input_shape[0], output_len)

    def compute_mask(self, input, input_mask=None):
        # The time axis is reduced away, so no mask propagates downstream.
        if isinstance(input_mask, list):
            return ([None] * len(input_mask))
        else:
            return None
|
def finetuning_callbacks(checkpoint_path, patience, verbose):
    """ Callbacks for model training.
    # Arguments:
        checkpoint_path: Where weight checkpoints should be saved.
        patience: Number of epochs with no improvement after which
            training will be stopped.
    # Returns:
        Array with training callbacks that can be passed straight into
        model.fit() or similar.
    """
    # NOTE(review): exact duplicate of finetuning_callbacks defined earlier in
    # this file; this later definition shadows the earlier one at import time.
    cb_verbose = (verbose >= 2)
    # Keep only the best weights (by validation loss) at checkpoint_path.
    checkpointer = ModelCheckpoint(monitor='val_loss', filepath=checkpoint_path, save_best_only=True, verbose=cb_verbose)
    # Stop after `patience` epochs without val_loss improvement.
    earlystop = EarlyStopping(monitor='val_loss', patience=patience, verbose=cb_verbose)
    return [checkpointer, earlystop]
|
def reconstructor(w2v_dim, pretrain_w2v, nb_tokens, max_src_len, mr_value_num, MR_FIELDS, ra=False, embed_l2=1e-06):
    """Build the MR-reconstruction model: embedding + BiLSTM encoder with one
    attention-pooled softmax head per MR field.

    NOTE(review): exact duplicate of the reconstructor defined earlier in this
    file; this later definition shadows the earlier one at import time.

    :param w2v_dim: embedding dimensionality
    :param pretrain_w2v: pretrained embedding matrix, or None for random init
    :param nb_tokens: vocabulary size
    :param max_src_len: input sequence length
    :param mr_value_num: dict field -> number of output classes
    :param MR_FIELDS: ordered list of MR field names (one output head each)
    :param ra: if True, attention layers also return their weights
    :param embed_l2: L2 regularization weight on the embedding matrix (0 disables)
    :return: a Keras Model with one softmax output per MR field
    """
    model_input = Input(shape=(max_src_len,), dtype='int32')
    # Optional L2 regularization on the embedding matrix.
    embed_reg = (L1L2(l2=embed_l2) if (embed_l2 != 0) else None)
    if (pretrain_w2v is None):
        embed = Embedding(input_dim=nb_tokens, output_dim=w2v_dim, mask_zero=True, input_length=max_src_len, embeddings_regularizer=embed_reg)
    else:
        # Warm-start from pretrained vectors; they remain trainable.
        embed = Embedding(input_dim=nb_tokens, output_dim=w2v_dim, mask_zero=True, input_length=max_src_len, weights=[pretrain_w2v], embeddings_regularizer=embed_reg, trainable=True)
    embed_x = embed(model_input)
    context_emb = Bidirectional(LSTM(256, return_sequences=True, dropout=0.25))(embed_x)
    outputs = []
    for mr_model in MR_FIELDS:
        # One attention-pooling + softmax classification head per MR field.
        x = AttentionWeightedAverage(name=('attlayer_%s' % mr_model.split()[0]), return_attention=ra)(context_emb)
        outputs.append(Dense(mr_value_num[mr_model], activation='softmax', name=('softmax_%s' % mr_model.split()[0]))(x))
    return Model(inputs=[model_input], outputs=outputs, name='reconstruct')
|
def minmax(a):
    """Scale an array linearly to the [0, 1] range.

    :param a: numeric array-like supporting elementwise arithmetic (e.g. np.ndarray)
    :return: array rescaled so min -> 0.0 and max -> 1.0; an all-constant input
        yields all zeros (the original divided by zero in that case)
    """
    lo, hi = min(a), max(a)
    if hi == lo:
        # BUG FIX: guard the degenerate constant-input case instead of
        # dividing by zero (which produced NaNs and a runtime warning).
        return (a - lo) * 0.0
    return (a - lo) / (1.0 * (hi - lo))
|
def reranker(fw_weight, bw_weight):
    """Rerank beam candidates by a weighted mix of min-max-normalized forward
    and backward scores, writing the best candidate per line.

    Reads the global `input_name` TSV (JSON columns: candidate texts, forward
    scores, backward losses) and writes one selected text per line to
    rerank/<output_name>_<fw_weight>.
    """
    global output_name, input_name
    out_path = 'rerank/%s_%.2f' % (output_name, fw_weight)
    with open(input_name, 'r') as stream, open(out_path, 'w') as out:
        for line in stream:
            fields = line.strip().split('\t')
            texts = json.loads(fields[0])
            fw_score = np.array(json.loads(fields[1]))
            # Backward scores are losses: negate so that higher is better.
            bw_score = -np.array(json.loads(fields[2]))
            total = minmax(fw_score) * fw_weight + minmax(bw_score) * bw_weight
            out.write('%s\n' % texts[np.argmax(total)])
|
def tidy_total(input_name):
    """Merge per-candidate backward losses (dev_recs_loss.pkl) into the rerank
    input file, producing devset.recs.full.txt with a third JSON column.

    Uses the global `beam_size` to slice the flat loss list per source line.

    :param input_name: path to the TSV with two columns (texts, forward scores)
    """
    global beam_size
    import pickle as pk
    # BUG FIX: the pickle file was opened without ever being closed; use a
    # context manager so the handle is released.
    with open('dev_recs_loss.pkl', 'rb') as f:
        dev_recs_loss = pk.load(f)
    with open(input_name, 'r') as stream, open('devset.recs.full.txt', 'w') as stream_1:
        for idx, line in enumerate(stream):
            data = line.strip().split('\t')
            # beam_size consecutive losses belong to the idx-th source line.
            bw_loss = dev_recs_loss[idx * beam_size:(idx + 1) * beam_size]
            stream_1.write('%s\t%s\t%s\n' % (data[0], data[1], json.dumps(bw_loss)))
|
def pad_snt(snt_ids_trunc, max_len):
    """Right-pad a token-id sequence with PAD_ID up to max_len.

    Sequences already at (or beyond) max_len are returned unchanged --
    no truncation is performed here.

    :param snt_ids_trunc: list of token ids
    :param max_len: desired padded length
    :return: new list of length max(len(snt_ids_trunc), max_len)
    """
    pad_count = max_len - len(snt_ids_trunc)
    # A non-positive pad_count multiplies the list to empty, i.e. no padding.
    return snt_ids_trunc + [PAD_ID] * pad_count
|
def parse_data_recs(mr_value_vocab2id, mr_value_num, data_process):
    """Convert (src MR values, tgt sentence) pairs into reconstructor training data.

    :param mr_value_vocab2id: per-field dict mapping MR value -> class id
    :param mr_value_num: per-field number of classes (unused here beyond lookups)
    :param data_process: (src_list, tgt_list) parallel lists of MR tuples and
        target token-id sentences
    :return: [data_new_x, data_new_y] where data_new_x is the list of padded
        target sentences and data_new_y is one np.array of class ids per MR field
    """
    global MR_FIELDS, id2tok
    it_num, fa_num = 0, 0
    data_new_x, data_new_y = [], [[] for _ in range(len(MR_FIELDS))]
    print(len(data_process[0]), len(data_process[1]))
    for data_src, data_tgt in zip(data_process[0], data_process[1]):
        data_tgt = pad_snt(data_tgt, data_params['max_tgt_len'])
        # One class label per MR field for this sentence.
        # (Removed dead locals: the original also built an unused one-hot
        # vector `mr_cur_class` and unused lists `data_new`/`data_new_tmp`.)
        for mr_id, mr_value_vocab in enumerate(data_src):
            data_new_y[mr_id].append(mr_value_vocab2id[MR_FIELDS[mr_id]][mr_value_vocab])
        # BUG FIX: replaced try/assert/bare-except (which silently swallowed
        # *any* error) with an explicit condition test -- same outcome on the
        # assert path, but unrelated exceptions are no longer hidden.
        # NOTE(review): token id 32 presumably marks the Name/Near placeholder
        # (see the print below) -- confirm against the vocabulary.
        if not (len(data_tgt) == data_params['max_tgt_len'] and 32 in data_tgt):
            fa_num += 1
            # Force the first field's label to class 1 for such sentences.
            data_new_y[0][-1] = 1
        data_new_x.append(data_tgt)
        it_num += 1
    data_new_y = [np.array(x) for x in data_new_y]
    print('No special Name or Near', (fa_num * 100.0) / it_num)
    return [data_new_x, data_new_y]
|
def tidy_recs():
    """Build the reconstructor data files under recs/: collect the vocabulary of
    every MR field, map values to class ids (by descending frequency), and dump
    the train/dev pickles.

    Uses the module-level `data` container and `MR_FIELDS` list.
    """
    global data, MR_FIELDS
    unique_inputs = set(tuple(x) for x in (data.dev[0] + data.train[0]) + data.test[0])
    print(len(unique_inputs), len(data.lexicalizations['train']),
          len(data.lexicalizations['test']), len(data.lexicalizations['dev']))
    lexical = (data.lexicalizations['train'] + data.lexicalizations['test']) + data.lexicalizations['dev']
    # Collect all attested values per MR field.
    field_values = {f: [] for f in MR_FIELDS}
    for mr in unique_inputs:
        for field_idx, value in enumerate(mr):
            field_values[MR_FIELDS[field_idx]].append(value)
    mr_value_id2vocab = {f: {} for f in MR_FIELDS}
    mr_value_vocab2id = {f: {} for f in MR_FIELDS}
    mr_value_num = {f: {} for f in MR_FIELDS}
    for field in field_values:
        # Class ids are assigned in descending frequency order.
        ranked = Counter(field_values[field]).most_common()
        print(ranked)
        mr_value_num[field] = len(ranked)
        for class_id, (value, _) in enumerate(ranked):
            mr_value_id2vocab[field][class_id] = value
            mr_value_vocab2id[field][value] = class_id
    pk.dump([mr_value_id2vocab, mr_value_vocab2id, mr_value_num], open('recs/mr_value.pkl', 'wb'))
    data_new_train = parse_data_recs(mr_value_vocab2id, mr_value_num, data.train)
    data_new_val = parse_data_recs(mr_value_vocab2id, mr_value_num, data.dev)
    pk.dump([data_new_train, data.lexicalizations['train']], open('recs/train.pkl', 'wb'))
    pk.dump([data_new_val, data.lexicalizations['dev']], open('recs/dev.pkl', 'wb'))
|
def w2v(dim):
    """Train a skip-gram word2vec model on the dev+train target sentences
    (token ids rendered as strings) and save it in word2vec text format
    under recs/.

    :param dim: embedding dimensionality
    """
    global data
    import gensim
    corpus = [[str(tok) for tok in snt] for snt in (data.dev[1] + data.train[1])]
    # sg=1: skip-gram; min_count=0 keeps every token id in the vocabulary.
    model = gensim.models.Word2Vec(size=dim, min_count=0, workers=16, sg=1)
    model.build_vocab(corpus)
    model.train(corpus, total_examples=model.corpus_count, epochs=model.iter)
    out_name = 'word2vec.{}d.{}k.w2v'.format(dim, len(model.wv.vocab) // 1000)
    model.wv.save_word2vec_format(join('recs/', out_name))
|
def pre_w2v(dim):
    """Load trained word2vec vectors and pickle an embedding matrix aligned
    with the token vocabulary to recs/w2v.{dim}d.pkl.

    Row 0 is the zero vector; rows 1-3 (special tokens) and any token id
    missing from the w2v file get random uniform(-0.25, 0.25) vectors.

    Fix vs. original: removed the dead local ``p = sorted(w2v_pre.keys())``.
    NOTE(review): the input filename hardcodes "3k" while w2v() names its
    output by the actual vocab size — confirm the two stay in sync.
    """
    global id2tok
    w2v_pre = {}
    with open('recs/word2vec.{}d.3k.w2v'.format(dim), encoding='utf8') as stream:
        for (idx, line) in enumerate(stream):
            if (idx == 0):
                # First line of the w2v text format is the "<count> <dim>" header.
                continue
            split = line.rstrip().split(' ')
            # Tokens were written as stringified integer ids by w2v().
            word = int(split[0])
            vector = np.array([float(num) for num in split[1:]])
            w2v_pre[word] = vector
    # Ids 0-3 are special tokens: zero pad vector + three random vectors.
    embeds = [np.zeros(dim), np.random.uniform((- 0.25), 0.25, dim), np.random.uniform((- 0.25), 0.25, dim), np.random.uniform((- 0.25), 0.25, dim)]
    for idx in range(4, len(id2tok)):
        embeds.append(w2v_pre.get(idx, np.random.uniform((- 0.25), 0.25, dim)))
    pk.dump(embeds, open('recs/w2v.{}d.pkl'.format(dim), 'wb'))
|
def run(config_dict):
    """Entry point: wire up data/model/training/evaluation components from a
    config dict and run either training or prediction.

    Keys read from ``config_dict``: 'data-module', 'model-module',
    'training-module', 'evaluation-module' (optional), 'mode' ('train' or
    'predict'), 'log_level', 'random_seed', 'data_params', 'model_params',
    'training_params', 'model_fn' (predict mode only).

    Fix vs. original: ``fix_seed(config_d[...])`` referenced the undefined
    name ``config_d`` (NameError at runtime) — corrected to ``config_dict``.
    """
    data_module = config_dict['data-module']
    model_module = config_dict['model-module']
    training_module = config_dict['training-module']
    evaluation_module = config_dict.get('evaluation-module', None)
    mode = config_dict['mode']
    # Each pluggable module exposes its class via a module-level `component`.
    DataClass = importlib.import_module(data_module).component
    ModelClass = importlib.import_module(model_module).component
    TrainingClass = importlib.import_module(training_module).component
    EvaluationClass = (importlib.import_module(evaluation_module).component if evaluation_module else None)
    model_dirname = make_model_dir(config_dict)
    logger = set_logger(config_dict['log_level'], os.path.join(model_dirname, 'log.txt'))
    data = DataClass(config_dict['data_params'])
    data.setup()
    # Seed RNGs before model construction so weight init is reproducible.
    fix_seed(config_dict['random_seed'])
    model = ModelClass(config_dict['model_params'])
    print('build model done')
    model.setup(data)
    print('setup data done')
    if (mode == 'train'):
        training_params = config_dict['training_params']
        trainer = TrainingClass(training_params)
        trainer.training_start(model, data)
        # Persist the config next to the trained model for reproducibility.
        save_config(config_dict, os.path.join(model_dirname, 'config.json'))
    elif (mode == 'predict'):
        assert (evaluation_module is not None), 'No evaluation module -- check config file!'
        evaluator = EvaluationClass(config_dict)
        model_fname = config_dict['model_fn']
        load_model(model, model_fname)
        id2word = data.vocab.id2tok
        if ('dev' in data.fnames):
            logger.info('Predicting on dev data')
            (predicted_ids, attention_weights) = evaluator.evaluate_model(model, data.dev[0])
            data_lexicalizations = data.lexicalizations['dev']
            predicted_snts = evaluator.lexicalize_predictions(predicted_ids, data_lexicalizations, id2word)
            save_predictions_txt(predicted_snts, ('%s.devset.predictions.txt' % model_fname))
        if ('test' in data.fnames):
            logger.info('Predicting on test data')
            (predicted_ids, attention_weights) = evaluator.evaluate_model(model, data.test[0])
            data_lexicalizations = data.lexicalizations['test']
            predicted_snts = evaluator.lexicalize_predictions(predicted_ids, data_lexicalizations, id2word)
            save_predictions_txt(predicted_snts, ('%s.testset.predictions.txt' % model_fname))
    else:
        logger.warning(("Check the 'mode' field in the config file: %s" % mode))
    logger.info('DONE')
|
def save_beam_fw(fw_probs, decode_snts, beam_size, filename):
    """Write beam-search decodes and their forward probabilities to a file.

    One line per input, tab-separated: a JSON list of the ``beam_size``
    decoded sentences (token lists joined with spaces) and a JSON list of
    their probabilities, where ``fw_probs[beam][i]`` scores
    ``decode_snts[beam][i]``.  Also prints the percentage of unique
    sentences across all beams.

    Fixes vs. original: side effects no longer hidden in a throwaway tuple
    expression, redundant ``len(list(set))`` simplified, and empty input no
    longer raises ZeroDivisionError in the ratio print.
    """
    all_snts = []
    unique_snts = set()
    with open(filename, 'w') as outstream:
        for dec_idx in range(len(decode_snts[0])):
            dec_cur = []
            fw_cur = []
            for beam_idx in range(beam_size):
                snt = ' '.join(decode_snts[beam_idx][dec_idx])
                all_snts.append(snt)
                unique_snts.add(snt)
                dec_cur.append(snt)
                fw_cur.append(fw_probs[beam_idx][dec_idx])
            outstream.write('%s\t%s\n' % (json.dumps(dec_cur), json.dumps(fw_cur)))
    if all_snts:
        print('unique ratio:', (len(unique_snts) * 100.0) / len(all_snts))
    else:
        # Nothing decoded; avoid dividing by zero.
        print('unique ratio: n/a (no sentences)')
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.