def spec_frontend(x, is_training, config, num_filt):
"Function implementing the proposed spectrogram front-end.\n\n - 'route_out': is the output of the front-end, and therefore the input of\n this function.\n - 'is_training': placeholder indicating weather it is training or test\n phase, for dropout or batch norm.\n - 'config': dictionary with some configurable parameters like: number of\n output units - config['numOutputNeurons'] or number of frequency bins\n of the spectrogram config['setup_params']['yInput']\n - 'num_filt': multiplicative factor that controls the number of filters\n for every filter shape.\n "
initializer = tf.contrib.layers.variance_scaling_initializer()
y_input = config['setup_params']['yInput']
input_layer = tf.expand_dims(x, 3)
input_pad_7 = tf.pad(input_layer, [[0, 0], [3, 3], [0, 0], [0, 0]], 'CONSTANT')
input_pad_3 = tf.pad(input_layer, [[0, 0], [1, 1], [0, 0], [0, 0]], 'CONSTANT')
conv1 = tf.layers.conv2d(inputs=input_pad_7, filters=num_filt, kernel_size=[7, int((0.9 * y_input))], padding='valid', activation=tf.nn.relu, kernel_initializer=initializer)
bn_conv1 = tf.layers.batch_normalization(conv1, training=is_training)
pool1 = tf.layers.max_pooling2d(inputs=bn_conv1, pool_size=[1, bn_conv1.shape[2]], strides=[1, bn_conv1.shape[2]])
p1 = tf.squeeze(pool1, [2])
conv2 = tf.layers.conv2d(inputs=input_pad_3, filters=(num_filt * 2), kernel_size=[3, int((0.9 * y_input))], padding='valid', activation=tf.nn.relu, kernel_initializer=initializer)
bn_conv2 = tf.layers.batch_normalization(conv2, training=is_training)
pool2 = tf.layers.max_pooling2d(inputs=bn_conv2, pool_size=[1, bn_conv2.shape[2]], strides=[1, bn_conv2.shape[2]])
p2 = tf.squeeze(pool2, [2])
conv3 = tf.layers.conv2d(inputs=input_layer, filters=(num_filt * 4), kernel_size=[1, int((0.9 * y_input))], padding='valid', activation=tf.nn.relu, kernel_initializer=initializer)
bn_conv3 = tf.layers.batch_normalization(conv3, training=is_training)
pool3 = tf.layers.max_pooling2d(inputs=bn_conv3, pool_size=[1, bn_conv3.shape[2]], strides=[1, bn_conv3.shape[2]])
p3 = tf.squeeze(pool3, [2])
conv4 = tf.layers.conv2d(inputs=input_pad_7, filters=num_filt, kernel_size=[7, int((0.4 * y_input))], padding='valid', activation=tf.nn.relu, kernel_initializer=initializer)
bn_conv4 = tf.layers.batch_normalization(conv4, training=is_training)
pool4 = tf.layers.max_pooling2d(inputs=bn_conv4, pool_size=[1, bn_conv4.shape[2]], strides=[1, bn_conv4.shape[2]])
p4 = tf.squeeze(pool4, [2])
conv5 = tf.layers.conv2d(inputs=input_pad_3, filters=(num_filt * 2), kernel_size=[3, int((0.4 * y_input))], padding='valid', activation=tf.nn.relu, kernel_initializer=initializer)
bn_conv5 = tf.layers.batch_normalization(conv5, training=is_training)
pool5 = tf.layers.max_pooling2d(inputs=bn_conv5, pool_size=[1, bn_conv5.shape[2]], strides=[1, bn_conv5.shape[2]])
p5 = tf.squeeze(pool5, [2])
conv6 = tf.layers.conv2d(inputs=input_layer, filters=(num_filt * 4), kernel_size=[1, int((0.4 * y_input))], padding='valid', activation=tf.nn.relu, kernel_initializer=initializer)
bn_conv6 = tf.layers.batch_normalization(conv6, training=is_training)
pool6 = tf.layers.max_pooling2d(inputs=bn_conv6, pool_size=[1, bn_conv6.shape[2]], strides=[1, bn_conv6.shape[2]])
p6 = tf.squeeze(pool6, [2])
pool7 = tf.layers.average_pooling2d(inputs=input_layer, pool_size=[1, y_input], strides=[1, y_input])
pool7_rs = tf.squeeze(pool7, [3])
conv7 = tf.layers.conv1d(inputs=pool7_rs, filters=num_filt, kernel_size=165, padding='same', activation=tf.nn.relu, kernel_initializer=initializer)
bn_conv7 = tf.layers.batch_normalization(conv7, training=is_training)
pool8 = tf.layers.average_pooling2d(inputs=input_layer, pool_size=[1, y_input], strides=[1, y_input])
pool8_rs = tf.squeeze(pool8, [3])
conv8 = tf.layers.conv1d(inputs=pool8_rs, filters=(num_filt * 2), kernel_size=128, padding='same', activation=tf.nn.relu, kernel_initializer=initializer)
bn_conv8 = tf.layers.batch_normalization(conv8, training=is_training)
pool9 = tf.layers.average_pooling2d(inputs=input_layer, pool_size=[1, y_input], strides=[1, y_input])
pool9_rs = tf.squeeze(pool9, [3])
conv9 = tf.layers.conv1d(inputs=pool9_rs, filters=(num_filt * 4), kernel_size=64, padding='same', activation=tf.nn.relu, kernel_initializer=initializer)
bn_conv9 = tf.layers.batch_normalization(conv9, training=is_training)
pool10 = tf.layers.average_pooling2d(inputs=input_layer, pool_size=[1, y_input], strides=[1, y_input])
pool10_rs = tf.squeeze(pool10, [3])
conv10 = tf.layers.conv1d(inputs=pool10_rs, filters=(num_filt * 8), kernel_size=32, padding='same', activation=tf.nn.relu, kernel_initializer=initializer)
bn_conv10 = tf.layers.batch_normalization(conv10, training=is_training)
pool = tf.concat([p1, p2, p3, p4, p5, p6, bn_conv7, bn_conv8, bn_conv9, bn_conv10], 2)
return tf.expand_dims(pool, 3)
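# Shape sketch (assuming x is [batch, time, y_input]): every 2D branch keeps the
# time axis and max-pools the frequency axis away, giving [batch, time, filters];
# the four avg-pool + conv1d branches are likewise [batch, time, filters]. The 2D
# branches contribute 14*num_filt channels and the 1D branches 15*num_filt, so
# with num_filt=16 the concat has 464 channels and the returned tensor is
# [batch, time, 464, 1], ready for the 2D back-end below.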
|
def backend(route_out, is_training, config, num_units):
"Function implementing the proposed back-end.\n\n - 'route_out': is the output of the front-end, and therefore the input of\n this function.\n - 'is_training': placeholder indicating weather it is training or test\n phase, for dropout or batch norm.\n - 'config': dictionary with some configurable parameters like: number of\n output units - config['numOutputNeurons'] or number of frequency bins\n of the spectrogram config['setup_params']['yInput']\n - 'num_units': number of units/neurons of the output dense layer.\n "
initializer = tf.contrib.layers.variance_scaling_initializer()
conv1 = tf.layers.conv2d(inputs=route_out, filters=512, kernel_size=[7, route_out.shape[2]], padding='valid', activation=tf.nn.relu, name='1cnnOut', kernel_initializer=initializer)
bn_conv1 = tf.layers.batch_normalization(conv1, training=is_training)
bn_conv1_t = tf.transpose(bn_conv1, [0, 1, 3, 2])
bn_conv1_pad = tf.pad(bn_conv1_t, [[0, 0], [3, 3], [0, 0], [0, 0]], 'CONSTANT')
conv2 = tf.layers.conv2d(inputs=bn_conv1_pad, filters=512, kernel_size=[7, bn_conv1_pad.shape[2]], padding='valid', activation=tf.nn.relu, name='2cnnOut', kernel_initializer=initializer)
conv2_t = tf.transpose(conv2, [0, 1, 3, 2])
bn_conv2 = tf.layers.batch_normalization(conv2_t, training=is_training)
res_conv2 = tf.add(bn_conv2, bn_conv1_t)
pool1 = tf.layers.max_pooling2d(inputs=res_conv2, pool_size=[2, 1], strides=[2, 1], name='poolOut')
bn_conv4_pad = tf.pad(pool1, [[0, 0], [3, 3], [0, 0], [0, 0]], 'CONSTANT')
conv5 = tf.layers.conv2d(inputs=bn_conv4_pad, filters=512, kernel_size=[7, bn_conv4_pad.shape[2]], padding='valid', activation=tf.nn.relu, name='3cnnOut', kernel_initializer=initializer)
conv5_t = tf.transpose(conv5, [0, 1, 3, 2])
bn_conv5 = tf.layers.batch_normalization(conv5_t, training=is_training)
res_conv5 = tf.add(bn_conv5, pool1)
max_pool2 = tf.reduce_max(res_conv5, axis=1)
(avg_pool2, var_pool2) = tf.nn.moments(res_conv5, axes=[1])
pool2 = tf.concat([max_pool2, avg_pool2], 2)
flat_pool2 = tf.contrib.layers.flatten(pool2)
flat_pool2_dropout = tf.layers.dropout(flat_pool2, rate=0.5, training=is_training)
dense = tf.layers.dense(inputs=flat_pool2_dropout, units=num_units, activation=tf.nn.relu, kernel_initializer=initializer)
bn_dense = tf.layers.batch_normalization(dense, training=is_training)
dense_dropout = tf.layers.dropout(bn_dense, rate=0.5, training=is_training)
return tf.layers.dense(inputs=dense_dropout, activation=tf.sigmoid, units=config['numOutputNeurons'], kernel_initializer=initializer)
|
def build_model(x, is_training, config):
"Function implementing an example of how to build a model with the\n functions above.\n\n - 'x': placeholder whith the input.\n - 'is_training': placeholder indicating weather it is training or test\n phase, for dropout or batch norm.\n - 'config': dictionary with some configurable parameters like: number of\n output units - config['numOutputNeurons'] or number of frequency bins\n of the spectrogram config['setup_params']['yInput']\n "
return backend(spec_frontend(x, is_training, config, 16), is_training, config, 500)
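# Usage sketch (TensorFlow 1.x; the shapes and config values below are
# illustrative assumptions, not fixed by the code):
# config = {'numOutputNeurons': 50, 'setup_params': {'yInput': 96}}
# x = tf.placeholder(tf.float32, [None, 188, 96])   # [batch, time, freq]
# is_training = tf.placeholder(tf.bool)
# y_hat = build_model(x, is_training, config)       # [batch, 50], sigmoid outputs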
|
class CustomInstall(install):
def run(self):
install.run(self)
os.system('pip3 install -r requirements.txt --ignore-installed')
os.system('pip3 uninstall transformers -y')
os.system('pip install git+https://github.com/jordiclive/transformers.git@controlprefixes --ignore-installed')
os.system('pip3 install torchtext==0.8.0 torch==1.7.1')
|
def count_trainable_parameters(model):
model_parameters = filter((lambda p: p.requires_grad), model.parameters())
params = sum([np.prod(p.size()) for p in model_parameters])
return params
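# Sanity check (sketch): a torch.nn.Linear(10, 5) has 10*5 weights + 5 biases,
# so count_trainable_parameters(torch.nn.Linear(10, 5)) returns 55.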
|
class Seq2SeqLoggingCallback(pl.Callback):
def on_batch_end(self, trainer, pl_module):
lrs = {f'lr_group_{i}': param['lr'] for (i, param) in enumerate(pl_module.trainer.optimizers[0].param_groups)}
pl_module.logger.log_metrics(lrs)
@rank_zero_only
def _write_logs(self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True) -> None:
logger.info(f'***** {type_path} results at step {trainer.global_step:05d} *****')
metrics = trainer.callback_metrics
trainer.logger.log_metrics({k: v for (k, v) in metrics.items() if (k not in ['log', 'progress_bar', 'preds'])})
od = Path(pl_module.hparams.output_dir)
if (type_path == 'test'):
results_file = (od / 'test_results.txt')
generations_file = (od / 'test_generations.txt')
else:
results_file = (od / f'{type_path}_results/{trainer.global_step:05d}.txt')
generations_file = (od / f'{type_path}_generations/{trainer.global_step:05d}.txt')
results_file.parent.mkdir(exist_ok=True)
generations_file.parent.mkdir(exist_ok=True)
with open(results_file, 'a+') as writer:
for key in sorted(metrics):
if (key in ['log', 'progress_bar', 'preds']):
continue
val = metrics[key]
if isinstance(val, torch.Tensor):
val = val.item()
msg = f'{key}: {val:.6f}\n'
writer.write(msg)
if (not save_generations):
return
if ('preds' in metrics):
content = '\n'.join(metrics['preds'])
generations_file.open('w+').write(content)
@rank_zero_only
def on_train_start(self, trainer, pl_module):
try:
npars = pl_module.model.model.num_parameters()
except AttributeError:
npars = pl_module.model.num_parameters()
n_trainable_pars = count_trainable_parameters(pl_module)
trainer.logger.log_metrics({'n_params': npars, 'mp': (npars / 1000000.0), 'grad_mp': (n_trainable_pars / 1000000.0)})
@rank_zero_only
def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
save_json(pl_module.metrics, pl_module.metrics_save_path)
return self._write_logs(trainer, pl_module, 'test')
@rank_zero_only
def on_validation_end(self, trainer: pl.Trainer, pl_module):
save_json(pl_module.metrics, pl_module.metrics_save_path)
|
def bespoke_scheduler(optimizer, num_warmup_steps, num_training_steps, last_epoch=(- 1)):
"""
Create a schedule with a learning rate that decreases linearly from the initial lr set in the optimizer to 0, after
a warmup period during which it increases linearly from 0 to the initial lr set in the optimizer. The second
lambda returned alongside the first decays over three times the number of training steps, for use with a second
parameter group.

Args:
    optimizer (:class:`~torch.optim.Optimizer`):
        The optimizer for which to schedule the learning rate.
    num_warmup_steps (:obj:`int`):
        The number of steps for the warmup phase.
    num_training_steps (:obj:`int`):
        The total number of training steps.
    last_epoch (:obj:`int`, `optional`, defaults to -1):
        The index of the last epoch when resuming training.

Return:
    :obj:`torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
"""
def lr_lambda(current_step: int):
if (current_step < num_warmup_steps):
return (float(current_step) / float(max(1, num_warmup_steps)))
return max(0.0, (float((num_training_steps - current_step)) / float(max(1, (num_training_steps - num_warmup_steps)))))
def lr_lambda2(current_step: int):
if (current_step < num_warmup_steps):
return (float(current_step) / float(max(1, num_warmup_steps)))
return max(0.0, (float(((num_training_steps * 3) - current_step)) / float(max(1, ((num_training_steps * 3) - num_warmup_steps)))))
return LambdaLR(optimizer, [lr_lambda, lr_lambda2], last_epoch)
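# Usage sketch: LambdaLR applies one lambda per optimizer param group, so the
# optimizer passed in must have exactly two groups (as built in
# configure_optimizers below). Group 0 decays to zero at num_training_steps,
# group 1 three times more slowly:
# opt = AdamW([{'params': group_a}, {'params': group_b}], lr=5e-5)
# sched = bespoke_scheduler(opt, num_warmup_steps=100, num_training_steps=1000)
# ... then sched.step() after every opt.step().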
|
class PrefixModule(PrefixTransformer):
mode = 'datatotext'
loss_names = ['loss']
metric_names = ['sacrebleu']
default_val_metric = 'bleu'
def __init__(self, hparams, **kwargs):
if (hparams.sortish_sampler and (hparams.gpus > 1)):
hparams.replace_sampler_ddp = False
elif (hparams.max_tokens_per_batch is not None):
if (hparams.gpus > 1):
raise NotImplementedError('Dynamic Batch size does not work for multi-gpu training')
if hparams.sortish_sampler:
raise ValueError('--sortish_sampler and --max_tokens_per_batch may not be used simultaneously')
super().__init__(hparams, num_labels=None, mode=self.mode, **kwargs)
use_task_specific_params(self.model, 'datatotext')
self.metrics_save_path = (Path(self.output_dir) / 'metrics.json')
self.hparams_save_path = (Path(self.output_dir) / 'hparams.pkl')
pickle_save(self.hparams, self.hparams_save_path)
self.step_count = 0
self.metrics = defaultdict(list)
self.model_type = self.config.model_type
self.vocab_size = (self.config.tgt_vocab_size if (self.model_type == 'fsmt') else self.config.vocab_size)
if self.hparams.T5_preamble:
p = 'translate Graph to English:'
else:
p = None
self.dataset_kwargs: dict = dict(data_dir=self.hparams.data_dir, max_source_length=self.hparams.max_source_length, prefix=p)
n_observations_per_split = {'train': self.hparams.n_train, 'val': self.hparams.n_val, 'test_seen': self.hparams.n_test, 'test_unseen': self.hparams.n_test, 'test_both': self.hparams.n_test}
self.n_obs = {k: (v if (v >= 0) else None) for (k, v) in n_observations_per_split.items()}
self.target_lens = {'train': self.hparams.max_target_length, 'val': self.hparams.val_max_target_length, 'test': self.hparams.test_max_target_length, 'test_seen': self.hparams.test_max_target_length, 'test_unseen': self.hparams.test_max_target_length, 'test_both': self.hparams.test_max_target_length}
assert (self.target_lens['train'] <= self.target_lens['val']), f'target_lens: {self.target_lens}'
assert (self.target_lens['train'] <= self.target_lens['test']), f'target_lens: {self.target_lens}'
freeze_params(self.seq2seq_model)
assert_all_frozen(self.seq2seq_model)
if self.hparams.new_tokens:
self.seq2seq_model.resize_token_embeddings(len(self.tokenizer))
make_new_embeddings_learnable(self.seq2seq_model, len(self.tokenizer), self.new_token_len)
freeze_params(self.seq2seq_model)
assert_all_frozen(self.seq2seq_model)
self.seq2seq_model.shared.trainable_weight.requires_grad = True
if self.hparams.freeze_base:
rank_zero_info('Freezing Base')
freeze_prefix(self.model)
self.num_workers = hparams.num_workers
self.decoder_start_token_id = None
self.dataset_class = (Seq2SeqDataset if hasattr(self.tokenizer, 'prepare_seq2seq_batch') else None)
if self.hparams.DART:
self.dataset_class = (Seq2SeqDatasetSingle if hasattr(self.tokenizer, 'prepare_seq2seq_batch') else None)
self.eval_beams = (self.model.config.num_beams if (self.hparams.eval_beams is None) else self.hparams.eval_beams)
assert (self.eval_beams >= 1), f'got self.eval_beams={self.eval_beams}. Need an integer >= 1'
if (self.hparams.eval_max_gen_length is not None):
self.eval_max_length = self.hparams.eval_max_gen_length
else:
self.eval_max_length = self.model.config.max_length
self.val_metric = (self.default_val_metric if (self.hparams.val_metric is None) else self.hparams.val_metric)
self.training_acc_across_batches_at_curr_epoch = []
self.eval_min_length = self.hparams.eval_min_length
rank_zero_info('for decoding, eval_max_length={}, eval_min_length={}, eval_beams={}'.format(self.eval_max_length, self.eval_min_length, self.eval_beams))
if self.hparams.restart_with_embed:
self.seq2seq_model.shared.trainable_weight = self.model.es.trainable_weight
self.seq2seq_model.encoder.embed_tokens.trainable_weight = self.model.es.trainable_weight
def freeze_embeds(self):
'Freeze token embeddings and positional embeddings for bart, just token embeddings for t5.'
if (self.model_type == 't5'):
freeze_params(self.model.shared)
for d in [self.model.encoder, self.model.decoder]:
freeze_params(d.embed_tokens)
elif (self.model_type == 'fsmt'):
for d in [self.model.model.encoder, self.model.model.decoder]:
freeze_params(d.embed_positions)
freeze_params(d.embed_tokens)
else:
freeze_params(self.model.model.shared)
for d in [self.model.model.encoder, self.model.model.decoder]:
freeze_params(d.embed_positions)
freeze_params(d.embed_tokens)
def forward(self, input_ids, **kwargs):
return self.model(input_ids, frozen_model=self.seq2seq_model, **kwargs)
def ids_to_clean_text(self, generated_ids: List[int]):
gen_text = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True)
return lmap(str.strip, gen_text)
def _step(self, batch: dict) -> Tuple:
pad_token_id = self.tokenizer.pad_token_id
(src_ids, src_mask) = (batch['input_ids'], batch['attention_mask'])
tgt_ids = batch['labels']
decoder_input_ids = self.seq2seq_model._shift_right(tgt_ids)
if self.hparams.DART:
outputs = self(src_ids, attention_mask=src_mask, decoder_input_ids=decoder_input_ids, use_cache=False, use_prefix=True, conditional_info={'sources': batch['sources']})
else:
outputs = self(src_ids, attention_mask=src_mask, decoder_input_ids=decoder_input_ids, use_cache=False, use_prefix=True, conditional_info={'cats': batch['cats'], 'sources': batch['sources']})
lm_logits = outputs[0]
if (self.hparams.label_smoothing == 0):
ce_loss_fct = torch.nn.CrossEntropyLoss(ignore_index=pad_token_id)
loss = ce_loss_fct(lm_logits.view((- 1), lm_logits.shape[(- 1)]), tgt_ids.view((- 1)))
else:
lprobs = torch.nn.functional.log_softmax(lm_logits, dim=(- 1))
(loss, nll_loss) = label_smoothed_nll_loss(lprobs, tgt_ids, self.hparams.label_smoothing, ignore_index=pad_token_id)
return (loss,)
@property
def pad(self) -> int:
return self.tokenizer.pad_token_id
def training_step(self, batch, batch_idx) -> Dict:
if (batch_idx == 0):
if self.hparams.DART:
print(batch['sources'])
else:
print(batch['cats'])
print(batch['sources'])
print('Trainable', self.seq2seq_model.encoder.embed_tokens.trainable_weight)
rank_zero_info(f'step {self.step_count}')
loss_tensors = self._step(batch)
logs = {name: loss for (name, loss) in zip(self.loss_names, loss_tensors)}
logs['tpb'] = (batch['input_ids'].ne(self.pad).sum() + batch['labels'].ne(self.pad).sum())
logs['bs'] = batch['input_ids'].shape[0]
logs['src_pad_tok'] = batch['input_ids'].eq(self.pad).sum()
logs['src_pad_frac'] = batch['input_ids'].eq(self.pad).float().mean()
self.training_acc_across_batches_at_curr_epoch.append(loss_tensors[0].item())
self.log_dict(logs)
loss = loss_tensors[0]
return {'loss': loss}
def on_epoch_end(self):
train_acc_mean = np.mean(self.training_acc_across_batches_at_curr_epoch)
self.log_dict({'train_loss': train_acc_mean})
rank_zero_info('train_loss = {}'.format(train_acc_mean))
self.training_acc_across_batches_per_epoch = []
def validation_step(self, batch, batch_idx) -> Dict:
if self.hparams.hf_checkpoint:
print(self.model.es.trainable_weight)
print('SEQ', self.seq2seq_model.shared.trainable_weight)
self.model.es.trainable_weight = self.seq2seq_model.shared.trainable_weight
rank_zero_info(f'Prefix_stored_weight {self.model.es.trainable_weight}')
save_path = Path(self.hparams.save_hf)
save_path = save_path.joinpath('checkpoint-curr_best')
self.model.config.save_step = self.step_count
self.model.save_pretrained(save_path)
self.tokenizer.save_pretrained(save_path)
rank_zero_info('SAVING TO checkpoint {}'.format(save_path))
raise ValueError('just_saving')
return self._generative_step(batch)
def validation_epoch_end(self, outputs, prefix='val') -> Dict:
self.step_count += 1
val_outputs_folder = 'val_outputs'
os.makedirs(os.path.join(self.hparams.output_dir, val_outputs_folder), exist_ok=True)
if (prefix == 'val'):
output_test_predictions_file = os.path.join(self.hparams.output_dir, val_outputs_folder, f'validation_predictions_{self.step_count}.txt')
output_test_targets_file = os.path.join(self.hparams.output_dir, val_outputs_folder, f'validation_targets_{self.step_count}.txt')
output_no_process = os.path.join(self.hparams.output_dir, val_outputs_folder, f'output_no_process_{self.step_count}.txt')
with open(output_test_predictions_file, 'w') as p_writer, open(output_test_targets_file, 'w') as t_writer, open(output_no_process, 'w') as v_writer:
for output_batch in outputs:
p_writer.writelines(((convert_text(s) + '\n') for s in output_batch['preds']))
t_writer.writelines(((convert_text(s) + '\n') for s in output_batch['target']))
v_writer.writelines(((s + '\n') for s in output_batch['preds']))
p_writer.close()
t_writer.close()
v_writer.close()
bleu_info = eval_bleu(self.hparams.data_dir, output_test_predictions_file, 'val')
rank_zero_info(f'bleu_info at step {self.step_count}: {bleu_info}')
if (bleu_info == (- 1)):
bleu_info = float(bleu_info)
else:
bleu_info = float(bleu_info.split(',')[0].split('BLEU = ')[1])
losses = {k: torch.stack([x[k] for x in outputs]).mean() for k in self.loss_names}
loss = losses['loss']
generative_metrics = {k: np.array([x[k] for x in outputs]).mean() for k in (self.metric_names + ['gen_time', 'gen_len'])}
generative_metrics['bleu'] = bleu_info
metric_val = (generative_metrics[self.val_metric] if (self.val_metric in generative_metrics) else losses[self.val_metric])
self.log('bleu', bleu_info)
self.log('VAL_LOSS', loss)
metric_tensor: torch.FloatTensor = torch.tensor(metric_val).type_as(loss)
generative_metrics.update({k: v.item() for (k, v) in losses.items()})
losses.update(generative_metrics)
all_metrics = {f'{prefix}_avg_{k}': x for (k, x) in losses.items()}
all_metrics['step_count'] = self.step_count
self.metrics[prefix].append(all_metrics)
preds = flatten_list([x['preds'] for x in outputs])
if (prefix == 'val'):
self.log_dict({'log': all_metrics, f'{prefix}_loss': loss, f'{prefix}_{self.val_metric}': metric_tensor})
return {'bleu': bleu_info, 'log': all_metrics, 'preds': preds, f'{prefix}_loss': loss, f'{prefix}_{self.val_metric}': metric_tensor}
else:
data_logs = {}
for output in outputs:
dataset_idx = output[0]['dataloader_idx']
if (dataset_idx == 0):
dataset_name = 'test_both'
elif (dataset_idx == 1):
dataset_name = 'test_seen'
else:
dataset_name = 'test_unseen'
if (output[0]['bleu'] == (- 1)):
bleu_info = float(output[0]['bleu'])
else:
bleu_info = float(output[0]['bleu'].split(',')[0].split('BLEU = ')[1])
losses = {k: torch.stack([x[k] for x in output]).mean() for k in self.loss_names}
loss = losses['loss']
generative_metrics = {k: np.array([x[k] for x in output]).mean() for k in (self.metric_names + ['gen_time', 'gen_len'])}
generative_metrics['bleu'] = bleu_info
metric_val = (generative_metrics[self.val_metric] if (self.val_metric in generative_metrics) else losses[self.val_metric])
metric_tensor: torch.FloatTensor = torch.tensor(metric_val).type_as(loss)
generative_metrics.update({k: v.item() for (k, v) in losses.items()})
losses.update(generative_metrics)
all_metrics = {f'{prefix}_avg_{k}': x for (k, x) in losses.items()}
all_metrics['step_count'] = self.step_count
self.metrics[prefix].append(all_metrics)
preds = flatten_list([x['preds'] for x in output])
data_logs.update({f'log_{dataset_name}': all_metrics, f'preds_{dataset_name}': preds, f'{prefix}_loss_{dataset_name}': loss, f'{prefix}_{self.val_metric}_{dataset_name}': metric_tensor})
return data_logs
def calc_generative_metrics(self, preds, target) -> Dict:
return calculate_bleu(preds, target)
def _generative_step(self, batch: dict, batch_idx=None, dataloader_idx=None) -> dict:
t0 = time.time()
bsz = batch['input_ids'].size(0)
if self.hparams.DART:
prefix_prompt = self.model.get_prompt(bsz=bsz, sample_size=self.eval_beams, conditional_info={'sources': batch['sources']})
else:
prefix_prompt = self.model.get_prompt(bsz=bsz, sample_size=self.eval_beams, conditional_info={'cats': batch['cats'], 'sources': batch['sources']})
generated_ids = self.seq2seq_model.generate(batch['input_ids'], past_key_values=prefix_prompt, attention_mask=batch['attention_mask'], use_cache=True, length_penalty=self.hparams.length_penalty, use_prefix=True, decoder_start_token_id=self.decoder_start_token_id, num_beams=self.eval_beams, min_length=self.eval_min_length, max_length=self.eval_max_length)
gen_time = ((time.time() - t0) / batch['input_ids'].shape[0])
preds: List[str] = self.ids_to_clean_text(generated_ids)
target: List[str] = self.ids_to_clean_text(batch['labels'])
loss_tensors = self._step(batch)
base_metrics = {name: loss for (name, loss) in zip(self.loss_names, loss_tensors)}
rouge: Dict = self.calc_generative_metrics(preds, target)
summ_len = np.mean(lmap(len, generated_ids))
base_metrics.update(gen_time=gen_time, gen_len=summ_len, preds=preds, target=target, **rouge)
if (dataloader_idx is not None):
base_metrics.update(batch_idx=batch_idx, dataloader_idx=dataloader_idx)
return base_metrics
def test_step(self, batch, batch_idx, dataloader_idx):
if (batch_idx == 0):
rank_zero_info(f'Trainable {self.seq2seq_model.shared.trainable_weight}')
return self._generative_step(batch, batch_idx, dataloader_idx)
def test_epoch_end(self, outputs_all_testsets):
pickle_save(outputs_all_testsets, 'outputs_all_testsets.pkl')
val_outputs_folder = 'val_outputs'
os.makedirs(os.path.join(self.hparams.output_dir, val_outputs_folder), exist_ok=True)
for outputs in outputs_all_testsets:
dataset_idx = outputs[0]['dataloader_idx']
if (dataset_idx == 0):
file_name = 'test_both_predictions.txt'
file_name_tgt = 'test_both_targets.txt'
dataset_name = 'test_both'
elif (dataset_idx == 1):
file_name = 'test_seen_predictions.txt'
file_name_tgt = 'test_seen_targets.txt'
dataset_name = 'test_seen'
else:
file_name = 'test_unseen_predictions.txt'
file_name_tgt = 'test_unseen_targets.txt'
dataset_name = 'test_unseen'
file_name += '.debug'
file_name_tgt += '.debug'
output_test_predictions_file = os.path.join(self.hparams.output_dir, val_outputs_folder, file_name)
output_test_targets_file = os.path.join(self.hparams.output_dir, val_outputs_folder, file_name_tgt)
output_no_process = os.path.join(self.hparams.output_dir, val_outputs_folder, (file_name + 'output_no_process'))
with open(output_test_predictions_file, 'w') as p_writer, open(output_test_targets_file, 'w') as t_writer, open(output_no_process, 'w') as v_writer:
for output_batch in outputs:
p_writer.writelines(((convert_text(s) + '\n') for s in output_batch['preds']))
t_writer.writelines(((convert_text(s) + '\n') for s in output_batch['target']))
v_writer.writelines(((s + '\n') for s in output_batch['preds']))
p_writer.close()
t_writer.close()
v_writer.close()
bleu_info = eval_bleu(self.hparams.data_dir, output_test_predictions_file, dataset_name)
meteor_info = eval_meteor_test_webnlg(self.hparams.data_dir, output_test_predictions_file, dataset_name)
chrf_info = eval_chrf_test_webnlg(self.hparams.data_dir, output_test_predictions_file, dataset_name)
print(f'{dataset_name} - bleu_info: {bleu_info}')
print(f'{dataset_name} - meteor_info: {meteor_info}')
print(f'{dataset_name} - chrf_info: {chrf_info}')
outputs[0]['bleu'] = bleu_info
return self.validation_epoch_end(outputs_all_testsets, prefix='test')
def get_dataset(self, type_path):
n_obs = self.n_obs[type_path]
max_target_length = self.target_lens[type_path]
dataset = self.dataset_class(self.tokenizer, type_path=type_path, n_obs=n_obs, max_target_length=max_target_length, **self.dataset_kwargs)
return dataset
def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool=False) -> DataLoader:
dataset = self.get_dataset(type_path)
if (self.hparams.sortish_sampler and (type_path != 'test')):
sampler = dataset.make_sortish_sampler(batch_size, distributed=(self.hparams.gpus > 1))
return DataLoader(dataset, batch_size=batch_size, collate_fn=dataset.collate_fn, shuffle=False, num_workers=self.num_workers, sampler=sampler)
elif ((self.hparams.max_tokens_per_batch is not None) and (type_path != 'test')):
batch_sampler = dataset.make_dynamic_sampler(self.hparams.max_tokens_per_batch, distributed=(self.hparams.gpus > 1))
return DataLoader(dataset, batch_sampler=batch_sampler, collate_fn=dataset.collate_fn, num_workers=self.num_workers)
else:
return DataLoader(dataset, batch_size=batch_size, collate_fn=dataset.collate_fn, shuffle=shuffle, num_workers=self.num_workers, sampler=None)
def train_dataloader(self) -> DataLoader:
dataloader = self.get_dataloader('train', batch_size=self.hparams.train_batch_size, shuffle=True)
return dataloader
def val_dataloader(self) -> DataLoader:
return self.get_dataloader('val', batch_size=self.hparams.eval_batch_size)
def test_dataloader(self) -> List[DataLoader]:
test_dataloader = self.get_dataloader('test_both', batch_size=self.hparams.eval_batch_size)
if self.hparams.DART:
return [test_dataloader]
test_seen_dataloader = self.get_dataloader('test_seen', batch_size=self.hparams.eval_batch_size)
test_unseen_dataloader = self.get_dataloader('test_unseen', batch_size=self.hparams.eval_batch_size)
return [test_dataloader, test_seen_dataloader, test_unseen_dataloader]
@staticmethod
def add_model_specific_args(parser, root_dir):
PrefixTransformer.add_model_specific_args(parser, root_dir)
add_generic_args(parser, root_dir)
parser.add_argument('--max_source_length', default=512, type=int, help='The maximum total input sequence length after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded.')
parser.add_argument('--max_target_length', default=60, type=int, help='The maximum total target sequence length after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded.')
parser.add_argument('--val_dir', default='', type=str, help='The directory for validation')
parser.add_argument('--skip_train', type=bool, default=False)
parser.add_argument('--val_max_target_length', default=60, type=int, help='The maximum validation target length specified for generation')
parser.add_argument('--test_max_target_length', default=100, type=int, help='The maximum total test target length specified for generation')
parser.add_argument('--freeze_encoder', action='store_true')
parser.add_argument('--freeze_embeds', action='store_true')
parser.add_argument('--sortish_sampler', action='store_true', default=False)
parser.add_argument('--max_tokens_per_batch', type=int, default=None)
parser.add_argument('--logger_name', type=str, choices=['default', 'wandb', 'wandb_shared'], default='default')
parser.add_argument('--n_train', type=int, default=(- 1), required=False, help='# examples. -1 means use all.')
parser.add_argument('--n_val', type=int, default=(- 1), required=False, help='# examples. -1 means use all.')
parser.add_argument('--n_test', type=int, default=(- 1), required=False, help='# examples. -1 means use all.')
parser.add_argument('--task_mode', type=str, default='datatotext', required=False, help='if different tasks.')
parser.add_argument('--label_smoothing', type=float, default=0.0, required=False)
parser.add_argument('--src_lang', type=str, default='', required=False)
parser.add_argument('--save_hf', type=str, default='', required=False)
parser.add_argument('--tgt_lang', type=str, default='', required=False)
parser.add_argument('--eval_beams', type=int, default=6, required=False)
parser.add_argument('--eval_min_length', type=int, default=10, required=False)
parser.add_argument('--skip_val', type=bool, default=False, required=False)
parser.add_argument('--val_metric', type=str, default=None, required=False)
parser.add_argument('--eval_max_gen_length', type=int, default=60, help='never generate more than n tokens')
parser.add_argument('--length_penalty', type=float, default=1.0, help='length penalty specified for beam search')
parser.add_argument('--save_top_k', type=int, default=1, required=False, help='How many checkpoints to save')
parser.add_argument('--wb_project', type=str, default='', help='wandb project name')
parser.add_argument('--git', type=bool, default=True)
parser.add_argument('--dev', type=bool, default=False)
parser.add_argument('--freeze_base', type=bool, default=False)
parser.add_argument('--wb_name', type=str, default='', help='wandb run name')
parser.add_argument('--wb_entity', type=str, default='', help='wandb entity')
parser.add_argument('--id', type=str, default=None, help='wandb id if continuing a run')
parser.add_argument('--DART', default=False, type=bool, help='if running on the DART dataset rather than WebNLG; only one test loader is required')
parser.add_argument('--early_stopping_patience', type=int, default=(- 1), required=False, help='-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So val_check_interval will effect it.')
parser.add_argument('--T5_preamble', type=bool, default=False, required=False, help='Add the T5 preamble, e.g. "translate Graph to English:", to every input')
parser.add_argument('--restart_with_embed', type=bool, default=False, required=False, help='Set to True if working with special tokens: these methods use a fixed LM, so the embedding matrix is frozen bar some special tokens, e.g. <H>, <R>, <T>. Important if continuing from a checkpoint.')
return parser
|
def eval(args, model=None):
if (model is None):
if ('datatotext' in args.task_mode):
if (args.tuning_mode == 'prefixtune'):
model = PrefixModule(args)
rank_zero_info('the length penalty is {}'.format(args.length_penalty))
with torch.no_grad():
model.eval()
model = model.cuda()
data_loader = model.test_dataloader()
rank_zero_info('DATALOADER_LEN {}'.format(len(data_loader)))
out_lst = []
for (batch_idx, batch) in enumerate(data_loader):
batch = model.transfer_batch_to_device(batch, model.device)
out = model.test_step(batch, batch_idx)
out_lst.append(out)
if ((batch_idx % 50) == 0):
rank_zero_info(model.test_epoch_end(out_lst))
rank_zero_info(out['preds'])
result = model.test_epoch_end(out_lst)
for (k, v) in result.items():
if (k != 'preds'):
rank_zero_info('FINAL_RESULTS')
rank_zero_info('{}: {}'.format(k, v))
out_path = os.path.join(args.output_dir, 'test_beam_{}'.format(args.length_penalty))
rank_zero_info(f'writing the test results to {out_path}')
with open(out_path, 'w') as f:
for preds in result['preds']:
print(preds, file=f)
|
def main(args, model=None):
Path(args.output_dir).mkdir(exist_ok=True)
if (model is None):
if ('datatotext' in args.task_mode):
if (args.tuning_mode == 'prefixtune'):
model = PrefixModule(args)
pickle_save(model.hparams, (model.output_dir / 'hparams.pkl'))
if ((args.logger_name == 'default') or args.fast_dev_run or str(args.output_dir).startswith('/tmp') or str(args.output_dir).startswith('/var')):
logger = True
elif (args.logger_name == 'wandb'):
from pytorch_lightning.loggers import WandbLogger
if (args.id is not None):
id_ = args.id
else:
id_ = wandb.util.generate_id()
rank_zero_info(f'ID {id_}')
logger = WandbLogger(id=id_, name=args.wb_name, project=args.wb_project, entity=args.wb_entity)
if args.skip_train:
print('ES', model.model.es.trainable_weight)
print('Seq', model.seq2seq_model.shared.trainable_weight)
model.seq2seq_model.shared.trainable_weight = model.model.es.trainable_weight
trainer = pl.Trainer(gpus=1, precision=32)
trainer.test(model)
print('ES', model.model.es.trainable_weight)
return model
trainer: pl.Trainer = generic_train(model, args, logging_callback=Seq2SeqLoggingCallback(), logger=logger)
pickle_save(model.hparams, (model.output_dir / 'hparams.pkl'))
if (not args.do_predict):
return model
if (args.test_checkpoint is not None):
checkpoints = [args.test_checkpoint]
model.hparams.test_checkpoint = checkpoints[(- 1)]
trainer.resume_from_checkpoint = checkpoints[(- 1)]
if (args.do_predict and args.skip_train):
checkpoint = checkpoints[(- 1)]
rank_zero_info(checkpoint)
trainer.test(model, ckpt_path=checkpoint)
return model
trainer.test()
return model
|
class PrefixTransformer(pl.LightningModule):
def __init__(self, hparams: argparse.Namespace, num_labels=None, config=None, tokenizer=None, seq2seq_model=None, **config_kwargs):
'Initialize a model, tokenizer and config.'
super().__init__()
self.save_hyperparameters(hparams)
self.step_count = 0
self.output_dir = Path(self.hparams.output_dir)
cache_dir = (self.hparams.cache_dir if self.hparams.cache_dir else None)
rank_zero_info('the cache dir is {}'.format(cache_dir))
if (config is None):
self.config = AutoConfig.from_pretrained((self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path), **({'num_labels': num_labels} if (num_labels is not None) else {}), cache_dir=cache_dir, **config_kwargs)
else:
self.config: PretrainedConfig = config
extra_model_params = ('encoder_layerdrop', 'decoder_layerdrop', 'dropout', 'attention_dropout')
for p in extra_model_params:
if getattr(self.hparams, p, None):
assert hasattr(self.config, p), f"model config doesn't have a `{p}` attribute"
setattr(self.config, p, getattr(self.hparams, p))
if (tokenizer is None):
self.tokenizer = AutoTokenizer.from_pretrained((self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path), cache_dir=cache_dir)
if self.hparams.new_tokens:
new_tokens = ['<H>', '<R>', '<T>']
if self.hparams.control_token_DART:
new_tokens.extend(['<e2e>', '<webnlg_old>', '<WikiTableQuestions_lily>', '<WikiSQL_decl_sents>', '<WikiTableQuestions_mturk>', '<WikiSQL_lily>'])
self.new_token_len = len(new_tokens)
new_tokens_vocab = {}
new_tokens_vocab['additional_special_tokens'] = []
for (idx, t) in enumerate(new_tokens):
new_tokens_vocab['additional_special_tokens'].append(t)
num_added_toks = self.tokenizer.add_special_tokens(new_tokens_vocab)
rank_zero_info('We have added %s tokens', num_added_toks)
else:
self.tokenizer: PreTrainedTokenizer = tokenizer
self.config.preseqlen = self.hparams.preseqlen
if self.hparams.control_prefixes:
if self.hparams.DART:
self.config.preseqlen += self.hparams.m_prefix_len
else:
self.config.preseqlen += (self.hparams.m_prefix_len * 2)
self.config.use_prefix = True
self.seq2seq_model_type = AutoModel
if (seq2seq_model is None):
self.seq2seq_model = T5ForConditionalGeneration.from_pretrained(self.hparams.model_name_or_path, from_tf=bool(('.ckpt' in self.hparams.model_name_or_path)), config=self.config, cache_dir=cache_dir)
else:
self.seq2seq_model = seq2seq_model
self.seq2seq_model.resize_token_embeddings(len(self.tokenizer))
config_prefix = AutoConfig.from_pretrained(self.hparams.model_name_or_path, cache_dir=cache_dir)
self.model_type = config_prefix.model_type
if (self.hparams.optim_prefix == 'yes'):
optim_prefix_bool = True
elif (self.hparams.optim_prefix == 'no'):
optim_prefix_bool = False
else:
assert False, 'model_args.optim_prefix should be either yes or no'
rank_zero_info(self.model_type)
config_prefix._my_arg_tune_mode = self.hparams.tuning_mode
config_prefix._my_arg_task_mode = self.hparams.task_mode
config_prefix._my_arg_control = True
config_prefix.train_weights = False
config_prefix.optim_prefix = optim_prefix_bool
config_prefix.preseqlen = self.hparams.preseqlen
config_prefix.use_infix = (self.hparams.format_mode == 'infix')
config_prefix.format_mode = self.hparams.format_mode
config_prefix.prefix_dropout = self.hparams.prefix_dropout
config_prefix.vocab_size = len(self.tokenizer)
config_prefix.DART = self.hparams.DART
config_prefix.lowdata = ('lowdata' in self.hparams.output_dir)
if (config_prefix.lowdata and (self.hparams.use_lowdata_token == 'yes')):
config_prefix.lowdata_token = self.tokenizer([self.hparams.lowdata_token], add_prefix_space=True)['input_ids']
rank_zero_info(self.hparams.lowdata_token)
rank_zero_info(config_prefix.lowdata_token)
rank_zero_info(self.tokenizer.pad_token_id)
config_prefix.mid_dim = self.hparams.mid_dim
config_prefix.new_token_len = self.new_token_len
if self.hparams.control_prefixes:
config_prefix.m_prefix_mid_dim = self.hparams.m_prefix_mid_dim
config_prefix.m_prefix_len = self.hparams.m_prefix_len
if self.hparams.unseen:
config_prefix.unseen = True
if (self.hparams.prefixModel_name_or_path is not None):
rank_zero_info('loading from {}'.format(hparams.prefixModel_name_or_path))
self.model = ControlPrefixes.from_pretrained(self.hparams.prefixModel_name_or_path, from_tf=bool(('.ckpt' in self.hparams.prefixModel_name_or_path)), cache_dir=cache_dir, config=config_prefix)
else:
self.model = ControlPrefixes(config_prefix)
def load_hf_checkpoint(self, *args, **kwargs):
assert False, 'why need to load model here?'
self.model = self.model_type.from_pretrained(*args, **kwargs)
def get_lr_scheduler(self):
get_schedule_func = arg_to_scheduler[self.hparams.lr_scheduler]
scheduler = get_schedule_func(self.opt, num_warmup_steps=self.hparams.warmup_steps, num_training_steps=self.total_steps)
rank_zero_info(f'warm up {self.hparams.warmup_steps}')
scheduler = {'scheduler': scheduler, 'interval': 'step', 'frequency': 1}
return scheduler
def configure_optimizers(self):
'Prepare optimizer and schedule (linear warmup and decay)'
if self.hparams.different_scheduler:
cefr_params = [p for (n, p) in self.named_parameters() if any(((nd in n) for nd in ['CEFR_matrices.wte']))]
no_cefr_params = [p for (n, p) in self.named_parameters() if (not any(((nd in n) for nd in ['CEFR_matrices.wte'])))]
optimizer_grouped_parameters = [{'params': no_cefr_params, 'weight_decay': self.hparams.weight_decay}, {'params': cefr_params, 'weight_decay': self.hparams.weight_decay}]
if self.hparams.adafactor:
optimizer = Adafactor(optimizer_grouped_parameters, lr=self.hparams.learning_rate, scale_parameter=False, relative_step=False)
else:
optimizer = AdamW(optimizer_grouped_parameters, lr=self.hparams.learning_rate, eps=self.hparams.adam_epsilon)
self.opt = optimizer
scheduler = bespoke_scheduler(self.opt, num_warmup_steps=self.hparams.warmup_steps, num_training_steps=self.total_steps)
rank_zero_info(f'warm up {self.hparams.warmup_steps}')
scheduler = {'scheduler': scheduler, 'interval': 'step', 'frequency': 1}
return ([optimizer], [scheduler])
def test_step(self, batch, batch_nb):
return self.validation_step(batch, batch_nb)
def test_epoch_end(self, outputs):
return self.validation_end(outputs)
@property
def total_steps(self) -> int:
'The number of total training steps that will be run. Used for lr scheduler purposes.'
num_devices = max(1, self.hparams.gpus)
if (self.hparams.original_batch_size is not None):
effective_batch_size = ((self.hparams.original_batch_size * self.hparams.accumulate_grad_batches) * num_devices)
else:
effective_batch_size = ((self.hparams.train_batch_size * self.hparams.accumulate_grad_batches) * num_devices)
dataset_size = len(self.train_loader.dataset)
return int((dataset_size / effective_batch_size) * self.hparams.max_epochs)
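# Worked example (illustrative numbers): 10,000 training examples with
# train_batch_size=8, accumulate_grad_batches=4 and gpus=2 give an effective
# batch of 64, so max_epochs=30 yields int(10000 / 64 * 30) = 4687 steps.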
def setup(self, mode):
if (mode == 'fit'):
self.train_loader = self.get_dataloader('train', self.hparams.train_batch_size, shuffle=True)
def get_dataloader(self, type_path, batch_size, shuffle=False):
raise NotImplementedError('You must implement this for your task')
def train_dataloader(self):
return self.train_loader
def val_dataloader(self):
return self.get_dataloader('dev', self.hparams.eval_batch_size, shuffle=False)
def test_dataloader(self):
return self.get_dataloader('test', self.hparams.eval_batch_size, shuffle=False)
def _feature_file(self, mode):
return os.path.join(self.hparams.data_dir, 'cached_{}_{}_{}'.format(mode, list(filter(None, self.hparams.model_name_or_path.split('/'))).pop(), str(self.hparams.max_seq_length)))
@pl.utilities.rank_zero_only
def save_checkpoint(self, trainer) -> None:
rank_zero_info('Saving the checkpoint.')
return
@pl.utilities.rank_zero_only
def on_save_checkpoint(self, checkpoint: Dict[(str, Any)], filepath=None) -> None:
rank_zero_info('SEQ {}'.format(self.seq2seq_model.shared.trainable_weight))
self.model.es.trainable_weight = self.seq2seq_model.shared.trainable_weight
rank_zero_info('Prefix_stored_weight {}'.format(self.model.es.trainable_weight))
save_path = self.output_dir.joinpath('checkpoint-curr_best')
self.model.config.save_step = self.step_count
self.model.save_pretrained(save_path)
self.tokenizer.save_pretrained(save_path)
rank_zero_info('SAVING TO checkpoint {}'.format(save_path))
@staticmethod
def add_model_specific_args(parser, root_dir):
parser.add_argument('--model_name_or_path', default='t5-large', type=str, required=False, help='Path to pretrained model or model identifier from huggingface.co/models')
parser.add_argument('--prefixModel_name_or_path', default=None, type=str, help='Path to pretrained prefix model or model identifier from huggingface.co/models')
parser.add_argument('--prefix_mode', default='activation', type=str, help='embedding or activation')
parser.add_argument('--preseqlen', default=200, type=int, help='the length of the prefix.')
parser.add_argument('--optim_prefix', default='yes', type=str, help='use the task specific optimization of the prefix.')
parser.add_argument('--different_scheduler', default=False, type=bool, help='use a different lr scheduler for control prefixes and main prefix')
parser.add_argument('--tuning_mode', default='prefixtune', type=str, help='Could be prefixtune or finetune')
parser.add_argument('--prefix_dropout', default=0.0, type=float, help='the dropout rate for our prefix model.')
parser.add_argument('--use_dropout', default='no', type=str, help='whether to apply dropout to the main model during training')
parser.add_argument('--mid_dim', default=800, type=int, help='the dimension of the intermediate layer of the main prefix reparameterization')
parser.add_argument('--m_prefix_mid_dim', default=512, type=int, help='the dimension of the intermediate layer of the control prefix reparameterizations')
parser.add_argument('--m_prefix_len', default=1, type=int, help='the control prefix length')
parser.add_argument('--unseen', default=False, type=bool, help='Initializing a control prefix for unseen categories to zero')
parser.add_argument('--format_mode', default='cat', type=str, help='whether to look at the input again, including [infix, cat, peek, nopeek]')
parser.add_argument('--use_lowdata_token', default='yes', type=str, help='whether or not to use the lowdata token')
parser.add_argument('--lowdata_token', default='summarize', type=str, help='the lowdata token to use')
parser.add_argument('--config_name', default='', type=str, help='Pretrained config name or path if not the same as model_name')
parser.add_argument('--tokenizer_name', default=None, type=str, help='Pretrained tokenizer name or path if not the same as model_name')
parser.add_argument('--cache_dir', default='/content/gdrive/MyDrive/cache_dir', type=str, help='Where do you want to store the pre-trained models downloaded from s3')
parser.add_argument('--encoder_layerdrop', type=float, help='Encoder layer dropout probability (Optional). Goes into model.config')
parser.add_argument('--decoder_layerdrop', type=float, help='Decoder layer dropout probability (Optional). Goes into model.config')
parser.add_argument('--dropout', type=float, help='Dropout probability (Optional). Goes into model.config')
parser.add_argument('--control_prefixes', type=bool, default=False, help='if using control prefixes')
parser.add_argument('--new_tokens', type=bool, default=False, help='if using demarcation tokens <H>, <R>, <T> that need to be learnable')
parser.add_argument('--attention_dropout', type=float, help='Attention dropout probability (Optional). Goes into model.config')
parser.add_argument('--learning_rate', default=5e-05, type=float, help='The target learning rate.')
parser.add_argument('--lr_scheduler', default='linear', choices=arg_to_scheduler_choices, metavar=arg_to_scheduler_metavar, type=str, help='Learning rate scheduler')
parser.add_argument('--weight_decay', default=0.0, type=float, help='Weight decay if we apply some.')
parser.add_argument('--adam_epsilon', default=1e-08, type=float, help='Epsilon for Adam optimizer.')
parser.add_argument('--warmup_steps', default=0, type=int, help='Linear warmup over warmup_steps.')
parser.add_argument('--num_workers', default=4, type=int, help='kwarg passed to DataLoader')
parser.add_argument('--num_train_epochs', dest='max_epochs', default=30, type=int)
parser.add_argument('--original_batch_size', default=None, type=int)
parser.add_argument('--hf_checkpoint', default=False, type=bool, help='if want to save a hf model checkpoint from a lightning ckpt')
parser.add_argument('--test_checkpoint', default=None, type=str)
parser.add_argument('--train_batch_size', default=8, type=int)
parser.add_argument('--eval_batch_size', default=6, type=int)
parser.add_argument('--adafactor', action='store_true')
|
def add_generic_args(parser, root_dir) -> None:
parser.add_argument('--output_dir', default=None, type=str, required=True, help='The output directory where the model predictions and checkpoints will be written.')
parser.add_argument('--n_tpu_cores', dest='tpu_cores', type=int)
parser.add_argument('--max_grad_norm', dest='gradient_clip_val', default=1.0, type=float, help='Max gradient norm')
parser.add_argument('--do_predict', default=True, type=bool, help='Whether to run predictions on the test set.')
parser.add_argument('--gradient_accumulation_steps', dest='accumulate_grad_batches', type=int, default=1, help='Number of updates steps to accumulate before performing a backward/update pass.')
parser.add_argument('--seed', type=int, default=101, help='random seed for initialization')
parser.add_argument('--control_token_DART', type=bool, default=False, help='if using control tokens for DART source')
parser.add_argument('--data_dir', default=None, type=str, required=True, help='The input data dir.')
|
def generic_train(model, args: argparse.Namespace, early_stopping_callback=False, logger=True, extra_callbacks=[], checkpoint_callback=None, logging_callback=None, **extra_train_kwargs):
pl.seed_everything(args.seed)
odir = Path(model.hparams.output_dir)
odir.mkdir(exist_ok=True)
checkpoint_callback = pl.callbacks.ModelCheckpoint(dirpath=args.output_dir, monitor=('val_' + args.val_metric), mode='max', save_top_k=args.save_top_k, save_last=True)
if (early_stopping_callback is not False):
extra_callbacks.append(early_stopping_callback)
rank_zero_info('the max number of epochs is {}'.format(args.max_epochs))
rank_zero_info('early stopping: {}'.format(early_stopping_callback))
rank_zero_info('checkpoint_callback: {}'.format(checkpoint_callback))
rank_zero_info('logging: {}'.format(logging_callback))
trainer = pl.Trainer.from_argparse_args(args, max_epochs=args.max_epochs, weights_summary=None, callbacks=([logging_callback] + extra_callbacks), logger=logger, checkpoint_callback=checkpoint_callback)
print('args.do_train:', (not args.skip_train))
if (not args.skip_train):
trainer.fit(model)
return trainer
|
class PartiallyFixedEmbedding(torch.nn.Module):
def __init__(self, fixed_weights, num_to_learn, padding_idx=1):
super().__init__()
self.num_fixed = fixed_weights.size(0)
self.num_to_learn = num_to_learn
weight = torch.empty((self.num_fixed + num_to_learn), fixed_weights.size(1))
weight[:self.num_fixed] = fixed_weights
self.trainable_weight = torch.nn.Parameter(torch.empty(num_to_learn, fixed_weights.size(1)))
torch.nn.init.kaiming_uniform_(self.trainable_weight)
weight[self.num_fixed:] = self.trainable_weight
self.register_buffer('weight', weight)
self.padding_idx = padding_idx
def forward(self, inp):
self.weight.detach_()
self.weight[self.num_fixed:] = self.trainable_weight
return torch.nn.functional.embedding(inp, self.weight, self.padding_idx, None, 2.0, False, False)
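# Minimal usage sketch: rows [0, num_fixed) stay frozen in the 'weight' buffer,
# while the last num_to_learn rows live in the trainable Parameter and are
# re-copied into the buffer on every forward pass.
# fixed = pretrained_embedding.weight.data           # e.g. shape (32100, d)
# emb = PartiallyFixedEmbedding(fixed, num_to_learn=3)
# out = emb(torch.tensor([[0, 32100, 32102]]))       # (1, 3, d); ids >= 32100 hit learnable rows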
|
def make_new_embeddings_learnable(model, tokenizer_len, num_to_learn):
print('fixed_embeds', (tokenizer_len - num_to_learn))
fixed_weights = model.shared.weight[:32100]
new_embed_layer = PartiallyFixedEmbedding(fixed_weights, num_to_learn)
model.decoder.embed_tokens = new_embed_layer
model.encoder.embed_tokens = new_embed_layer
model.shared = new_embed_layer
|
def run_experiment(yaml_file):
with open(yaml_file, 'r') as stream:
parsed_yaml = yaml.safe_load(stream)
args = ''
for (arg, value) in parsed_yaml.items():
args += f'--{arg} {value} '
os.system(f'python finetune.py {args}--adafactor')
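# A hypothetical YAML for the call above (keys must match the argparse flags
# defined elsewhere in this file):
#   output_dir: runs/webnlg
#   data_dir: data/webnlg
#   model_name_or_path: t5-large
#   learning_rate: 5e-5
# run_experiment('experiment.yaml') then shells out to
# 'python finetune.py --output_dir runs/webnlg ... --adafactor'.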
|
def label_smoothed_nll_loss(lprobs, target, epsilon, ignore_index=(- 100)):
'From fairseq'
if (target.dim() == (lprobs.dim() - 1)):
target = target.unsqueeze((- 1))
nll_loss = (- lprobs.gather(dim=(- 1), index=target))
smooth_loss = (- lprobs.sum(dim=(- 1), keepdim=True))
if (ignore_index is not None):
pad_mask = target.eq(ignore_index)
nll_loss.masked_fill_(pad_mask, 0.0)
smooth_loss.masked_fill_(pad_mask, 0.0)
else:
nll_loss = nll_loss.squeeze((- 1))
smooth_loss = smooth_loss.squeeze((- 1))
nll_loss = nll_loss.sum()
smooth_loss = smooth_loss.sum()
eps_i = (epsilon / lprobs.size((- 1)))
loss = (((1.0 - epsilon) * nll_loss) + (eps_i * smooth_loss))
return (loss, nll_loss)
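# Worked sketch: for lprobs of shape (batch, seq, vocab) the returned loss is
# (1 - epsilon) * summed NLL of the gold tokens plus (epsilon / vocab) * the
# summed smoothing term over the whole vocabulary, e.g.:
# lprobs = torch.nn.functional.log_softmax(logits, dim=-1)   # logits: (B, T, V)
# loss, nll = label_smoothed_nll_loss(lprobs, targets, epsilon=0.1, ignore_index=pad_id)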
|
def lmap(f: Callable, x: Iterable) -> List:
'list(map(f, x))'
return list(map(f, x))
|
def calculate_bleu(output_lns, refs_lns) -> dict:
"Uses sacrebleu's corpus_bleu implementation."
return {'sacrebleu': round(corpus_bleu(output_lns, [refs_lns]).score, 4)}
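# Quick check (sketch): identical hypotheses and references should score 100, e.g.
# calculate_bleu(['the cat sat on the mat'], ['the cat sat on the mat'])
# -> {'sacrebleu': 100.0}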
|
class AbstractSeq2SeqDataset(Dataset):
def __init__(self, tokenizer, data_dir, max_source_length, max_target_length, type_path='train', n_obs=None, prefix='', **dataset_kwargs):
super().__init__()
self.src_file = Path(data_dir).joinpath((type_path + '.source'))
self.tgt_file = Path(data_dir).joinpath((type_path + '.target'))
self.len_file = Path(data_dir).joinpath((type_path + '.len'))
self.cat_file = list(np.load(Path(data_dir).joinpath((type_path + '.source_cat.npy'))))
self.source_file = list(np.load(Path(data_dir).joinpath((type_path + '.source.npy'))))
if os.path.exists(self.len_file):
self.src_lens = pickle_load(self.len_file)
self.used_char_len = False
else:
self.src_lens = self.get_char_lens(self.src_file)
self.used_char_len = True
self.max_source_length = max_source_length
self.max_target_length = max_target_length
assert (min(self.src_lens) > 0), f'found empty line in {self.src_file}'
self.tokenizer = tokenizer
self.prefix = (prefix if (prefix is not None) else '')
if (n_obs is not None):
self.src_lens = self.src_lens[:n_obs]
self.pad_token_id = self.tokenizer.pad_token_id
self.dataset_kwargs = dataset_kwargs
dataset_kwargs.update(({'add_prefix_space': True} if isinstance(self.tokenizer, BartTokenizer) else {}))
def __len__(self):
return len(self.src_lens)
@staticmethod
def get_char_lens(data_file):
return [len(x) for x in Path(data_file).open().readlines()]
@cached_property
def tgt_lens(self):
'Length in characters of target documents'
return self.get_char_lens(self.tgt_file)
def make_sortish_sampler(self, batch_size, distributed=False, shuffle=True, **kwargs):
if distributed:
return DistributedSortishSampler(self, batch_size, shuffle=shuffle, **kwargs)
else:
return SortishSampler(self.src_lens, batch_size, shuffle=shuffle)
def make_dynamic_sampler(self, max_tokens_per_batch=1024, **kwargs):
assert FAIRSEQ_AVAILABLE, 'Dynamic batch size requires `pip install fairseq`'
assert (not self.used_char_len), 'You must call python make_len_file.py before calling make_dynamic_sampler'
sorted_indices = list(self.make_sortish_sampler(1024, shuffle=False))
def num_tokens_in_example(i):
return min(self.src_lens[i], self.max_target_length)
batch_sampler: List[List[int]] = batch_by_size(sorted_indices, num_tokens_fn=num_tokens_in_example, max_tokens=max_tokens_per_batch, required_batch_size_multiple=64)
shuffled_batches = [batch_sampler[i] for i in np.random.permutation(range(len(batch_sampler)))]
approximate_toks_per_batch = [(max((self.src_lens[i] for i in batch)) * len(batch)) for batch in shuffled_batches]
largest_batch_idx = np.argmax(approximate_toks_per_batch)
(shuffled_batches[0], shuffled_batches[largest_batch_idx]) = (shuffled_batches[largest_batch_idx], shuffled_batches[0])
return shuffled_batches
def __getitem__(self, item):
raise NotImplementedError('You must implement this')
def collate_fn(self, batch):
raise NotImplementedError('You must implement this')
|
class Seq2SeqDataset(AbstractSeq2SeqDataset):
'A dataset that calls prepare_seq2seq_batch.'
def __getitem__(self, index) -> Dict[(str, str)]:
index = (index + 1)
source_line = (self.prefix + linecache.getline(str(self.src_file), index).rstrip('\n'))
tgt_line = linecache.getline(str(self.tgt_file), index).rstrip('\n')
assert source_line, f'empty source line for index {index}'
assert tgt_line, f'empty tgt line for index {index}'
return {'tgt_texts': tgt_line, 'src_texts': source_line, 'id': (index - 1), 'category': self.cat_file[(index - 1)], 'source': self.source_file[(index - 1)]}
def collate_fn(self, batch):
'Call prepare_seq2seq_batch.'
batch_encoding: Dict[(str, torch.Tensor)] = self.tokenizer.prepare_seq2seq_batch([x['src_texts'] for x in batch], tgt_texts=[x['tgt_texts'] for x in batch], max_length=self.max_source_length, max_target_length=self.max_target_length, return_tensors='pt', **self.dataset_kwargs).data
batch_encoding['ids'] = torch.tensor([x['id'] for x in batch])
batch_encoding['cats'] = torch.tensor([x['category'] for x in batch])
batch_encoding['sources'] = torch.tensor([x['source'] for x in batch])
return batch_encoding
|
class SortishSampler(Sampler):
'Go through the text data by order of src length with a bit of randomness. From fastai repo.'
def __init__(self, data, batch_size, shuffle=True):
(self.data, self.bs, self.shuffle) = (data, batch_size, shuffle)
def __len__(self) -> int:
return len(self.data)
def __iter__(self):
return iter(sortish_sampler_indices(self.data, self.bs, shuffle=self.shuffle))
|
def sortish_sampler_indices(data: List, bs: int, shuffle=True) -> np.ndarray:
'Go through the text data by order of src length with a bit of randomness. From fastai repo.'
if (not shuffle):
return np.argsort((np.array(data) * (- 1)))
def key_fn(i):
return data[i]
idxs = np.random.permutation(len(data))
sz = (bs * 50)
ck_idx = [idxs[i:(i + sz)] for i in range(0, len(idxs), sz)]
sort_idx = np.concatenate([sorted(s, key=key_fn, reverse=True) for s in ck_idx])
sz = bs
ck_idx = [sort_idx[i:(i + sz)] for i in range(0, len(sort_idx), sz)]
max_ck = np.argmax([key_fn(ck[0]) for ck in ck_idx])
(ck_idx[0], ck_idx[max_ck]) = (ck_idx[max_ck], ck_idx[0])
    sort_idx = (np.concatenate(np.random.permutation(ck_idx[1:])) if (len(ck_idx) > 1) else np.array([], dtype=int))
sort_idx = np.concatenate((ck_idx[0], sort_idx))
return sort_idx
|
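# A toy check of sortish_sampler_indices: with 20 synthetic lengths and bs=4,
# every batch-size chunk in the returned order is sorted longest-first, which
# is what keeps padding waste low (standalone sketch; needs only numpy).
import numpy as np

lengths = list(np.random.randint(1, 100, size=20))
idx = sortish_sampler_indices(lengths, bs=4, shuffle=True)
for b in range(0, len(idx), 4):
    chunk = [lengths[i] for i in idx[b:(b + 4)]]
    assert chunk == sorted(chunk, reverse=True)
|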
class DistributedSortishSampler(Sampler):
'Copied from torch DistributedSampler'
def __init__(self, dataset, batch_size, num_replicas=None, rank=None, add_extra_examples=True, shuffle=True):
if (num_replicas is None):
if (not dist.is_available()):
raise RuntimeError('Requires distributed package to be available')
num_replicas = dist.get_world_size()
if (rank is None):
if (not dist.is_available()):
raise RuntimeError('Requires distributed package to be available')
rank = dist.get_rank()
self.dataset = dataset
self.num_replicas = num_replicas
self.rank = rank
self.epoch = 0
if add_extra_examples:
self.num_samples = int(math.ceil(((len(self.dataset) * 1.0) / self.num_replicas)))
self.total_size = (self.num_samples * self.num_replicas)
else:
self.total_size = len(dataset)
self.num_samples = len(self.available_indices)
self.batch_size = batch_size
self.add_extra_examples = add_extra_examples
self.shuffle = shuffle
def __iter__(self) -> Iterable:
g = torch.Generator()
g.manual_seed(self.epoch)
sortish_data = [self.dataset.src_lens[i] for i in self.available_indices]
sortish_indices = sortish_sampler_indices(sortish_data, self.batch_size, shuffle=self.shuffle)
indices = [self.available_indices[i] for i in sortish_indices]
assert (len(indices) == self.num_samples)
return iter(indices)
@cached_property
    def available_indices(self) -> List[int]:
indices = list(range(len(self.dataset)))
indices += indices[:(self.total_size - len(indices))]
assert (len(indices) == self.total_size)
available_indices = indices[self.rank:self.total_size:self.num_replicas]
return available_indices
def __len__(self):
return self.num_samples
def set_epoch(self, epoch):
self.epoch = epoch
|
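# A small standalone sketch of the shard-padding logic in available_indices:
# indices wrap around so every replica gets an equal share (toy sizes below;
# the real sampler derives num_replicas/rank from torch.distributed).
import math

def shard_indices(n, num_replicas, rank):
    num_samples = math.ceil(n / num_replicas)
    total_size = num_samples * num_replicas
    indices = list(range(n))
    indices += indices[:(total_size - len(indices))]
    return indices[rank:total_size:num_replicas]

assert sorted(sum((shard_indices(10, 3, r) for r in range(3)), [])) == sorted(list(range(10)) + [0, 1])
|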
def use_task_specific_params(model, task):
    'Update config with task-specific params.'
task_specific_params = model.config.task_specific_params
if (task_specific_params is not None):
pars = task_specific_params.get(task, {})
logger.info(f'using task specific params for {task}: {pars}')
model.config.update(pars)
|
def pickle_load(path):
'pickle.load(path)'
with open(path, 'rb') as f:
return pickle.load(f)
|
def pickle_save(obj, path):
'pickle.dump(obj, path)'
with open(path, 'wb') as f:
return pickle.dump(obj, f)
|
def flatten_list(summary_ids: List[List]):
return [x for x in itertools.chain.from_iterable(summary_ids)]
|
def freeze_params(model: nn.Module):
'Set requires_grad=False for each of model.parameters()'
for par in model.parameters():
par.requires_grad = False
|
def freeze_embeds(model):
'Freeze token embeddings and positional embeddings for bart, just token embeddings for t5.'
model_type = model.config.model_type
if (model_type == 't5'):
freeze_params(model.shared)
for d in [model.encoder, model.decoder]:
freeze_params(d.embed_tokens)
elif (model_type == 'fsmt'):
for d in [model.model.encoder, model.model.decoder]:
freeze_params(d.embed_positions)
freeze_params(d.embed_tokens)
else:
freeze_params(model.model.shared)
for d in [model.model.encoder, model.model.decoder]:
freeze_params(d.embed_positions)
freeze_params(d.embed_tokens)
|
def assert_all_frozen(model):
model_grads: List[bool] = list(grad_status(model))
n_require_grad = sum(lmap(int, model_grads))
npars = len(model_grads)
assert (not any(model_grads)), f'{(n_require_grad / npars):.1%} of {npars} weights require grad'
|
def grad_status(model: nn.Module) -> Iterable:
return (par.requires_grad for par in model.parameters())
|
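# A quick self-check of freeze_params / grad_status / assert_all_frozen on a
# throwaway module (assumes the lmap helper used by assert_all_frozen is in scope).
import torch.nn as nn

tiny = nn.Sequential(nn.Embedding(10, 4), nn.Linear(4, 2))
freeze_params(tiny[0])
assert not any(p.requires_grad for p in tiny[0].parameters())
assert any(grad_status(tiny))  # the Linear layer still trains
freeze_params(tiny)
assert_all_frozen(tiny)
|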
def convert_text(text):
text = text.lower()
text = ' '.join(re.split('(\\W)', text))
text = ' '.join(text.split())
return text
|
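# A quick sanity check of convert_text: lowercasing plus splitting on non-word
# characters yields metric-friendly tokens.
assert convert_text('Hello, world!') == 'hello , world !'
|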
def eval_meteor_test_webnlg(folder_data, pred_file, dataset):
    dir_path = os.path.dirname(os.path.realpath(__file__))
    folder_data_before = (dir_path + '/utils')
    out_file = pred_file.replace('txt', 'meteor')
    cmd_string = f'java -jar {folder_data_before}/meteor-1.5.jar {pred_file} {folder_data}/{dataset}.target_eval_meteor -l en -norm -r 3 > {out_file}'
    print(cmd_string)
    os.system(cmd_string)
    with open(out_file, 'r') as f:
        meteor_info = f.readlines()[-1].strip()
    return meteor_info
|
def eval_chrf_test_webnlg(folder_data, pred_file, dataset):
    dir_path = os.path.dirname(os.path.realpath(__file__))
    folder_data_before = (dir_path + '/utils')
    out_file = pred_file.replace('txt', 'chrf')
    cmd_string = f'python {folder_data_before}/chrf++.py -H {pred_file} -R {folder_data}/{dataset}.target_eval_crf > {out_file}'
    os.system(cmd_string)
    with open(out_file, 'r') as f:
        lines = f.readlines()
    return (lines[1].strip() + ' ' + lines[2].strip())
|
def eval_bleu(folder_data, pred_file, dataset):
    dir_path = os.path.dirname(os.path.realpath(__file__))
    out_file = pred_file.replace('txt', 'bleu')
    cmd_string = f'perl {dir_path}/multi-bleu.perl -lc {folder_data}/{dataset}.target_eval {folder_data}/{dataset}.target2_eval {folder_data}/{dataset}.target3_eval < {pred_file} > {out_file}'
    print(cmd_string)
    os.system(cmd_string)
    try:
        with open(out_file, 'r') as f:
            bleu_info = f.readlines()[0].strip()
    except (IOError, IndexError):
        bleu_info = -1
    return bleu_info
|
def eval_bleu_sents_tok(pred_file, folder_data, dataset):
    dir_path = os.path.dirname(os.path.realpath(__file__))
    folder_data_before = (dir_path + '/utils')
    tok_file = (pred_file + '_tok')
    os.system(f'perl {folder_data_before}/tokenizer.perl -threads 4 -no-escape < {pred_file} > {tok_file}')
    out_file = pred_file.replace('txt', 'bleu_data')
    os.system(f'perl {folder_data_before}/multi-bleu.perl -lc {folder_data}/{dataset}.target.tok < {tok_file} > {out_file}')
    try:
        with open(out_file, 'r') as f:
            bleu_info_data = f.readlines()[0].strip()
    except (IOError, IndexError):
        bleu_info_data = 'no data'
    return bleu_info_data
|
def eval_meteor(ref_file, pred_file):
    dir_path = os.path.dirname(os.path.realpath(__file__))
    folder_data_before = (dir_path + '/utils')
    out_file = pred_file.replace('txt', 'meteor')
    os.system(f'java -jar {folder_data_before}/meteor-1.5.jar {pred_file} {ref_file} > {out_file}')
    with open(out_file, 'r') as f:
        return f.readlines()[-1].strip()
|
def eval_chrf(ref_file, pred_file):
    dir_path = os.path.dirname(os.path.realpath(__file__))
    folder_data_before = (dir_path + '/utils')
    out_file = pred_file.replace('txt', 'chrf')
    os.system(f'python {folder_data_before}/chrf++.py -H {pred_file} -R {ref_file} > {out_file}')
    try:
        with open(out_file, 'r') as f:
            lines = f.readlines()
        chrf_data = (lines[1].strip() + ' ' + lines[2].strip())
    except (IOError, IndexError):
        chrf_data = 'no data'
    return chrf_data
|
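# The metric helpers above shell out via os.system. A hedged alternative sketch
# using subprocess.run with an argument list (the same external meteor-1.5.jar
# under utils/ is assumed; this is not a drop-in replacement used elsewhere here).
import os
import subprocess

def eval_meteor_subprocess(ref_file, pred_file):
    dir_path = os.path.dirname(os.path.realpath(__file__))
    out_path = pred_file.replace('txt', 'meteor')
    with open(out_path, 'w') as out:
        subprocess.run(['java', '-jar', os.path.join(dir_path, 'utils', 'meteor-1.5.jar'), pred_file, ref_file], stdout=out, check=True)
    with open(out_path) as f:
        return f.readlines()[-1].strip()
|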
def save_json(content, path, indent=4, **json_dump_kwargs):
with open(path, 'w') as f:
json.dump(content, f, indent=indent, **json_dump_kwargs)
|
def freeze_prefix(model):
params = [p for (n, p) in model.named_parameters() if (not any(((nd in n) for nd in ['CEFR_matrices'])))]
for par in params:
par.requires_grad = False
|
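# A tiny check of freeze_prefix: only parameters whose names mention
# 'CEFR_matrices' stay trainable (illustrative module; in the real model the
# CEFR matrices live inside the prefix-tuning network).
import torch.nn as nn

class Toy(nn.Module):
    def __init__(self):
        super().__init__()
        self.CEFR_matrices = nn.Linear(2, 2)
        self.other = nn.Linear(2, 2)

toy = Toy()
freeze_prefix(toy)
assert all(p.requires_grad for p in toy.CEFR_matrices.parameters())
assert not any(p.requires_grad for p in toy.other.parameters())
|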
class AbstractSeq2SeqDatasetSingle(Dataset):
def __init__(self, tokenizer, data_dir, max_source_length, max_target_length, type_path='train', n_obs=None, prefix='', **dataset_kwargs):
super().__init__()
self.src_file = Path(data_dir).joinpath((type_path + '.source'))
self.tgt_file = Path(data_dir).joinpath((type_path + '.target'))
self.len_file = Path(data_dir).joinpath((type_path + '.len'))
self.source_file = list(np.load(Path(data_dir).joinpath((type_path + '.source.npy'))))
if os.path.exists(self.len_file):
self.src_lens = pickle_load(self.len_file)
self.used_char_len = False
else:
self.src_lens = self.get_char_lens(self.src_file)
self.used_char_len = True
self.max_source_length = max_source_length
self.max_target_length = max_target_length
assert (min(self.src_lens) > 0), f'found empty line in {self.src_file}'
self.tokenizer = tokenizer
self.prefix = (prefix if (prefix is not None) else '')
if (n_obs is not None):
self.src_lens = self.src_lens[:n_obs]
self.pad_token_id = self.tokenizer.pad_token_id
self.dataset_kwargs = dataset_kwargs
dataset_kwargs.update(({'add_prefix_space': True} if isinstance(self.tokenizer, BartTokenizer) else {}))
def __len__(self):
return len(self.src_lens)
@staticmethod
def get_char_lens(data_file):
return [len(x) for x in Path(data_file).open().readlines()]
@cached_property
def tgt_lens(self):
'Length in characters of target documents'
return self.get_char_lens(self.tgt_file)
def make_sortish_sampler(self, batch_size, distributed=False, shuffle=True, **kwargs):
if distributed:
return DistributedSortishSampler(self, batch_size, shuffle=shuffle, **kwargs)
else:
return SortishSampler(self.src_lens, batch_size, shuffle=shuffle)
def make_dynamic_sampler(self, max_tokens_per_batch=1024, **kwargs):
assert FAIRSEQ_AVAILABLE, 'Dynamic batch size requires `pip install fairseq`'
assert (not self.used_char_len), 'You must call python make_len_file.py before calling make_dynamic_sampler'
sorted_indices = list(self.make_sortish_sampler(1024, shuffle=False))
def num_tokens_in_example(i):
return min(self.src_lens[i], self.max_target_length)
batch_sampler: List[List[int]] = batch_by_size(sorted_indices, num_tokens_fn=num_tokens_in_example, max_tokens=max_tokens_per_batch, required_batch_size_multiple=64)
shuffled_batches = [batch_sampler[i] for i in np.random.permutation(range(len(batch_sampler)))]
approximate_toks_per_batch = [(max((self.src_lens[i] for i in batch)) * len(batch)) for batch in shuffled_batches]
largest_batch_idx = np.argmax(approximate_toks_per_batch)
(shuffled_batches[0], shuffled_batches[largest_batch_idx]) = (shuffled_batches[largest_batch_idx], shuffled_batches[0])
return shuffled_batches
def __getitem__(self, item):
raise NotImplementedError('You must implement this')
def collate_fn(self, batch):
raise NotImplementedError('You must implement this')
|
class Seq2SeqDatasetSingle(AbstractSeq2SeqDatasetSingle):
'A dataset that calls prepare_seq2seq_batch.'
def __getitem__(self, index) -> Dict[(str, str)]:
index = (index + 1)
source_line = (self.prefix + linecache.getline(str(self.src_file), index).rstrip('\n'))
tgt_line = linecache.getline(str(self.tgt_file), index).rstrip('\n')
assert source_line, f'empty source line for index {index}'
assert tgt_line, f'empty tgt line for index {index}'
return {'tgt_texts': tgt_line, 'src_texts': source_line, 'id': (index - 1), 'source': self.source_file[(index - 1)]}
def collate_fn(self, batch):
'Call prepare_seq2seq_batch.'
batch_encoding: Dict[(str, torch.Tensor)] = self.tokenizer.prepare_seq2seq_batch([x['src_texts'] for x in batch], tgt_texts=[x['tgt_texts'] for x in batch], max_length=self.max_source_length, max_target_length=self.max_target_length, return_tensors='pt', **self.dataset_kwargs).data
batch_encoding['ids'] = torch.tensor([x['id'] for x in batch])
batch_encoding['sources'] = torch.tensor([x['source'] for x in batch])
return batch_encoding
|
def count_trainable_parameters(model):
model_parameters = filter((lambda p: p.requires_grad), model.parameters())
params = sum([np.prod(p.size()) for p in model_parameters])
return params
|
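# Sanity check for count_trainable_parameters: a trainable Linear(4, 2) has
# 4*2 + 2 = 10 parameters, and a frozen one contributes nothing.
import torch.nn as nn

m = nn.Linear(4, 2)
assert count_trainable_parameters(m) == 10
for p in m.parameters():
    p.requires_grad = False
assert count_trainable_parameters(m) == 0
|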
class Seq2SeqLoggingCallback(pl.Callback):
def on_batch_end(self, trainer, pl_module):
lrs = {f'lr_group_{i}': param['lr'] for (i, param) in enumerate(pl_module.trainer.optimizers[0].param_groups)}
pl_module.logger.log_metrics(lrs)
@rank_zero_only
def _write_logs(self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True) -> None:
logger.info(f'***** {type_path} results at step {trainer.global_step:05d} *****')
metrics = trainer.callback_metrics
trainer.logger.log_metrics({k: v for (k, v) in metrics.items() if (k not in ['log', 'progress_bar', 'preds'])})
od = Path(pl_module.hparams.output_dir)
if (type_path == 'test'):
results_file = (od / 'test_results.txt')
generations_file = (od / 'test_generations.txt')
else:
results_file = (od / f'{type_path}_results/{trainer.global_step:05d}.txt')
generations_file = (od / f'{type_path}_generations/{trainer.global_step:05d}.txt')
results_file.parent.mkdir(exist_ok=True)
generations_file.parent.mkdir(exist_ok=True)
with open(results_file, 'a+') as writer:
for key in sorted(metrics):
if (key in ['log', 'progress_bar', 'preds']):
continue
val = metrics[key]
if isinstance(val, torch.Tensor):
val = val.item()
                msg = f'{key}: {val:.6f}\n'
writer.write(msg)
if (not save_generations):
return
if ('preds' in metrics):
content = '\n'.join(metrics['preds'])
generations_file.open('w+').write(content)
@rank_zero_only
def on_train_start(self, trainer, pl_module):
try:
npars = pl_module.model.model.num_parameters()
except AttributeError:
npars = pl_module.model.num_parameters()
n_trainable_pars = count_trainable_parameters(pl_module)
trainer.logger.log_metrics({'n_params': npars, 'mp': (npars / 1000000.0), 'grad_mp': (n_trainable_pars / 1000000.0)})
@rank_zero_only
def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
save_json(pl_module.metrics, pl_module.metrics_save_path)
return self._write_logs(trainer, pl_module, 'test')
@rank_zero_only
def on_validation_end(self, trainer: pl.Trainer, pl_module):
save_json(pl_module.metrics, pl_module.metrics_save_path)
|
def bespoke_scheduler(optimizer, num_warmup_steps, num_training_steps, last_epoch=(- 1)):
'\n Create a schedule with a learning rate that decreases linearly from the initial lr set in the optimizer to 0, after\n a warmup period during which it increases linearly from 0 to the initial lr set in the optimizer.\n\n Args:\n optimizer (:class:`~torch.optim.Optimizer`):\n The optimizer for which to schedule the learning rate.\n num_warmup_steps (:obj:`int`):\n The number of steps for the warmup phase.\n num_training_steps (:obj:`int`):\n The total number of training steps.\n last_epoch (:obj:`int`, `optional`, defaults to -1):\n The index of the last epoch when resuming training.\n\n Return:\n :obj:`torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.\n '
def lr_lambda(current_step: int):
if (current_step < num_warmup_steps):
return (float(current_step) / float(max(1, num_warmup_steps)))
return max(0.0, (float((num_training_steps - current_step)) / float(max(1, (num_training_steps - num_warmup_steps)))))
def lr_lambda2(current_step: int):
if (current_step < num_warmup_steps):
return (float(current_step) / float(max(1, num_warmup_steps)))
return max(0.0, (float(((num_training_steps * 3) - current_step)) / float(max(1, ((num_training_steps * 3) - num_warmup_steps)))))
return LambdaLR(optimizer, [lr_lambda, lr_lambda2], last_epoch)
|
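# A short look at bespoke_scheduler on a toy optimizer with two param groups:
# group 0 decays to zero at num_training_steps, while group 1 follows a
# 3x-stretched decay and keeps roughly 2/3 of its lr at that point
# (assumes LambdaLR is in scope, as in the function above).
import torch

p0, p1 = torch.nn.Parameter(torch.zeros(1)), torch.nn.Parameter(torch.zeros(1))
opt = torch.optim.SGD([{'params': [p0], 'lr': 1.0}, {'params': [p1], 'lr': 1.0}], lr=1.0)
sched = bespoke_scheduler(opt, num_warmup_steps=10, num_training_steps=100)
for _ in range(100):
    opt.step()
    sched.step()
print([g['lr'] for g in opt.param_groups])  # approximately [0.0, 0.69]
|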
class PrefixSummarizationModule(PrefixTransformer):
mode = 'summarization'
loss_names = ['loss']
metric_names = ROUGE_KEYS
default_val_metric = 'rouge2'
def __init__(self, hparams, **kwargs):
if (hparams.sortish_sampler and (hparams.gpus > 1)):
hparams.replace_sampler_ddp = False
elif (hparams.max_tokens_per_batch is not None):
if (hparams.gpus > 1):
raise NotImplementedError('Dynamic Batch size does not work for multi-gpu training')
if hparams.sortish_sampler:
raise ValueError('--sortish_sampler and --max_tokens_per_batch may not be used simultaneously')
super().__init__(hparams, num_labels=None, mode=self.mode, **kwargs)
use_task_specific_params(self.model, 'summarization')
self.metrics_save_path = (Path(self.output_dir) / 'metrics.json')
self.hparams_save_path = (Path(self.output_dir) / 'hparams.pkl')
pickle_save(self.hparams, self.hparams_save_path)
self.step_count = 0
self.metrics = defaultdict(list)
self.model_type = self.config.model_type
self.vocab_size = (self.config.tgt_vocab_size if (self.model_type == 'fsmt') else self.config.vocab_size)
self.val_dir = self.hparams.output_dir
self.dataset_kwargs: dict = dict(data_dir=self.hparams.data_dir, max_source_length=self.hparams.max_source_length, prefix=(self.model.config.prefix or ''))
n_observations_per_split = {'train': self.hparams.n_train, 'val': self.hparams.n_val, 'test': self.hparams.n_test}
self.n_obs = {k: (v if (v >= 0) else None) for (k, v) in n_observations_per_split.items()}
self.target_lens = {'train': self.hparams.max_target_length, 'val': self.hparams.val_max_target_length, 'test': self.hparams.test_max_target_length}
assert (self.target_lens['train'] <= self.target_lens['val']), f'target_lens: {self.target_lens}'
assert (self.target_lens['train'] <= self.target_lens['test']), f'target_lens: {self.target_lens}'
if (not self.hparams.finetune):
freeze_params(self.seq2seq_model)
assert_all_frozen(self.seq2seq_model)
print('FREEZING ENTIRE seq2seq model.')
else:
print('FINE-TUNING')
self.freeze_embeds()
self.num_workers = hparams.num_workers
self.decoder_start_token_id = None
if ((self.model.config.decoder_start_token_id is None) and isinstance(self.tokenizer, MBartTokenizer)):
self.decoder_start_token_id = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
self.model.config.decoder_start_token_id = self.decoder_start_token_id
self.dataset_class = (Seq2SeqDataset if hasattr(self.tokenizer, 'prepare_seq2seq_batch') else LegacySeq2SeqDataset)
self.eval_beams = (self.model.config.num_beams if (self.hparams.eval_beams is None) else self.hparams.eval_beams)
        assert (self.eval_beams >= 1), f'got self.eval_beams={self.eval_beams}. Need an integer >= 1'
if (self.hparams.eval_max_gen_length is not None):
self.eval_max_length = self.hparams.eval_max_gen_length
else:
self.eval_max_length = self.model.config.max_length
self.val_metric = (self.default_val_metric if (self.hparams.val_metric is None) else self.hparams.val_metric)
self.training_acc_across_batches_at_curr_epoch = []
self.eval_max_length = 60
self.eval_min_length = 10
self.eval_beams = 6
print('for decoding, eval_max_length={}, eval_min_length={}, eval_beams={}'.format(self.eval_max_length, self.eval_min_length, self.eval_beams))
def freeze_embeds(self):
'Freeze token embeddings and positional embeddings for bart, just token embeddings for t5.'
freeze_params(self.seq2seq_model.model.shared)
for d in [self.seq2seq_model.model.encoder, self.seq2seq_model.model.decoder]:
freeze_params(d.embed_positions)
freeze_params(d.embed_tokens)
def forward(self, input_ids, **kwargs):
return self.model(input_ids, frozen_model=self.seq2seq_model, **kwargs)
def ids_to_clean_text(self, generated_ids: List[int]):
gen_text = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True)
return lmap(str.strip, gen_text)
def _step(self, batch: dict) -> Tuple:
pad_token_id = self.tokenizer.pad_token_id
(src_ids, src_mask) = (batch['input_ids'], batch['attention_mask'])
tgt_ids = batch['labels']
if isinstance(self.model, T5ForConditionalGeneration):
decoder_input_ids = self.model._shift_right(tgt_ids)
else:
decoder_input_ids = shift_tokens_right(tgt_ids, pad_token_id)
outputs = self(src_ids, attention_mask=src_mask, decoder_input_ids=decoder_input_ids, use_cache=False, use_prefix=True, conditional_info={'cats': batch['cats'], 'sport': batch['sport']})
lm_logits = outputs[0]
if (self.hparams.label_smoothing == 0):
ce_loss_fct = torch.nn.CrossEntropyLoss(ignore_index=pad_token_id)
assert (lm_logits.shape[(- 1)] == self.vocab_size)
loss = ce_loss_fct(lm_logits.view((- 1), lm_logits.shape[(- 1)]), tgt_ids.view((- 1)))
else:
lprobs = torch.nn.functional.log_softmax(lm_logits, dim=(- 1))
(loss, nll_loss) = label_smoothed_nll_loss(lprobs, tgt_ids, self.hparams.label_smoothing, ignore_index=pad_token_id)
return (loss,)
@property
def pad(self) -> int:
return self.tokenizer.pad_token_id
def training_step(self, batch, batch_idx) -> Dict:
loss_tensors = self._step(batch)
logs = {name: loss for (name, loss) in zip(self.loss_names, loss_tensors)}
logs['tpb'] = (batch['input_ids'].ne(self.pad).sum() + batch['labels'].ne(self.pad).sum())
logs['bs'] = batch['input_ids'].shape[0]
logs['src_pad_tok'] = batch['input_ids'].eq(self.pad).sum()
logs['src_pad_frac'] = batch['input_ids'].eq(self.pad).float().mean()
self.training_acc_across_batches_at_curr_epoch.append(loss_tensors[0].item())
self.log_dict(logs)
loss = loss_tensors[0]
return {'loss': loss}
def on_epoch_end(self):
train_acc_mean = np.mean(self.training_acc_across_batches_at_curr_epoch)
self.log_dict({'train_loss': train_acc_mean})
print('train_loss = {}'.format(train_acc_mean))
self.training_acc_across_batches_per_epoch = []
def validation_step(self, batch, batch_idx) -> Dict:
if (self.current_epoch < 1):
return 1
if self.hparams.skip_val:
return 1
if self.hparams.hf_checkpoint:
save_path = Path(self.hparams.save_hf)
save_path = save_path.joinpath('checkpoint-curr_best')
self.model.config.save_step = self.step_count
self.model.save_pretrained(save_path)
self.tokenizer.save_pretrained(save_path)
print('SAVING TO checkpoint {}'.format(save_path))
raise ValueError('just_saving')
return self._generative_step(batch)
def validation_epoch_end(self, outputs, prefix='val') -> Dict:
if (self.current_epoch < 1):
logg = 0.1
self.log('val_rouge2', logg)
return 1
if self.hparams.skip_val:
logg = 0.1
self.log('val_rouge2', logg)
return 1
self.step_count += 1
losses = {k: torch.stack([x[k] for x in outputs]).mean() for k in self.loss_names}
loss = losses['loss']
generative_metrics = {k: np.array([x[k] for x in outputs]).mean() for k in (self.metric_names + ['gen_time', 'gen_len'])}
metric_val = (generative_metrics[self.val_metric] if (self.val_metric in generative_metrics) else losses[self.val_metric])
metric_tensor: torch.FloatTensor = torch.tensor(metric_val).type_as(loss)
print('ROUGE2', metric_tensor)
print('VAL_LOSS', loss)
generative_metrics.update({k: v.item() for (k, v) in losses.items()})
losses.update(generative_metrics)
all_metrics = {f'{prefix}_avg_{k}': x for (k, x) in losses.items()}
all_metrics['step_count'] = self.step_count
self.metrics[prefix].append(all_metrics)
self.log_dict({'log': all_metrics, f'{prefix}_loss': loss, f'{prefix}_{self.val_metric}': metric_tensor})
preds = flatten_list([x['preds'] for x in outputs])
return {'log': all_metrics, 'preds': preds, f'{prefix}_loss': loss, f'{prefix}_{self.val_metric}': metric_tensor}
def calc_generative_metrics(self, preds, target) -> Dict:
return calculate_rouge(preds, target)
def _generative_step(self, batch: dict) -> dict:
t0 = time.time()
bsz = batch['input_ids'].size(0)
prefix_prompt = self.model.get_prompt(bsz=bsz, sample_size=self.eval_beams, conditional_info={'cats': batch['cats'], 'sport': batch['sport']})
generated_ids = self.seq2seq_model.generate(batch['input_ids'], past_key_values=prefix_prompt, attention_mask=batch['attention_mask'], use_cache=True, length_penalty=self.hparams.length_penalty, use_prefix=True, decoder_start_token_id=self.decoder_start_token_id, num_beams=self.eval_beams, min_length=self.eval_min_length, max_length=self.eval_max_length, no_repeat_ngram_size=3)
gen_time = ((time.time() - t0) / batch['input_ids'].shape[0])
preds: List[str] = self.ids_to_clean_text(generated_ids)
target: List[str] = self.ids_to_clean_text(batch['labels'])
loss_tensors = self._step(batch)
base_metrics = {name: loss for (name, loss) in zip(self.loss_names, loss_tensors)}
rouge: Dict = self.calc_generative_metrics(preds, target)
summ_len = np.mean(lmap(len, generated_ids))
base_metrics.update(gen_time=gen_time, gen_len=summ_len, preds=preds, target=target, **rouge)
return base_metrics
def test_step(self, batch, batch_idx):
return self._generative_step(batch)
def test_epoch_end(self, outputs):
return self.validation_epoch_end(outputs, prefix='test')
def get_dataset(self, type_path) -> Seq2SeqDataset:
n_obs = self.n_obs[type_path]
max_target_length = self.target_lens[type_path]
dataset = self.dataset_class(self.tokenizer, type_path=type_path, n_obs=n_obs, max_target_length=max_target_length, **self.dataset_kwargs)
return dataset
def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool=False) -> DataLoader:
dataset = self.get_dataset(type_path)
if (self.hparams.sortish_sampler and (type_path != 'test')):
sampler = dataset.make_sortish_sampler(batch_size, distributed=(self.hparams.gpus > 1))
return DataLoader(dataset, batch_size=batch_size, collate_fn=dataset.collate_fn, shuffle=False, num_workers=self.num_workers, sampler=sampler)
elif ((self.hparams.max_tokens_per_batch is not None) and (type_path != 'test')):
batch_sampler = dataset.make_dynamic_sampler(self.hparams.max_tokens_per_batch, distributed=(self.hparams.gpus > 1))
return DataLoader(dataset, batch_sampler=batch_sampler, collate_fn=dataset.collate_fn, num_workers=self.num_workers)
else:
return DataLoader(dataset, batch_size=batch_size, collate_fn=dataset.collate_fn, shuffle=shuffle, num_workers=self.num_workers, sampler=None)
def train_dataloader(self) -> DataLoader:
dataloader = self.get_dataloader('train', batch_size=self.hparams.train_batch_size, shuffle=True)
return dataloader
def val_dataloader(self) -> DataLoader:
return self.get_dataloader('val', batch_size=self.hparams.eval_batch_size)
def test_dataloader(self) -> DataLoader:
return self.get_dataloader('test', batch_size=self.hparams.eval_batch_size)
@staticmethod
def add_model_specific_args(parser, root_dir):
PrefixTransformer.add_model_specific_args(parser, root_dir)
add_generic_args(parser, root_dir)
parser.add_argument('--max_source_length', default=512, type=int, help='The maximum total input sequence length after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded.')
        parser.add_argument('--max_target_length', default=60, type=int, help='The maximum total target sequence length after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded.')
        parser.add_argument('--val_dir', default='', type=str, help='The directory for validation.')
        parser.add_argument('--val_max_target_length', default=60, type=int, help='The maximum total target sequence length after tokenization for validation. Sequences longer than this will be truncated, sequences shorter will be padded.')
        parser.add_argument('--test_max_target_length', default=100, type=int, help='The maximum total target sequence length after tokenization for test. Sequences longer than this will be truncated, sequences shorter will be padded.')
parser.add_argument('--freeze_encoder', action='store_true')
parser.add_argument('--freeze_embeds', action='store_true')
parser.add_argument('--sortish_sampler', action='store_true', default=False)
parser.add_argument('--max_tokens_per_batch', type=int, default=None)
parser.add_argument('--logger_name', type=str, choices=['default', 'wandb', 'wandb_shared'], default='default')
parser.add_argument('--n_train', type=int, default=(- 1), required=False, help='# examples. -1 means use all.')
parser.add_argument('--n_val', type=int, default=(- 1), required=False, help='# examples. -1 means use all.')
parser.add_argument('--n_test', type=int, default=(- 1), required=False, help='# examples. -1 means use all.')
        parser.add_argument('--task_mode', type=str, default='summarization', required=False, help='Task mode, e.g. summarization.')
parser.add_argument('--label_smoothing', type=float, default=0.0, required=False)
parser.add_argument('--src_lang', type=str, default='', required=False)
parser.add_argument('--save_hf', type=str, default='', required=False)
parser.add_argument('--tgt_lang', type=str, default='', required=False)
parser.add_argument('--eval_beams', type=int, default=6, required=False)
parser.add_argument('--eval_min_length', type=int, default=10, required=False)
parser.add_argument('--skip_val', type=bool, default=False, required=False)
parser.add_argument('--val_metric', type=str, default=None, required=False)
parser.add_argument('--eval_max_gen_length', type=int, default=60, help='never generate more than n tokens')
        parser.add_argument('--length_penalty', type=float, default=1.0, help='Length penalty applied during beam search generation.')
parser.add_argument('--save_top_k', type=int, default=1, required=False, help='How many checkpoints to save')
parser.add_argument('--wb_project', type=str, default='')
parser.add_argument('--finetune', type=bool, default=False)
parser.add_argument('--git', type=bool, default=True)
parser.add_argument('--dev', type=bool, default=False)
parser.add_argument('--freeze_base', type=bool, default=False)
parser.add_argument('--wb_name', type=str, default='')
parser.add_argument('--id', type=str, default='')
        parser.add_argument('--early_stopping_patience', type=int, default=(- 1), required=False, help='-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So val_check_interval will affect it.')
return parser
|
def eval(args, model=None):
if (model is None):
if ('summarization' in args.task_mode):
if (args.tuning_mode == 'prefixtune'):
model = PrefixSummarizationModule(args)
print('the length penalty is {}'.format(args.length_penalty))
with torch.no_grad():
model.eval()
model = model.cuda()
data_loader = model.test_dataloader()
print('DATALOADER_LEN', len(data_loader))
out_lst = []
for (batch_idx, batch) in enumerate(data_loader):
batch = model.transfer_batch_to_device(batch, model.device)
out = model.test_step(batch, batch_idx)
out_lst.append(out)
if ((batch_idx % 50) == 0):
print(model.test_epoch_end(out_lst))
print(out['preds'])
result = model.test_epoch_end(out_lst)
for (k, v) in result.items():
if (k != 'preds'):
print('FINAL_RESULTS')
print(k, v)
out_path = os.path.join(args.output_dir, 'test_beam_{}'.format(args.length_penalty))
print('writing the test results to ', out_path)
with open(out_path, 'w') as f:
for preds in result['preds']:
print(preds, file=f)
|
def main(args, model=None):
Path(args.output_dir).mkdir(exist_ok=True)
if (model is None):
if ('summarization' in args.task_mode):
if (args.tuning_mode == 'prefixtune'):
model = PrefixSummarizationModule(args)
pickle_save(args, os.path.join(args.output_dir, 'args.pkl'))
pickle_save(model.hparams, (model.output_dir / 'hparams.pkl'))
dataset = Path(args.data_dir).name
print(dataset)
if ((args.logger_name == 'default') or args.fast_dev_run or str(args.output_dir).startswith('/tmp') or str(args.output_dir).startswith('/var')):
logger = True
elif (args.logger_name == 'wandb'):
from pytorch_lightning.loggers import WandbLogger
if (args.id is not None):
id_ = args.id
else:
id_ = wandb.util.generate_id()
print('ID', id_)
logger = WandbLogger(id=id_, name=args.wb_name, project=args.wb_project, entity='jordiclive')
if (args.early_stopping_patience >= 0):
es_callback = get_early_stopping_callback(model.val_metric, args.early_stopping_patience)
else:
es_callback = False
trainer: pl.Trainer = generic_train(model, args, logging_callback=Seq2SeqLoggingCallback(), early_stopping_callback=es_callback, logger=logger)
pickle_save(model.hparams, (model.output_dir / 'hparams.pkl'))
return model
|
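# A hedged sketch of how main() above is typically wired from the command line
# (assumes pytorch-lightning 1.x's Trainer.add_argparse_args; paths and flags
# are placeholders, run e.g. `python finetune.py --data_dir ... --output_dir ...`).
import argparse
import os

import pytorch_lightning as pl

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser = pl.Trainer.add_argparse_args(parser)
    parser = PrefixSummarizationModule.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()
    main(args)
|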
class PrefixTransformer(pl.LightningModule):
def __init__(self, hparams: argparse.Namespace, num_labels=None, mode='base', config=None, tokenizer=None, seq2seq_model=None, **config_kwargs):
'Initialize a model, tokenizer and config.'
super().__init__()
self.save_hyperparameters(hparams)
self.step_count = 0
self.output_dir = Path(self.hparams.output_dir)
cache_dir = (self.hparams.cache_dir if self.hparams.cache_dir else None)
print('the cache dir is {}'.format(cache_dir))
if (config is None):
self.config = AutoConfig.from_pretrained((self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path), **({'num_labels': num_labels} if (num_labels is not None) else {}), cache_dir=cache_dir, **config_kwargs)
else:
self.config: PretrainedConfig = config
extra_model_params = ('encoder_layerdrop', 'decoder_layerdrop', 'dropout', 'attention_dropout')
for p in extra_model_params:
if getattr(self.hparams, p, None):
assert hasattr(self.config, p), f"model config doesn't have a `{p}` attribute"
setattr(self.config, p, getattr(self.hparams, p))
if (tokenizer is None):
self.tokenizer = AutoTokenizer.from_pretrained((self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path), cache_dir=cache_dir)
else:
self.tokenizer: PreTrainedTokenizer = tokenizer
self.config.preseqlen = 204
self.config.use_prefix = True
self.seq2seq_model_type = AutoModel
if (seq2seq_model is None):
self.seq2seq_model = BartForConditionalGeneration.from_pretrained(self.hparams.model_name_or_path, from_tf=bool(('.ckpt' in self.hparams.model_name_or_path)), config=self.config, cache_dir=cache_dir)
else:
self.seq2seq_model = seq2seq_model
config_prefix = AutoConfig.from_pretrained(self.hparams.model_name_or_path, cache_dir=cache_dir)
self.model_type = config_prefix.model_type
if (self.hparams.optim_prefix == 'yes'):
optim_prefix_bool = True
elif (self.hparams.optim_prefix == 'no'):
optim_prefix_bool = False
else:
assert False, 'model_args.optim_prefix should be either yes or no'
print(self.model_type)
config_prefix._my_arg_tune_mode = self.hparams.tuning_mode
config_prefix._my_arg_task_mode = self.hparams.task_mode
config_prefix._my_arg_control = True
config_prefix.train_weights = False
config_prefix.optim_prefix = optim_prefix_bool
config_prefix.preseqlen = self.hparams.preseqlen
config_prefix.use_infix = (self.hparams.format_mode == 'infix')
config_prefix.format_mode = self.hparams.format_mode
config_prefix.prefix_dropout = self.hparams.prefix_dropout
config_prefix.vocab_size = len(self.tokenizer)
config_prefix.lowdata = ('lowdata' in self.hparams.output_dir)
if (config_prefix.lowdata and (self.hparams.use_lowdata_token == 'yes')):
config_prefix.lowdata_token = self.tokenizer([self.hparams.lowdata_token], add_prefix_space=True)['input_ids']
print(self.hparams.lowdata_token)
print(config_prefix.lowdata_token)
print(self.tokenizer.pad_token_id)
config_prefix.mid_dim = self.hparams.mid_dim
if (self.hparams.prefixModel_name_or_path is not None):
print('LOADING FROM {}'.format(hparams.prefixModel_name_or_path))
self.model = PrefixTuning.from_pretrained(self.hparams.prefixModel_name_or_path, from_tf=bool(('.ckpt' in self.hparams.prefixModel_name_or_path)), cache_dir=cache_dir, config=config_prefix)
else:
self.model = PrefixTuning(config_prefix)
def load_hf_checkpoint(self, *args, **kwargs):
assert False, 'why need to load model here?'
self.model = self.model_type.from_pretrained(*args, **kwargs)
def get_lr_scheduler(self):
get_schedule_func = arg_to_scheduler[self.hparams.lr_scheduler]
scheduler = get_schedule_func(self.opt, num_warmup_steps=self.hparams.warmup_steps, num_training_steps=self.total_steps)
scheduler = {'scheduler': scheduler, 'interval': 'step', 'frequency': 1}
return scheduler
def configure_optimizers(self):
'Prepare optimizer and schedule (linear warmup and decay)'
model = self.model
no_decay = ['bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [{'params': [p for (n, p) in model.named_parameters() if (not any(((nd in n) for nd in no_decay)))], 'weight_decay': self.hparams.weight_decay}, {'params': [p for (n, p) in model.named_parameters() if any(((nd in n) for nd in no_decay))], 'weight_decay': 0.0}]
if self.hparams.adafactor:
optimizer = Adafactor(optimizer_grouped_parameters, lr=self.hparams.learning_rate, scale_parameter=False, relative_step=False)
else:
optimizer = AdamW(optimizer_grouped_parameters, lr=self.hparams.learning_rate, eps=self.hparams.adam_epsilon)
self.opt = optimizer
scheduler = self.get_lr_scheduler()
return ([optimizer], [scheduler])
def test_step(self, batch, batch_nb):
return self.validation_step(batch, batch_nb)
def test_epoch_end(self, outputs):
return self.validation_end(outputs)
@property
def total_steps(self) -> int:
'The number of total training steps that will be run. Used for lr scheduler purposes.'
num_devices = max(1, self.hparams.gpus)
if (self.hparams.original_batch_size is not None):
effective_batch_size = ((self.hparams.original_batch_size * self.hparams.accumulate_grad_batches) * num_devices)
else:
effective_batch_size = ((self.hparams.train_batch_size * self.hparams.accumulate_grad_batches) * num_devices)
dataset_size = len(self.train_loader.dataset)
        return int((dataset_size / effective_batch_size) * self.hparams.max_epochs)
def setup(self, mode):
if (mode == 'fit'):
self.train_loader = self.get_dataloader('train', self.hparams.train_batch_size, shuffle=True)
def get_dataloader(self, type_path, batch_size, shuffle=False):
raise NotImplementedError('You must implement this for your task')
def train_dataloader(self):
return self.train_loader
def val_dataloader(self):
return self.get_dataloader('dev', self.hparams.eval_batch_size, shuffle=False)
def test_dataloader(self):
return self.get_dataloader('test', self.hparams.eval_batch_size, shuffle=False)
def _feature_file(self, mode):
return os.path.join(self.hparams.data_dir, 'cached_{}_{}_{}'.format(mode, list(filter(None, self.hparams.model_name_or_path.split('/'))).pop(), str(self.hparams.max_seq_length)))
@pl.utilities.rank_zero_only
def save_checkpoint(self, trainer) -> None:
        print('Saving the checkpoint.')
return
@pl.utilities.rank_zero_only
def on_save_checkpoint(self, checkpoint: Dict[(str, Any)], filepath=None) -> None:
save_path = self.output_dir.joinpath('checkpoint-curr_best')
self.model.config.save_step = self.step_count
self.model.save_pretrained(save_path)
self.tokenizer.save_pretrained(save_path)
print('SAVING TO checkpoint {}'.format(save_path))
@staticmethod
def add_model_specific_args(parser, root_dir):
parser.add_argument('--model_name_or_path', default='facebook/bart-large', type=str, required=False, help='Path to pretrained model or model identifier from huggingface.co/models')
parser.add_argument('--prefixModel_name_or_path', default=None, type=str, help='Path to pretrained prefix model or model identifier from huggingface.co/models')
parser.add_argument('--prefix_mode', default='activation', type=str, help='embedding or activation')
parser.add_argument('--preseqlen', default=200, type=int, help='the length of the prefix.')
parser.add_argument('--optim_prefix', default='yes', type=str, help='use the task specific optimization of the prefix.')
        parser.add_argument('--different_scheduler', default=False, type=bool, help='whether to use a different (bespoke) scheduler.')
parser.add_argument('--tuning_mode', default='prefixtune', type=str, help='Could be prefixtune or finetune')
parser.add_argument('--prefix_dropout', default=0.0, type=float, help='the dropout rate for our prefix model.')
parser.add_argument('--use_dropout', default='no', type=str, help='whether to dropout the main model during training. ')
parser.add_argument('--mid_dim', default=800, type=int, help='the dimension of the intermediate layer.')
parser.add_argument('--cefr_mid_dim', default=100, type=int, help='the dimension of the intermediate layer.')
        parser.add_argument('--cefr_length', default=2, type=int, help='the length of the CEFR prefix.')
parser.add_argument('--CEFR_single_reparam', type=bool, default=True)
parser.add_argument('--same_CEFR_intialization', type=bool, default=False)
parser.add_argument('--format_mode', default='cat', type=str, help='whether to look at the input again, including [infix, cat, peek, nopeek]')
parser.add_argument('--use_lowdata_token', default='yes', type=str, help='whether or not to use the lowdata token, ')
parser.add_argument('--lowdata_token', default='summarize', type=str, help='the low data token to use. ')
parser.add_argument('--config_name', default='', type=str, help='Pretrained config name or path if not the same as model_name')
parser.add_argument('--tokenizer_name', default=None, type=str, help='Pretrained tokenizer name or path if not the same as model_name')
parser.add_argument('--cache_dir', default='/content/gdrive/MyDrive/cache_dir', type=str, help='Where do you want to store the pre-trained models downloaded from s3')
parser.add_argument('--encoder_layerdrop', type=float, help='Encoder layer dropout probability (Optional). Goes into model.config')
parser.add_argument('--decoder_layerdrop', type=float, help='Decoder layer dropout probability (Optional). Goes into model.config')
parser.add_argument('--dropout', type=float, help='Dropout probability (Optional). Goes into model.config')
parser.add_argument('--attention_dropout', type=float, help='Attention dropout probability (Optional). Goes into model.config')
parser.add_argument('--learning_rate', default=5e-05, type=float, help='The initial learning rate for Adam.')
parser.add_argument('--lr_scheduler', default='linear', choices=arg_to_scheduler_choices, metavar=arg_to_scheduler_metavar, type=str, help='Learning rate scheduler')
parser.add_argument('--weight_decay', default=0.0, type=float, help='Weight decay if we apply some.')
parser.add_argument('--adam_epsilon', default=1e-08, type=float, help='Epsilon for Adam optimizer.')
parser.add_argument('--warmup_steps', default=0, type=int, help='Linear warmup over warmup_steps.')
parser.add_argument('--num_workers', default=4, type=int, help='kwarg passed to DataLoader')
parser.add_argument('--num_train_epochs', dest='max_epochs', default=30, type=int)
parser.add_argument('--original_batch_size', default=None, type=int)
parser.add_argument('--hf_checkpoint', default=False, type=bool)
parser.add_argument('--train_batch_size', default=8, type=int)
parser.add_argument('--eval_batch_size', default=6, type=int)
parser.add_argument('--adafactor', action='store_true')
|
def add_generic_args(parser, root_dir) -> None:
parser.add_argument('--output_dir', default=None, type=str, required=True, help='The output directory where the model predictions and checkpoints will be written.')
parser.add_argument('--n_tpu_cores', dest='tpu_cores', type=int)
parser.add_argument('--max_grad_norm', dest='gradient_clip_val', default=1.0, type=float, help='Max gradient norm')
parser.add_argument('--do_train', default=True, action='store_true', help='Whether to run training.')
    parser.add_argument('--do_predict', default=False, type=bool, help='Whether to run predictions.')
parser.add_argument('--gradient_accumulation_steps', dest='accumulate_grad_batches', type=int, default=1, help='Number of updates steps to accumulate before performing a backward/update pass.')
parser.add_argument('--seed', type=int, default=101, help='random seed for initialization')
parser.add_argument('--data_dir', default=None, type=str, required=True, help='The input data dir. Should contain the training files for the CoNLL-2003 NER task.')
|
def generic_train(model, args: argparse.Namespace, early_stopping_callback=False, logger=True, extra_callbacks=[], checkpoint_callback=None, logging_callback=None, **extra_train_kwargs):
pl.seed_everything(args.seed)
odir = Path(model.hparams.output_dir)
odir.mkdir(exist_ok=True)
checkpoint_callback = pl.callbacks.ModelCheckpoint(dirpath=args.output_dir, monitor=('val_' + args.val_metric), mode='max', save_top_k=args.save_top_k, save_last=True)
if (early_stopping_callback is not False):
extra_callbacks.append(early_stopping_callback)
print('the max number of epochs is {}'.format(args.max_epochs))
print('early stopping', early_stopping_callback)
print('checkpoint_callback', checkpoint_callback)
print('logging', logging_callback)
trainer = pl.Trainer.from_argparse_args(args, max_epochs=args.max_epochs, weights_summary=None, callbacks=([logging_callback] + extra_callbacks), logger=logger, checkpoint_callback=checkpoint_callback)
print('args.do_train:', args.do_train)
if args.do_train:
trainer.fit(model)
return trainer
|
def run_experiment(yaml_file):
with open(yaml_file, 'r') as stream:
parsed_yaml = yaml.safe_load(stream)
args = ''
for (arg, value) in parsed_yaml.items():
args += f'--{arg} {value} '
os.system(f'python finetune.py {args}')
|
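# What run_experiment builds from a YAML config: each key/value pair becomes a
# '--key value' flag (illustrative config string; flag names must match the
# argparse setup above).
import yaml

parsed_yaml = yaml.safe_load('data_dir: data/cnn_dm\noutput_dir: runs/exp1\nnum_train_epochs: 5\n')
args = ''.join(f'--{arg} {value} ' for (arg, value) in parsed_yaml.items())
print(f'python finetune.py {args}')
# -> python finetune.py --data_dir data/cnn_dm --output_dir runs/exp1 --num_train_epochs 5
|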
def count_trainable_parameters(model):
model_parameters = filter((lambda p: p.requires_grad), model.parameters())
params = sum([np.prod(p.size()) for p in model_parameters])
return params
|
class Seq2SeqLoggingCallback(pl.Callback):
def on_batch_end(self, trainer, pl_module):
lrs = {f'lr_group_{i}': param['lr'] for (i, param) in enumerate(pl_module.trainer.optimizers[0].param_groups)}
pl_module.logger.log_metrics(lrs)
@rank_zero_only
def _write_logs(self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True) -> None:
logger.info(f'***** {type_path} results at step {trainer.global_step:05d} *****')
metrics = trainer.callback_metrics
trainer.logger.log_metrics({k: v for (k, v) in metrics.items() if (k not in ['log', 'progress_bar', 'preds'])})
od = Path(pl_module.hparams.output_dir)
if (type_path == 'test'):
results_file = (od / 'test_results.txt')
generations_file = (od / 'test_generations.txt')
else:
results_file = (od / f'{type_path}_results/{trainer.global_step:05d}.txt')
generations_file = (od / f'{type_path}_generations/{trainer.global_step:05d}.txt')
results_file.parent.mkdir(exist_ok=True)
generations_file.parent.mkdir(exist_ok=True)
with open(results_file, 'a+') as writer:
for key in sorted(metrics):
if (key in ['log', 'progress_bar', 'preds']):
continue
val = metrics[key]
if isinstance(val, torch.Tensor):
val = val.item()
                msg = f'{key}: {val:.6f}\n'
writer.write(msg)
if (not save_generations):
return
if ('preds' in metrics):
content = '\n'.join(metrics['preds'])
generations_file.open('w+').write(content)
@rank_zero_only
def on_train_start(self, trainer, pl_module):
try:
npars = pl_module.model.model.num_parameters()
except AttributeError:
npars = pl_module.model.num_parameters()
n_trainable_pars = count_trainable_parameters(pl_module)
trainer.logger.log_metrics({'n_params': npars, 'mp': (npars / 1000000.0), 'grad_mp': (n_trainable_pars / 1000000.0)})
@rank_zero_only
def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
save_json(pl_module.metrics, pl_module.metrics_save_path)
return self._write_logs(trainer, pl_module, 'test')
@rank_zero_only
def on_validation_end(self, trainer: pl.Trainer, pl_module):
save_json(pl_module.metrics, pl_module.metrics_save_path)
|
def bespoke_scheduler(optimizer, num_warmup_steps, num_training_steps, last_epoch=(- 1)):
'\n Create a schedule with a learning rate that decreases linearly from the initial lr set in the optimizer to 0, after\n a warmup period during which it increases linearly from 0 to the initial lr set in the optimizer.\n\n Args:\n optimizer (:class:`~torch.optim.Optimizer`):\n The optimizer for which to schedule the learning rate.\n num_warmup_steps (:obj:`int`):\n The number of steps for the warmup phase.\n num_training_steps (:obj:`int`):\n The total number of training steps.\n last_epoch (:obj:`int`, `optional`, defaults to -1):\n The index of the last epoch when resuming training.\n\n Return:\n :obj:`torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.\n '
def lr_lambda(current_step: int):
if (current_step < num_warmup_steps):
return (float(current_step) / float(max(1, num_warmup_steps)))
return max(0.0, (float((num_training_steps - current_step)) / float(max(1, (num_training_steps - num_warmup_steps)))))
def lr_lambda2(current_step: int):
if (current_step < num_warmup_steps):
return (float(current_step) / float(max(1, num_warmup_steps)))
return max(0.0, (float(((num_training_steps * 3) - current_step)) / float(max(1, ((num_training_steps * 3) - num_warmup_steps)))))
return LambdaLR(optimizer, [lr_lambda, lr_lambda2], last_epoch)
|
class PrefixSummarizationModule(PrefixTransformer):
mode = 'summarization'
loss_names = ['loss']
metric_names = ROUGE_KEYS
default_val_metric = 'rouge2'
def __init__(self, hparams, **kwargs):
if (hparams.sortish_sampler and (hparams.gpus > 1)):
hparams.replace_sampler_ddp = False
elif (hparams.max_tokens_per_batch is not None):
if (hparams.gpus > 1):
raise NotImplementedError('Dynamic Batch size does not work for multi-gpu training')
if hparams.sortish_sampler:
raise ValueError('--sortish_sampler and --max_tokens_per_batch may not be used simultaneously')
super().__init__(hparams, num_labels=None, mode=self.mode, **kwargs)
use_task_specific_params(self.model, 'summarization')
self.metrics_save_path = (Path(self.output_dir) / 'metrics.json')
self.hparams_save_path = (Path(self.output_dir) / 'hparams.pkl')
pickle_save(self.hparams, self.hparams_save_path)
self.step_count = 0
self.metrics = defaultdict(list)
self.model_type = self.config.model_type
self.vocab_size = (self.config.tgt_vocab_size if (self.model_type == 'fsmt') else self.config.vocab_size)
self.val_dir = self.hparams.output_dir
self.dataset_kwargs: dict = dict(data_dir=self.hparams.data_dir, max_source_length=self.hparams.max_source_length, prefix=(self.model.config.prefix or ''))
n_observations_per_split = {'train': self.hparams.n_train, 'val': self.hparams.n_val, 'test': self.hparams.n_test}
self.n_obs = {k: (v if (v >= 0) else None) for (k, v) in n_observations_per_split.items()}
self.target_lens = {'train': self.hparams.max_target_length, 'val': self.hparams.val_max_target_length, 'test': self.hparams.test_max_target_length}
assert (self.target_lens['train'] <= self.target_lens['val']), f'target_lens: {self.target_lens}'
assert (self.target_lens['train'] <= self.target_lens['test']), f'target_lens: {self.target_lens}'
if (not self.hparams.finetune):
freeze_params(self.seq2seq_model)
assert_all_frozen(self.seq2seq_model)
print('FREEZING ENTIRE seq2seq model.')
else:
print('FINE-TUNING')
self.freeze_embeds()
self.num_workers = hparams.num_workers
self.decoder_start_token_id = None
if ((self.model.config.decoder_start_token_id is None) and isinstance(self.tokenizer, MBartTokenizer)):
self.decoder_start_token_id = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
self.model.config.decoder_start_token_id = self.decoder_start_token_id
self.dataset_class = (Seq2SeqDataset if hasattr(self.tokenizer, 'prepare_seq2seq_batch') else LegacySeq2SeqDataset)
self.eval_beams = (self.model.config.num_beams if (self.hparams.eval_beams is None) else self.hparams.eval_beams)
        assert (self.eval_beams >= 1), f'got self.eval_beams={self.eval_beams}. Need an integer >= 1'
if (self.hparams.eval_max_gen_length is not None):
self.eval_max_length = self.hparams.eval_max_gen_length
else:
self.eval_max_length = self.model.config.max_length
self.val_metric = (self.default_val_metric if (self.hparams.val_metric is None) else self.hparams.val_metric)
self.training_acc_across_batches_at_curr_epoch = []
self.eval_max_length = 60
self.eval_min_length = 10
self.eval_beams = 6
print('for decoding, eval_max_length={}, eval_min_length={}, eval_beams={}'.format(self.eval_max_length, self.eval_min_length, self.eval_beams))
def freeze_embeds(self):
'Freeze token embeddings and positional embeddings for bart, just token embeddings for t5.'
freeze_params(self.seq2seq_model.model.shared)
for d in [self.seq2seq_model.model.encoder, self.seq2seq_model.model.decoder]:
freeze_params(d.embed_positions)
freeze_params(d.embed_tokens)
def forward(self, input_ids, **kwargs):
return self.model(input_ids, frozen_model=self.seq2seq_model, **kwargs)
def ids_to_clean_text(self, generated_ids: List[int]):
gen_text = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True)
return lmap(str.strip, gen_text)
def _step(self, batch: dict) -> Tuple:
pad_token_id = self.tokenizer.pad_token_id
(src_ids, src_mask) = (batch['input_ids'], batch['attention_mask'])
tgt_ids = batch['labels']
if isinstance(self.model, T5ForConditionalGeneration):
decoder_input_ids = self.model._shift_right(tgt_ids)
else:
decoder_input_ids = shift_tokens_right(tgt_ids, pad_token_id)
outputs = self(src_ids, attention_mask=src_mask, decoder_input_ids=decoder_input_ids, use_cache=False, use_prefix=True, conditional_info={'cats': batch['cats'], 'sport': batch['sport']})
lm_logits = outputs[0]
if (self.hparams.label_smoothing == 0):
ce_loss_fct = torch.nn.CrossEntropyLoss(ignore_index=pad_token_id)
assert (lm_logits.shape[(- 1)] == self.vocab_size)
loss = ce_loss_fct(lm_logits.view((- 1), lm_logits.shape[(- 1)]), tgt_ids.view((- 1)))
else:
lprobs = torch.nn.functional.log_softmax(lm_logits, dim=(- 1))
(loss, nll_loss) = label_smoothed_nll_loss(lprobs, tgt_ids, self.hparams.label_smoothing, ignore_index=pad_token_id)
return (loss,)
@property
def pad(self) -> int:
return self.tokenizer.pad_token_id
def training_step(self, batch, batch_idx) -> Dict:
loss_tensors = self._step(batch)
logs = {name: loss for (name, loss) in zip(self.loss_names, loss_tensors)}
logs['tpb'] = (batch['input_ids'].ne(self.pad).sum() + batch['labels'].ne(self.pad).sum())
logs['bs'] = batch['input_ids'].shape[0]
logs['src_pad_tok'] = batch['input_ids'].eq(self.pad).sum()
logs['src_pad_frac'] = batch['input_ids'].eq(self.pad).float().mean()
self.training_acc_across_batches_at_curr_epoch.append(loss_tensors[0].item())
self.log_dict(logs)
loss = loss_tensors[0]
return {'loss': loss}
def on_epoch_end(self):
train_acc_mean = np.mean(self.training_acc_across_batches_at_curr_epoch)
self.log_dict({'train_loss': train_acc_mean})
print('train_loss = {}'.format(train_acc_mean))
self.training_acc_across_batches_per_epoch = []
def validation_step(self, batch, batch_idx) -> Dict:
if (self.current_epoch < 1):
return 1
if self.hparams.skip_val:
return 1
if self.hparams.hf_checkpoint:
save_path = Path(self.hparams.save_hf)
save_path = save_path.joinpath('checkpoint-curr_best')
self.model.config.save_step = self.step_count
self.model.save_pretrained(save_path)
self.tokenizer.save_pretrained(save_path)
print('SAVING TO checkpoint {}'.format(save_path))
raise ValueError('just_saving')
return self._generative_step(batch)
def validation_epoch_end(self, outputs, prefix='val') -> Dict:
if (self.current_epoch < 1):
logg = 0.1
self.log('val_rouge2', logg)
return 1
if self.hparams.skip_val:
logg = 0.1
self.log('val_rouge2', logg)
return 1
self.step_count += 1
losses = {k: torch.stack([x[k] for x in outputs]).mean() for k in self.loss_names}
loss = losses['loss']
generative_metrics = {k: np.array([x[k] for x in outputs]).mean() for k in (self.metric_names + ['gen_time', 'gen_len'])}
metric_val = (generative_metrics[self.val_metric] if (self.val_metric in generative_metrics) else losses[self.val_metric])
metric_tensor: torch.FloatTensor = torch.tensor(metric_val).type_as(loss)
rank_zero_info(f'Rouge2: {metric_tensor}')
rank_zero_info(f'val_loss: {loss}')
generative_metrics.update({k: v.item() for (k, v) in losses.items()})
losses.update(generative_metrics)
all_metrics = {f'{prefix}_avg_{k}': x for (k, x) in losses.items()}
all_metrics['step_count'] = self.step_count
self.metrics[prefix].append(all_metrics)
self.log_dict({'log': all_metrics, f'{prefix}_loss': loss, f'{prefix}_{self.val_metric}': metric_tensor})
preds = flatten_list([x['preds'] for x in outputs])
return {'log': all_metrics, 'preds': preds, f'{prefix}_loss': loss, f'{prefix}_{self.val_metric}': metric_tensor}
def calc_generative_metrics(self, preds, target) -> Dict:
return calculate_rouge(preds, target)
def _generative_step(self, batch: dict) -> dict:
t0 = time.time()
bsz = batch['input_ids'].size(0)
prefix_prompt = self.model.get_prompt(bsz=bsz, sample_size=self.eval_beams, conditional_info={'cats': batch['cats'], 'sport': batch['sport']})
generated_ids = self.seq2seq_model.generate(batch['input_ids'], past_key_values=prefix_prompt, attention_mask=batch['attention_mask'], use_cache=True, length_penalty=self.hparams.length_penalty, use_prefix=True, decoder_start_token_id=self.decoder_start_token_id, num_beams=self.eval_beams, min_length=self.eval_min_length, max_length=self.eval_max_length, no_repeat_ngram_size=3)
gen_time = ((time.time() - t0) / batch['input_ids'].shape[0])
preds: List[str] = self.ids_to_clean_text(generated_ids)
target: List[str] = self.ids_to_clean_text(batch['labels'])
loss_tensors = self._step(batch)
base_metrics = {name: loss for (name, loss) in zip(self.loss_names, loss_tensors)}
rouge: Dict = self.calc_generative_metrics(preds, target)
summ_len = np.mean(lmap(len, generated_ids))
base_metrics.update(gen_time=gen_time, gen_len=summ_len, preds=preds, target=target, **rouge)
return base_metrics
def test_step(self, batch, batch_idx):
return self._generative_step(batch)
def test_epoch_end(self, outputs):
return self.validation_epoch_end(outputs, prefix='test')
def get_dataset(self, type_path) -> Seq2SeqDataset:
n_obs = self.n_obs[type_path]
max_target_length = self.target_lens[type_path]
dataset = self.dataset_class(self.tokenizer, type_path=type_path, n_obs=n_obs, max_target_length=max_target_length, **self.dataset_kwargs)
return dataset
def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool=False) -> DataLoader:
dataset = self.get_dataset(type_path)
if (self.hparams.sortish_sampler and (type_path != 'test')):
sampler = dataset.make_sortish_sampler(batch_size, distributed=(self.hparams.gpus > 1))
return DataLoader(dataset, batch_size=batch_size, collate_fn=dataset.collate_fn, shuffle=False, num_workers=self.num_workers, sampler=sampler)
elif ((self.hparams.max_tokens_per_batch is not None) and (type_path != 'test')):
batch_sampler = dataset.make_dynamic_sampler(self.hparams.max_tokens_per_batch, distributed=(self.hparams.gpus > 1))
return DataLoader(dataset, batch_sampler=batch_sampler, collate_fn=dataset.collate_fn, num_workers=self.num_workers)
else:
return DataLoader(dataset, batch_size=batch_size, collate_fn=dataset.collate_fn, shuffle=shuffle, num_workers=self.num_workers, sampler=None)
def train_dataloader(self) -> DataLoader:
dataloader = self.get_dataloader('train', batch_size=self.hparams.train_batch_size, shuffle=True)
return dataloader
def val_dataloader(self) -> DataLoader:
return self.get_dataloader('val', batch_size=self.hparams.eval_batch_size)
def test_dataloader(self) -> DataLoader:
return self.get_dataloader('test', batch_size=self.hparams.eval_batch_size)
@staticmethod
def add_model_specific_args(parser, root_dir):
PrefixTransformer.add_model_specific_args(parser, root_dir)
add_generic_args(parser, root_dir)
parser.add_argument('--max_source_length', default=512, type=int, help='The maximum total input sequence length after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded.')
parser.add_argument('--max_target_length', default=60, type=int, help='The maximum total target sequence length after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded.')
parser.add_argument('--val_dir', default='', type=str, help='The directory for validation')
parser.add_argument('--val_max_target_length', default=60, type=int, help='The maximum total target sequence length after tokenization for validation. Sequences longer than this will be truncated, sequences shorter will be padded.')
parser.add_argument('--test_max_target_length', default=100, type=int, help='The maximum total test target length specified for generation')
parser.add_argument('--freeze_encoder', action='store_true')
parser.add_argument('--freeze_embeds', action='store_true')
parser.add_argument('--sortish_sampler', action='store_true', default=False)
parser.add_argument('--max_tokens_per_batch', type=int, default=None)
parser.add_argument('--logger_name', type=str, choices=['default', 'wandb', 'wandb_shared'], default='default')
parser.add_argument('--n_train', type=int, default=(- 1), required=False, help='# examples. -1 means use all.')
parser.add_argument('--n_val', type=int, default=(- 1), required=False, help='# examples. -1 means use all.')
parser.add_argument('--n_test', type=int, default=(- 1), required=False, help='# examples. -1 means use all.')
parser.add_argument('--task_mode', type=str, default='summarization', required=False, help='if different tasks.')
parser.add_argument('--label_smoothing', type=float, default=0.0, required=False)
parser.add_argument('--src_lang', type=str, default='', required=False)
parser.add_argument('--save_hf', type=str, default='', required=False)
parser.add_argument('--tgt_lang', type=str, default='', required=False)
parser.add_argument('--eval_beams', type=int, default=6, required=False)
parser.add_argument('--eval_min_length', type=int, default=10, required=False)
parser.add_argument('--skip_val', type=bool, default=False, required=False)
parser.add_argument('--val_metric', type=str, default=None, required=False)
parser.add_argument('--eval_max_gen_length', type=int, default=60, help='never generate more than n tokens')
parser.add_argument('--length_penalty', type=float, default=1.0, help='length penalty applied during beam search generation')
parser.add_argument('--save_top_k', type=int, default=1, required=False, help='How many checkpoints to save')
parser.add_argument('--wb_project', type=str, default='', help='wandb project name')
parser.add_argument('--finetune', type=bool, default=False)
parser.add_argument('--git', type=bool, default=True)
parser.add_argument('--dev', type=bool, default=False)
parser.add_argument('--freeze_base', type=bool, default=False)
parser.add_argument('--wb_entity', type=str, default='', help='wandb run name')
parser.add_argument('--wb_name', type=str, default='', help='wandb run name')
# default must be None so the `args.id is not None` check in main() works.
parser.add_argument('--id', type=str, default=None, help='wandb id if continuing a run')
parser.add_argument('--early_stopping_patience', type=int, default=(- 1), required=False, help='-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So val_check_interval will effect it.')
return parser
|
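# Editor's note (sketch, not part of the original script): several flags in these
# parsers use `type=bool`, but argparse applies bool() to the raw string, so
# `--skip_val False` still parses as True. A minimal str2bool helper, assuming the
# flags are passed as explicit strings, would avoid the pitfall:
import argparse

def str2bool(v):
    # Accept common spellings of true/false; reject anything else loudly.
    if isinstance(v, bool):
        return v
    if v.lower() in ('yes', 'true', 't', '1'):
        return True
    if v.lower() in ('no', 'false', 'f', '0'):
        return False
    raise argparse.ArgumentTypeError('boolean value expected, got {!r}'.format(v))

# Usage sketch: parser.add_argument('--skip_val', type=str2bool, default=False)
|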
def eval(args, model=None):
if (model is None):
if ('summarization' in args.task_mode):
if (args.tuning_mode == 'prefixtune'):
model = PrefixSummarizationModule(args)
print('the length penalty is {}'.format(args.length_penalty))
with torch.no_grad():
model.eval()
model = model.cuda()
data_loader = model.test_dataloader()
print('DATALOADER_LEN', len(data_loader))
out_lst = []
for (batch_idx, batch) in enumerate(data_loader):
batch = model.transfer_batch_to_device(batch, model.device)
out = model.test_step(batch, batch_idx)
out_lst.append(out)
if ((batch_idx % 50) == 0):
print(model.test_epoch_end(out_lst))
print(out['preds'])
result = model.test_epoch_end(out_lst)
print('FINAL_RESULTS')
for (k, v) in result.items():
if (k != 'preds'):
print(k, v)
out_path = os.path.join(args.output_dir, 'test_beam_{}'.format(args.length_penalty))
print('writing the test results to ', out_path)
with open(out_path, 'w') as f:
for preds in result['preds']:
print(preds, file=f)
|
def main(args, model=None):
Path(args.output_dir).mkdir(parents=True, exist_ok=True)
if (model is None):
if ('summarization' in args.task_mode):
if (args.tuning_mode == 'prefixtune'):
model = PrefixSummarizationModule(args)
pickle_save(args, os.path.join(args.output_dir, 'args.pkl'))
pickle_save(model.hparams, (model.output_dir / 'hparams.pkl'))
dataset = Path(args.data_dir).name
print(dataset)
if ((args.logger_name == 'default') or args.fast_dev_run or str(args.output_dir).startswith('/tmp') or str(args.output_dir).startswith('/var')):
logger = True
elif (args.logger_name == 'wandb'):
from pytorch_lightning.loggers import WandbLogger
if (args.id is not None):
id_ = args.id
else:
id_ = wandb.util.generate_id()
print('ID', id_)
logger = WandbLogger(id=id_, name=args.wb_name, project=args.wb_project, entity=args.wb_entity)
if (args.early_stopping_patience >= 0):
es_callback = get_early_stopping_callback(model.val_metric, args.early_stopping_patience)
else:
es_callback = False
trainer: pl.Trainer = generic_train(model, args, logging_callback=Seq2SeqLoggingCallback(), early_stopping_callback=es_callback, logger=logger)
pickle_save(model.hparams, (model.output_dir / 'hparams.pkl'))
return model
|
class PrefixTransformer(pl.LightningModule):
def __init__(self, hparams: argparse.Namespace, num_labels=None, config=None, tokenizer=None, seq2seq_model=None, **config_kwargs):
'Initialize a model, tokenizer and config.'
super().__init__()
self.save_hyperparameters(hparams)
self.step_count = 0
self.output_dir = Path(self.hparams.output_dir)
cache_dir = (self.hparams.cache_dir if self.hparams.cache_dir else None)
print('the cache dir is {}'.format(cache_dir))
if (config is None):
self.config = AutoConfig.from_pretrained((self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path), **({'num_labels': num_labels} if (num_labels is not None) else {}), cache_dir=cache_dir, **config_kwargs)
else:
self.config: PretrainedConfig = config
extra_model_params = ('encoder_layerdrop', 'decoder_layerdrop', 'dropout', 'attention_dropout')
for p in extra_model_params:
if getattr(self.hparams, p, None):
assert hasattr(self.config, p), f"model config doesn't have a `{p}` attribute"
setattr(self.config, p, getattr(self.hparams, p))
if (tokenizer is None):
self.tokenizer = AutoTokenizer.from_pretrained((self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path), cache_dir=cache_dir)
else:
self.tokenizer: PreTrainedTokenizer = tokenizer
self.config.use_prefix = True
self.config.preseqlen = self.hparams.preseqlen
if self.hparams.control_prefixes:
self.config.preseqlen += (self.hparams.m_prefix_len * 2)
self.seq2seq_model_type = AutoModel
if (seq2seq_model is None):
self.seq2seq_model = BartForConditionalGeneration.from_pretrained(self.hparams.model_name_or_path, from_tf=bool(('.ckpt' in self.hparams.model_name_or_path)), config=self.config, cache_dir=cache_dir)
else:
self.seq2seq_model = seq2seq_model
config_prefix = AutoConfig.from_pretrained(self.hparams.model_name_or_path, cache_dir=cache_dir)
self.model_type = config_prefix.model_type
if (self.hparams.optim_prefix == 'yes'):
optim_prefix_bool = True
elif (self.hparams.optim_prefix == 'no'):
optim_prefix_bool = False
else:
raise ValueError('model_args.optim_prefix should be either yes or no')
print(self.model_type)
config_prefix._my_arg_tune_mode = self.hparams.tuning_mode
config_prefix._my_arg_task_mode = self.hparams.task_mode
config_prefix._my_arg_control = True
config_prefix.train_weights = False
config_prefix.optim_prefix = optim_prefix_bool
config_prefix.preseqlen = self.hparams.preseqlen
config_prefix.use_infix = (self.hparams.format_mode == 'infix')
config_prefix.format_mode = self.hparams.format_mode
config_prefix.prefix_dropout = self.hparams.prefix_dropout
config_prefix.vocab_size = len(self.tokenizer)
config_prefix.lowdata = ('lowdata' in self.hparams.output_dir)
if (config_prefix.lowdata and (self.hparams.use_lowdata_token == 'yes')):
config_prefix.lowdata_token = self.tokenizer([self.hparams.lowdata_token], add_prefix_space=True)['input_ids']
print(self.hparams.lowdata_token)
print(config_prefix.lowdata_token)
print(self.tokenizer.pad_token_id)
config_prefix.mid_dim = self.hparams.mid_dim
if (self.hparams.prefixModel_name_or_path is not None):
print('LOADING FROM {}'.format(hparams.prefixModel_name_or_path))
self.model = ControlPrefixes.from_pretrained(self.hparams.prefixModel_name_or_path, from_tf=bool(('.ckpt' in self.hparams.prefixModel_name_or_path)), cache_dir=cache_dir, config=config_prefix)
else:
self.model = ControlPrefixes(config_prefix)
def load_hf_checkpoint(self, *args, **kwargs):
# Intentionally disabled: checkpoints are restored elsewhere, not through this hook.
raise NotImplementedError('why need to load model here?')
self.model = self.model_type.from_pretrained(*args, **kwargs)
def get_lr_scheduler(self):
get_schedule_func = arg_to_scheduler[self.hparams.lr_scheduler]
scheduler = get_schedule_func(self.opt, num_warmup_steps=self.hparams.warmup_steps, num_training_steps=self.total_steps)
scheduler = {'scheduler': scheduler, 'interval': 'step', 'frequency': 1}
return scheduler
def configure_optimizers(self):
'Prepare optimizer and schedule (linear warmup and decay)'
model = self.model
no_decay = ['bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [{'params': [p for (n, p) in model.named_parameters() if (not any(((nd in n) for nd in no_decay)))], 'weight_decay': self.hparams.weight_decay}, {'params': [p for (n, p) in model.named_parameters() if any(((nd in n) for nd in no_decay))], 'weight_decay': 0.0}]
if self.hparams.adafactor:
optimizer = Adafactor(optimizer_grouped_parameters, lr=self.hparams.learning_rate, scale_parameter=False, relative_step=False)
else:
optimizer = AdamW(optimizer_grouped_parameters, lr=self.hparams.learning_rate, eps=self.hparams.adam_epsilon)
self.opt = optimizer
scheduler = self.get_lr_scheduler()
return ([optimizer], [scheduler])
def test_step(self, batch, batch_nb):
return self.validation_step(batch, batch_nb)
def test_epoch_end(self, outputs):
return self.validation_epoch_end(outputs)
@property
def total_steps(self) -> int:
'The number of total training steps that will be run. Used for lr scheduler purposes.'
num_devices = max(1, self.hparams.gpus)
if (self.hparams.original_batch_size is not None):
effective_batch_size = ((self.hparams.original_batch_size * self.hparams.accumulate_grad_batches) * num_devices)
else:
effective_batch_size = ((self.hparams.train_batch_size * self.hparams.accumulate_grad_batches) * num_devices)
dataset_size = len(self.train_loader.dataset)
return int((dataset_size / effective_batch_size) * self.hparams.max_epochs)
def setup(self, mode):
if (mode == 'fit'):
self.train_loader = self.get_dataloader('train', self.hparams.train_batch_size, shuffle=True)
def get_dataloader(self, type_path, batch_size, shuffle=False):
raise NotImplementedError('You must implement this for your task')
def train_dataloader(self):
return self.train_loader
def val_dataloader(self):
return self.get_dataloader('dev', self.hparams.eval_batch_size, shuffle=False)
def test_dataloader(self):
return self.get_dataloader('test', self.hparams.eval_batch_size, shuffle=False)
def _feature_file(self, mode):
return os.path.join(self.hparams.data_dir, 'cached_{}_{}_{}'.format(mode, list(filter(None, self.hparams.model_name_or_path.split('/'))).pop(), str(self.hparams.max_seq_length)))
@pl.utilities.rank_zero_only
def save_checkpoint(self, trainer) -> None:
print('Saving the checkpoint.')
return
@pl.utilities.rank_zero_only
def on_save_checkpoint(self, checkpoint: Dict[(str, Any)], filepath=None) -> None:
save_path = self.output_dir.joinpath('checkpoint-curr_best')
self.model.config.save_step = self.step_count
self.model.save_pretrained(save_path)
self.tokenizer.save_pretrained(save_path)
print('SAVING TO checkpoint {}'.format(save_path))
@staticmethod
def add_model_specific_args(parser, root_dir):
parser.add_argument('--model_name_or_path', default='facebook/bart-large', type=str, required=False, help='Path to pretrained model or model identifier from huggingface.co/models')
parser.add_argument('--prefixModel_name_or_path', default=None, type=str, help='Path to pretrained prefix model or model identifier from huggingface.co/models')
parser.add_argument('--prefix_mode', default='activation', type=str, help='embedding or activation')
parser.add_argument('--preseqlen', default=200, type=int, help='the length of the prefix.')
parser.add_argument('--optim_prefix', default='yes', type=str, help='use the task specific optimization of the prefix.')
parser.add_argument('--different_scheduler', default=False, type=bool, help='whether to use a different lr scheduler.')
parser.add_argument('--tuning_mode', default='prefixtune', type=str, help='Could be prefixtune or finetune')
parser.add_argument('--prefix_dropout', default=0.0, type=float, help='the dropout rate for our prefix model.')
parser.add_argument('--use_dropout', default='no', type=str, help='whether to apply dropout to the main model during training.')
parser.add_argument('--mid_dim', default=800, type=int, help='the dimension of the intermediate layer.')
parser.add_argument('--m_prefix_len', default=1, type=int, help='the control prefix length')
parser.add_argument('--format_mode', default='cat', type=str, help='whether to look at the input again, including [infix, cat, peek, nopeek]')
parser.add_argument('--use_lowdata_token', default='yes', type=str, help='whether or not to use the lowdata token.')
parser.add_argument('--lowdata_token', default='summarize', type=str, help='the low data token to use.')
parser.add_argument('--config_name', default='', type=str, help='Pretrained config name or path if not the same as model_name')
parser.add_argument('--tokenizer_name', default=None, type=str, help='Pretrained tokenizer name or path if not the same as model_name')
parser.add_argument('--cache_dir', default='/content/gdrive/MyDrive/cache_dir', type=str, help='Where do you want to store the pre-trained models downloaded from s3')
parser.add_argument('--encoder_layerdrop', type=float, help='Encoder layer dropout probability (Optional). Goes into model.config')
parser.add_argument('--decoder_layerdrop', type=float, help='Decoder layer dropout probability (Optional). Goes into model.config')
parser.add_argument('--dropout', type=float, help='Dropout probability (Optional). Goes into model.config')
parser.add_argument('--control_prefixes', type=bool, default=False, help='if using control prefixes')
parser.add_argument('--attention_dropout', type=float, help='Attention dropout probability (Optional). Goes into model.config')
parser.add_argument('--learning_rate', default=5e-05, type=float, help='The target learning rate.')
parser.add_argument('--lr_scheduler', default='linear', choices=arg_to_scheduler_choices, metavar=arg_to_scheduler_metavar, type=str, help='Learning rate scheduler')
parser.add_argument('--weight_decay', default=0.0, type=float, help='Weight decay if we apply some.')
parser.add_argument('--adam_epsilon', default=1e-08, type=float, help='Epsilon for Adam optimizer.')
parser.add_argument('--warmup_steps', default=0, type=int, help='Linear warmup over warmup_steps.')
parser.add_argument('--num_workers', default=4, type=int, help='kwarg passed to DataLoader')
parser.add_argument('--num_train_epochs', dest='max_epochs', default=30, type=int)
parser.add_argument('--original_batch_size', default=None, type=int)
parser.add_argument('--hf_checkpoint', default=False, type=bool)
parser.add_argument('--train_batch_size', default=8, type=int)
parser.add_argument('--eval_batch_size', default=6, type=int)
parser.add_argument('--adafactor', action='store_true')
return parser
|
def add_generic_args(parser, root_dir) -> None:
parser.add_argument('--output_dir', default=None, type=str, required=True, help='The output directory where the model predictions and checkpoints will be written.')
parser.add_argument('--n_tpu_cores', dest='tpu_cores', type=int)
parser.add_argument('--max_grad_norm', dest='gradient_clip_val', default=1.0, type=float, help='Max gradient norm')
# NOTE: default=True combined with action='store_true' means --do_train is always True.
parser.add_argument('--do_train', default=True, action='store_true', help='Whether to run training.')
parser.add_argument('--do_predict', default=False, type=bool, help='Whether to run predictions.')
parser.add_argument('--gradient_accumulation_steps', dest='accumulate_grad_batches', type=int, default=1, help='Number of updates steps to accumulate before performing a backward/update pass.')
parser.add_argument('--seed', type=int, default=101, help='random seed for initialization')
parser.add_argument('--data_dir', default=None, type=str, required=True, help='The input data dir. Should contain the training files for the CoNLL-2003 NER task.')
|
def generic_train(model, args: argparse.Namespace, early_stopping_callback=False, logger=True, extra_callbacks=[], checkpoint_callback=None, logging_callback=None, **extra_train_kwargs):
pl.seed_everything(args.seed)
odir = Path(model.hparams.output_dir)
odir.mkdir(exist_ok=True)
checkpoint_callback = pl.callbacks.ModelCheckpoint(dirpath=args.output_dir, monitor=('val_' + args.val_metric), mode='max', save_top_k=args.save_top_k, save_last=True)
if (early_stopping_callback is not False):
extra_callbacks.append(early_stopping_callback)
print('the max number of epochs is {}'.format(args.max_epochs))
print('early stopping', early_stopping_callback)
print('checkpoint_callback', checkpoint_callback)
print('logging', logging_callback)
trainer = pl.Trainer.from_argparse_args(args, max_epochs=args.max_epochs, weights_summary=None, callbacks=([logging_callback] + extra_callbacks), logger=logger, checkpoint_callback=checkpoint_callback)
print('args.do_train:', args.do_train)
if args.do_train:
trainer.fit(model)
return trainer
|
def run_experiment(yaml_file):
with open(yaml_file, 'r') as stream:
parsed_yaml = yaml.safe_load(stream)
args = ''
for (arg, value) in parsed_yaml.items():
args += f'--{arg} {value} '
os.system(f'python finetune.py {args}')
|
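# Editor's sketch (not part of the original launcher): os.system performs no error
# checking and breaks on values containing spaces. An equivalent launcher with
# subprocess, assuming the same flat `flag: value` YAML layout, could be:
import subprocess

import yaml

def run_experiment_subprocess(yaml_file):
    with open(yaml_file, 'r') as stream:
        parsed_yaml = yaml.safe_load(stream)
    cmd = ['python', 'finetune.py']
    for (arg, value) in parsed_yaml.items():
        cmd += ['--{}'.format(arg), str(value)]
    # check=True raises CalledProcessError if finetune.py exits with a non-zero code.
    subprocess.run(cmd, check=True)
|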
def extract_audioset_features(ids, id2audio_path, id2label):
first_audio = True
for i in ids:
if first_audio:
input_data = vggish_input.wavfile_to_examples(id2audio_path[i])
ground_truth = np.repeat(id2label[i], input_data.shape[0], axis=0)
identifiers = np.repeat(i, input_data.shape[0], axis=0)
first_audio = False
else:
tmp_in = vggish_input.wavfile_to_examples(id2audio_path[i])
input_data = np.concatenate((input_data, tmp_in), axis=0)
tmp_gt = np.repeat(id2label[i], tmp_in.shape[0], axis=0)
ground_truth = np.concatenate((ground_truth, tmp_gt), axis=0)
tmp_id = np.repeat(i, tmp_in.shape[0], axis=0)
identifiers = np.concatenate((identifiers, tmp_id), axis=0)
with tf.Graph().as_default(), tf.Session() as sess:
vggish_slim.define_vggish_slim(training=False)
vggish_slim.load_vggish_slim_checkpoint(sess, 'vggish_model.ckpt')
features_tensor = sess.graph.get_tensor_by_name(vggish_params.INPUT_TENSOR_NAME)
embedding_tensor = sess.graph.get_tensor_by_name(vggish_params.OUTPUT_TENSOR_NAME)
extracted_feat = sess.run([embedding_tensor], feed_dict={features_tensor: input_data})
feature = np.squeeze(np.asarray(extracted_feat))
return [feature, ground_truth, identifiers]
|
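# Editor's sketch: extract_audioset_features grows its arrays with np.concatenate on
# every file, which is quadratic in the number of patches. Collecting per-file arrays
# in lists and concatenating once is equivalent and much faster. `wav_to_examples`
# below is a stand-in for vggish_input.wavfile_to_examples:
import numpy as np

def stack_examples(ids, id2audio_path, id2label, wav_to_examples):
    inputs, labels, identifiers = [], [], []
    for i in ids:
        examples = wav_to_examples(id2audio_path[i])
        inputs.append(examples)
        labels.append(np.repeat(id2label[i], examples.shape[0], axis=0))
        identifiers.append(np.repeat(i, examples.shape[0], axis=0))
    # One concatenate per output array instead of one per input file.
    return (np.concatenate(inputs, axis=0), np.concatenate(labels, axis=0), np.concatenate(identifiers, axis=0))
|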
def select(x, config, is_training, reuse=False):
if (config['model_number'] == 2):
return vgg_bn(x, config, is_training, 10, reuse)
elif (config['model_number'] == 12):
return vgg_bn(log_learn(x), config, is_training, 10, reuse)
raise RuntimeError("ERROR: Model {} can't be found!".format(config['model_number']))
|
def vgg_bn(x, config, is_training, output_filters, reuse=False):
with tf.variable_scope('vggish', reuse=reuse):
NUMBER_FILTERS = 128
print(('VGG with batchnorm! #filters: ' + str(NUMBER_FILTERS)))
print(('Input: ' + str(x.get_shape())))
bn_input = tf.layers.batch_normalization(x, training=is_training, axis=1)
conv1 = tf.layers.conv2d(inputs=bn_input, filters=NUMBER_FILTERS, kernel_size=[3, 3], padding='same', activation=tf.nn.elu, name='1CNN', kernel_initializer=tf.contrib.layers.variance_scaling_initializer(factor=1.0, mode='FAN_AVG', uniform=True))
bn_conv1 = tf.layers.batch_normalization(conv1, training=is_training, axis=(- 1))
pool1 = tf.layers.max_pooling2d(inputs=bn_conv1, pool_size=[2, 2], strides=[2, 2])
print(pool1.get_shape())
conv2 = tf.layers.conv2d(inputs=pool1, filters=NUMBER_FILTERS, kernel_size=[3, 3], padding='same', activation=tf.nn.elu, name='2CNN', kernel_initializer=tf.contrib.layers.variance_scaling_initializer(factor=1.0, mode='FAN_AVG', uniform=True))
bn_conv2 = tf.layers.batch_normalization(conv2, training=is_training, axis=(- 1))
pool2 = tf.layers.max_pooling2d(inputs=bn_conv2, pool_size=[2, 2], strides=[2, 2])
print(pool2.get_shape())
conv3 = tf.layers.conv2d(inputs=pool2, filters=NUMBER_FILTERS, kernel_size=[3, 3], padding='same', activation=tf.nn.elu, name='3CNN', kernel_initializer=tf.contrib.layers.variance_scaling_initializer(factor=1.0, mode='FAN_AVG', uniform=True))
bn_conv3 = tf.layers.batch_normalization(conv3, training=is_training, axis=(- 1))
pool3 = tf.layers.max_pooling2d(inputs=bn_conv3, pool_size=[2, 2], strides=[2, 2])
print(pool3.get_shape())
conv4 = tf.layers.conv2d(inputs=pool3, filters=NUMBER_FILTERS, kernel_size=[3, 3], padding='same', activation=tf.nn.elu, name='4CNN', kernel_initializer=tf.contrib.layers.variance_scaling_initializer(factor=1.0, mode='FAN_AVG', uniform=True))
bn_conv4 = tf.layers.batch_normalization(conv4, training=is_training, axis=(- 1))
pool4 = tf.layers.max_pooling2d(inputs=bn_conv4, pool_size=[2, 2], strides=[2, 2])
print(pool4.get_shape())
conv5 = tf.layers.conv2d(inputs=pool4, filters=NUMBER_FILTERS, kernel_size=[3, 3], padding='same', activation=tf.nn.elu, name='5CNN', kernel_initializer=tf.contrib.layers.variance_scaling_initializer(factor=1.0, mode='FAN_AVG', uniform=True))
bn_conv5 = tf.layers.batch_normalization(conv5, training=is_training, axis=(- 1))
pool5 = tf.layers.max_pooling2d(inputs=bn_conv5, pool_size=[2, 2], strides=[2, 2])
print(pool5.get_shape())
flat = tf.layers.flatten(pool5)
do = tf.layers.dropout(flat, rate=0.5, training=is_training)
print(do.get_shape())
output = tf.layers.dense(inputs=do, activation=None, units=output_filters, kernel_initializer=tf.contrib.layers.variance_scaling_initializer(factor=1.0, mode='FAN_AVG', uniform=True))
config['embedding_size'] = output.get_shape().as_list()[1]
return [output, config]
|
def log_learn(x):
with tf.variable_scope('log_learn'):
ta = tf.Variable(tf.constant(7, dtype=tf.float32), name='ta', trainable=True)
ba = tf.Variable(tf.constant(1, dtype=tf.float32), name='ba', trainable=True)
alpha = tf.exp(ta, name='alpha')
beta = tf.log((1 + tf.exp(ba)), name='beta')
return tf.log((tf.scalar_mul(alpha, x) + beta))
|
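# Editor's note: log_learn computes log(alpha * x + beta) with alpha = exp(ta) and
# beta = softplus(ba) = log(1 + exp(ba)), so both parameters stay strictly positive
# for any real ta, ba and the log never receives a non-positive input for x >= 0.
# A NumPy check of the same formula at the initial values ta=7, ba=1:
import numpy as np

ta, ba = 7.0, 1.0
alpha = np.exp(ta)           # ~1096.63
beta = np.log1p(np.exp(ba))  # softplus(1) ~1.3133
x = np.array([0.0, 0.001, 0.1, 1.0])
print(np.log(alpha * x + beta))  # learned log-compression of the input magnitudes
|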
def model_number(x, is_training, config):
if (config['model_number'] == 0):
print('\nMODEL: SB-CNN')
print('-----------------------------------\n')
return sb_cnn(x, is_training, config)
elif (config['model_number'] == 1):
print('\nMODEL: SB-CNN | BN input')
print('-----------------------------------\n')
return sb_cnn_bn(x, is_training, config)
elif (config['model_number'] == 2):
print('\nMODEL: Timbre | BN input')
print('-----------------------------------\n')
return timbre(x, is_training, config, num_filters=config['num_classes_dataset'])
elif (config['model_number'] == 3):
print('\nMODEL: VGG | BN input')
print('-----------------------------------\n')
return vgg(x, is_training, config, num_filters=32)
elif (config['model_number'] == 11):
print('\nMODEL: SB-CNN -> Justin | BN input | LOG learn')
print('-----------------------------------\n')
return sb_cnn_bn(log_learn(x), is_training, config)
elif (config['model_number'] == 12):
print('\nMODEL: Timbre | MP -> direct | BN input | LOG learn')
print('-----------------------------------\n')
return timbre(log_learn(x), is_training, config, num_filters=config['num_classes_dataset'])
elif (config['model_number'] == 13):
print('\nMODEL: VGG | BN input | LOG learn | 32 filters')
print('-----------------------------------\n')
return vgg(log_learn(x), is_training, config, num_filters=32)
elif (config['model_number'] == 14):
print('\nMODEL: VGG | BN input | LOG learn | 128 filters')
print('-----------------------------------\n')
return vgg(log_learn(x), is_training, config, num_filters=128)
raise RuntimeError("ERROR: Model {} can't be found!".format(config['model_number']))
|
def vgg(x, is_training, config, num_filters):
with tf.variable_scope('vggish'):
print(('[SMALL FILTERS] Input: ' + str(x.get_shape())))
input_layer = tf.expand_dims(x, 3)
bn_input = tf.layers.batch_normalization(input_layer, training=is_training, axis=(- 1))
conv1 = tf.layers.conv2d(inputs=bn_input, filters=num_filters, kernel_size=[3, 3], padding='same', activation=tf.nn.elu, name='1CNN', kernel_initializer=tf.contrib.layers.variance_scaling_initializer(factor=1.0, mode='FAN_AVG', uniform=True))
bn_conv1 = tf.layers.batch_normalization(conv1, training=is_training, axis=(- 1))
pool1 = tf.layers.max_pooling2d(inputs=bn_conv1, pool_size=[2, 2], strides=[2, 2])
print(pool1.get_shape())
conv2 = tf.layers.conv2d(inputs=pool1, filters=num_filters, kernel_size=[3, 3], padding='same', activation=tf.nn.elu, name='2CNN', kernel_initializer=tf.contrib.layers.variance_scaling_initializer(factor=1.0, mode='FAN_AVG', uniform=True))
bn_conv2 = tf.layers.batch_normalization(conv2, training=is_training, axis=(- 1))
pool2 = tf.layers.max_pooling2d(inputs=bn_conv2, pool_size=[2, 2], strides=[2, 2])
print(pool2.get_shape())
conv3 = tf.layers.conv2d(inputs=pool2, filters=num_filters, kernel_size=[3, 3], padding='same', activation=tf.nn.elu, name='3CNN', kernel_initializer=tf.contrib.layers.variance_scaling_initializer(factor=1.0, mode='FAN_AVG', uniform=True))
bn_conv3 = tf.layers.batch_normalization(conv3, training=is_training, axis=(- 1))
pool3 = tf.layers.max_pooling2d(inputs=bn_conv3, pool_size=[2, 2], strides=[2, 2])
print(pool3.get_shape())
conv4 = tf.layers.conv2d(inputs=pool3, filters=num_filters, kernel_size=[3, 3], padding='same', activation=tf.nn.elu, name='4CNN', kernel_initializer=tf.contrib.layers.variance_scaling_initializer(factor=1.0, mode='FAN_AVG', uniform=True))
bn_conv4 = tf.layers.batch_normalization(conv4, training=is_training, axis=(- 1))
pool4 = tf.layers.max_pooling2d(inputs=bn_conv4, pool_size=[2, 2], strides=[2, 2])
print(pool4.get_shape())
conv5 = tf.layers.conv2d(inputs=pool4, filters=num_filters, kernel_size=[3, 3], padding='same', activation=tf.nn.elu, name='5CNN', kernel_initializer=tf.contrib.layers.variance_scaling_initializer(factor=1.0, mode='FAN_AVG', uniform=True))
bn_conv5 = tf.layers.batch_normalization(conv5, training=is_training, axis=(- 1))
pool5 = tf.layers.max_pooling2d(inputs=bn_conv5, pool_size=[2, 2], strides=[2, 2])
print(pool5.get_shape())
flat = tf.layers.flatten(pool5)
do = tf.layers.dropout(flat, rate=0.5, training=is_training)
print(do.get_shape())
output = tf.layers.dense(inputs=do, activation=None, units=config['num_classes_dataset'], kernel_initializer=tf.contrib.layers.variance_scaling_initializer(factor=1.0, mode='FAN_AVG', uniform=True))
return output
|
def timbre(x, is_training, config, num_filters):
with tf.variable_scope('timbre'):
print(('[CNN SINGLE] Input: ' + str(x.get_shape())))
input_layer = tf.expand_dims(x, 3)
bn_input = tf.layers.batch_normalization(input_layer, training=is_training, axis=(- 1))
conv1 = tf.layers.conv2d(inputs=bn_input, filters=num_filters, kernel_size=[7, 108], padding='valid', activation=None, name='1CNN', kernel_initializer=tf.contrib.layers.variance_scaling_initializer(factor=1.0, mode='FAN_AVG', uniform=True))
pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[conv1.shape[1], conv1.shape[2]], strides=[conv1.shape[1], conv1.shape[2]])
output = tf.layers.flatten(pool1)
print(conv1.get_shape())
print(conv1.shape[1])
print(conv1.shape[2])
print(pool1.get_shape())
print(output)
return output
|
def sb_cnn_core(input_, is_training, config):
print(input_.get_shape())
conv1 = tf.layers.conv2d(inputs=input_, filters=24, kernel_size=[5, 5], padding='valid', activation=tf.nn.relu, name='1CNN', kernel_initializer=tf.contrib.layers.variance_scaling_initializer(factor=1.0, mode='FAN_AVG', uniform=True))
print(conv1.get_shape())
pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[4, 2], strides=[4, 2])
print(pool1.get_shape())
conv2 = tf.layers.conv2d(inputs=pool1, filters=48, kernel_size=[5, 5], padding='valid', activation=tf.nn.relu, name='2CNN', kernel_initializer=tf.contrib.layers.variance_scaling_initializer(factor=1.0, mode='FAN_AVG', uniform=True))
print(conv2.get_shape())
pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[4, 2], strides=[4, 2])
print(pool2.get_shape())
conv3 = tf.layers.conv2d(inputs=pool2, filters=48, kernel_size=[5, 5], padding='valid', activation=tf.nn.relu, name='3CNN', kernel_initializer=tf.contrib.layers.variance_scaling_initializer(factor=1.0, mode='FAN_AVG', uniform=True))
print(conv3.get_shape())
flat_conv3 = tf.contrib.layers.flatten(conv3)
print(flat_conv3.get_shape())
do_pool5 = tf.layers.dropout(flat_conv3, rate=0.5, training=is_training)
print(do_pool5.get_shape())
dense_out = tf.layers.dense(inputs=do_pool5, activation=tf.nn.relu, units=64, kernel_initializer=tf.contrib.layers.variance_scaling_initializer(factor=1.0, mode='FAN_AVG', uniform=True))
do = tf.layers.dropout(dense_out, rate=0.5, training=is_training)
print(do.get_shape())
output = tf.layers.dense(inputs=do, activation=None, units=config['num_classes_dataset'], kernel_initializer=tf.contrib.layers.variance_scaling_initializer(factor=1.0, mode='FAN_AVG', uniform=True))
print(('output: ' + str(output.get_shape())))
return output
|
def sb_cnn(x, is_training, config):
print(('Input: ' + str(x.get_shape())))
input_layer = tf.expand_dims(x, 3)
return sb_cnn_core(input_layer, is_training, config)
|
def sb_cnn_bn(x, is_training, config):
print(('Input: ' + str(x.get_shape())))
input_layer = tf.expand_dims(x, 3)
print(input_layer.get_shape())
bn_input = tf.layers.batch_normalization(input_layer, training=is_training, axis=(- 1))
return sb_cnn_core(bn_input, is_training, config)
|
def compute_audio_repr(audio_file, audio_repr_file):
if (config['type'] == 'audioset'):
audio_repr = vggish_input.wavfile_to_examples(audio_file)
print(audio_repr.shape)
else:
(audio, sr) = librosa.load(audio_file, sr=config['resample_sr'])
if (config['type'] == 'waveform'):
audio_repr = audio
audio_repr = np.expand_dims(audio_repr, axis=1)
elif (config['spectrogram_type'] == 'cqt'):
# NOTE: librosa removed the real= argument in 0.6; newer versions return a complex CQT that needs np.abs() here.
audio_repr = librosa.cqt(audio, sr=sr, hop_length=config['hop'], n_bins=config['cqt_bins'], real=False).T
elif (config['spectrogram_type'] == 'mel'):
audio_repr = librosa.feature.melspectrogram(y=audio, sr=sr, hop_length=config['hop'], n_fft=config['n_fft'], n_mels=config['n_mels']).T
elif (config['spectrogram_type'] == 'stft'):
audio_repr = librosa.stft(y=audio, n_fft=config['n_fft']).T
length = audio_repr.shape[0]
with open(audio_repr_file, 'wb') as f:
pickle.dump(audio_repr, f)
return length
|
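# Editor's sketch: compute_audio_repr reads its settings from a module-level `config`
# dict. A hypothetical configuration for the mel branch, with the key names taken
# from the code above, would look like:
example_config = {
    'type': 'spectrogram',   # anything other than 'audioset' triggers librosa.load
    'spectrogram_type': 'mel',
    'resample_sr': 16000,    # target sample rate passed to librosa.load
    'hop': 256,              # hop length in samples
    'n_fft': 512,
    'n_mels': 96,
}
|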
def do_process(files, index):
try:
[id, audio_file, audio_repr_file] = files[index]
if (not os.path.exists(audio_repr_file[:(audio_repr_file.rfind('/') + 1)])):
path = Path(audio_repr_file[:(audio_repr_file.rfind('/') + 1)])
path.mkdir(parents=True, exist_ok=True)
length = compute_audio_repr(audio_file, audio_repr_file)
with open(((((config_file.DATA_FOLDER + config['audio_representation_folder']) + 'index_') + str(config['machine_i'])) + '.tsv'), 'a') as fw:
fw.write(('%s\t%s\t%s\n' % (id, audio_repr_file[len(config_file.DATA_FOLDER):], audio_file[len(config_file.DATA_FOLDER):])))
print((((str(index) + '/') + str(len(files))) + (' Computed: %s' % audio_file)))
except Exception as e:
with open(((((config_file.DATA_FOLDER + config['audio_representation_folder']) + 'errors') + str(config['machine_i'])) + '.txt'), 'a') as ferrors:
ferrors.write((audio_file + '\n'))
ferrors.write(str(e))
print('Error computing audio representation: ', audio_file)
print(str(e))
|
def process_files(files):
if DEBUG:
print('WARNING: Parallelization is not used!')
for index in range(0, len(files)):
do_process(files, index)
else:
Parallel(n_jobs=config['num_processing_units'])((delayed(do_process)(files, index) for index in range(0, len(files))))
|
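# Editor's note: joblib's Parallel spawns config['num_processing_units'] workers and
# distributes one do_process call per file index. A minimal self-contained example of
# the same pattern:
from joblib import Parallel, delayed

def square(i):
    return i * i

print(Parallel(n_jobs=2)(delayed(square)(i) for i in range(8)))  # [0, 1, 4, 9, 16, 25, 36, 49]
|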
def eval(config, ids, id2audio_repr_path, support_set, id2gt, id2label, tf_vars, vis_vars):
[id_string, save_latents, track_accuracies, printing, transfer_learning, model_folder] = vis_vars
if transfer_learning:
[sess, x, q, log_p_y, emb_q, emb_prototypes] = tf_vars
pack = [config, 'overlap_sampling', 1]
eval_streams = [pescador.Streamer(transfer_train.data_gen, id, id2audio_repr_path[id], id2gt[id], pack) for id in ids]
else:
[sess, x, q, is_train, log_p_y, emb_q, emb_prototypes] = tf_vars
pack = [config, 'overlap_sampling', 42]
eval_streams = [pescador.Streamer(sl_train.data_gen, id, id2audio_repr_path[id], id2gt[id], pack) for id in ids]
eval_mux_stream = pescador.ChainMux(eval_streams, mode='exhaustive')
eval_batch_streamer = pescador.Streamer(pescador.buffer_stream, eval_mux_stream, buffer_size=config['test_batch_size'], partial=True)
first_eval = True
count = 0
for eval_batch in eval_batch_streamer:
if transfer_learning:
[probabilities, embeddings, prototypes] = sess.run([log_p_y, emb_q, emb_prototypes], feed_dict={x: support_set, q: np.expand_dims(eval_batch['X'], axis=(- 1))})
else:
[probabilities, embeddings, prototypes] = sess.run([log_p_y, emb_q, emb_prototypes], feed_dict={x: support_set, q: np.expand_dims(eval_batch['X'], axis=(- 1)), is_train: False})
if first_eval:
first_eval = False
pred_array = probabilities
id_array = eval_batch['ID']
if save_latents:
embed_array = embeddings
gt_array = eval_batch['Y']
else:
count = (count + 1)
pred_array = np.concatenate((pred_array, probabilities), axis=0)
id_array = np.append(id_array, eval_batch['ID'])
if save_latents:
embed_array = np.concatenate((embed_array, embeddings), axis=0)
gt_array = np.concatenate((gt_array, eval_batch['Y']), axis=0)
epoch_acc = shared.accuracy_with_aggergated_predictions(pred_array, id_array, ids, id2label)
if printing:
print(((id_string + ' Number of audios: ') + str(len(ids))))
print(((id_string + ' Accuracy: ') + str(epoch_acc)))
print(((id_string + ' Prototypes: ') + str(prototypes.shape)))
if track_accuracies:
fac = open((model_folder + 'epoch_accuracies.tsv'), 'a')
fac.write((str(epoch_acc) + '\n'))
fac.close()
if save_latents:
print(((id_string + ' Embed_array: ') + str(embed_array.shape)))
print(((id_string + ' GT: ') + str(gt_array.shape)))
np.savez((((model_folder + 'embeddings_') + id_string) + '.npz'), embed_array)
np.savez((model_folder + 'prototypes.npz'), prototypes)
np.savez((((model_folder + 'gt_') + id_string) + '.npz'), gt_array)
print('Storing latents for visualization...')
print('\nPrototypes: ')
print(prototypes)
return epoch_acc
|
def fetch_data(classes_vector, label2selectedIDs, id2audio_repr_path, id2gt, config, transfer_learning=False):
set_dic = {}
gt_dic = {}
id_dic = {}
minimum_number_of_patches = np.inf
total_number_of_patches = 0
for c in classes_vector:
preprocess_batch_size = np.min([len(label2selectedIDs[c]), config['preprocess_batch_size']])
print(('Batch size: ' + str(preprocess_batch_size)))
print(((('IDs used for computing the category ' + str(c)) + ' prototype: ') + str(label2selectedIDs[c])))
pack = [config, config['train_sampling'], config['param_train_sampling']]
if transfer_learning:
streams = [pescador.Streamer(transfer_train.data_gen, id, id2audio_repr_path[id], id2gt[id], pack) for id in label2selectedIDs[c]]
else:
streams = [pescador.Streamer(sl_train.data_gen, id, id2audio_repr_path[id], id2gt[id], pack) for id in label2selectedIDs[c]]
mux_stream = pescador.ChainMux(streams, mode='exhaustive')
batch_streamer = pescador.Streamer(pescador.buffer_stream, mux_stream, buffer_size=preprocess_batch_size, partial=True)
first = True
gt = []
for batch in batch_streamer:
if first:
class_set = batch['X']
class_gt = batch['Y']
class_id = batch['ID']
first = False
else:
class_set = np.concatenate((class_set, batch['X']), axis=0)
class_gt = np.concatenate((class_gt, batch['Y']), axis=0)
class_id = np.concatenate((class_id, batch['ID']), axis=0)
print(class_set.shape)
print(class_gt.shape)
print(class_id.shape)
set_dic[c] = class_set
gt_dic[c] = class_gt
id_dic[c] = class_id
minimum_number_of_patches = min(minimum_number_of_patches, class_set.shape[0])
total_number_of_patches += class_set.shape[0]
return [set_dic, gt_dic, id_dic, minimum_number_of_patches, total_number_of_patches]
|
def compute_mean_std(index_file, percentage_index_file):
index_path = ((config_file.DATA_FOLDER + config['audio_representation_folder']) + index_file)
fgt = open(index_path)
num_lines = sum((1 for line in open(index_path)))
tmp = np.array([])
count = 0
for line in fgt.readlines():
(id, audio_repr_path, audio_path) = line.strip().split('\t')
with open((config_file.DATA_FOLDER + audio_repr_path), 'rb') as f:
audio_rep = pickle.load(f)
print(np.max(audio_rep))
audio_rep = shared.pre_processing(audio_rep, N_FRAMES, PAD_SHORT, PRE_PROCESSING, AUDIO_REP_TYPE, normalize_mean=None, normalize_std=None)
print(np.max(audio_rep))
if (count == 0):
tmp = audio_rep
else:
tmp = np.concatenate((tmp, audio_rep), axis=0)
print(tmp.shape)
print(((str(count) + '/') + str(num_lines)))
count = (count + 1)
if (count > (num_lines * percentage_index_file)):
break
print('Formatting data for computing mean - std!')
data_sample = tmp.flatten()
print('Computing mean:')
mean = np.mean(data_sample)
print(mean)
print('Computing std:')
std = np.std(data_sample)
print(std)
return (mean, std)
|
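# Editor's sketch: compute_mean_std concatenates every representation into memory
# before calling np.mean / np.std. For large indexes, merging per-file statistics
# (Chan's parallel variance formula) gives the same result in O(1) memory:
import numpy as np

def streaming_mean_std(arrays):
    # arrays: iterable of numpy arrays; statistics are over all of their elements.
    count, mean, m2 = 0, 0.0, 0.0
    for a in arrays:
        a = a.astype(np.float64).ravel()
        (n_b, mean_b) = (a.size, a.mean())
        m2_b = ((a - mean_b) ** 2).sum()
        delta = mean_b - mean
        total = count + n_b
        mean += delta * n_b / total
        m2 += m2_b + (delta ** 2) * count * n_b / total
        count = total
    return (mean, np.sqrt(m2 / count))  # population std, matching np.std's default
|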
def eval(config, ids, id2audio_repr_path, support_set, id2gt, id2label, tf_vars, vis_vars):
[id_string, save_latents, track_accuracies, printing, transfer_learning, model_folder] = vis_vars
if transfer_learning:
[sess, x, q, log_p_y, emb_q, emb_prototypes] = tf_vars
pack = [config, 'overlap_sampling', 1]
eval_streams = [pescador.Streamer(transfer_train.data_gen, id, id2audio_repr_path[id], id2gt[id], pack) for id in ids]
else:
[sess, x, q, is_train, log_p_y, emb_q, emb_prototypes] = tf_vars
pack = [config, 'overlap_sampling', 42]
eval_streams = [pescador.Streamer(sl_train.data_gen, id, id2audio_repr_path[id], id2gt[id], pack) for id in ids]
eval_mux_stream = pescador.ChainMux(eval_streams, mode='exhaustive')
eval_batch_streamer = pescador.Streamer(pescador.buffer_stream, eval_mux_stream, buffer_size=config['test_batch_size'], partial=True)
first_eval = True
count = 0
for eval_batch in eval_batch_streamer:
if transfer_learning:
[probabilities, embeddings, prototypes] = sess.run([log_p_y, emb_q, emb_prototypes], feed_dict={x: support_set, q: np.expand_dims(eval_batch['X'], axis=(- 1))})
else:
[probabilities, embeddings, prototypes] = sess.run([log_p_y, emb_q, emb_prototypes], feed_dict={x: support_set, q: np.expand_dims(eval_batch['X'], axis=(- 1)), is_train: False})
if first_eval:
first_eval = False
pred_array = probabilities
id_array = eval_batch['ID']
if save_latents:
embed_array = embeddings
gt_array = eval_batch['Y']
else:
count = (count + 1)
pred_array = np.concatenate((pred_array, probabilities), axis=0)
id_array = np.append(id_array, eval_batch['ID'])
if save_latents:
embed_array = np.concatenate((embed_array, embeddings), axis=0)
gt_array = np.concatenate((gt_array, eval_batch['Y']), axis=0)
epoch_acc = shared.accuracy_with_aggergated_predictions(pred_array, id_array, ids, id2label)
if printing:
print(((id_string + ' Number of audios: ') + str(len(ids))))
print(((id_string + ' Accuracy: ') + str(epoch_acc)))
print(((id_string + ' Prototypes: ') + str(prototypes.shape)))
if track_accuracies:
fac = open(((model_folder + id_string) + 'epoch_accuracies.tsv'), 'a')
fac.write((str(epoch_acc) + '\n'))
fac.close()
if save_latents:
print(((id_string + ' Embed_array: ') + str(embed_array.shape)))
print(((id_string + ' GT: ') + str(gt_array.shape)))
np.savez((((model_folder + 'embeddings_') + id_string) + '.npz'), embed_array)
np.savez((model_folder + 'prototypes.npz'), prototypes)
np.savez((((model_folder + 'gt_') + id_string) + '.npz'), gt_array)
print('Storing latents for visualization...')
print('\nPrototypes: ')
print(prototypes)
return epoch_acc
|
def euclidean_distance(a, b):
# Pairwise distance between every row of a (N, D) and every row of b (M, D).
# Note: reduce_mean yields the mean squared difference, i.e. the squared euclidean
# distance scaled by 1/D; the relative ordering of distances is unchanged.
(N, D) = (tf.shape(a)[0], tf.shape(a)[1])
M = tf.shape(b)[0]
a = tf.tile(tf.expand_dims(a, axis=1), (1, M, 1))
b = tf.tile(tf.expand_dims(b, axis=0), (N, 1, 1))
return tf.reduce_mean(tf.square((a - b)), axis=2)
|
def cosine_distance(a, b):
# 1 - cosine similarity between every row of a and every row of b, shape (N, M).
norm_a = tf.nn.l2_normalize(a, axis=1)
norm_b = tf.nn.l2_normalize(b, axis=1)
prod = tf.matmul(norm_a, norm_b, adjoint_b=True)
return (1 - prod)
|
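# Editor's sketch: a NumPy reference for the two pairwise distances above, useful as
# a sanity check of the broadcasting (a: (N, D), b: (M, D) -> result (N, M)):
import numpy as np

def euclidean_distance_np(a, b):
    diff = a[:, None, :] - b[None, :, :]
    return np.mean(diff ** 2, axis=2)  # mean squared difference, as in the TF version

def cosine_distance_np(a, b):
    na = a / np.linalg.norm(a, axis=1, keepdims=True)
    nb = b / np.linalg.norm(b, axis=1, keepdims=True)
    return 1 - na @ nb.T

a = np.array([[1.0, 0.0], [0.0, 1.0]])
b = np.array([[1.0, 0.0]])
print(euclidean_distance_np(a, b))  # [[0.], [1.]]
print(cosine_distance_np(a, b))     # [[0.], [1.]]
|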
def get_epoch_time():
# Use UTC so the result is a true Unix timestamp, independent of the local timezone.
return int((datetime.utcnow() - datetime(1970, 1, 1)).total_seconds())
|
def label2onehot_exp(label, experiment_classes):
# One-hot over the experiment's class list; assumes `label` occurs exactly once.
onehot = np.zeros(len(experiment_classes))
position = int(np.squeeze(np.where((label == np.array(experiment_classes)))))
onehot[position] = 1
return onehot
|
def label2onehot(label, length):
onehot = np.zeros(length)
onehot[label] = 1
return onehot
|
def onehot2label(gt):
# np.int was removed in recent NumPy; argmax returns the index of the maximum directly.
label = int(np.argmax(np.array(gt)))
return label
|
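# Usage sketch (editor's addition): label2onehot and onehot2label are inverses for a
# single-label target:
assert onehot2label(label2onehot(2, 5)) == 2  # [0., 0., 1., 0., 0.] -> 2
|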
def count_params(trainable_variables):
return np.sum([np.prod(v.get_shape().as_list()) for v in trainable_variables])
|
def load_id2label(gt_file):
ids = []
id2label = dict()
with open(gt_file) as fgt:
for line in fgt.readlines():
(id, gt) = line.strip().split('\t')
id2label[id] = onehot2label(eval(gt))
ids.append(id)
return (ids, id2label)
|
def load_id2gt(gt_file):
ids = []
id2gt = dict()
with open(gt_file) as fgt:
for line in fgt.readlines():
(id, gt) = line.strip().split('\t')
id2gt[id] = eval(gt)
ids.append(id)
return (ids, id2gt)
|
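# Editor's note: both loaders above expect a tab-separated file with one example per
# line, where the second column is a Python-literal ground-truth vector, e.g.:
#   track_00017	[0, 0, 1, 0, 0]
# eval() turns that column back into a list; for untrusted files, ast.literal_eval
# would be the safer drop-in replacement.
|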
def load_label2ids(id2label):
label2ids = {}
for (id, label) in id2label.items():
if (label in label2ids):
label2ids[label].append(id)
else:
label2ids[label] = [id]
return label2ids
|
def load_id2audiopath(index_file):
id2audiopath = dict()
with open(index_file) as f:
for line in f.readlines():
(id, path) = line.strip().split('\t')
id2audiopath[id] = path
return id2audiopath
|
def load_id2audioReprPath(index_file):
audioReprPaths = []
id2audioReprPath = dict()
with open(index_file) as fspec:
for line in fspec.readlines():
(id, path, _) = line.strip().split('\t')
id2audioReprPath[id] = path
audioReprPaths.append(path)
return (audioReprPaths, id2audioReprPath)
|
def load_id2length(index_file):
id2length = dict()
with open(index_file) as f:
for line in f.readlines():
(id, length) = line.strip().split('\t')
id2length[id] = int(length)
return id2length
|
def accuracy_with_aggergated_predictions(pred_array, id_array, ids, id2label):
# Average the patch-level predictions of each audio ID, then take the argmax as the
# track-level prediction. (The typo in the name is kept: call sites reference it.)
y_pred = []
y_true = []
for id in ids:
try:
avg = np.mean(pred_array[np.where((id_array == id))], axis=0)
y_pred.append(int(np.argmax(avg)))
y_true.append(int(id2label[id]))
except Exception:
print('No predictions found for id:', id)
return accuracy_score(y_true, y_pred)
|
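# Usage sketch (editor's addition, assuming numpy and sklearn's accuracy_score are in
# scope as in the function above): three patches per track are averaged before the
# track-level argmax is compared against id2label:
import numpy as np

pred_array = np.array([[0.9, 0.1], [0.8, 0.2], [0.7, 0.3],   # patches of track 'a'
                       [0.2, 0.8], [0.4, 0.6], [0.3, 0.7]])  # patches of track 'b'
id_array = np.array(['a', 'a', 'a', 'b', 'b', 'b'])
print(accuracy_with_aggergated_predictions(pred_array, id_array, ['a', 'b'], {'a': 0, 'b': 1}))  # 1.0
|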
def few_shot_data_preparation(all_ids_train, all_ids_test, classes_vector, label2ids_train, label2ids_test, config):
if (config['n_shot'] == np.inf):
ids_train = all_ids_train
ids_test = all_ids_test
print('Train IDs: ALL!')
label2selectedIDs = {}
for c in classes_vector:
label2selectedIDs[c] = label2ids_train[c]
else:
first = True
label2selectedIDs = {}
for c in classes_vector:
# The enclosing else-branch already guarantees n_shot != inf, so sample directly.
ids_class_train = random.sample(label2ids_train[c], config['n_shot'])
if first:
ids_train = ids_class_train
ids_test = label2ids_test[c]
first = False
else:
ids_train = np.concatenate((ids_train, ids_class_train), axis=0)
ids_test = np.concatenate((ids_test, label2ids_test[c]), axis=0)
label2selectedIDs[c] = ids_class_train
print((('\nTrain IDs: ' + str(ids_train)) + '\n'))
return [ids_train, ids_test, label2selectedIDs]
|
def audioset_model(input_signal, reuse=False):
slim = tf.contrib.slim
with slim.arg_scope([slim.conv2d, slim.fully_connected], weights_initializer=tf.truncated_normal_initializer(stddev=vggish_params.INIT_STDDEV), biases_initializer=tf.zeros_initializer(), activation_fn=tf.nn.relu, trainable=True), slim.arg_scope([slim.conv2d], kernel_size=[3, 3], stride=1, padding='SAME'), slim.arg_scope([slim.max_pool2d], kernel_size=[2, 2], stride=2, padding='SAME'), tf.variable_scope('vggish', reuse=reuse):
net = slim.conv2d(input_signal, 64, scope='conv1')
net = slim.max_pool2d(net, scope='pool1')
net = slim.conv2d(net, 128, scope='conv2')
net = slim.max_pool2d(net, scope='pool2')
net = slim.repeat(net, 2, slim.conv2d, 256, scope='conv3')
net = slim.max_pool2d(net, scope='pool3')
net = slim.repeat(net, 2, slim.conv2d, 512, scope='conv4')
net = slim.max_pool2d(net, scope='pool4')
net = slim.flatten(net)
net = slim.repeat(net, 2, slim.fully_connected, 4096, scope='fc1')
net = slim.fully_connected(net, vggish_params.EMBEDDING_SIZE, scope='fc2')
embeddings = tf.identity(net, name='embedding')
with tf.variable_scope('my_model', reuse=reuse):
return slim.fully_connected(embeddings, 10, activation_fn=None, scope='logits')
|