code
stringlengths
17
6.64M
def lazy_generate_modules(model, loader):
    """Run a single forward pass so lazily-constructed submodules get built.

    Pulls one minibatch from ``loader``, sanity-checks that both augmented
    views lie in [0, 1], moves them to GPU when ``args.cuda`` is set and
    feeds them through ``model`` under ``torch.no_grad()``.  Afterwards the
    polyak / EMA shadow parameters are initialized when ``args.polyak_ema > 0``.

    :param model: the nn.Module
    :param loader: the dataloader
    :returns: None
    :rtype: None

    """
    model.eval()  # NOTE(review): model is left in eval mode; callers appear to re-enable train mode later
    for (augmentation1, augmentation2, labels) in loader:
        with torch.no_grad():
            print('augmentation1 = {} / {} | augmentation2 = {} / {} | labels = {} / {}'.format(
                augmentation1.shape, augmentation1.dtype,
                augmentation2.shape, augmentation2.dtype,
                labels.shape, labels.dtype))
            (aug1_min, aug1_max) = (augmentation1.min(), augmentation1.max())
            (aug2_min, aug2_max) = (augmentation2.min(), augmentation2.max())
            print('aug1 in range [min: {}, max: {}] | aug2 in range [min: {}, max: {}]'.format(
                aug1_min, aug1_max, aug2_min, aug2_max))
            # Guard against un-normalized image inputs.
            if ((aug1_max > 1.0) or (aug1_min < 0)):
                raise ValueError('aug1 max > 1.0 or aug1 min < 0. You probably dont want this.')
            if ((aug2_max > 1.0) or (aug2_min < 0)):
                raise ValueError('aug2 max > 1.0 or aug2 min < 0. You probably dont want this.')
            augmentation1 = (augmentation1.cuda(non_blocking=True) if args.cuda else augmentation1)
            augmentation2 = (augmentation2.cuda(non_blocking=True) if args.cuda else augmentation2)
            _ = model(augmentation1, augmentation2)
            break  # a single minibatch is enough to materialize lazy modules
    if (args.polyak_ema > 0):
        layers.polyak_ema_parameters(model, args.polyak_ema)
def register_plots(loss, grapher, epoch, prefix='train'):
    """Registers line plots with grapher (rank-0 only, no-op otherwise).

    Recurses into nested dicts; any key containing 'mean' or 'scalar' is
    logged as a scalar named ``<prefix>_<key-minus-suffix>``.

    :param loss: the dict containing '*_mean' or '*_scalar' values
    :param grapher: the grapher object (may be None)
    :param epoch: the current epoch, used as the x-axis value
    :param prefix: prefix to append to the plot
    :returns: None
    :rtype: None

    """
    if ((args.distributed_rank == 0) and (grapher is not None)):
        for (k, v) in loss.items():
            if isinstance(v, dict):
                register_plots(loss[k], grapher, epoch, prefix=prefix)
            if (('mean' in k) or ('scalar' in k)):
                # strip the trailing '_mean' / '_scalar' suffix from the tag name
                key_name = '-'.join(k.split('_')[0:(- 1)])
                # tensors need .item(); plain floats pass through untouched
                value = (v.item() if (not isinstance(v, (float, np.float32, np.float64))) else v)
                grapher.add_scalar('{}_{}'.format(prefix, key_name), value, epoch)
def register_images(output_map, grapher, prefix='train'):
    """Registers images with grapher; overwrites the existing image to save space.

    Recurses into nested dicts; any key containing 'img' or 'imgs' is posted
    as a normalized image grid under ``<prefix>_<key-minus-suffix>``.

    :param output_map: the dict containing '*_img' or '*_imgs' as keys
    :param grapher: the grapher object (may be None)
    :param prefix: prefix to attach to images
    :returns: None
    :rtype: None

    """
    if ((args.distributed_rank == 0) and (grapher is not None)):
        for (k, v) in output_map.items():
            if isinstance(v, dict):
                register_images(output_map[k], grapher, prefix=prefix)
            if (('img' in k) or ('imgs' in k)):
                # strip the trailing '_img' / '_imgs' suffix from the tag name
                key_name = '-'.join(k.split('_')[0:(- 1)])
                img = torchvision.utils.make_grid(v, normalize=True, scale_each=True)
                # global_step=0 deliberately replaces the previous image
                grapher.add_image('{}_{}'.format(prefix, key_name), img.detach(), global_step=0)
def _extract_sum_scalars(v1, v2): 'Simple helper to sum values in a struct using dm_tree.' def chk(c): 'Helper to check if we have a primitive or tensor' return (not isinstance(c, (int, float, np.int32, np.int64, np.float32, np.float64))) v1_detached = (v1.detach() if chk(v1) else v1) v2_detached = (v2.detach() if chk(v2) else v2) return (v1_detached + v2_detached)
def execute_graph(epoch, model, loader, grapher, optimizer=None, prefix='test'):
    """Execute the graph; when 'train' is in the prefix the model runs the optimizer.

    Iterates the loader, computes the BYOL loss plus a linear-probe
    cross-entropy, accumulates detached per-minibatch sums, backprops /
    steps in train mode, then logs scalar plots and sample images.

    :param epoch: the current epoch number
    :param model: the torch model
    :param loader: the train or **TEST** loader
    :param grapher: the graph writing helper (eg: visdom / tf wrapper)
    :param optimizer: the optimizer (required iff training)
    :param prefix: 'train', 'test' or 'valid'
    :returns: the mean total loss for the epoch
    :rtype: float

    """
    start_time = time.time()
    is_eval = ('train' not in prefix)
    (model.eval() if is_eval else model.train())
    # eval <=> no optimizer; train <=> optimizer required
    assert ((optimizer is None) if is_eval else (optimizer is not None))
    (loss_map, num_samples) = ({}, 0)
    for (num_minibatches, (augmentation1, augmentation2, labels)) in enumerate(loader):
        augmentation1 = (augmentation1.cuda(non_blocking=True) if args.cuda else augmentation1)
        augmentation2 = (augmentation2.cuda(non_blocking=True) if args.cuda else augmentation2)
        labels = (labels.cuda(non_blocking=True) if args.cuda else labels)
        with (torch.no_grad() if is_eval else utils.dummy_context()):
            if (is_eval and (args.polyak_ema > 0)):
                # evaluate using the polyak-averaged (EMA) weights
                output_dict = layers.get_polyak_prediction(
                    model, pred_fn=functools.partial(model, augmentation1, augmentation2))
            else:
                output_dict = model(augmentation1, augmentation2)
            byol_loss = loss_function(online_prediction1=output_dict['online_prediction1'],
                                      online_prediction2=output_dict['online_prediction2'],
                                      target_projection1=output_dict['target_projection1'],
                                      target_projection2=output_dict['target_projection2'])
            # during training the linear probe sees both augmentations, hence doubled labels
            classifier_labels = (labels if is_eval else torch.cat([labels, labels], 0))
            classifier_loss = F.cross_entropy(input=output_dict['linear_preds'], target=classifier_labels)
            (acc1, acc5) = metrics.topk(output=output_dict['linear_preds'],
                                        target=classifier_labels, topk=(1, 5))
            loss_t = {'loss_mean': (byol_loss + classifier_loss),
                      'byol_loss_mean': byol_loss,
                      'linear_loss_mean': classifier_loss,
                      'top1_mean': acc1,
                      'top5_mean': acc5}
            # accumulate (detached) per-minibatch sums; averaged after the loop
            loss_map = (loss_t if (not loss_map)
                        else tree.map_structure(_extract_sum_scalars, loss_map, loss_t))
            num_samples += augmentation1.size(0)
        if (not is_eval):
            optimizer.zero_grad()
            if args.half:
                # Apex AMP loss scaling for fp16 training
                with amp.scale_loss(loss_t['loss_mean'], optimizer) as scaled_loss:
                    scaled_loss.backward()
            else:
                loss_t['loss_mean'].backward()
            if (args.clip > 0):
                nn.utils.clip_grad_value_(model.parameters(), args.clip)
            optimizer.step()
            if (args.polyak_ema > 0):
                # update the EMA shadow parameters after each optimizer step
                layers.polyak_ema_parameters(model, args.polyak_ema)
        del loss_t
        if args.debug_step:
            break  # single-minibatch debugging mode
    # convert accumulated sums into per-minibatch means
    loss_map = tree.map_structure((lambda v: (v / (num_minibatches + 1))), loss_map)
    to_log = '{}-{}[Epoch {}][{} samples][{:.2f} sec]:\t Loss: {:.4f}\tTop-1: {:.4f}\tTop-5: {:.4f}'
    print(to_log.format(prefix, args.distributed_rank, epoch, num_samples,
                        (time.time() - start_time),
                        loss_map['loss_mean'].item(),
                        loss_map['top1_mean'].item(),
                        loss_map['top5_mean'].item()))
    register_plots({**loss_map}, grapher, epoch=epoch, prefix=prefix)
    # post a small grid of the last minibatch's augmentations (downsized to <= 64px)
    num_images_to_post = min(64, augmentation1.shape[0])
    image_size_to_post = min(64, augmentation1.shape[(- 1)])
    image_map = {'augmentation1_imgs': F.interpolate(augmentation1[0:num_images_to_post],
                                                     size=(image_size_to_post, image_size_to_post)),
                 'augmentation2_imgs': F.interpolate(augmentation2[0:num_images_to_post],
                                                     size=(image_size_to_post, image_size_to_post))}
    register_images({**image_map}, grapher, prefix=prefix)
    if (grapher is not None):
        grapher.save()
    loss_val = loss_map['loss_mean'].detach().item()
    loss_map.clear()
    return loss_val
def train(epoch, model, optimizer, train_loader, grapher, prefix='train'):
    """Run one training epoch through :func:`execute_graph`.

    :param epoch: the current epoch
    :param model: the model
    :param optimizer: the optimizer stepped during the epoch
    :param train_loader: the train data-loader
    :param grapher: the grapher object
    :param prefix: kept for signature symmetry; execute_graph always
        receives prefix='train' from here
    :returns: mean loss scalar
    :rtype: float32

    """
    return execute_graph(epoch=epoch, model=model, loader=train_loader,
                         grapher=grapher, optimizer=optimizer, prefix='train')
def test(epoch, model, test_loader, grapher, prefix='test'):
    """Run one evaluation pass through :func:`execute_graph`.

    No optimizer is passed, which puts execute_graph in eval mode.

    :param epoch: the current epoch
    :param model: the model
    :param test_loader: the test data-loader
    :param grapher: the grapher object
    :param prefix: kept for signature symmetry; execute_graph always
        receives prefix='test' from here
    :returns: mean loss scalar
    :rtype: float32

    """
    return execute_graph(epoch=epoch, model=model, loader=test_loader,
                         grapher=grapher, prefix='test')
def init_multiprocessing_and_cuda(rank, args_from_spawn):
    """Sets the appropriate flags for multi-process (DDP) jobs.

    Pins the replica to one GPU, seeds all RNGs, initializes the NCCL
    process group for multi-replica runs (splitting the global batch size
    across replicas) and publishes the namespace as the module-level
    ``args`` global used throughout this file.

    :param rank: this replica's rank
    :param args_from_spawn: argparse namespace handed to the spawned process
    :returns: None
    :rtype: None
    """
    if args_from_spawn.multi_gpu_distributed:
        # pin this replica to a single visible GPU
        os.environ['CUDA_VISIBLE_DEVICES'] = str(rank)
    args_from_spawn.distributed_rank = rank
    args_from_spawn.cuda = ((not args_from_spawn.no_cuda) and torch.cuda.is_available())
    if args_from_spawn.cuda:
        # fixed input sizes -> let cudnn autotune kernels
        torch.backends.cudnn.benchmark = True
        print('Replica {} / {} using GPU: {}'.format(
            (rank + 1), args_from_spawn.num_replicas, torch.cuda.get_device_name(0)))
    if (args_from_spawn.seed is not None):
        print(('setting seed %d' % args_from_spawn.seed))
        np.random.seed(args_from_spawn.seed)
        torch.manual_seed(args_from_spawn.seed)
        if args_from_spawn.cuda:
            torch.cuda.manual_seed_all(args_from_spawn.seed)
    if (args_from_spawn.num_replicas > 1):
        torch.distributed.init_process_group(backend='nccl',
                                             init_method=os.environ['MASTER_ADDR'],
                                             world_size=args_from_spawn.num_replicas,
                                             rank=rank)
        print('Successfully created DDP process group!')
        # split the global batch size evenly across replicas
        args_from_spawn.batch_size = (args_from_spawn.batch_size // args_from_spawn.num_replicas)
    # publish as the module-level `args` read by the rest of this file
    global args
    args = args_from_spawn
def run(rank, args):
    """Main entry-point into the program.

    Builds loader / model / grapher, the optimizer and schedule, then runs
    the train + test loop with checkpointing and early stopping via
    ``layers.ModelSaver``.

    :param rank: current device rank
    :param args: argparse namespace (shadows the module-level global of the same name)
    :returns: None
    :rtype: None

    """
    init_multiprocessing_and_cuda(rank, args)
    (loader, model, grapher) = build_loader_model_grapher(args)
    print(pprint.PrettyPrinter(indent=4).pformat(vars(args)))
    (optimizer, scheduler) = build_optimizer(model)
    if args.half:
        # Apex AMP mixed-precision initialization
        (model, optimizer) = amp.initialize(model, optimizer, opt_level='O2')
    model = layers.append_save_and_load_fns(model, optimizer, scheduler, grapher, args)
    # lower test loss is better; ignore the first 10% of epochs for early stop
    saver = layers.ModelSaver(model, early_stop=args.early_stop, rank=args.distributed_rank,
                              burn_in_interval=int((0.1 * args.epochs)),
                              larger_is_better=False, max_early_stop_steps=10)
    restore_dict = saver.restore()
    init_epoch = restore_dict['epoch']
    for epoch in range(init_epoch, (args.epochs + 1)):
        train(epoch, model, optimizer, loader.train_loader, grapher)
        test_loss = test(epoch, model, loader.test_loader, grapher)
        loader.set_all_epochs(epoch)  # keeps distributed samplers shuffling per-epoch
        scheduler.step()
        register_plots({'learning_rate_scalar': optimizer.param_groups[0]['lr']}, grapher, epoch)
        if saver(test_loss):
            # early stop triggered: restore the best model and re-evaluate it
            saver.restore()
            test_loss = test(epoch, model, loader.test_loader, grapher)
            break
        if ((epoch == 2) and (args.distributed_rank == 0)):
            # post the (now fully-populated) config once, shortly after startup
            config_to_post = vars(args)
            slurm_id = utils.get_slurm_id()
            if (slurm_id is not None):
                config_to_post['slurm_job_id'] = slurm_id
            grapher.add_text('config', pprint.PrettyPrinter(indent=4).pformat(config_to_post), 0)
    if (grapher is not None):
        grapher.close()
def regression_loss(x, y):
    """BYOL regression loss: ``-2 * cosine_similarity(x, y)`` per sample.

    Computes ``-2 * <x, y> / (||x|| * ||y||)`` with the norms taken over the
    feature (last) dimension, so each row of a batch is normalized
    independently.  This matches the official BYOL objective up to the
    additive constant 2, since ``||x_hat - y_hat||^2 = 2 - 2 cos_sim``.

    :param x: online-network predictions, shape (..., feature_dim)
    :param y: target-network projections, shape (..., feature_dim)
    :returns: per-sample loss of shape (...)
    :rtype: torch.Tensor
    """
    # BUGFIX: the previous version used x.norm() / y.norm(), which reduces
    # over the WHOLE tensor; for batched inputs the cosine denominator must
    # be the per-row norm along the feature dimension.
    norm_x = x.norm(dim=(- 1))
    norm_y = y.norm(dim=(- 1))
    return (((- 2) * torch.sum((x * y), dim=(- 1))) / (norm_x * norm_y))
def loss_function(online_prediction1, online_prediction2, target_projection1, target_projection2):
    """BYOL symmetric loss.

    :param online_prediction1: the output of the final MLP of the online model for augmentation 1
    :param online_prediction2: the output of the final MLP of the online model for augmentation 2
    :param target_projection1: the output of the second-to-last MLP of the target model for augmentation 1
    :param target_projection2: the output of the second-to-last MLP of the target model for augmentation 2
    :returns: scalar loss
    :rtype: float32

    """
    # Stop gradients through the target network (its weights are updated by EMA,
    # not by backprop).
    target1 = target_projection1.detach()
    target2 = target_projection2.detach()
    # Symmetrized: each online prediction regresses onto the *other* view's target.
    symmetric_loss = (regression_loss(online_prediction1, target2)
                      + regression_loss(online_prediction2, target1))
    return torch.mean(symmetric_loss)
class LARS(Optimizer):
    """Implements 'LARS (Layer-wise Adaptive Rate Scaling)'__ as an
    :class:`~torch.optim.Optimizer` wrapper.

    __ : https://arxiv.org/abs/1708.03888

    Wraps an arbitrary optimizer like :class:`torch.optim.SGD` to use LARS. If
    you want the same performance obtained with small-batch training when
    you use large-batch training, LARS will be helpful::

    Args:
        optimizer (Optimizer):
            optimizer to wrap
        eps (float, optional):
            epsilon to help with numerical stability while calculating the
            adaptive learning rate
        trust_coef (float, optional):
            trust coefficient for calculating the adaptive learning rate

    Example::
        base_optimizer = optim.SGD(model.parameters(), lr=0.1)
        optimizer = LARS(optimizer=base_optimizer)

        output = model(input)
        loss = loss_fn(output, target)
        loss.backward()

        optimizer.step()

    """

    def __init__(self, optimizer, eps=1e-08, trust_coef=0.001):
        if (eps < 0.0):
            raise ValueError(('invalid epsilon value: , %f' % eps))
        if (trust_coef < 0.0):
            raise ValueError(('invalid trust coefficient: %f' % trust_coef))
        self.optim = optimizer        # the wrapped base optimizer
        self.eps = eps                # numerical-stability term in the LR ratio
        self.trust_coef = trust_coef  # eta in the LARS paper

    def __getstate__(self):
        # Pickle as (wrapped optimizer, LARS hyper-params).
        lars_dict = {}
        lars_dict['eps'] = self.eps
        lars_dict['trust_coef'] = self.trust_coef
        return (self.optim, lars_dict)

    def __setstate__(self, state):
        (self.optim, lars_dict) = state
        self.eps = lars_dict['eps']
        self.trust_coef = lars_dict['trust_coef']

    def __repr__(self):
        return ('%s(%r)' % (self.__class__.__name__, self.optim))

    @property
    def param_groups(self):
        # Delegate to the wrapped optimizer.
        return self.optim.param_groups

    @property
    def state(self):
        return self.optim.state

    def state_dict(self):
        return self.optim.state_dict()

    def load_state_dict(self, state_dict):
        self.optim.load_state_dict(state_dict)

    def zero_grad(self):
        self.optim.zero_grad()

    def add_param_group(self, param_group):
        self.optim.add_param_group(param_group)

    def apply_adaptive_lrs(self):
        """Fold weight decay into the grads and scale them by the LARS trust ratio.

        Weight decay is applied here (rather than by the wrapped optimizer) so
        it is included in the gradient norm used for the trust ratio.
        """
        with torch.no_grad():
            for group in self.optim.param_groups:
                weight_decay = group['weight_decay']
                ignore = group.get('ignore', None)
                for p in group['params']:
                    if (p.grad is None):
                        continue
                    if (weight_decay > 0):
                        # grad += weight_decay * param  (decoupled from the base optimizer)
                        p.grad = p.grad.add(p, alpha=weight_decay)
                    # NOTE(review): the trust-ratio scaling only runs for groups that
                    # explicitly set 'ignore' to a falsy value; groups without the key
                    # are skipped entirely -- confirm this is the intended contract
                    # with layers.add_weight_decay.
                    if ((ignore is not None) and (not ignore)):
                        param_norm = p.norm()
                        grad_norm = p.grad.norm()
                        adaptive_lr = 1.0
                        if ((param_norm > 0) and (grad_norm > 0)):
                            adaptive_lr = ((self.trust_coef * param_norm) / (grad_norm + self.eps))
                        p.grad = p.grad.mul(adaptive_lr)

    def step(self, *args, **kwargs):
        """Apply LARS scaling, then step the wrapped optimizer.

        Weight decay is zeroed on the wrapped optimizer during the step (it was
        already folded into the grads by apply_adaptive_lrs) and restored after.
        """
        self.apply_adaptive_lrs()
        weight_decay_orig = [group['weight_decay'] for group in self.optim.param_groups]
        for group in self.optim.param_groups:
            group['weight_decay'] = 0
        loss = self.optim.step(*args, **kwargs)
        for (group, wo) in zip(self.optim.param_groups, weight_decay_orig):
            group['weight_decay'] = wo
        return loss
class Scheduler(object):
    """Composite LR scheduler: drives a warmup schedule first, then the main one.

    The warmup scheduler is considered active until its ``complete`` flag is
    set; all calls are forwarded to whichever scheduler is currently active
    (except state_dict/load_state_dict, which always cover both).
    """

    def __init__(self, normal_schededuler, warmup_scheduler=None):
        # NOTE: first parameter name kept as-is (typo included) so existing
        # keyword callers keep working.
        self.warmup = warmup_scheduler
        self.sched = normal_schededuler

    def _warming_up(self):
        # True while a warmup scheduler exists and has not yet finished.
        return (self.warmup is not None) and (not self.warmup.complete)

    def get_last_lr(self):
        """Return last computed learning rate by the currently active scheduler."""
        active = self.warmup if self._warming_up() else self.sched
        return active.get_last_lr()

    def state_dict(self):
        """Returns the state of each scheduler as a :class:`dict`."""
        warmup_state = {} if self.warmup is None else self.warmup.state_dict()
        return {'warmup': warmup_state, 'sched': self.sched.state_dict()}

    def load_state_dict(self, state_dict):
        """Loads the schedulers state.

        Arguments:
            state_dict (dict): scheduler state. Should be an object returned
                from a call to :meth:`state_dict`.
        """
        if self.warmup:
            self.warmup.load_state_dict(state_dict['warmup'])
        self.sched.load_state_dict(state_dict['sched'])

    def step(self, *args, **kwargs):
        active = self.warmup if self._warming_up() else self.sched
        return active.step(*args, **kwargs)
class LinearWarmup(LambdaLR):
    """Linear warmup, then a constant multiplier of 1.

    The lr multiplier rises linearly from 0 to 1 over ``warmup_steps``
    scheduler steps and stays at 1 afterwards.  Once past warmup the
    ``complete`` flag flips to True (read by :class:`Scheduler` to hand
    control over to the main schedule).

    From https://bit.ly/39o2W1f
    """

    def __init__(self, optimizer, warmup_steps, last_epoch=(- 1)):
        self.warmup_steps = warmup_steps
        self.complete = False
        super(LinearWarmup, self).__init__(optimizer, self.lr_lambda, last_epoch=last_epoch)

    def lr_lambda(self, step):
        # Guard-clause form: past warmup we pin the multiplier at 1 and
        # (as a side effect) mark the warmup as finished.
        if (step >= self.warmup_steps):
            self.complete = True
            return 1.0
        return (float(step) / float(max(1.0, self.warmup_steps)))
class SimCLR(nn.Module):
    """Simple SimCLR implementation: backbone trunk + projection head + linear probe."""

    def __init__(self, base_network_output_size, nce_logits_output_size, classifier_output_size):
        """SimCLR model.

        :param base_network_output_size: output-size of the base (e.g. resnet50) embedding
        :param nce_logits_output_size: output-size to use for NCE loss
        :param classifier_output_size: number of classes in classifier problem
        :returns: SimCLR object
        :rtype: nn.Module

        """
        super(SimCLR, self).__init__()
        self.base_network_output_size = base_network_output_size
        model_fn = models.__dict__[args.arch]
        # drop the final FC layer of the torchvision model, keep pooled features
        self.base_network = nn.Sequential(*list(model_fn(pretrained=False).children())[:(- 1)])
        # projection head: Linear -> BN -> ReLU -> Linear -> BN
        self.head = nn.Sequential(nn.Linear(base_network_output_size, args.head_latent_size),
                                  nn.BatchNorm1d(args.head_latent_size),
                                  nn.ReLU(),
                                  nn.Linear(args.head_latent_size, nce_logits_output_size),
                                  nn.BatchNorm1d(nce_logits_output_size))
        # linear probe trained on detached representations (does not shape the trunk)
        self.linear_classifier = nn.Linear(base_network_output_size, classifier_output_size)

    def forward(self, augmentation1, augmentation2):
        """Returns the NCE logits for both views and the linear-probe predictions."""
        representation1 = self.base_network(augmentation1).view((- 1), self.base_network_output_size)
        representation2 = self.base_network(augmentation2).view((- 1), self.base_network_output_size)
        logits_for_nce1 = self.head(representation1)
        logits_for_nce2 = self.head(representation2)
        # in train mode the probe sees both views stacked; in eval only view 1
        repr_to_classifier = (torch.cat([representation1, representation2], 0)
                              if self.training else representation1)
        # detach so classifier gradients do not flow into the trunk
        linear_preds = self.linear_classifier(repr_to_classifier.clone().detach())
        return (logits_for_nce1, logits_for_nce2, linear_preds)
def build_lr_schedule(optimizer, last_epoch=(- 1)):
    """Adds a lr scheduler to the optimizer.

    Supports 'fixed' (constant multiplier) and 'cosine' annealing, optionally
    preceded by a linear warmup when ``args.warmup > 0``.

    :param optimizer: nn.Optimizer
    :param last_epoch: epoch to resume the schedule from (-1 starts fresh)
    :returns: scheduler
    :rtype: optim.lr_scheduler

    """
    if (args.lr_update_schedule == 'fixed'):
        sched = optim.lr_scheduler.LambdaLR(optimizer, (lambda epoch: 1.0), last_epoch=last_epoch)
    elif (args.lr_update_schedule == 'cosine'):
        # anneal only over the post-warmup epochs
        total_epochs = (args.epochs - args.warmup)
        sched = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=total_epochs, last_epoch=last_epoch)
    else:
        raise NotImplementedError('lr scheduler {} not implemented'.format(args.lr_update_schedule))
    if (args.warmup > 0):
        # wrap with linear warmup; the composite hands over to `sched` when done
        warmup = scheduler.LinearWarmup(optimizer, warmup_steps=args.warmup, last_epoch=last_epoch)
        sched = scheduler.Scheduler(sched, warmup)
    return sched
def build_optimizer(model, last_epoch=(- 1)):
    """Helper to build the optimizer (optionally LARS-wrapped) plus lr schedule.

    Optimizer choice comes from ``args.optimizer``; a 'lars_' prefix (any name
    containing 'lars') wraps the base optimizer in LARS.  SGD/momentum use the
    linear lr scaling rule (lr * global_batch / 256).

    :param model: the model to optimize
    :param last_epoch: epoch to resume the lr schedule from
    :returns: (optimizer, lr-scheduler)
    :rtype: tuple

    """
    optim_map = {'rmsprop': optim.RMSprop,
                 'adam': optim.Adam,
                 'adadelta': optim.Adadelta,
                 'sgd': optim.SGD,
                 'momentum': functools.partial(optim.SGD, momentum=0.9),
                 'lbfgs': optim.LBFGS}
    # split params into weight-decay / no-decay groups
    params_to_optimize = layers.add_weight_decay(model, args.weight_decay)
    full_opt_name = args.optimizer.lower().strip()
    is_lars = ('lars' in full_opt_name)  # e.g. 'lars_sgd'
    if (full_opt_name == 'lamb'):
        assert args.half, 'Need fp16 precision to use Apex FusedLAMB.'
        optim_map['lamb'] = optimizers.fused_lamb.FusedLAMB
    # 'lars_<name>' -> base optimizer '<name>'
    opt_name = (full_opt_name.split('_')[(- 1)] if is_lars else full_opt_name)
    print('using {} optimizer {} lars.'.format(opt_name, ('with' if is_lars else 'without')))
    lr = args.lr
    if (opt_name in ['momentum', 'sgd']):
        # linear scaling rule: lr * global_batch_size / 256
        lr = (args.lr * ((args.batch_size * args.num_replicas) / 256))
    opt = optim_map[opt_name](params_to_optimize, lr=lr)
    if is_lars:
        opt = LARS(opt, eps=0.0)
    sched = build_lr_schedule(opt, last_epoch=last_epoch)
    return (opt, sched)
def build_train_and_test_transforms():
    """Returns torchvision OR nvidia-dali transforms.

    DALI is selected when 'dali' appears in ``args.task``.  Both branches
    build a SimCLR-style train augmentation (random-resized-crop, flip,
    color-jitter, grayscale) and a plain resize for test.

    :returns: train_transforms, test_transforms
    :rtype: list, list

    """
    resize_shape = (args.image_size_override, args.image_size_override)
    if ('dali' in args.task):
        # local imports keep nvidia-dali an optional dependency
        import nvidia.dali.ops as ops
        import nvidia.dali.types as types
        from datasets.dali_imagefolder import ColorJitter, RandomHorizontalFlip, RandomGrayScale
        train_transform = [
            ops.RandomResizedCrop(device=('gpu' if args.cuda else 'cpu'),
                                  size=resize_shape,
                                  random_area=(0.08, 1.0),
                                  random_aspect_ratio=((3.0 / 4), (4.0 / 3))),
            # NOTE(review): flip prob is 0.2 here vs 0.5 in the torchvision branch,
            # and saturation jitter is 0.2x vs 0.8x -- confirm the asymmetry is intended.
            RandomHorizontalFlip(prob=0.2, cuda=args.cuda),
            ColorJitter(brightness=(0.8 * args.color_jitter_strength),
                        contrast=(0.8 * args.color_jitter_strength),
                        saturation=(0.2 * args.color_jitter_strength),
                        hue=(0.2 * args.color_jitter_strength),
                        prob=0.8, cuda=args.cuda),
            RandomGrayScale(prob=0.2, cuda=args.cuda)]
        test_transform = [ops.Resize(resize_x=resize_shape[0],
                                     resize_y=resize_shape[1],
                                     device=('gpu' if args.cuda else 'cpu'),
                                     image_type=types.RGB,
                                     interp_type=types.INTERP_LINEAR)]
    else:
        from datasets.utils import GaussianBlur
        train_transform = [
            transforms.RandomResizedCrop((args.image_size_override, args.image_size_override)),
            transforms.RandomHorizontalFlip(p=0.5),
            transforms.RandomApply([transforms.ColorJitter(brightness=(0.8 * args.color_jitter_strength),
                                                           contrast=(0.8 * args.color_jitter_strength),
                                                           saturation=(0.8 * args.color_jitter_strength),
                                                           hue=(0.2 * args.color_jitter_strength))], p=0.8),
            transforms.RandomGrayscale(p=0.2),
            GaussianBlur(kernel_size=int((0.1 * args.image_size_override)), p=0.5)]
        test_transform = [transforms.Resize(resize_shape)]
    return (train_transform, test_transform)
def build_loader_model_grapher(args):
    """Builds the dataloader, the SimCLR network and the grapher.

    Side effect: patches dataset statistics (input shape, per-replica sample
    counts, steps per epoch) onto ``args``.

    :param args: argparse namespace
    :returns: (loader, network, grapher); grapher is None on non-zero ranks
    :rtype: tuple

    """
    (train_transform, test_transform) = build_train_and_test_transforms()
    loader_dict = {'train_transform': train_transform,
                   'test_transform': test_transform, **vars(args)}
    loader = get_loader(**loader_dict)
    # stash dataset metadata on args (train/valid counts are per-replica)
    args.input_shape = loader.input_shape
    args.num_train_samples = (loader.num_train_samples // args.num_replicas)
    args.num_test_samples = loader.num_test_samples
    args.num_valid_samples = (loader.num_valid_samples // args.num_replicas)
    args.steps_per_train_epoch = (args.num_train_samples // args.batch_size)
    args.total_train_steps = (args.epochs * args.steps_per_train_epoch)
    network = SimCLR(base_network_output_size=args.representation_size,
                     nce_logits_output_size=args.nce_size,
                     classifier_output_size=loader.output_size)
    network = (nn.SyncBatchNorm.convert_sync_batchnorm(network) if args.convert_to_sync_bn else network)
    network = (network.cuda() if args.cuda else network)
    # materialize lazily-built modules with one forward pass before init / DDP wrap
    lazy_generate_modules(network, loader.train_loader)
    network = layers.init_weights(network, init=args.weight_initialization)
    if (args.num_replicas > 1):
        print('wrapping model with DDP...')
        network = layers.DistributedDataParallelPassthrough(network, device_ids=[0],
                                                            output_device=0,
                                                            find_unused_parameters=True)
    print(network)
    print('model has {} million parameters.'.format((utils.number_of_parameters(network) / 1000000.0)))
    # only rank 0 gets a grapher; visdom if a URL was given, else tensorboard
    grapher = None
    if ((args.visdom_url is not None) and (args.distributed_rank == 0)):
        grapher = Grapher('visdom', env=utils.get_name(args),
                          server=args.visdom_url, port=args.visdom_port)
    elif (args.distributed_rank == 0):
        grapher = Grapher('tensorboard', logdir=os.path.join('runs', utils.get_name(args)))
    return (loader, network, grapher)
def lazy_generate_modules(model, loader):
    """Run a single forward pass so lazily-constructed submodules get built.

    Pulls one minibatch from ``loader``, sanity-checks that both augmented
    views lie in [0, 1], moves them to GPU when ``args.cuda`` is set and
    feeds them through ``model`` under ``torch.no_grad()``.  Afterwards the
    polyak / EMA shadow parameters are initialized when ``args.polyak_ema > 0``.

    :param model: the nn.Module
    :param loader: the dataloader
    :returns: None
    :rtype: None

    """
    model.eval()  # NOTE(review): model is left in eval mode; callers appear to re-enable train mode later
    for (augmentation1, augmentation2, labels) in loader:
        with torch.no_grad():
            print('augmentation1 = {} / {} | augmentation2 = {} / {} | labels = {} / {}'.format(
                augmentation1.shape, augmentation1.dtype,
                augmentation2.shape, augmentation2.dtype,
                labels.shape, labels.dtype))
            (aug1_min, aug1_max) = (augmentation1.min(), augmentation1.max())
            (aug2_min, aug2_max) = (augmentation2.min(), augmentation2.max())
            print('aug1 in range [min: {}, max: {}] | aug2 in range [min: {}, max: {}]'.format(
                aug1_min, aug1_max, aug2_min, aug2_max))
            # Guard against un-normalized image inputs.
            if ((aug1_max > 1.0) or (aug1_min < 0)):
                raise ValueError('aug1 max > 1.0 or aug1 min < 0. You probably dont want this.')
            if ((aug2_max > 1.0) or (aug2_min < 0)):
                raise ValueError('aug2 max > 1.0 or aug2 min < 0. You probably dont want this.')
            augmentation1 = (augmentation1.cuda(non_blocking=True) if args.cuda else augmentation1)
            augmentation2 = (augmentation2.cuda(non_blocking=True) if args.cuda else augmentation2)
            _ = model(augmentation1, augmentation2)
            break  # a single minibatch is enough to materialize lazy modules
    if (args.polyak_ema > 0):
        layers.polyak_ema_parameters(model, args.polyak_ema)
def register_plots(loss, grapher, epoch, prefix='train'):
    """Registers line plots with grapher (rank-0 only, no-op otherwise).

    Recurses into nested dicts; any key containing 'mean' or 'scalar' is
    logged as a scalar named ``<prefix>_<key-minus-suffix>``.

    :param loss: the dict containing '*_mean' or '*_scalar' values
    :param grapher: the grapher object (may be None)
    :param epoch: the current epoch, used as the x-axis value
    :param prefix: prefix to append to the plot
    :returns: None
    :rtype: None

    """
    if ((args.distributed_rank == 0) and (grapher is not None)):
        for (k, v) in loss.items():
            if isinstance(v, dict):
                register_plots(loss[k], grapher, epoch, prefix=prefix)
            if (('mean' in k) or ('scalar' in k)):
                # strip the trailing '_mean' / '_scalar' suffix from the tag name
                key_name = '-'.join(k.split('_')[0:(- 1)])
                # tensors need .item(); plain floats pass through untouched
                value = (v.item() if (not isinstance(v, (float, np.float32, np.float64))) else v)
                grapher.add_scalar('{}_{}'.format(prefix, key_name), value, epoch)
def register_images(output_map, grapher, prefix='train'):
    """Registers images with grapher; overwrites the existing image to save space.

    Recurses into nested dicts; any key containing 'img' or 'imgs' is posted
    as a normalized image grid under ``<prefix>_<key-minus-suffix>``.

    :param output_map: the dict containing '*_img' or '*_imgs' as keys
    :param grapher: the grapher object (may be None)
    :param prefix: prefix to attach to images
    :returns: None
    :rtype: None

    """
    if ((args.distributed_rank == 0) and (grapher is not None)):
        for (k, v) in output_map.items():
            if isinstance(v, dict):
                register_images(output_map[k], grapher, prefix=prefix)
            if (('img' in k) or ('imgs' in k)):
                # strip the trailing '_img' / '_imgs' suffix from the tag name
                key_name = '-'.join(k.split('_')[0:(- 1)])
                img = torchvision.utils.make_grid(v, normalize=True, scale_each=True)
                # global_step=0 deliberately replaces the previous image
                grapher.add_image('{}_{}'.format(prefix, key_name), img.detach(), global_step=0)
def _extract_sum_scalars(v1, v2): 'Simple helper to sum values in a struct using dm_tree.' def chk(c): 'Helper to check if we have a primitive or tensor' return (not isinstance(c, (int, float, np.int32, np.int64, np.float32, np.float64))) v1_detached = (v1.detach() if chk(v1) else v1) v2_detached = (v2.detach() if chk(v2) else v2) return (v1_detached + v2_detached)
def execute_graph(epoch, model, loader, grapher, optimizer=None, prefix='test'):
    """Execute the graph; when 'train' is in the prefix the model runs the optimizer.

    Iterates the loader, computes the NT-Xent (SimCLR) loss plus a
    linear-probe cross-entropy, accumulates detached per-minibatch sums,
    backprops / steps in train mode, then logs scalar plots and sample images.

    :param epoch: the current epoch number
    :param model: the torch model
    :param loader: the train or **TEST** loader
    :param grapher: the graph writing helper (eg: visdom / tf wrapper)
    :param optimizer: the optimizer (required iff training)
    :param prefix: 'train', 'test' or 'valid'
    :returns: the mean total loss for the epoch
    :rtype: float

    """
    start_time = time.time()
    is_eval = ('train' not in prefix)
    (model.eval() if is_eval else model.train())
    # eval <=> no optimizer; train <=> optimizer required
    assert ((optimizer is None) if is_eval else (optimizer is not None))
    (loss_map, num_samples) = ({}, 0)
    objective = NTXent()  # nn.Module wrapper so the loss can read self.training
    for (num_minibatches, (augmentation1, augmentation2, labels)) in enumerate(loader):
        augmentation1 = (augmentation1.cuda(non_blocking=True) if args.cuda else augmentation1)
        augmentation2 = (augmentation2.cuda(non_blocking=True) if args.cuda else augmentation2)
        labels = (labels.cuda(non_blocking=True) if args.cuda else labels)
        with (torch.no_grad() if is_eval else utils.dummy_context()):
            if (is_eval and (args.polyak_ema > 0)):
                # evaluate using the polyak-averaged (EMA) weights
                (nce_logits1, nce_logits2, linear_preds) = layers.get_polyak_prediction(
                    model, pred_fn=functools.partial(model, augmentation1, augmentation2))
            else:
                (nce_logits1, nce_logits2, linear_preds) = model(augmentation1, augmentation2)
            nce_loss = objective(nce_logits1, nce_logits2,
                                 temperature=args.nce_temperature,
                                 num_replicas=args.num_replicas)
            # during training the linear probe sees both augmentations, hence doubled labels
            classifier_labels = (labels if is_eval else torch.cat([labels, labels], 0))
            classifier_loss = F.cross_entropy(input=linear_preds, target=classifier_labels)
            (acc1, acc5) = metrics.topk(output=linear_preds, target=classifier_labels, topk=(1, 5))
            loss_t = {'loss_mean': (nce_loss + classifier_loss),
                      'nce_loss_mean': nce_loss,
                      'linear_loss_mean': classifier_loss,
                      'top1_mean': acc1,
                      'top5_mean': acc5}
            # accumulate (detached) per-minibatch sums; averaged after the loop
            loss_map = (loss_t if (not loss_map)
                        else tree.map_structure(_extract_sum_scalars, loss_map, loss_t))
            num_samples += augmentation1.size(0)
        if (not is_eval):
            optimizer.zero_grad()
            if args.half:
                # Apex AMP loss scaling for fp16 training
                with amp.scale_loss(loss_t['loss_mean'], optimizer) as scaled_loss:
                    scaled_loss.backward()
            else:
                loss_t['loss_mean'].backward()
            if (args.clip > 0):
                nn.utils.clip_grad_value_(model.parameters(), args.clip)
            optimizer.step()
            if (args.polyak_ema > 0):
                # update the EMA shadow parameters after each optimizer step
                layers.polyak_ema_parameters(model, args.polyak_ema)
        del loss_t
        if args.debug_step:
            break  # single-minibatch debugging mode
    # convert accumulated sums into per-minibatch means
    loss_map = tree.map_structure((lambda v: (v / (num_minibatches + 1))), loss_map)
    to_log = '{}-{}[Epoch {}][{} samples][{:.2f} sec]:\t Loss: {:.4f}\tTop-1: {:.4f}\tTop-5: {:.4f}'
    print(to_log.format(prefix, args.distributed_rank, epoch, num_samples,
                        (time.time() - start_time),
                        loss_map['loss_mean'].item(),
                        loss_map['top1_mean'].item(),
                        loss_map['top5_mean'].item()))
    register_plots({**loss_map}, grapher, epoch=epoch, prefix=prefix)
    # post a small grid of the last minibatch's augmentations (downsized to <= 64px)
    num_images_to_post = min(64, augmentation1.shape[0])
    image_size_to_post = min(64, augmentation1.shape[(- 1)])
    image_map = {'augmentation1_imgs': F.interpolate(augmentation1[0:num_images_to_post],
                                                     size=(image_size_to_post, image_size_to_post)),
                 'augmentation2_imgs': F.interpolate(augmentation2[0:num_images_to_post],
                                                     size=(image_size_to_post, image_size_to_post))}
    register_images({**image_map}, grapher, prefix=prefix)
    if (grapher is not None):
        grapher.save()
    loss_val = loss_map['loss_mean'].detach().item()
    loss_map.clear()
    return loss_val
def train(epoch, model, optimizer, train_loader, grapher, prefix='train'):
    """Run one training epoch through :func:`execute_graph`.

    :param epoch: the current epoch
    :param model: the model
    :param optimizer: the optimizer stepped during the epoch
    :param train_loader: the train data-loader
    :param grapher: the grapher object
    :param prefix: kept for signature symmetry; execute_graph always
        receives prefix='train' from here
    :returns: mean loss scalar
    :rtype: float32

    """
    return execute_graph(epoch=epoch, model=model, loader=train_loader,
                         grapher=grapher, optimizer=optimizer, prefix='train')
def test(epoch, model, test_loader, grapher, prefix='test'):
    """Run one evaluation pass through :func:`execute_graph`.

    No optimizer is passed, which puts execute_graph in eval mode.

    :param epoch: the current epoch
    :param model: the model
    :param test_loader: the test data-loader
    :param grapher: the grapher object
    :param prefix: kept for signature symmetry; execute_graph always
        receives prefix='test' from here
    :returns: mean loss scalar
    :rtype: float32

    """
    return execute_graph(epoch=epoch, model=model, loader=test_loader,
                         grapher=grapher, prefix='test')
def init_multiprocessing_and_cuda(rank, args_from_spawn):
    """Sets the appropriate flags for multi-process (DDP) jobs.

    Pins the replica to one GPU, seeds all RNGs, initializes the NCCL
    process group for multi-replica runs (splitting the global batch size
    across replicas) and publishes the namespace as the module-level
    ``args`` global used throughout this file.

    :param rank: this replica's rank
    :param args_from_spawn: argparse namespace handed to the spawned process
    :returns: None
    :rtype: None
    """
    if args_from_spawn.multi_gpu_distributed:
        # pin this replica to a single visible GPU
        os.environ['CUDA_VISIBLE_DEVICES'] = str(rank)
    args_from_spawn.distributed_rank = rank
    args_from_spawn.cuda = ((not args_from_spawn.no_cuda) and torch.cuda.is_available())
    if args_from_spawn.cuda:
        # fixed input sizes -> let cudnn autotune kernels
        torch.backends.cudnn.benchmark = True
        print('Replica {} / {} using GPU: {}'.format(
            (rank + 1), args_from_spawn.num_replicas, torch.cuda.get_device_name(0)))
    if (args_from_spawn.seed is not None):
        print(('setting seed %d' % args_from_spawn.seed))
        np.random.seed(args_from_spawn.seed)
        torch.manual_seed(args_from_spawn.seed)
        if args_from_spawn.cuda:
            torch.cuda.manual_seed_all(args_from_spawn.seed)
    if (args_from_spawn.num_replicas > 1):
        torch.distributed.init_process_group(backend='nccl',
                                             init_method=os.environ['MASTER_ADDR'],
                                             world_size=args_from_spawn.num_replicas,
                                             rank=rank)
        print('Successfully created DDP process group!')
        # split the global batch size evenly across replicas
        args_from_spawn.batch_size = (args_from_spawn.batch_size // args_from_spawn.num_replicas)
    # publish as the module-level `args` read by the rest of this file
    global args
    args = args_from_spawn
def run(rank, args):
    """Main entry-point into the program.

    Builds loader / model / grapher, the optimizer and schedule, then runs
    the train + test loop with checkpointing and early stopping via
    ``layers.ModelSaver``.

    :param rank: current device rank
    :param args: argparse namespace (shadows the module-level global of the same name)
    :returns: None
    :rtype: None

    """
    init_multiprocessing_and_cuda(rank, args)
    (loader, model, grapher) = build_loader_model_grapher(args)
    print(pprint.PrettyPrinter(indent=4).pformat(vars(args)))
    (optimizer, scheduler) = build_optimizer(model)
    if args.half:
        # Apex AMP mixed-precision initialization
        (model, optimizer) = amp.initialize(model, optimizer, opt_level='O2')
    model = layers.append_save_and_load_fns(model, optimizer, scheduler, grapher, args)
    # lower test loss is better; ignore the first 10% of epochs for early stop
    saver = layers.ModelSaver(model, early_stop=args.early_stop, rank=args.distributed_rank,
                              burn_in_interval=int((0.1 * args.epochs)),
                              larger_is_better=False, max_early_stop_steps=10)
    restore_dict = saver.restore()
    init_epoch = restore_dict['epoch']
    for epoch in range(init_epoch, (args.epochs + 1)):
        train(epoch, model, optimizer, loader.train_loader, grapher)
        test_loss = test(epoch, model, loader.test_loader, grapher)
        loader.set_all_epochs(epoch)  # keeps distributed samplers shuffling per-epoch
        scheduler.step()
        register_plots({'learning_rate_scalar': optimizer.param_groups[0]['lr']}, grapher, epoch)
        if saver(test_loss):
            # early stop triggered: restore the best model and re-evaluate it
            saver.restore()
            test_loss = test(epoch, model, loader.test_loader, grapher)
            break
        if ((epoch == 2) and (args.distributed_rank == 0)):
            # post the (now fully-populated) config once, shortly after startup
            config_to_post = vars(args)
            slurm_id = utils.get_slurm_id()
            if (slurm_id is not None):
                config_to_post['slurm_job_id'] = slurm_id
            grapher.add_text('config', pprint.PrettyPrinter(indent=4).pformat(config_to_post), 0)
    if (grapher is not None):
        grapher.close()
def l2_normalize(x, dim=None, eps=1e-12):
    """Scale ``x`` to unit L2 norm along ``dim``, guarding against division by zero."""
    squared_norm = torch.sum(torch.square(x), dim=dim, keepdim=True)
    # Clamp the squared norm from below by eps so rsqrt never sees zero.
    clamped = torch.max(squared_norm, torch.full_like(squared_norm, eps))
    return x * torch.rsqrt(clamped)
def all_gather(tensor, expand_dim=0, num_replicas=None):
    """Gather ``tensor`` from every replica and concatenate along a new ``expand_dim``."""
    if num_replicas is None:
        num_replicas = dist.get_world_size()
    # Pre-allocate one receive buffer per replica, then perform the collective.
    gathered = [torch.zeros_like(tensor) for _ in range(num_replicas)]
    dist.all_gather(gathered, tensor)
    expanded = [piece.unsqueeze(expand_dim) for piece in gathered]
    return torch.cat(expanded, expand_dim)
class NTXent(nn.Module):
    """NT-Xent (normalized temperature-scaled cross entropy) contrastive loss from SimCLR.

    Wrapped as an nn.Module so ``self.training`` tracks train/eval mode.
    """

    def __init__(self):
        super(NTXent, self).__init__()

    def forward(self, embedding1, embedding2, temperature=0.1, num_replicas=None):
        """NT-XENT Loss from SimCLR.

        :param embedding1: embedding of augmentation1, shape (batch, feature)
        :param embedding2: embedding of augmentation2, shape (batch, feature)
        :param temperature: nce normalization temp
        :param num_replicas: number of compute devices (defaults to world size)
        :returns: scalar loss
        :rtype: float32
        """
        batch_size = embedding1.shape[0]
        feature_size = embedding1.shape[(- 1)]
        num_replicas = (dist.get_world_size() if (num_replicas is None) else num_replicas)
        # Large constant used to mask out self-similarity logits before the softmax.
        LARGE_NUM = 1000000000.0
        # Cosine similarity == dot product of L2-normalized embeddings.
        embedding1 = l2_normalize(embedding1, dim=(- 1))
        embedding2 = l2_normalize(embedding2, dim=(- 1))
        if ((num_replicas > 1) and self.training):
            # Gather embeddings from all replicas so negatives span the global batch.
            embedding1_full = all_gather(embedding1, num_replicas=num_replicas)
            embedding2_full = all_gather(embedding2, num_replicas=num_replicas)
            embedding1_full = embedding1_full.reshape((- 1), feature_size)
            embedding2_full = embedding2_full.reshape((- 1), feature_size)
            # This replica's positives live at offset replica_id * batch_size in the
            # gathered (full) batch.
            replica_id = dist.get_rank()
            labels = (torch.arange(batch_size, device=embedding1.device) + (replica_id * batch_size))
            labels = labels.type(torch.int64)
            full_batch_size = embedding1_full.shape[0]
            masks = F.one_hot(labels, full_batch_size).to(embedding1_full.device)
            labels = F.one_hot(labels, (full_batch_size * 2)).to(embedding1_full.device)
        else:
            # Single-device (or eval) case: negatives come from the local batch only.
            embedding1_full = embedding1
            embedding2_full = embedding2
            masks = F.one_hot(torch.arange(batch_size), batch_size).to(embedding1.device)
            labels = F.one_hot(torch.arange(batch_size), (batch_size * 2)).to(embedding1.device)
        # Within-view similarities; the diagonal (each sample vs itself) is masked out.
        logits_aa = (torch.matmul(embedding1, embedding1_full.T) / temperature)
        logits_aa = (logits_aa - (masks * LARGE_NUM))
        logits_bb = (torch.matmul(embedding2, embedding2_full.T) / temperature)
        logits_bb = (logits_bb - (masks * LARGE_NUM))
        # Cross-view similarities hold the positive pairs on the (offset) diagonal.
        logits_ab = (torch.matmul(embedding1, embedding2_full.T) / temperature)
        logits_ba = (torch.matmul(embedding2, embedding1_full.T) / temperature)
        # Symmetric cross entropy over [cross-view | masked within-view] logits.
        loss_a = F.cross_entropy(input=torch.cat([logits_ab, logits_aa], 1), target=torch.argmax(labels, (- 1)), reduction='none')
        loss_b = F.cross_entropy(input=torch.cat([logits_ba, logits_bb], 1), target=torch.argmax(labels, (- 1)), reduction='none')
        loss = (loss_a + loss_b)
        return torch.mean(loss)
class LARS(Optimizer):
    """Implements 'LARS (Layer-wise Adaptive Rate Scaling)'__ as a
    :class:`~torch.optim.Optimizer` wrapper.

    __ : https://arxiv.org/abs/1708.03888

    Wraps an arbitrary optimizer like :class:`torch.optim.SGD` to use LARS. If
    you want the same performance obtained with small-batch training when
    you use large-batch training, LARS will be helpful::

    Args:
        optimizer (Optimizer):
            optimizer to wrap
        eps (float, optional):
            epsilon to help with numerical stability while calculating the
            adaptive learning rate
        trust_coef (float, optional):
            trust coefficient for calculating the adaptive learning rate

    Example::
        base_optimizer = optim.SGD(model.parameters(), lr=0.1)
        optimizer = LARS(optimizer=base_optimizer)

        output = model(input)
        loss = loss_fn(output, target)
        loss.backward()

        optimizer.step()
    """

    def __init__(self, optimizer, eps=1e-08, trust_coef=0.001):
        if (eps < 0.0):
            raise ValueError(('invalid epsilon value: , %f' % eps))
        if (trust_coef < 0.0):
            raise ValueError(('invalid trust coefficient: %f' % trust_coef))
        self.optim = optimizer  # the wrapped inner optimizer
        self.eps = eps  # numerical-stability epsilon for the adaptive LR
        self.trust_coef = trust_coef  # LARS trust coefficient

    def __getstate__(self):
        # Pickle the wrapped optimizer together with the LARS hyperparameters.
        lars_dict = {}
        lars_dict['eps'] = self.eps
        lars_dict['trust_coef'] = self.trust_coef
        return (self.optim, lars_dict)

    def __setstate__(self, state):
        (self.optim, lars_dict) = state
        self.eps = lars_dict['eps']
        self.trust_coef = lars_dict['trust_coef']

    def __repr__(self):
        return ('%s(%r)' % (self.__class__.__name__, self.optim))

    # The properties and methods below simply delegate to the wrapped optimizer
    # so LARS is a drop-in replacement.
    @property
    def param_groups(self):
        return self.optim.param_groups

    @property
    def state(self):
        return self.optim.state

    def state_dict(self):
        return self.optim.state_dict()

    def load_state_dict(self, state_dict):
        self.optim.load_state_dict(state_dict)

    def zero_grad(self):
        self.optim.zero_grad()

    def add_param_group(self, param_group):
        self.optim.add_param_group(param_group)

    def apply_adaptive_lrs(self):
        """Scale each parameter's gradient in place by its layer-wise adaptive LR."""
        with torch.no_grad():
            for group in self.optim.param_groups:
                weight_decay = group['weight_decay']
                ignore = group.get('ignore', None)  # a group may opt out of LARS scaling
                for p in group['params']:
                    if (p.grad is None):
                        continue
                    # Fold weight decay into the gradient before computing the norms.
                    if (weight_decay > 0):
                        p.grad = p.grad.add(p, alpha=weight_decay)
                    if ((ignore is not None) and (not ignore)):
                        param_norm = p.norm()
                        grad_norm = p.grad.norm()
                        # Default to no scaling when either norm is zero.
                        adaptive_lr = 1.0
                        if ((param_norm > 0) and (grad_norm > 0)):
                            adaptive_lr = ((self.trust_coef * param_norm) / (grad_norm + self.eps))
                        p.grad = p.grad.mul(adaptive_lr)

    def step(self, *args, **kwargs):
        """Apply LARS gradient scaling, then delegate one step to the wrapped optimizer."""
        self.apply_adaptive_lrs()
        # Weight decay was already folded into the gradients above, so disable it in
        # the wrapped optimizer for this step and restore it afterwards.
        weight_decay_orig = [group['weight_decay'] for group in self.optim.param_groups]
        for group in self.optim.param_groups:
            group['weight_decay'] = 0
        loss = self.optim.step(*args, **kwargs)
        for (group, wo) in zip(self.optim.param_groups, weight_decay_orig):
            group['weight_decay'] = wo
        return loss
class Scheduler(object):
    """Chain an optional warmup scheduler with a main scheduler behind one interface."""

    def __init__(self, normal_schededuler, warmup_scheduler=None):
        self.warmup = warmup_scheduler
        self.sched = normal_schededuler

    def _warming_up(self):
        # True while a warmup scheduler exists and has not yet finished its ramp.
        return self.warmup is not None and not self.warmup.complete

    def get_last_lr(self):
        """Return the last learning rate computed by the currently-active scheduler."""
        if self._warming_up():
            return self.warmup.get_last_lr()
        return self.sched.get_last_lr()

    def state_dict(self):
        """Return the state of both schedulers as a :class:`dict`."""
        warmup_state = {} if self.warmup is None else self.warmup.state_dict()
        return {'warmup': warmup_state, 'sched': self.sched.state_dict()}

    def load_state_dict(self, state_dict):
        """Restore both schedulers from a dict produced by :meth:`state_dict`."""
        if self.warmup:
            self.warmup.load_state_dict(state_dict['warmup'])
        self.sched.load_state_dict(state_dict['sched'])

    def step(self, *args, **kwargs):
        """Step the warmup scheduler until it completes, then the main scheduler."""
        if self._warming_up():
            return self.warmup.step(*args, **kwargs)
        return self.sched.step(*args, **kwargs)
class LinearWarmup(LambdaLR):
    """Linearly ramp the LR multiplier from 0 to 1 over ``warmup_steps``, then hold at 1.

    From https://bit.ly/39o2W1f
    """

    def __init__(self, optimizer, warmup_steps, last_epoch=(- 1)):
        self.warmup_steps = warmup_steps
        self.complete = False  # flips to True once the ramp has finished
        super(LinearWarmup, self).__init__(optimizer, self.lr_lambda, last_epoch=last_epoch)

    def lr_lambda(self, step):
        # Past the ramp: mark completion and pin the multiplier at 1.
        if step >= self.warmup_steps:
            self.complete = True
            return 1.0
        return float(step) / float(max(1.0, self.warmup_steps))
def dataloader_msrvtt_train(args, tokenizer):
    """Build the MSRVTT 'train' split DataLoader.

    :param args: namespace with anno_path, video_path, max_words, max_frames,
        video_framerate, batch_size, world_size and workers
    :param tokenizer: text tokenizer forwarded to the dataset
    :returns: (dataloader, dataset length, DistributedSampler or None)
    """
    msrvtt_dataset = MSRVTTDataset(subset='train', anno_path=args.anno_path, video_path=args.video_path,
                                   max_words=args.max_words, tokenizer=tokenizer, max_frames=args.max_frames,
                                   video_framerate=args.video_framerate, config=args)
    try:
        train_sampler = torch.utils.data.distributed.DistributedSampler(msrvtt_dataset)
    except (RuntimeError, ValueError):
        # BUGFIX: narrowed from a bare `except:` (which also swallowed SystemExit /
        # KeyboardInterrupt). Only the "default process group not initialized" errors
        # should fall back to the single-process shuffling path.
        train_sampler = None
    dataloader = DataLoader(msrvtt_dataset, batch_size=(args.batch_size // args.world_size),
                            num_workers=args.workers, pin_memory=False, shuffle=(train_sampler is None),
                            sampler=train_sampler, drop_last=True)
    return (dataloader, len(msrvtt_dataset), train_sampler)
def dataloader_msrvtt_test(args, tokenizer, subset='test'):
    """Build the MSRVTT evaluation DataLoader for ``subset`` ('test' or 'val').

    :returns: (dataloader, dataset length)
    """
    msrvtt_testset = MSRVTTDataset(subset=subset, anno_path=args.anno_path, video_path=args.video_path,
                                   max_words=args.max_words, tokenizer=tokenizer, max_frames=args.max_frames,
                                   video_framerate=args.video_framerate, config=args)
    try:
        test_sampler = torch.utils.data.distributed.DistributedSampler(msrvtt_testset)
    except (RuntimeError, ValueError):
        # BUGFIX: narrowed from a bare `except:`; only "process group not initialized"
        # errors should fall back to the non-distributed path.
        test_sampler = None
    dataloader_msrvtt = DataLoader(msrvtt_testset, batch_size=(args.batch_size_val // args.world_size),
                                   num_workers=args.workers, shuffle=False, sampler=test_sampler,
                                   drop_last=False)
    return (dataloader_msrvtt, len(msrvtt_testset))
def dataloader_msrvtt_train_test(args, tokenizer):
    """Build the MSRVTT 'train_test' split DataLoader (train set evaluated as test).

    :returns: (dataloader, dataset length, DistributedSampler or None)
    """
    msrvtt_dataset = MSRVTTDataset(subset='train_test', anno_path=args.anno_path, video_path=args.video_path,
                                   max_words=args.max_words, tokenizer=tokenizer, max_frames=args.max_frames,
                                   video_framerate=args.video_framerate, config=args)
    try:
        train_sampler = torch.utils.data.distributed.DistributedSampler(msrvtt_dataset)
    except (RuntimeError, ValueError):
        # BUGFIX: narrowed from a bare `except:`; only "process group not initialized"
        # errors should fall back to the single-process shuffling path.
        train_sampler = None
    dataloader = DataLoader(msrvtt_dataset, batch_size=(args.batch_size // args.world_size),
                            num_workers=args.workers, pin_memory=False, shuffle=(train_sampler is None),
                            sampler=train_sampler, drop_last=True)
    return (dataloader, len(msrvtt_dataset), train_sampler)
def dataloader_lsmdc_train(args, tokenizer):
    """Build the LSMDC 'train' split DataLoader (distributed sampler required)."""
    dataset = LsmdcDataset(subset='train', anno_path=args.anno_path, video_path=args.video_path,
                           max_words=args.max_words, tokenizer=tokenizer, max_frames=args.max_frames,
                           video_framerate=args.video_framerate, config=args)
    sampler = torch.utils.data.distributed.DistributedSampler(dataset)
    per_rank_batch = args.batch_size // args.world_size
    loader = DataLoader(dataset, batch_size=per_rank_batch, num_workers=args.workers,
                        pin_memory=False, shuffle=(sampler is None), sampler=sampler, drop_last=True)
    return (loader, len(dataset), sampler)
def dataloader_lsmdc_train_test(args, tokenizer):
    """Build the LSMDC 'train_test' split DataLoader (train set evaluated as test)."""
    dataset = LsmdcDataset(subset='train_test', anno_path=args.anno_path, video_path=args.video_path,
                           max_words=args.max_words, tokenizer=tokenizer, max_frames=args.max_frames,
                           video_framerate=args.video_framerate, config=args)
    sampler = torch.utils.data.distributed.DistributedSampler(dataset)
    per_rank_batch = args.batch_size // args.world_size
    loader = DataLoader(dataset, batch_size=per_rank_batch, num_workers=args.workers,
                        pin_memory=False, shuffle=(sampler is None), sampler=sampler, drop_last=True)
    return (loader, len(dataset), sampler)
def dataloader_lsmdc_test(args, tokenizer, subset='test'):
    """Build the LSMDC evaluation DataLoader for ``subset`` ('test' or 'val').

    :returns: (dataloader, dataset length)
    """
    lsmdc_testset = LsmdcDataset(subset=subset, anno_path=args.anno_path, video_path=args.video_path,
                                 max_words=args.max_words, tokenizer=tokenizer, max_frames=args.max_frames,
                                 video_framerate=args.video_framerate, config=args)
    try:
        test_sampler = torch.utils.data.distributed.DistributedSampler(lsmdc_testset)
    except (RuntimeError, ValueError):
        # BUGFIX: narrowed from a bare `except:`; only "process group not initialized"
        # errors should fall back to the non-distributed path.
        test_sampler = None
    dataloader_lsmdc = DataLoader(lsmdc_testset, batch_size=(args.batch_size_val // args.world_size),
                                  num_workers=args.workers, shuffle=False, sampler=test_sampler,
                                  drop_last=False)
    return (dataloader_lsmdc, len(lsmdc_testset))
def dataloader_activity_train(args, tokenizer):
    """Build the ActivityNet 'train' split DataLoader (distributed sampler required)."""
    dataset = ActivityNetDataset(subset='train', data_path=args.anno_path, features_path=args.video_path,
                                 max_words=args.max_words, feature_framerate=args.video_framerate,
                                 tokenizer=tokenizer, max_frames=args.max_frames)
    sampler = torch.utils.data.distributed.DistributedSampler(dataset)
    per_rank_batch = args.batch_size // args.world_size
    loader = DataLoader(dataset, batch_size=per_rank_batch, num_workers=args.workers,
                        pin_memory=False, shuffle=(sampler is None), sampler=sampler, drop_last=True)
    return (loader, len(dataset), sampler)
def dataloader_activity_train_test(args, tokenizer):
    """Build the ActivityNet 'train_test' split DataLoader (train set evaluated as test)."""
    dataset = ActivityNetDataset(subset='train_test', data_path=args.anno_path, features_path=args.video_path,
                                 max_words=args.max_words, feature_framerate=args.video_framerate,
                                 tokenizer=tokenizer, max_frames=args.max_frames)
    sampler = torch.utils.data.distributed.DistributedSampler(dataset)
    per_rank_batch = args.batch_size // args.world_size
    loader = DataLoader(dataset, batch_size=per_rank_batch, num_workers=args.workers,
                        pin_memory=False, shuffle=(sampler is None), sampler=sampler, drop_last=True)
    return (loader, len(dataset), sampler)
def dataloader_activity_test(args, tokenizer, subset='test'):
    """Build the ActivityNet evaluation DataLoader for ``subset`` ('test' or 'val').

    :returns: (dataloader, dataset length)
    """
    activity_testset = ActivityNetDataset(subset=subset, data_path=args.anno_path, features_path=args.video_path,
                                          max_words=args.max_words, feature_framerate=args.video_framerate,
                                          tokenizer=tokenizer, max_frames=args.max_frames)
    try:
        test_sampler = torch.utils.data.distributed.DistributedSampler(activity_testset)
    except (RuntimeError, ValueError):
        # BUGFIX: narrowed from a bare `except:`; only "process group not initialized"
        # errors should fall back to the non-distributed path.
        test_sampler = None
    dataloader_activity = DataLoader(activity_testset, batch_size=(args.batch_size_val // args.world_size),
                                     num_workers=args.workers, shuffle=False, sampler=test_sampler,
                                     drop_last=False)
    return (dataloader_activity, len(activity_testset))
def dataloader_msvd_train(args, tokenizer):
    """Build the MSVD 'train' split DataLoader (distributed sampler required)."""
    dataset = MsvdDataset(subset='train', anno_path=args.anno_path, video_path=args.video_path,
                          max_words=args.max_words, tokenizer=tokenizer, max_frames=args.max_frames,
                          video_framerate=args.video_framerate, config=args)
    sampler = torch.utils.data.distributed.DistributedSampler(dataset)
    per_rank_batch = args.batch_size // args.world_size
    loader = DataLoader(dataset, batch_size=per_rank_batch, num_workers=args.workers,
                        pin_memory=False, shuffle=(sampler is None), sampler=sampler, drop_last=True)
    return (loader, len(dataset), sampler)
def dataloader_msvd_train_test(args, tokenizer):
    """Build the MSVD 'train_test' split DataLoader (train set evaluated as test)."""
    dataset = MsvdDataset(subset='train_test', anno_path=args.anno_path, video_path=args.video_path,
                          max_words=args.max_words, tokenizer=tokenizer, max_frames=args.max_frames,
                          video_framerate=args.video_framerate, config=args)
    sampler = torch.utils.data.distributed.DistributedSampler(dataset)
    per_rank_batch = args.batch_size // args.world_size
    loader = DataLoader(dataset, batch_size=per_rank_batch, num_workers=args.workers,
                        pin_memory=False, shuffle=(sampler is None), sampler=sampler, drop_last=True)
    return (loader, len(dataset), sampler)
def dataloader_msvd_test(args, tokenizer, subset='test'):
    """Build the MSVD evaluation DataLoader for ``subset`` ('test' or 'val').

    :returns: (dataloader, dataset length)
    """
    msvd_testset = MsvdDataset(subset=subset, anno_path=args.anno_path, video_path=args.video_path,
                               max_words=args.max_words, tokenizer=tokenizer, max_frames=args.max_frames,
                               video_framerate=args.video_framerate, config=args)
    try:
        test_sampler = torch.utils.data.distributed.DistributedSampler(msvd_testset)
    except (RuntimeError, ValueError):
        # BUGFIX: narrowed from a bare `except:`; only "process group not initialized"
        # errors should fall back to the non-distributed path.
        test_sampler = None
    dataloader_msvd = DataLoader(msvd_testset, batch_size=(args.batch_size_val // args.world_size),
                                 num_workers=args.workers, shuffle=False, sampler=test_sampler,
                                 drop_last=False)
    return (dataloader_msvd, len(msvd_testset))
def dataloader_didemo_train(args, tokenizer):
    """Build the DiDeMo 'train' split DataLoader (distributed sampler required)."""
    dataset = DiDeMoDataset(subset='train', data_path=args.anno_path, features_path=args.video_path,
                            max_words=args.max_words, feature_framerate=args.video_framerate,
                            tokenizer=tokenizer, max_frames=args.max_frames)
    sampler = torch.utils.data.distributed.DistributedSampler(dataset)
    per_rank_batch = args.batch_size // args.world_size
    loader = DataLoader(dataset, batch_size=per_rank_batch, num_workers=args.workers,
                        pin_memory=False, shuffle=(sampler is None), sampler=sampler, drop_last=True)
    return (loader, len(dataset), sampler)
def dataloader_didemo_train_test(args, tokenizer):
    """Build the DiDeMo 'train_test' split DataLoader (train set evaluated as test)."""
    dataset = DiDeMoDataset(subset='train_test', data_path=args.anno_path, features_path=args.video_path,
                            max_words=args.max_words, feature_framerate=args.video_framerate,
                            tokenizer=tokenizer, max_frames=args.max_frames)
    sampler = torch.utils.data.distributed.DistributedSampler(dataset)
    per_rank_batch = args.batch_size // args.world_size
    loader = DataLoader(dataset, batch_size=per_rank_batch, num_workers=args.workers,
                        pin_memory=False, shuffle=(sampler is None), sampler=sampler, drop_last=True)
    return (loader, len(dataset), sampler)
def dataloader_didemo_test(args, tokenizer, subset='test'):
    """Build the DiDeMo evaluation DataLoader for ``subset`` ('test' or 'val').

    :returns: (dataloader, dataset length)
    """
    didemo_testset = DiDeMoDataset(subset=subset, data_path=args.anno_path, features_path=args.video_path,
                                   max_words=args.max_words, feature_framerate=args.video_framerate,
                                   tokenizer=tokenizer, max_frames=args.max_frames)
    try:
        test_sampler = torch.utils.data.distributed.DistributedSampler(didemo_testset)
    except (RuntimeError, ValueError):
        # BUGFIX: narrowed from a bare `except:`; only "process group not initialized"
        # errors should fall back to the non-distributed path.
        test_sampler = None
    dataloader_didemo = DataLoader(didemo_testset, batch_size=(args.batch_size_val // args.world_size),
                                   num_workers=args.workers, shuffle=False, sampler=test_sampler,
                                   drop_last=False)
    return (dataloader_didemo, len(didemo_testset))
class LsmdcDataset(RetrievalDataset):
    """LSMDC text-video retrieval dataset."""

    def __init__(self, subset, anno_path, video_path, tokenizer, max_words=32, max_frames=12, video_framerate=1, image_resolution=224, mode='all', config=None):
        super(LsmdcDataset, self).__init__(subset, anno_path, video_path, tokenizer, max_words, max_frames, video_framerate, image_resolution, mode, config=config)
        pass

    def _get_anns(self, subset='train'):
        """Parse the LSMDC annotation CSVs for the current split.

        video_dict: dict: video_id -> video_path
        sentences_dict: dict: idx -> (video_id, (text, start, end))

        NOTE(review): the CSV to read is chosen by ``self.subset`` while the log line
        below formats the ``subset`` argument -- presumably callers keep them equal;
        confirm.
        """
        video_json_path_dict = {}
        video_json_path_dict['train'] = os.path.join(self.anno_path, 'LSMDC16_annos_training.csv')
        video_json_path_dict['train_test'] = os.path.join(self.anno_path, 'LSMDC16_annos_val.csv')
        video_json_path_dict['val'] = os.path.join(self.anno_path, 'LSMDC16_annos_val.csv')
        video_json_path_dict['test'] = os.path.join(self.anno_path, 'LSMDC16_challenge_1000_publictect.csv')
        video_id_list = []
        caption_dict = {}
        # Annotation rows are 6 tab-separated fields; only the clip id and the
        # sentence are kept (timestamps are unused here).
        with open(video_json_path_dict[self.subset], 'r') as fp:
            for line in fp:
                line = line.strip()
                line_split = line.split('\t')
                assert (len(line_split) == 6)
                (clip_id, start_aligned, end_aligned, start_extracted, end_extracted, sentence) = line_split
                # Skip three specific clips that are excluded by id.
                if (clip_id not in ['0017_Pianist_00.23.28.872-00.23.34.843', '0017_Pianist_00.30.36.767-00.30.38.009', '3064_SPARKLE_2012_01.41.07.000-01.41.11.793']):
                    caption_dict[len(caption_dict)] = (clip_id, (sentence, None, None))
                    if (clip_id not in video_id_list):
                        video_id_list.append(clip_id)
        video_dict = OrderedDict()
        sentences_dict = OrderedDict()
        # Index every video file under video_path whose id appears in the annotations.
        for (root, dub_dir, video_files) in os.walk(self.video_path):
            for video_file in video_files:
                video_id_ = '.'.join(video_file.split('.')[:(- 1)])  # strip the file extension
                if (video_id_ not in video_id_list):
                    continue
                file_path_ = os.path.join(root, video_file)
                video_dict[video_id_] = file_path_
        # Keep only captions whose video file actually exists on disk.
        for (clip_id, sentence) in caption_dict.values():
            if (clip_id not in video_dict):
                continue
            sentences_dict[len(sentences_dict)] = (clip_id, sentence)
        unique_sentence = set([v[1][0] for v in sentences_dict.values()])
        print('[{}] Unique sentence is {} , all num is {}'.format(subset, len(unique_sentence), len(sentences_dict)))
        return (video_dict, sentences_dict)
class MSRVTTDataset(RetrievalDataset):
    """MSRVTT text-video retrieval dataset."""

    def __init__(self, subset, anno_path, video_path, tokenizer, max_words=32, max_frames=12, video_framerate=1, image_resolution=224, mode='all', config=None):
        super(MSRVTTDataset, self).__init__(subset, anno_path, video_path, tokenizer, max_words, max_frames, video_framerate, image_resolution, mode, config=config)
        pass

    def _get_anns(self, subset='train'):
        """Build the annotation maps for the requested split.

        video_dict: dict: video_id -> video_path
        sentences_dict: dict: idx -> (video_id, (caption, start, end))
        """
        # Split CSVs: the 9k train list for train/train_test, JSFUSION 1k for val/test.
        csv_path = {'train': join(self.anno_path, 'MSRVTT_train.9k.csv'), 'val': join(self.anno_path, 'MSRVTT_JSFUSION_test.csv'), 'test': join(self.anno_path, 'MSRVTT_JSFUSION_test.csv'), 'train_test': join(self.anno_path, 'MSRVTT_train.9k.csv')}[subset]
        if exists(csv_path):
            csv = pd.read_csv(csv_path)
        else:
            raise FileNotFoundError
        video_id_list = list(csv['video_id'].values)
        video_dict = OrderedDict()
        sentences_dict = OrderedDict()
        if (subset == 'train'):
            # Train: every caption of every train-list video becomes one sample.
            anno_path = join(self.anno_path, 'MSRVTT_data.json')
            data = json.load(open(anno_path, 'r'))
            for itm in data['sentences']:
                if (itm['video_id'] in video_id_list):
                    sentences_dict[len(sentences_dict)] = (itm['video_id'], (itm['caption'], None, None))
                    video_dict[itm['video_id']] = join(self.video_path, '{}.mp4'.format(itm['video_id']))
        elif (subset == 'train_test'):
            # Train-as-test: keep only the FIRST caption encountered per video.
            anno_path = join(self.anno_path, 'MSRVTT_data.json')
            data = json.load(open(anno_path, 'r'))
            used = []
            for itm in data['sentences']:
                if ((itm['video_id'] in video_id_list) and (itm['video_id'] not in used)):
                    used.append(itm['video_id'])
                    sentences_dict[len(sentences_dict)] = (itm['video_id'], (itm['caption'], None, None))
                    video_dict[itm['video_id']] = join(self.video_path, '{}.mp4'.format(itm['video_id']))
        else:
            # Val/test: captions come straight from the CSV, one per row.
            for (_, itm) in csv.iterrows():
                sentences_dict[len(sentences_dict)] = (itm['video_id'], (itm['sentence'], None, None))
                video_dict[itm['video_id']] = join(self.video_path, '{}.mp4'.format(itm['video_id']))
        unique_sentence = set([v[1][0] for v in sentences_dict.values()])
        print('[{}] Unique sentence is {} , all num is {}'.format(subset, len(unique_sentence), len(sentences_dict)))
        return (video_dict, sentences_dict)
class MsvdDataset(RetrievalDataset):
    """MSVD dataset loader (multiple captions per video)."""

    def __init__(self, subset, anno_path, video_path, tokenizer, max_words=32, max_frames=12, video_framerate=1, image_resolution=224, mode='all', config=None):
        super(MsvdDataset, self).__init__(subset, anno_path, video_path, tokenizer, max_words, max_frames, video_framerate, image_resolution, mode, config=config)
        pass

    def _get_anns(self, subset='train'):
        """Build annotation maps and record per-video caption boundaries for eval.

        video_dict: dict: video_id -> video_path
        sentences_dict: dict: idx -> (video_id, (caption, start, end))
        """
        self.sample_len = 0
        # cut_off_points[i] holds the index of the LAST sentence belonging to video i,
        # used downstream to regroup sentence-level scores per video.
        self.cut_off_points = []
        self.multi_sentence_per_video = True
        video_id_path_dict = {}
        video_id_path_dict['train'] = os.path.join(self.anno_path, 'train_list.txt')
        video_id_path_dict['train_test'] = os.path.join(self.anno_path, 'train_list.txt')
        video_id_path_dict['val'] = os.path.join(self.anno_path, 'val_list.txt')
        video_id_path_dict['test'] = os.path.join(self.anno_path, 'test_list.txt')
        caption_file = os.path.join(self.anno_path, 'raw-captions.pkl')
        with open(video_id_path_dict[subset], 'r') as fp:
            video_ids = [itm.strip() for itm in fp.readlines()]
        # NOTE(review): pickle.load on a repo-provided annotation file; never point
        # this at untrusted data.
        with open(caption_file, 'rb') as f:
            captions = pickle.load(f)
        video_dict = OrderedDict()
        sentences_dict = OrderedDict()
        # Index every video file on disk whose id belongs to the split.
        for (root, dub_dir, video_files) in os.walk(self.video_path):
            for video_file in video_files:
                video_id_ = '.'.join(video_file.split('.')[:(- 1)])  # strip the extension
                if (video_id_ not in video_ids):
                    continue
                file_path_ = os.path.join(root, video_file)
                video_dict[video_id_] = file_path_
        for video_id in video_ids:
            assert (video_id in captions)
            for cap in captions[video_id]:
                cap_txt = ' '.join(cap)  # captions are stored as token lists
                sentences_dict[len(sentences_dict)] = (video_id, (cap_txt, None, None))
            self.cut_off_points.append((len(sentences_dict) - 1))
        if ((subset == 'val') or (subset == 'test')):
            self.sentence_num = len(sentences_dict)
            self.video_num = len(video_ids)
            assert (len(self.cut_off_points) == self.video_num)
            print('For {}, sentence number: {}'.format(subset, self.sentence_num))
            print('For {}, video number: {}'.format(subset, self.video_num))
        print('Video number: {}'.format(len(video_dict)))
        print('Total Paire: {}'.format(len(sentences_dict)))
        self.sample_len = len(sentences_dict)
        return (video_dict, sentences_dict)
def _interpolation(kwargs):
    """Pop the 'resample' option, picking one at random when several are given."""
    chosen = kwargs.pop('resample', Image.BILINEAR)
    if not isinstance(chosen, (list, tuple)):
        return chosen
    return random.choice(chosen)
def _check_args_tf(kwargs):
    """Normalize transform kwargs in place: drop 'fillcolor' on old PIL, resolve 'resample'."""
    if ('fillcolor' in kwargs) and (_PIL_VER < (5, 0)):
        # PIL < 5.0 does not accept a fillcolor argument for Image.transform.
        del kwargs['fillcolor']
    kwargs['resample'] = _interpolation(kwargs)
def shear_x(img, factor, **kwargs):
    """Shear ``img`` horizontally by ``factor``."""
    _check_args_tf(kwargs)
    affine = (1, factor, 0, 0, 1, 0)
    return img.transform(img.size, Image.AFFINE, affine, **kwargs)
def shear_y(img, factor, **kwargs):
    """Shear ``img`` vertically by ``factor``."""
    _check_args_tf(kwargs)
    affine = (1, 0, 0, factor, 1, 0)
    return img.transform(img.size, Image.AFFINE, affine, **kwargs)
def translate_x_rel(img, pct, **kwargs):
    """Translate ``img`` horizontally by a fraction ``pct`` of its width."""
    shift = pct * img.size[0]
    _check_args_tf(kwargs)
    return img.transform(img.size, Image.AFFINE, (1, 0, shift, 0, 1, 0), **kwargs)
def translate_y_rel(img, pct, **kwargs):
    """Translate ``img`` vertically by a fraction ``pct`` of its height."""
    shift = pct * img.size[1]
    _check_args_tf(kwargs)
    return img.transform(img.size, Image.AFFINE, (1, 0, 0, 0, 1, shift), **kwargs)
def translate_x_abs(img, pixels, **kwargs):
    """Translate ``img`` horizontally by an absolute number of ``pixels``."""
    _check_args_tf(kwargs)
    affine = (1, 0, pixels, 0, 1, 0)
    return img.transform(img.size, Image.AFFINE, affine, **kwargs)
def translate_y_abs(img, pixels, **kwargs):
    """Translate ``img`` vertically by an absolute number of ``pixels``."""
    _check_args_tf(kwargs)
    affine = (1, 0, 0, 0, 1, pixels)
    return img.transform(img.size, Image.AFFINE, affine, **kwargs)
def rotate(img, degrees, **kwargs):
    """Rotate ``img`` by ``degrees``, dispatching on the installed PIL version."""
    _check_args_tf(kwargs)
    if (_PIL_VER >= (5, 2)):
        # Modern PIL: rotate() accepts the resample/fillcolor kwargs directly.
        return img.rotate(degrees, **kwargs)
    elif (_PIL_VER >= (5, 0)):
        # PIL 5.0/5.1: emulate a center rotation with an explicit affine matrix.
        (w, h) = img.size
        post_trans = (0, 0)
        rotn_center = ((w / 2.0), (h / 2.0))
        # PIL rotates counter-clockwise for positive angles, hence the negation.
        angle = (- math.radians(degrees))
        matrix = [round(math.cos(angle), 15), round(math.sin(angle), 15), 0.0, round((- math.sin(angle)), 15), round(math.cos(angle), 15), 0.0]

        def transform(x, y, matrix):
            # Apply the 2x3 affine matrix (a, b, c, d, e, f) to the point (x, y).
            (a, b, c, d, e, f) = matrix
            return ((((a * x) + (b * y)) + c), (((d * x) + (e * y)) + f))
        # Adjust the translation terms so the rotation pivots around the image center.
        (matrix[2], matrix[5]) = transform(((- rotn_center[0]) - post_trans[0]), ((- rotn_center[1]) - post_trans[1]), matrix)
        matrix[2] += rotn_center[0]
        matrix[5] += rotn_center[1]
        return img.transform(img.size, Image.AFFINE, matrix, **kwargs)
    else:
        # Very old PIL: only the resample kwarg is supported by rotate().
        return img.rotate(degrees, resample=kwargs['resample'])
def auto_contrast(img, **_ignored):
    """Maximize image contrast via PIL autocontrast; extra kwargs are ignored."""
    return ImageOps.autocontrast(img)
def invert(img, **_ignored):
    """Invert image colors via PIL; extra kwargs are ignored."""
    return ImageOps.invert(img)
def equalize(img, **_ignored):
    """Equalize the image histogram via PIL; extra kwargs are ignored."""
    return ImageOps.equalize(img)
def solarize(img, thresh, **_ignored):
    """Invert pixels above ``thresh`` via PIL solarize; extra kwargs are ignored."""
    return ImageOps.solarize(img, thresh)
def solarize_add(img, add, thresh=128, **_ignored):
    """Add ``add`` (clamped at 255) to every pixel value below ``thresh``.

    Only 'L' and 'RGB' images are processed; other modes are returned unchanged.
    """
    # Build the 256-entry lookup table once, then replicate per channel for RGB.
    lut = [min(255, i + add) if i < thresh else i for i in range(256)]
    if img.mode not in ('L', 'RGB'):
        return img
    if img.mode == 'RGB' and len(lut) == 256:
        lut = lut * 3
    return img.point(lut)
def posterize(img, bits_to_keep, **_ignored):
    """Reduce ``img`` to ``bits_to_keep`` bits per channel; 8+ bits is a no-op."""
    return img if bits_to_keep >= 8 else ImageOps.posterize(img, bits_to_keep)
def contrast(img, factor, **_ignored):
    """Adjust contrast by ``factor`` (1.0 = unchanged); extra kwargs are ignored."""
    enhancer = ImageEnhance.Contrast(img)
    return enhancer.enhance(factor)
def color(img, factor, **_ignored):
    """Adjust color saturation by ``factor`` (1.0 = unchanged); extra kwargs are ignored."""
    enhancer = ImageEnhance.Color(img)
    return enhancer.enhance(factor)
def brightness(img, factor, **_ignored):
    """Adjust brightness by ``factor`` (1.0 = unchanged); extra kwargs are ignored."""
    enhancer = ImageEnhance.Brightness(img)
    return enhancer.enhance(factor)
def sharpness(img, factor, **_ignored):
    """Adjust sharpness by ``factor`` (1.0 = unchanged); extra kwargs are ignored."""
    enhancer = ImageEnhance.Sharpness(img)
    return enhancer.enhance(factor)
def _randomly_negate(v): 'With 50% prob, negate the value' return ((- v) if (random.random() > 0.5) else v)
def _rotate_level_to_arg(level, _hparams):
    """Map magnitude ``level`` to a rotation angle in [-30, 30] degrees."""
    degrees = (level / _MAX_LEVEL) * 30.0
    return (_randomly_negate(degrees),)
def _enhance_level_to_arg(level, _hparams):
    """Map magnitude ``level`` to an enhancement factor in [0.1, 1.9]."""
    factor = ((level / _MAX_LEVEL) * 1.8) + 0.1
    return (factor,)
def _enhance_increasing_level_to_arg(level, _hparams):
    """Map ``level`` to a factor of 1.0 plus/minus up to 0.9 (severity grows with level)."""
    delta = (level / _MAX_LEVEL) * 0.9
    return (1.0 + _randomly_negate(delta),)
def _shear_level_to_arg(level, _hparams):
    """Map magnitude ``level`` to a shear factor in [-0.3, 0.3]."""
    shear = (level / _MAX_LEVEL) * 0.3
    return (_randomly_negate(shear),)
def _translate_abs_level_to_arg(level, hparams):
    """Map ``level`` to an absolute pixel translation in [-translate_const, +translate_const]."""
    max_translate = float(hparams['translate_const'])
    offset = (level / _MAX_LEVEL) * max_translate
    return (_randomly_negate(offset),)
def _translate_rel_level_to_arg(level, hparams):
    """Map ``level`` to a relative translation in [-pct, +pct] (default pct 0.45)."""
    max_pct = hparams.get('translate_pct', 0.45)
    offset = (level / _MAX_LEVEL) * max_pct
    return (_randomly_negate(offset),)
def _posterize_level_to_arg(level, _hparams):
    """Map magnitude ``level`` to an integer in [0, 4] (posterize bits argument)."""
    bits = int((level / _MAX_LEVEL) * 4)
    return (bits,)
def _posterize_increasing_level_to_arg(level, hparams):
    """Invert the posterize mapping so the argument shrinks as ``level`` rises."""
    base = _posterize_level_to_arg(level, hparams)[0]
    return (4 - base,)
def _posterize_original_level_to_arg(level, _hparams):
    """Map magnitude ``level`` to an integer in [4, 8] (AutoAugment-paper range)."""
    bits = 4 + int((level / _MAX_LEVEL) * 4)
    return (bits,)
def _solarize_level_to_arg(level, _hparams):
    """Map magnitude ``level`` to a solarize threshold in [0, 256]."""
    thresh = int((level / _MAX_LEVEL) * 256)
    return (thresh,)
def _solarize_increasing_level_to_arg(level, _hparams):
    """Inverted solarize mapping: the threshold falls as ``level`` rises."""
    base = _solarize_level_to_arg(level, _hparams)[0]
    return (256 - base,)
def _solarize_add_level_to_arg(level, _hparams):
    """Map magnitude ``level`` to a solarize-add amount in [0, 110]."""
    amount = int((level / _MAX_LEVEL) * 110)
    return (amount,)
class AugmentOp():
    """A single named augmentation op, applied with probability ``prob`` to a PIL
    image or, frame-by-frame with shared arguments, to a list of frames (video)."""

    def __init__(self, name, prob=0.5, magnitude=10, hparams=None):
        hparams = (hparams or _HPARAMS_DEFAULT)
        self.aug_fn = NAME_TO_OP[name]      # the PIL-level transform function
        self.level_fn = LEVEL_TO_ARG[name]  # magnitude -> op-argument mapping (may be None)
        self.prob = prob
        self.magnitude = magnitude
        self.hparams = hparams.copy()
        # Fill color and resampling filter forwarded to the PIL transform calls.
        self.kwargs = {'fillcolor': (hparams['img_mean'] if ('img_mean' in hparams) else _FILL), 'resample': (hparams['interpolation'] if ('interpolation' in hparams) else _RANDOM_INTERPOLATION)}
        # Optional gaussian jitter applied to the magnitude (0 disables it).
        self.magnitude_std = self.hparams.get('magnitude_std', 0)

    def __call__(self, img_list):
        # Stochastically skip the op altogether.
        if ((self.prob < 1.0) and (random.random() > self.prob)):
            return img_list
        magnitude = self.magnitude
        if (self.magnitude_std and (self.magnitude_std > 0)):
            # Jitter the magnitude with gaussian noise.
            magnitude = random.gauss(magnitude, self.magnitude_std)
        # Clamp into the valid [0, _MAX_LEVEL] range.
        magnitude = min(_MAX_LEVEL, max(0, magnitude))
        level_args = (self.level_fn(magnitude, self.hparams) if (self.level_fn is not None) else ())
        # Same sampled arguments for every frame so a clip is transformed coherently.
        if isinstance(img_list, list):
            return [self.aug_fn(img, *level_args, **self.kwargs) for img in img_list]
        else:
            return self.aug_fn(img_list, *level_args, **self.kwargs)
def _select_rand_weights(weight_idx=0, transforms=None):
    """Return normalized per-op sampling probabilities for the given weight set."""
    transforms = transforms or _RAND_TRANSFORMS
    assert weight_idx == 0  # only one weight set is currently defined
    weights = _RAND_CHOICE_WEIGHTS_0
    probs = [weights[name] for name in transforms]
    # Normalize to a proper probability distribution (result is a numpy array).
    return probs / np.sum(probs)
def rand_augment_ops(magnitude=10, hparams=None, transforms=None):
    """Instantiate one AugmentOp (prob=0.5) per transform name."""
    hparams = hparams or _HPARAMS_DEFAULT
    names = transforms or _RAND_TRANSFORMS
    return [AugmentOp(op_name, prob=0.5, magnitude=magnitude, hparams=hparams)
            for op_name in names]
class RandAugment():
    """Apply ``num_layers`` ops drawn at random from ``ops`` to an image (or frame list)."""

    def __init__(self, ops, num_layers=2, choice_weights=None):
        self.ops = ops
        self.num_layers = num_layers
        self.choice_weights = choice_weights

    def __call__(self, img):
        # Sample with replacement only when no explicit weights are given, matching
        # the np.random.choice call of the original implementation.
        chosen = np.random.choice(self.ops, self.num_layers,
                                  replace=(self.choice_weights is None),
                                  p=self.choice_weights)
        for op in chosen:
            img = op(img)
        return img
def rand_augment_transform(config_str, hparams):
    """
    RandAugment: Practical automated data augmentation... - https://arxiv.org/abs/1909.13719

    Create a RandAugment transform
    :param config_str: String defining configuration of random augmentation. Consists of multiple sections separated by
    dashes ('-'). The first section defines the specific variant of rand augment (currently only 'rand'). The remaining
    sections, not order sepecific determine
        'm' - integer magnitude of rand augment
        'n' - integer num layers (number of transform ops selected per image)
        'w' - integer probabiliy weight index (index of a set of weights to influence choice of op)
        'mstd' - float std deviation of magnitude noise applied
        'inc' - integer (bool), use augmentations that increase in severity with magnitude (default: 0)
    Ex 'rand-m9-n3-mstd0.5' results in RandAugment with magnitude 9, num_layers 3, magnitude_std 0.5
    'rand-mstd1-w0' results in magnitude_std 1.0, weights 0, default magnitude of 10 and num_layers 2
    :param hparams: Other hparams (kwargs) for the RandAugmentation scheme
    :return: A PyTorch compatible Transform
    """
    magnitude = _MAX_LEVEL
    num_layers = 2
    weight_idx = None
    transforms = _RAND_TRANSFORMS
    config = config_str.split('-')
    assert (config[0] == 'rand')
    config = config[1:]
    for c in config:
        # Split each section into its key prefix and numeric value, e.g. 'm9' -> ('m', '9').
        cs = re.split(r'(\d.*)', c)
        if (len(cs) < 2):
            continue
        (key, val) = cs[:2]
        if (key == 'mstd'):
            hparams.setdefault('magnitude_std', float(val))
        elif (key == 'inc'):
            # NOTE(review): bool('0') is True, so 'inc0' still enables the increasing
            # transform set -- quirk kept for parity with the timm implementation.
            if bool(val):
                transforms = _RAND_INCREASING_TRANSFORMS
        elif (key == 'm'):
            magnitude = int(val)
        elif (key == 'n'):
            num_layers = int(val)
        elif (key == 'w'):
            weight_idx = int(val)
        else:
            # BUGFIX: was `assert NotImplementedError`, which is always truthy and
            # silently ignored unknown config keys; fail loudly instead.
            raise NotImplementedError('Unknown RandAugment config section: {}'.format(c))
    ra_ops = rand_augment_ops(magnitude=magnitude, hparams=hparams, transforms=transforms)
    choice_weights = (None if (weight_idx is None) else _select_rand_weights(weight_idx))
    return RandAugment(ra_ops, num_layers, choice_weights=choice_weights)
class RawVideoExtractorCV2():
    """Decode frames from a video file with OpenCV and preprocess them into tensors.

    NOTE(review): relies on module-level names imported elsewhere in this file
    (cv2, np, th, Image, Compose, Resize, CenterCrop, ToTensor, Normalize,
    InterpolationMode, RandomResizedCrop, RandomHorizontalFlip,
    video_transforms).
    """

    def __init__(self, centercrop=False, size=224, framerate=(- 1), subset='test'):
        # `centercrop` is stored but the actual center crop is always part of
        # self.transform below; the flag itself is never read here.
        self.centercrop = centercrop
        self.size = size
        # Frames sampled per second; -1/0 means "use the native fps"
        # (see video_to_tensor, where sample_fp <= 0 falls back to fps).
        self.framerate = framerate
        self.transform = self._transform(self.size)
        self.subset = subset
        # CLIP-style preprocessing pipelines; the mean/std tuples are the
        # standard CLIP normalization constants.
        self.tsfm_dict = {'clip_test': Compose([Resize(size, interpolation=InterpolationMode.BICUBIC), CenterCrop(size), (lambda image: image.convert('RGB')), ToTensor(), Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711))]), 'clip_train': Compose([RandomResizedCrop(size, scale=(0.5, 1.0)), RandomHorizontalFlip(), (lambda image: image.convert('RGB')), ToTensor(), Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711))])}
        # RandAugment pipeline applied to the whole frame list when
        # self.subset == 'train' (see video_to_tensor).
        self.aug_transform = video_transforms.create_random_augment(input_size=(size, size), auto_augment='rand-m7-n4-mstd0.5-inc1', interpolation='bicubic')

    def _transform(self, n_px):
        # Default (test-time) preprocessing: resize, center-crop, CLIP-normalize.
        return Compose([Resize(n_px, interpolation=InterpolationMode.BICUBIC), CenterCrop(n_px), (lambda image: image.convert('RGB')), ToTensor(), Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711))])

    def video_to_tensor(self, video_file, preprocess, sample_fp=0, start_time=None, end_time=None, _no_process=False):
        """Decode `video_file`, sampling `sample_fp` frames per second between
        `start_time` and `end_time` (seconds), and return {'video': data}.

        `data` is a stacked tensor of preprocessed frames, a list of PIL
        images when `_no_process` is True, or th.zeros(1) when no frame
        could be read.
        """
        if ((start_time is not None) or (end_time is not None)):
            # When a time window is given, both bounds must be ints with
            # start >= 0 and end strictly after start.
            assert (isinstance(start_time, int) and isinstance(end_time, int) and (start_time > (- 1)) and (end_time > start_time))
        assert (sample_fp > (- 1))
        cap = cv2.VideoCapture(video_file)
        frameCount = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
        fps = int(cap.get(cv2.CAP_PROP_FPS))
        if (fps == 0):
            # NOTE(review): fps == 0 (unreadable/corrupt file) is only
            # logged here; the division below will then raise
            # ZeroDivisionError — confirm this is the intended failure mode.
            print(((video_file + '\n') * 10))
        # Ceiling division: duration in whole seconds.
        total_duration = (((frameCount + fps) - 1) // fps)
        (start_sec, end_sec) = (0, total_duration)
        if (start_time is not None):
            # Clamp the requested end to the actual video duration and seek
            # to the starting frame.
            (start_sec, end_sec) = (start_time, (end_time if (end_time <= total_duration) else total_duration))
            cap.set(cv2.CAP_PROP_POS_FRAMES, int((start_time * fps)))
        interval = 1
        if (sample_fp > 0):
            # Take one frame every `interval` frames within each second.
            interval = (fps // sample_fp)
        else:
            # sample_fp <= 0 means sample every frame of each second.
            sample_fp = fps
        if (interval == 0):
            interval = 1
        # Frame offsets (within one second) to grab; trimmed to sample_fp.
        inds = [ind for ind in np.arange(0, fps, interval)]
        assert (len(inds) >= sample_fp)
        inds = inds[:sample_fp]
        ret = True
        # `included` is never used below — dead variable kept as-is.
        (images, included) = ([], [])
        for sec in np.arange(start_sec, (end_sec + 1)):
            if (not ret):
                break
            sec_base = int((sec * fps))
            for ind in inds:
                # Random-access seek per sampled frame, then decode it.
                cap.set(cv2.CAP_PROP_POS_FRAMES, (sec_base + ind))
                (ret, frame) = cap.read()
                if (not ret):
                    break
                # OpenCV decodes BGR; convert to RGB before PIL/tensor use.
                frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                if _no_process:
                    images.append(Image.fromarray(frame_rgb).convert('RGB'))
                else:
                    images.append(Image.fromarray(frame_rgb))
        cap.release()
        if (len(images) > 0):
            if _no_process:
                # Return raw PIL images, skipping augmentation/preprocessing.
                video_data = images
            else:
                if (self.subset == 'train'):
                    # RandAugment is applied to the full frame list at once.
                    images = self.aug_transform(images)
                video_data = th.stack([preprocess(img) for img in images])
        else:
            # Sentinel for "no frames decoded"; callers must handle shape (1,).
            video_data = th.zeros(1)
        return {'video': video_data}

    def get_video_data(self, video_path, start_time=None, end_time=None, _no_process=False):
        """Convenience wrapper: decode with the default transform and the
        configured framerate."""
        image_input = self.video_to_tensor(video_path, self.transform, sample_fp=self.framerate, start_time=start_time, end_time=end_time, _no_process=_no_process)
        return image_input

    def process_raw_data(self, raw_video_data):
        """Reshape (..., C, H, W) frame data to (N, 1, C, H, W)."""
        tensor_size = raw_video_data.size()
        tensor = raw_video_data.view((- 1), 1, tensor_size[(- 3)], tensor_size[(- 2)], tensor_size[(- 1)])
        return tensor

    def process_frame_order(self, raw_video_data, frame_order=0):
        """Reorder frames along dim 0: 0 = unchanged, 1 = reversed, 2 = shuffled."""
        if (frame_order == 0):
            pass
        elif (frame_order == 1):
            reverse_order = np.arange((raw_video_data.size(0) - 1), (- 1), (- 1))
            raw_video_data = raw_video_data[(reverse_order, ...)]
        elif (frame_order == 2):
            # In-place shuffle of an index array, then fancy-index the frames.
            random_order = np.arange(raw_video_data.size(0))
            np.random.shuffle(random_order)
            raw_video_data = raw_video_data[(random_order, ...)]
        return raw_video_data
class LayerNorm(nn.LayerNorm):
    """LayerNorm that always normalizes in float32, then casts back.

    Subclasses torch's LayerNorm so fp16 inputs are handled without
    precision loss in the normalization statistics.
    """

    def forward(self, x: torch.Tensor):
        original_dtype = x.dtype
        # Normalize in float32 regardless of the input dtype.
        normalized = super().forward(x.type(torch.float32))
        # Restore the caller's dtype on the way out.
        return normalized.type(original_dtype)
class QuickGELU(nn.Module):
    """Sigmoid-based GELU approximation used by CLIP: x * sigmoid(1.702 * x)."""

    def forward(self, x: torch.Tensor):
        gate = torch.sigmoid(1.702 * x)
        return x * gate
class ResidualAttentionBlock(nn.Module):
    """Pre-LN transformer block: multi-head self-attention and a 4x MLP,
    each wrapped in a residual connection.

    forward() takes and returns an (x, attn_mask) tuple so blocks can be
    chained through nn.Sequential while threading the mask along.
    """

    def __init__(self, d_model: int, n_head: int, attn_mask=None):
        super(ResidualAttentionBlock, self).__init__()
        self.attn = nn.MultiheadAttention(d_model, n_head)
        self.ln_1 = LayerNorm(d_model)
        # 4x-expansion MLP with QuickGELU activation (CLIP convention).
        self.mlp = nn.Sequential(OrderedDict([('c_fc', nn.Linear(d_model, (d_model * 4))), ('gelu', QuickGELU()), ('c_proj', nn.Linear((d_model * 4), d_model))]))
        self.ln_2 = LayerNorm(d_model)
        # Stored for interface compatibility; the mask actually used is the
        # one passed to forward() at call time.
        self.attn_mask = attn_mask
        self.n_head = n_head

    def attention(self, x: torch.Tensor, attn_mask_: torch.Tensor):
        """Self-attention over x with an optional per-sample attention mask."""
        # BUG FIX: the None check must come first. The original called
        # attn_mask_.repeat_interleave(...) unconditionally, so a None mask
        # raised AttributeError and the None branch of the ternary was dead.
        if (attn_mask_ is not None):
            # nn.MultiheadAttention expects a 3-D mask of shape
            # (batch * n_head, L, S); replicate the per-sample mask per head.
            attn_mask_ = attn_mask_.repeat_interleave(self.n_head, dim=0)
            attn_mask_ = attn_mask_.to(dtype=x.dtype, device=x.device)
        return self.attn(x, x, x, need_weights=False, attn_mask=attn_mask_)[0]

    def forward(self, para_tuple: tuple):
        (x, attn_mask) = para_tuple
        x = (x + self.attention(self.ln_1(x), attn_mask))
        x = (x + self.mlp(self.ln_2(x)))
        return (x, attn_mask)
class Transformer(nn.Module):
    """A stack of ResidualAttentionBlocks that threads an attention mask
    through every layer and returns only the transformed features."""

    def __init__(self, width: int, layers: int, heads: int, attn_mask=None):
        super(Transformer, self).__init__()
        self.width = width
        self.layers = layers
        # NOTE: attn_mask is accepted for interface compatibility but the
        # mask actually applied is the one given to forward() per call.
        blocks = [ResidualAttentionBlock(width, heads) for _ in range(layers)]
        self.resblocks = nn.Sequential(*blocks)

    def forward(self, x: torch.Tensor, attn_mask: torch.Tensor):
        # Each block consumes and re-emits (x, mask); drop the mask at the end.
        out, _ = self.resblocks((x, attn_mask))
        return out
def warmup_cosine(x, warmup=0.002):
    """Linear warmup to 1, then cosine decay toward 0 over the schedule.

    x is the fraction of total training completed, in [0, 1].
    """
    if x >= warmup:
        return 0.5 * (1.0 + math.cos(math.pi * x))
    return x / warmup
def warmup_constant(x, warmup=0.002):
    """Linearly increases learning rate over `warmup`*`t_total` (as provided
    to BertAdam) training steps. Learning rate is 1. afterwards."""
    return (x / warmup) if x < warmup else 1.0
def warmup_linear(x, warmup=0.002):
    """Triangular learning-rate schedule: peak 1.0 at x == warmup (fraction of
    `t_total`, as provided to BertAdam), then linear decay to 0 at x == 1."""
    if x < warmup:
        return x / warmup
    # Decay leg: 1.0 at x == warmup, 0.0 at x == 1, clamped below at 0.
    return max((x - 1.0) / (warmup - 1.0), 0)
class BertAdam(Optimizer):
    """Implements the BERT version of the Adam algorithm with weight decay fix
    (decoupled weight decay, no bias correction).

    Params:
        lr: learning rate
        warmup: portion of t_total for the warmup, -1 means no warmup. Default: -1
        t_total: total number of training steps for the learning
            rate schedule, -1 means constant learning rate. Default: -1
        schedule: schedule to use for the warmup (a key of the module-level
            SCHEDULES dict, e.g. the warmup_* functions). Default: 'warmup_linear'
        b1: Adams b1. Default: 0.9
        b2: Adams b2. Default: 0.999
        e: Adams epsilon. Default: 1e-6
        weight_decay: Weight decay. Default: 0.01
        max_grad_norm: Maximum norm for the gradients (-1 means no clipping). Default: 1.0

    NOTE(review): `Optimizer`, `required`, `SCHEDULES` and `clip_grad_norm_`
    are module-level names defined elsewhere in this file.
    """

    def __init__(self, params, lr=required, warmup=(- 1), t_total=(- 1), schedule='warmup_linear', b1=0.9, b2=0.999, e=1e-06, weight_decay=0.01, max_grad_norm=1.0):
        # Validate hyperparameters up front so bad configs fail fast.
        if ((lr is not required) and (lr < 0.0)):
            raise ValueError('Invalid learning rate: {} - should be >= 0.0'.format(lr))
        if (schedule not in SCHEDULES):
            raise ValueError('Invalid schedule parameter: {}'.format(schedule))
        # warmup must be a fraction in [0, 1) or the sentinel -1 (no warmup).
        if ((not (0.0 <= warmup < 1.0)) and (not (warmup == (- 1)))):
            raise ValueError('Invalid warmup: {} - should be in [0.0, 1.0[ or -1'.format(warmup))
        if (not (0.0 <= b1 < 1.0)):
            raise ValueError('Invalid b1 parameter: {} - should be in [0.0, 1.0['.format(b1))
        if (not (0.0 <= b2 < 1.0)):
            raise ValueError('Invalid b2 parameter: {} - should be in [0.0, 1.0['.format(b2))
        if (not (e >= 0.0)):
            raise ValueError('Invalid epsilon value: {} - should be >= 0.0'.format(e))
        defaults = dict(lr=lr, schedule=schedule, warmup=warmup, t_total=t_total, b1=b1, b2=b2, e=e, weight_decay=weight_decay, max_grad_norm=max_grad_norm)
        super(BertAdam, self).__init__(params, defaults)

    def get_lr(self):
        """Return the current scheduled learning rate for each parameter
        with a gradient; returns [0] if no step has been taken yet."""
        lr = []
        for group in self.param_groups:
            for p in group['params']:
                if (p.grad is None):
                    continue
                state = self.state[p]
                if (len(state) == 0):
                    # Optimizer state not initialized yet (no step() taken).
                    return [0]
                if (group['t_total'] != (- 1)):
                    schedule_fct = SCHEDULES[group['schedule']]
                    lr_scheduled = (group['lr'] * schedule_fct((state['step'] / group['t_total']), group['warmup']))
                else:
                    lr_scheduled = group['lr']
                lr.append(lr_scheduled)
        return lr

    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if (closure is not None):
            loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                if (p.grad is None):
                    continue
                grad = p.grad.data
                if grad.is_sparse:
                    raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
                state = self.state[p]
                # Lazy state init: step counter plus first/second moment buffers.
                if (len(state) == 0):
                    state['step'] = 0
                    state['next_m'] = torch.zeros_like(p.data)
                    state['next_v'] = torch.zeros_like(p.data)
                (next_m, next_v) = (state['next_m'], state['next_v'])
                (beta1, beta2) = (group['b1'], group['b2'])
                # Per-parameter gradient clipping (not a global norm).
                if (group['max_grad_norm'] > 0):
                    clip_grad_norm_(p, group['max_grad_norm'])
                # Exponential moving averages of the gradient and its square.
                next_m.mul_(beta1).add_(grad, alpha=(1 - beta1))
                next_v.mul_(beta2).addcmul_(grad, grad, value=(1 - beta2))
                # BERT-style Adam: no bias correction on the moments.
                update = (next_m / (next_v.sqrt() + group['e']))
                # Decoupled ("fixed") weight decay: added to the update, not
                # folded into the gradient, so it is not scaled by the moments.
                if (group['weight_decay'] > 0.0):
                    update += (group['weight_decay'] * p.data)
                if (group['t_total'] != (- 1)):
                    schedule_fct = SCHEDULES[group['schedule']]
                    # step is incremented after use, so the first update runs
                    # at schedule progress 0.
                    progress = (state['step'] / group['t_total'])
                    lr_scheduled = (group['lr'] * schedule_fct(progress, group['warmup']))
                else:
                    lr_scheduled = group['lr']
                update_with_lr = (lr_scheduled * update)
                p.data.add_((- update_with_lr))
                state['step'] += 1
        return loss
@lru_cache()
def default_bpe():
    """Absolute path to the bundled BPE vocab file, located next to this module."""
    here = os.path.dirname(os.path.abspath(__file__))
    return os.path.join(here, 'bpe_simple_vocab_16e6.txt.gz')
@lru_cache()
def bytes_to_unicode():
    """Build the reversible byte -> unicode-character table used by the BPE
    tokenizer.

    Printable ASCII and the latin-1 ranges map to themselves; every remaining
    byte is assigned a codepoint starting at 256, so no byte ever maps to a
    whitespace/control character that the BPE code would choke on. The result
    is a dict covering all 256 byte values with 256 distinct characters.
    """
    base = (list(range(ord('!'), ord('~') + 1))
            + list(range(ord('¡'), ord('¬') + 1))
            + list(range(ord('®'), ord('ÿ') + 1)))
    codes = base[:]
    taken = set(base)
    shift = 0
    # Assign the bytes not covered above to fresh codepoints >= 256.
    for byte in range(2 ** 8):
        if byte not in taken:
            base.append(byte)
            codes.append((2 ** 8) + shift)
            shift += 1
    return dict(zip(base, [chr(c) for c in codes]))