class NotMNIST(Dataset):
    def __init__(self, root, train=False, download=False):
        assert not train, 'Only test set available for NotMNIST'
        self.data, self.targets = self._load_tensors(root)

    def _load_tensors(self, root):
        data_path = os.path.join(root, 'data.pt')
        targets_path = os.path.join(root, 'targets.pt')

        try:
            with open(data_path, 'rb') as f:
                data = torch.load(f)
            with open(targets_path, 'rb') as f:
                targets = torch.load(f)
        except FileNotFoundError:
            data, targets = self._load_raw_images(os.path.join(root, 'notMNIST_small'))
            torch.save(data, data_path)
            torch.save(targets, targets_path)

        return data, targets

    def _load_raw_images(self, root):
        data = []
        targets = []
        for letter in os.listdir(root):
            folder_path = os.path.join(root, letter)
            for basename in os.listdir(folder_path):
                try:
                    img_path = os.path.join(folder_path, basename)
                    data.append(np.array(imageio.imread(img_path)))
                    targets.append('ABCDEFGHIJ'.index(letter))
                except ValueError:
                    print('File {}/{} is broken'.format(letter, basename), flush=True)

        data = torch.tensor(data)
        targets = torch.tensor(targets)
        return data, targets
def get_raw_image_tensors(dataset_name, train, data_root):
    data_dir = os.path.join(data_root, dataset_name)

    if dataset_name == 'cifar10':
        dataset = torchvision.datasets.CIFAR10(root=data_dir, train=train, download=True)
        images = torch.tensor(dataset.data).permute((0, 3, 1, 2))
        labels = torch.tensor(dataset.targets)

    elif dataset_name == 'svhn':
        dataset = torchvision.datasets.SVHN(root=data_dir, split='train' if train else 'test', download=True)
        images = torch.tensor(dataset.data)
        labels = torch.tensor(dataset.labels)

    elif dataset_name in ['mnist', 'fashion-mnist']:
        dataset_class = {
            'mnist': torchvision.datasets.MNIST,
            'fashion-mnist': torchvision.datasets.FashionMNIST
        }[dataset_name]
        dataset = dataset_class(root=data_dir, train=train, download=True)
        images = dataset.data.unsqueeze(1)
        labels = dataset.targets

    else:
        raise ValueError(f'Unknown dataset {dataset_name}')

    return images.to(torch.uint8), labels.to(torch.uint8)
def image_tensors_to_supervised_dataset(dataset_name, dataset_role, images, labels):
    images = images.to(dtype=torch.get_default_dtype())
    labels = labels.long()
    return SupervisedDataset(dataset_name, dataset_role, images, labels)
def get_train_valid_image_datasets(dataset_name, data_root, valid_fraction, add_train_hflips):
    images, labels = get_raw_image_tensors(dataset_name, train=True, data_root=data_root)

    perm = torch.randperm(images.shape[0])
    shuffled_images = images[perm]
    shuffled_labels = labels[perm]

    valid_size = int(valid_fraction * images.shape[0])
    valid_images = shuffled_images[:valid_size]
    valid_labels = shuffled_labels[:valid_size]
    train_images = shuffled_images[valid_size:]
    train_labels = shuffled_labels[valid_size:]

    if add_train_hflips:
        train_images = torch.cat((train_images, train_images.flip([3])))
        train_labels = torch.cat((train_labels, train_labels))

    train_dset = image_tensors_to_supervised_dataset(dataset_name, 'train', train_images, train_labels)
    valid_dset = image_tensors_to_supervised_dataset(dataset_name, 'valid', valid_images, valid_labels)

    return train_dset, valid_dset
def get_test_image_dataset(dataset_name, data_root):
    images, labels = get_raw_image_tensors(dataset_name, train=False, data_root=data_root)
    return image_tensors_to_supervised_dataset(dataset_name, 'test', images, labels)
def get_image_datasets(dataset_name, data_root, make_valid_dset):
    valid_fraction = 0.1 if make_valid_dset else 0
    add_train_hflips = False
    train_dset, valid_dset = get_train_valid_image_datasets(dataset_name, data_root, valid_fraction, add_train_hflips)
    test_dset = get_test_image_dataset(dataset_name, data_root)
    return train_dset, valid_dset, test_dset
def get_loader(dset, device, batch_size, drop_last):
    return torch.utils.data.DataLoader(
        dset.to(device),
        batch_size=batch_size,
        shuffle=True,
        drop_last=drop_last,
        num_workers=0,
        pin_memory=False
    )
def get_loaders(dataset, device, data_root, make_valid_loader, train_batch_size, valid_batch_size, test_batch_size):
    print('Loading data...', end='', flush=True, file=sys.stderr)

    if dataset in ['cifar10', 'svhn', 'mnist', 'fashion-mnist']:
        train_dset, valid_dset, test_dset = get_image_datasets(dataset, data_root, make_valid_loader)
    elif dataset in ['miniboone', 'hepmass', 'power', 'gas', 'bsds300']:
        train_dset, valid_dset, test_dset = get_tabular_datasets(dataset, data_root)
    elif dataset == 'linear-gaussian':
        train_dset, valid_dset, test_dset = get_linear_gaussian_datasets()
    else:
        train_dset, valid_dset, test_dset = get_2d_datasets(dataset)

    print('Done.', file=sys.stderr)

    train_loader = get_loader(train_dset, device, train_batch_size, drop_last=True)

    if make_valid_loader:
        valid_loader = get_loader(valid_dset, device, valid_batch_size, drop_last=False)
    else:
        valid_loader = None

    test_loader = get_loader(test_dset, device, test_batch_size, drop_last=False)

    return train_loader, valid_loader, test_loader
class SupervisedDataset(torch.utils.data.Dataset):
    def __init__(self, name, role, x, y=None):
        if y is None:
            y = torch.zeros(x.shape[0]).long()

        assert x.shape[0] == y.shape[0]
        assert role in ['train', 'valid', 'test']

        self.name = name
        self.role = role
        self.x = x
        self.y = y

    def __len__(self):
        return self.x.shape[0]

    def __getitem__(self, index):
        return self.x[index], self.y[index]

    def to(self, device):
        return SupervisedDataset(self.name, self.role, self.x.to(device), self.y.to(device))
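# A minimal sanity check for the data pipeline above. Illustrative only: the
# shapes are arbitrary, and it assumes the SupervisedDataset and get_loader
# definitions in this module plus the torch import used throughout.
def _demo_supervised_dataset():
    x = torch.randn(100, 1, 28, 28)
    dset = SupervisedDataset('toy', 'train', x)  # labels default to zeros
    loader = get_loader(dset, torch.device('cpu'), batch_size=32, drop_last=True)
    xb, yb = next(iter(loader))
    assert xb.shape == (32, 1, 28, 28) and yb.shape == (32,)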
def train(config, load_dir):
    density, trainer, writer = setup_experiment(config=config, load_dir=load_dir, checkpoint_to_load='latest')

    writer.write_json('config', config)
    writer.write_json('model', {'num_params': num_params(density), 'schema': get_schema(config)})
    writer.write_textfile('git-head', subprocess.check_output(['git', 'rev-parse', 'HEAD']).decode('ascii'))
    writer.write_textfile('git-diff', subprocess.check_output(['git', 'diff']).decode('ascii'))

    print('\nConfig:')
    print(json.dumps(config, indent=4))
    print(f'\nNumber of parameters: {num_params(density):,}\n')

    trainer.train()
def print_test_metrics(config, load_dir):
    _, trainer, _ = setup_experiment(
        config={**config, 'write_to_disk': False},
        load_dir=load_dir,
        checkpoint_to_load='best_valid'
    )

    with torch.no_grad():
        test_metrics = trainer.test()

    test_metrics = {k: v.item() for k, v in test_metrics.items()}
    print(json.dumps(test_metrics, indent=4))
def print_model(config):
    density, _, _, _ = setup_density_and_loaders(config={**config, 'write_to_disk': False}, device=torch.device('cpu'))
    print(density)
def print_num_params(config):
    density, _, _, _ = setup_density_and_loaders(config={**config, 'write_to_disk': False}, device=torch.device('cpu'))
    print(f'Number of parameters: {num_params(density):,}')
def setup_density_and_loaders(config, device):
    train_loader, valid_loader, test_loader = get_loaders(
        dataset=config['dataset'],
        device=device,
        data_root=config['data_root'],
        make_valid_loader=config['early_stopping'],
        train_batch_size=config['train_batch_size'],
        valid_batch_size=config['valid_batch_size'],
        test_batch_size=config['test_batch_size']
    )

    density = get_density(schema=get_schema(config=config), x_train=train_loader.dataset.x)
    density.to(device)

    return density, train_loader, valid_loader, test_loader
def load_run(run_dir, device):
    run_dir = Path(run_dir)

    with open(run_dir / 'config.json', 'r') as f:
        config = json.load(f)

    density, train_loader, valid_loader, test_loader = setup_density_and_loaders(config=config, device=device)

    try:
        checkpoint = torch.load(run_dir / 'checkpoints' / 'best_valid.pt', map_location=device)
    except FileNotFoundError:
        checkpoint = torch.load(run_dir / 'checkpoints' / 'latest.pt', map_location=device)

    print('Loaded checkpoint after epoch', checkpoint['epoch'])

    density.load_state_dict(checkpoint['module_state_dict'])

    return density, train_loader, valid_loader, test_loader, config, checkpoint
def setup_experiment(config, load_dir, checkpoint_to_load):
    torch.manual_seed(config['seed'])
    np.random.seed(config['seed'] + 1)
    random.seed(config['seed'] + 2)

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    density, train_loader, valid_loader, test_loader = setup_density_and_loaders(config=config, device=device)

    if config['write_to_disk']:
        if load_dir is None:
            logdir = config['logdir_root']
            make_subdir = True
        else:
            logdir = load_dir
            make_subdir = False
        writer = Writer(logdir=logdir, make_subdir=make_subdir, tag_group=config['dataset'])
    else:
        writer = DummyWriter(logdir=load_dir)

    if config['dataset'] in ['cifar10', 'svhn', 'fashion-mnist', 'mnist']:
        visualizer = ImageDensityVisualizer(writer=writer)
    elif train_loader.dataset.x.shape[1:] == (2,):
        visualizer = TwoDimensionalDensityVisualizer(
            writer=writer,
            x_train=train_loader.dataset.x,
            num_importance_samples=config['num_test_importance_samples'],
            device=device
        )
    else:
        visualizer = DummyDensityVisualizer(writer=writer)

    train_metrics, opts = get_train_metrics(density, config)

    lr_schedulers = {
        param_name: get_lr_scheduler(opt, len(train_loader), config)
        for param_name, opt in opts.items()
    }

    def valid_loss(density, x):
        key = f"iwae-{config['num_valid_importance_samples']}"
        return -metrics(density, x, config['num_valid_importance_samples'])[key]

    def test_metrics(density, x):
        return metrics(density, x, config['num_test_importance_samples'])

    trainer = Trainer(
        module=density,
        train_metrics=train_metrics,
        valid_loss=valid_loss,
        test_metrics=test_metrics,
        train_loader=train_loader,
        valid_loader=valid_loader,
        test_loader=test_loader,
        opts=opts,
        lr_schedulers=lr_schedulers,
        max_epochs=config['max_epochs'],
        max_grad_norm=config['max_grad_norm'],
        early_stopping=config['early_stopping'],
        max_bad_valid_epochs=config['max_bad_valid_epochs'],
        visualizer=visualizer,
        writer=writer,
        epochs_per_test=config['epochs_per_test'],
        should_checkpoint_latest=config['should_checkpoint_latest'],
        should_checkpoint_best_valid=config['should_checkpoint_best_valid'],
        checkpoint_to_load=checkpoint_to_load,
        device=device
    )

    return density, trainer, writer
def get_lr_scheduler(opt, num_train_batches, config):
    if config['lr_schedule'] == 'cosine':
        return torch.optim.lr_scheduler.CosineAnnealingLR(
            optimizer=opt,
            T_max=config['max_epochs'] * num_train_batches,
            eta_min=0.
        )
    elif config['lr_schedule'] == 'none':
        return torch.optim.lr_scheduler.LambdaLR(optimizer=opt, lr_lambda=lambda epoch: 1.)
    else:
        assert False, f"Invalid learning rate schedule `{config['lr_schedule']}'"
def get_train_metrics(density, config):
    if config['train_objective'] == 'iwae':
        train_metric = lambda density, x: {
            'losses': {
                'pq-loss': iwae(density, x, config['num_train_importance_samples'], detach_q=False)
            }
        }
        opt = get_opt(density.parameters(), config)
        return train_metric, {'pq-loss': opt}

    else:
        assert config['num_u_channels'] > 0, \
            f"Invalid training objective `{config['train_objective']}' for a deterministic model"

        q_loss = get_q_loss(config)

        train_metrics = lambda density, x: {
            'losses': {
                'p-loss': iwae(density, x, config['num_train_importance_samples'], detach_q=True),
                'q-loss': q_loss(density, x)
            }
        }

        p_opt = get_opt(density.p_parameters(), config)
        q_opt = get_opt(density.q_parameters(), config)

        return train_metrics, {'p-loss': p_opt, 'q-loss': q_opt}
def get_q_loss(config):
    train_objective = config['train_objective']

    if train_objective == 'rws':
        return lambda density, x: rws(density, x, config['num_train_importance_samples'])
    elif train_objective == 'rws-dreg':
        return lambda density, x: rws_dreg(density, x, config['num_train_importance_samples'])
    elif train_objective in ['iwae-stl', 'iwae-dreg']:
        grad_weight_pow = 1 if train_objective == 'iwae-stl' else 2
        return lambda density, x: iwae_alt(density, x, config['num_train_importance_samples'], grad_weight_pow)
    else:
        assert False, f"Invalid training objective `{train_objective}'"
def get_opt(parameters, config):
    if config['opt'] == 'sgd':
        opt_class = optim.SGD
    elif config['opt'] == 'adam':
        opt_class = optim.Adam
    elif config['opt'] == 'adamax':
        opt_class = optim.Adamax
    else:
        assert False, f"Invalid optimiser type {config['opt']}"

    return opt_class(parameters, lr=config['lr'], weight_decay=config['weight_decay'])
def num_params(module):
    return sum(p.view(-1).shape[0] for p in module.parameters())
def metrics(density, x, num_importance_samples):
    result = density.elbo(x, num_importance_samples, detach_q_params=False, detach_q_samples=False)

    elbo_samples = result['log-w']
    elbo = elbo_samples.mean(dim=1)
    iwae = elbo_samples.logsumexp(dim=1) - np.log(num_importance_samples)

    dim = int(np.prod(x.shape[1:]))
    bpd = -iwae / dim / np.log(2)

    elbo_gap = iwae - elbo

    return {
        'elbo': elbo,
        f'iwae-{num_importance_samples}': iwae,
        'bpd': bpd,
        'elbo-gap': elbo_gap
    }
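# Sketch of the estimator relationship computed in `metrics`, on synthetic
# importance weights (no density required). By Jensen's inequality the IWAE
# bound dominates the ELBO, so `elbo-gap` is non-negative. Illustrative only;
# the shapes are arbitrary.
def _demo_bound_gap():
    log_w = torch.randn(8, 16, 1)  # (batch, importance samples, 1)
    elbo = log_w.mean(dim=1)
    iwae_bound = log_w.logsumexp(dim=1) - np.log(log_w.shape[1])
    assert (iwae_bound - elbo).min() >= -1e-6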
def iwae(density, x, num_importance_samples, detach_q):
    log_w = density.elbo(
        x=x,
        num_importance_samples=num_importance_samples,
        detach_q_params=detach_q,
        detach_q_samples=detach_q
    )['log-w']
    return -log_w.logsumexp(dim=1).mean()
def iwae_alt(density, x, num_importance_samples, grad_weight_pow):
    # grad_weight_pow == 1 corresponds to `iwae-stl', == 2 to `iwae-dreg'
    # (see get_q_loss)
    log_w = density.elbo(
        x=x,
        num_importance_samples=num_importance_samples,
        detach_q_params=True,
        detach_q_samples=False
    )['log-w']
    log_Z = log_w.logsumexp(dim=1).view(x.shape[0], 1, 1)
    grad_weight = (log_w - log_Z).exp() ** grad_weight_pow
    return -(grad_weight.detach() * log_w).sum(dim=1).mean()
def rws(density, x, num_importance_samples):
    log_w = density.elbo(
        x=x,
        num_importance_samples=num_importance_samples,
        detach_q_params=False,
        detach_q_samples=True
    )['log-w']
    log_Z = log_w.logsumexp(dim=1).view(x.shape[0], 1, 1)
    grad_weight = (log_w - log_Z).exp()
    return (grad_weight.detach() * log_w).sum(dim=1).mean()
def rws_dreg(density, x, num_importance_samples):
    log_w = density.elbo(
        x=x,
        num_importance_samples=num_importance_samples,
        detach_q_params=True,
        detach_q_samples=False
    )['log-w']
    log_Z = log_w.logsumexp(dim=1).view(x.shape[0], 1, 1)
    grad_weight = (log_w - log_Z).exp().detach()
    return -((grad_weight - grad_weight ** 2) * log_w).sum(dim=1).mean()
class ActNormBijection(Bijection):
    def __init__(self, x_shape):
        super().__init__(x_shape=x_shape, z_shape=x_shape)
        self.actnorm = ActNormNd(num_features=x_shape[0])
        self.actnorm.shape = (1, -1) + (1,) * len(x_shape[1:])

    def _x_to_z(self, x, **kwargs):
        z, neg_log_jac = self.actnorm(x=x, logpx=0.)
        return {'z': z, 'log-jac': -neg_log_jac}

    def _z_to_x(self, z, **kwargs):
        x, neg_log_jac = self.actnorm.inverse(y=z, logpy=0.)
        return {'x': x, 'log-jac': -neg_log_jac}
class AffineBijection(Bijection):
    def __init__(self, x_shape, per_channel):
        super().__init__(x_shape=x_shape, z_shape=x_shape)

        if per_channel:
            param_shape = (x_shape[0], *[1 for _ in x_shape[1:]])
            self.log_jac_factor = np.prod(x_shape[1:])
        else:
            param_shape = x_shape
            self.log_jac_factor = 1

        self.shift = nn.Parameter(torch.zeros(param_shape))
        self.log_scale = nn.Parameter(torch.zeros(param_shape))

    def _x_to_z(self, x, **kwargs):
        return {'z': x * torch.exp(self.log_scale) + self.shift, 'log-jac': self._log_jac_x_to_z(x.shape[0])}

    def _z_to_x(self, z, **kwargs):
        return {'x': (z - self.shift) * torch.exp(-self.log_scale), 'log-jac': -self._log_jac_x_to_z(z.shape[0])}

    def _log_jac_x_to_z(self, batch_size):
        log_jac_single = self.log_jac_factor * torch.sum(self.log_scale)
        return log_jac_single.view(1, 1).expand(batch_size, 1)
class ConditionalAffineBijection(Bijection):
    def __init__(self, x_shape, coupler):
        super().__init__(x_shape, x_shape)
        self.coupler = coupler

    def _x_to_z(self, x, **kwargs):
        shift, log_scale = self._shift_log_scale(kwargs['u'])
        z = (x + shift) * torch.exp(log_scale)
        return {'z': z, 'log-jac': self._log_jac_x_to_z(log_scale)}

    def _z_to_x(self, z, **kwargs):
        shift, log_scale = self._shift_log_scale(kwargs['u'])
        x = z * torch.exp(-log_scale) - shift
        return {'x': x, 'log-jac': self._log_jac_z_to_x(log_scale)}

    def _shift_log_scale(self, u):
        shift_log_scale = self.coupler(u)
        return shift_log_scale['shift'], shift_log_scale['log-scale']

    def _log_jac_x_to_z(self, log_scale):
        return log_scale.flatten(start_dim=1).sum(dim=1, keepdim=True)

    def _log_jac_z_to_x(self, log_scale):
        return -self._log_jac_x_to_z(log_scale)
class BatchNormBijection(Bijection):
    def __init__(self, x_shape, per_channel, apply_affine, momentum, eps=1e-5):
        super().__init__(x_shape=x_shape, z_shape=x_shape)

        assert 0 <= momentum <= 1
        self.momentum = momentum

        assert eps > 0
        self.eps = eps

        if per_channel:
            param_shape = (x_shape[0], *[1 for _ in x_shape[1:]])
            self.average_dims = [0] + list(range(2, len(x_shape) + 1))
            self.log_jac_factor = np.prod(x_shape[1:])
        else:
            param_shape = x_shape
            self.average_dims = [0]
            self.log_jac_factor = 1

        self.register_buffer('running_mean', torch.zeros(param_shape))
        self.register_buffer('running_var', torch.ones(param_shape))

        self.apply_affine = apply_affine
        if apply_affine:
            self.shift = nn.Parameter(torch.zeros(param_shape))
            self.log_scale = nn.Parameter(torch.zeros(param_shape))

    def _x_to_z(self, x, **kwargs):
        if self.training:
            mean = self._average(x)
            var = self._average((x - mean) ** 2)

            if self.momentum == 1:
                self.running_mean = mean
                self.running_var = var
            elif self.momentum > 0:
                self.running_mean.mul_(1 - self.momentum).add_(self.momentum * mean.data)
                self.running_var.mul_(1 - self.momentum).add_(self.momentum * var.data)
        else:
            mean = self.running_mean
            var = self.running_var

        z = (x - mean) / torch.sqrt(var + self.eps)

        if self.apply_affine:
            z = z * torch.exp(self.log_scale) + self.shift

        return {'z': z, 'log-jac': self._log_jac_x_to_z(var, x.shape[0])}

    def _z_to_x(self, z, **kwargs):
        assert not self.training

        if self.apply_affine:
            z = (z - self.shift) * torch.exp(-self.log_scale)

        x = z * torch.sqrt(self.running_var + self.eps) + self.running_mean

        return {'x': x, 'log-jac': -self._log_jac_x_to_z(self.running_var, z.shape[0])}

    def _average(self, data):
        return torch.mean(data, dim=self.average_dims, keepdim=True).squeeze(0)

    def _log_jac_x_to_z(self, var, batch_size):
        summands = -0.5 * torch.log(var + self.eps)
        if self.apply_affine:
            summands = self.log_scale + summands
        log_jac_single = self.log_jac_factor * torch.sum(summands)
        return log_jac_single.view(1, 1).expand(batch_size, 1)
class Bijection(nn.Module):
    def __init__(self, x_shape, z_shape):
        super().__init__()
        self.x_shape = x_shape
        self.z_shape = z_shape

    def forward(self, inputs, direction, **kwargs):
        if direction == 'x-to-z':
            assert inputs.shape[1:] == self.x_shape, f'Expected shape {self.x_shape}; received {inputs.shape[1:]}'
            result = self._x_to_z(inputs, **kwargs)
            assert result['z'].shape[1:] == self.z_shape
            return result
        elif direction == 'z-to-x':
            assert inputs.shape[1:] == self.z_shape
            result = self._z_to_x(inputs, **kwargs)
            assert result['x'].shape[1:] == self.x_shape
            return result
        else:
            assert False, f'Invalid direction {direction}'

    def x_to_z(self, x, **kwargs):
        return self(x, 'x-to-z', **kwargs)

    def z_to_x(self, z, **kwargs):
        return self(z, 'z-to-x', **kwargs)

    def inverse(self):
        return InverseBijection(self)

    def condition(self, u):
        return ConditionedBijection(bijection=self, u=u)

    def _x_to_z(self, x, **kwargs):
        raise NotImplementedError

    def _z_to_x(self, z, **kwargs):
        raise NotImplementedError
class ConditionedBijection(Bijection):
    def __init__(self, bijection, u):
        super().__init__(x_shape=bijection.x_shape, z_shape=bijection.z_shape)
        self.bijection = bijection
        self.register_buffer('u', u)

    def _x_to_z(self, x, **kwargs):
        return self.bijection.x_to_z(x, u=self._expand_u(x))

    def _z_to_x(self, z, **kwargs):
        return self.bijection.z_to_x(z, u=self._expand_u(z))

    def _expand_u(self, inputs):
        return self.u.unsqueeze(0).expand(inputs.shape[0], *[-1 for _ in self.u.shape])
class InverseBijection(Bijection):
    def __init__(self, bijection):
        super().__init__(x_shape=bijection.z_shape, z_shape=bijection.x_shape)
        self.bijection = bijection

    def _x_to_z(self, x, **kwargs):
        result = self.bijection.z_to_x(x, **kwargs)
        z = result.pop('x')
        return {'z': z, **result}

    def _z_to_x(self, z, **kwargs):
        result = self.bijection.x_to_z(z, **kwargs)
        x = result.pop('z')
        return {'x': x, **result}
class IdentityBijection(Bijection):
    def __init__(self, x_shape):
        super().__init__(x_shape=x_shape, z_shape=x_shape)

    def _x_to_z(self, x, **kwargs):
        return {'z': x, 'log-jac': self._log_jac_like(x)}

    def _z_to_x(self, z, **kwargs):
        return {'x': z, 'log-jac': self._log_jac_like(z)}

    def _log_jac_like(self, inputs):
        return torch.zeros(inputs.shape[0], 1, dtype=inputs.dtype, device=inputs.device)
class CompositeBijection(Bijection):
    def __init__(self, layers, direction):
        if direction == 'z-to-x':
            x_shape = layers[-1].x_shape
            z_shape = layers[0].z_shape
        elif direction == 'x-to-z':
            x_shape = layers[0].x_shape
            z_shape = layers[-1].z_shape
        else:
            assert False, f'Invalid direction {direction}'

        super().__init__(x_shape, z_shape)

        if direction == 'z-to-x':
            layers = reversed(layers)

        self._x_to_z_layers = nn.ModuleList(layers)

    def _x_to_z(self, x, **kwargs):
        z, log_jac = self._pass_through(x, 'x-to-z', **kwargs)
        return {'z': z, 'log-jac': log_jac}

    def _z_to_x(self, z, **kwargs):
        x, log_jac = self._pass_through(z, 'z-to-x', **kwargs)
        return {'x': x, 'log-jac': log_jac}

    def _pass_through(self, inputs, direction, **kwargs):
        assert direction in ['z-to-x', 'x-to-z']

        if direction == 'x-to-z':
            output_name = 'z'
            layer_order = self._x_to_z_layers
        else:
            output_name = 'x'
            layer_order = reversed(self._x_to_z_layers)

        outputs = inputs
        log_jac = None
        for layer in layer_order:
            result = layer(outputs, direction, **kwargs)
            outputs = result[output_name]
            if log_jac is None:
                log_jac = result['log-jac']
            else:
                log_jac += result['log-jac']

        return outputs, log_jac
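# Round-trip sanity check for the bijection machinery above: mapping
# x -> z -> x through a composite should recover x, with the two log-Jacobians
# negating each other. Illustrative only; the shapes are arbitrary, and the
# freshly initialised AffineBijection is the identity map.
def _demo_composite_roundtrip():
    bij = CompositeBijection(
        [AffineBijection(x_shape=(4,), per_channel=False), IdentityBijection(x_shape=(4,))],
        'x-to-z'
    )
    x = torch.randn(5, 4)
    fwd = bij.x_to_z(x)
    back = bij.z_to_x(fwd['z'])
    assert torch.allclose(back['x'], x, atol=1e-6)
    assert torch.allclose(fwd['log-jac'], -back['log-jac'])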
class BlockNeuralAutoregressiveBijection(Bijection):
    def __init__(self, num_input_channels, num_hidden_layers, hidden_channels_factor, activation, residual):
        shape = (num_input_channels,)
        super().__init__(x_shape=shape, z_shape=shape)

        if activation == 'tanh':
            warnings.warn('BNAF with tanh nonlinearities is not surjective')
            act_class = Tanh
        elif activation == 'leaky-relu':
            act_class = LeakyReLU
        elif activation == 'soft-leaky-relu':
            act_class = SoftLeakyReLU
        else:
            assert False, f'Invalid activation {activation}'

        layers = [
            MaskedWeight(
                in_features=num_input_channels,
                out_features=num_input_channels * hidden_channels_factor,
                dim=num_input_channels
            ),
            act_class()
        ]

        for _ in range(num_hidden_layers):
            layers += [
                MaskedWeight(
                    in_features=num_input_channels * hidden_channels_factor,
                    out_features=num_input_channels * hidden_channels_factor,
                    dim=num_input_channels
                ),
                act_class()
            ]

        layers += [
            MaskedWeight(
                in_features=num_input_channels * hidden_channels_factor,
                out_features=num_input_channels,
                dim=num_input_channels
            )
        ]

        self.bnaf = BNAF(*layers, res=residual)

    def _x_to_z(self, x, **kwargs):
        z, log_jac = self.bnaf(x)
        return {'z': z, 'log-jac': log_jac.view(x.shape[0], 1)}
class Nonlinearity(nn.Module):
    def forward(self, inputs, grad=None):
        outputs, log_jac = self._do_forward(inputs)
        if grad is None:
            grad = log_jac
        else:
            grad = log_jac.view(grad.shape) + grad
        return outputs, grad
class LeakyReLU(Nonlinearity):
    # Fix: negative_slope was referenced but never set; give it an __init__
    def __init__(self, negative_slope=0.01):
        super().__init__()
        self.negative_slope = negative_slope

    def _do_forward(self, inputs):
        outputs = F.leaky_relu(inputs, negative_slope=self.negative_slope)
        log_jac = torch.zeros_like(inputs)
        log_jac[inputs < 0] = np.log(self.negative_slope)
        return outputs, log_jac
class SoftLeakyReLU(Nonlinearity):
    def __init__(self, negative_slope=0.01):
        super().__init__()
        self.negative_slope = negative_slope

    def _do_forward(self, inputs):
        eps = self.negative_slope
        outputs = eps * inputs + (1 - eps) * F.softplus(inputs)
        log_jac = torch.log(eps + (1 - eps) * torch.sigmoid(inputs))
        return outputs, log_jac
class Invertible1x1ConvBijection(Bijection):
    def __init__(self, x_shape, num_u_channels=0):
        assert len(x_shape) in (1, 3)
        super().__init__(x_shape, x_shape)

        num_channels = x_shape[0]
        self.weight_shape = [num_channels, num_channels]
        self.conv_weights_shape = self.weight_shape + [1 for _ in x_shape[1:]]
        self.num_non_channel_elements = np.prod(x_shape[1:])

        self.weights_init = torch.qr(torch.randn(*self.weight_shape))[0]

        self.num_u_channels = num_u_channels
        if num_u_channels > 0:
            self.u_weights = nn.Parameter(torch.zeros(num_channels, num_u_channels))
            self.u_conv_weights_shape = [num_channels, num_u_channels] + [1 for _ in x_shape[1:]]

    def _convolve(self, inputs, weights, weights_shape):
        if len(weights_shape) < 3:
            return torch.matmul(inputs, weights.t())
        else:
            return F.conv2d(inputs, weights.view(*weights_shape))

    def _log_jac_single(self):
        raise NotImplementedError

    def _get_weights(self):
        raise NotImplementedError

    def _get_Vu(self, **kwargs):
        if 'u' in kwargs:
            Vu = self._convolve(kwargs['u'], self.u_weights, self.u_conv_weights_shape)
        else:
            Vu = 0
            assert self.num_u_channels == 0
        return Vu

    def _x_to_z(self, x, **kwargs):
        Vu = self._get_Vu(**kwargs)
        Wx = self._convolve(x, self._get_weights(), self.conv_weights_shape)
        z = Wx + Vu
        log_jac = self._log_jac_single().expand(x.shape[0], 1)
        return {'z': z, 'log-jac': log_jac}

    def _z_to_x(self, z, **kwargs):
        Vu = self._get_Vu(**kwargs)
        x = self._convolve(z - Vu, torch.inverse(self._get_weights()), self.conv_weights_shape)
        neg_log_jac = self._log_jac_single().expand(z.shape[0], 1)
        return {'x': x, 'log-jac': -neg_log_jac}
class BruteForceInvertible1x1ConvBijection(Invertible1x1ConvBijection):
    def __init__(self, x_shape, num_u_channels=0):
        super().__init__(x_shape, num_u_channels)
        self.weights = nn.Parameter(self.weights_init)

    def _get_weights(self):
        return self.weights

    def _log_jac_single(self):
        return torch.slogdet(self.weights)[1] * self.num_non_channel_elements
class LUInvertible1x1ConvBijection(Invertible1x1ConvBijection):
    def __init__(self, x_shape, num_u_channels=0):
        super().__init__(x_shape, num_u_channels)

        P, lower, upper = torch.lu_unpack(*torch.lu(self.weights_init))
        s = torch.diag(upper)
        log_s = torch.log(torch.abs(s))
        upper = torch.triu(upper, 1)

        self.register_buffer('P', P)
        self.register_buffer('sign_s', torch.sign(s))
        self.register_buffer('l_mask', torch.tril(torch.ones(self.weight_shape), -1))
        self.register_buffer('eye', torch.eye(*self.weight_shape))

        self.lower = nn.Parameter(lower)
        self.log_s = nn.Parameter(log_s)
        self.upper = nn.Parameter(upper)
        self.bias = nn.Parameter(torch.zeros(x_shape[0], *x_shape[1:]))

    def _get_weights(self):
        L = self.lower * self.l_mask + self.eye
        U = self.upper * self.l_mask.transpose(0, 1).contiguous()
        U += torch.diag(self.sign_s * torch.exp(self.log_s))
        return torch.matmul(self.P, torch.matmul(L, U))

    def _log_jac_single(self):
        return torch.sum(self.log_s) * self.num_non_channel_elements
class LULinearBijection(Bijection):
    def __init__(self, num_input_channels):
        shape = (num_input_channels,)
        super().__init__(x_shape=shape, z_shape=shape)
        self.linear = LULinear(features=num_input_channels, identity_init=True)

    def _x_to_z(self, x, **kwargs):
        z, log_jac = self.linear(x)
        return {'z': z, 'log-jac': log_jac.view(x.shape[0], 1)}

    def _z_to_x(self, z, **kwargs):
        # Fix: the inverse direction must call linear.inverse, not the forward map
        x, log_jac = self.linear.inverse(z)
        return {'x': x, 'log-jac': log_jac.view(z.shape[0], 1)}
class ElementwiseBijection(Bijection):
    def __init__(self, x_shape):
        super().__init__(x_shape=x_shape, z_shape=x_shape)

    def _x_to_z(self, x, **kwargs):
        return {'z': self._F(x), 'log-jac': self._log_jac_x_to_z(x)}

    def _z_to_x(self, z, **kwargs):
        # Fix: evaluate the derivative at x = F^{-1}(z), not at z itself, so
        # that the two directions report negated log-Jacobians of each other
        x = self._F_inv(z)
        return {'x': x, 'log-jac': -self._log_jac_x_to_z(x)}

    def _log_jac_x_to_z(self, x):
        return self._log_dF(x).flatten(start_dim=1).sum(dim=1, keepdim=True)

    def _F(self, x):
        raise NotImplementedError

    def _F_inv(self, z):
        raise NotImplementedError

    def _log_dF(self, x):
        raise NotImplementedError
class LogitBijection(ElementwiseBijection):
    _EPS = 1e-7

    def _F(self, x):
        return torch.log(x) - torch.log(1 - x)

    def _F_inv(self, z):
        return torch.sigmoid(z)

    def _log_dF(self, x):
        x_clamped = x.clamp(self._EPS, 1 - self._EPS)
        return -torch.log(x_clamped) - torch.log(1 - x_clamped)
class TanhBijection(ElementwiseBijection):
    _EPS = 1e-7

    def _F(self, x):
        return torch.tanh(x)

    def _F_inv(self, z):
        z_clamped = z.clamp(-1 + self._EPS, 1 - self._EPS)
        return 0.5 * (torch.log(1 + z_clamped) - torch.log(1 - z_clamped))

    def _log_dF(self, x):
        # Fix: `y` was undefined; log tanh'(x) = log(1 - tanh^2(x))
        #                                      = log(4) + 2x - 2*softplus(2x)
        y = 2 * x
        return y - 2 * F.softplus(y) + np.log(4)
class ScalarMultiplicationBijection(ElementwiseBijection):
    def __init__(self, x_shape, value):
        assert np.isscalar(value)
        assert value != 0., 'Scalar multiplication by zero is not a bijection'
        super().__init__(x_shape=x_shape)
        self.value = value

    def _F(self, x):
        return self.value * x

    def _F_inv(self, z):
        return z / self.value

    def _log_dF(self, x):
        return torch.full_like(x, np.log(np.abs(self.value)))
class ScalarAdditionBijection(ElementwiseBijection):
    def __init__(self, x_shape, value):
        assert np.isscalar(value)
        super().__init__(x_shape=x_shape)
        self.value = value

    def _F(self, x):
        return x + self.value

    def _F_inv(self, z):
        return z - self.value

    def _log_dF(self, x):
        return torch.zeros_like(x)
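# Sketch of how the elementwise bijections above compose into the usual image
# preprocessing chain (rescale pixels into (0, 1), squeeze away the boundary,
# then logit). The constants are the common choices from the flow literature,
# not values mandated by this codebase; the shapes are arbitrary.
def _demo_logit_preprocessing():
    lam = 1e-6
    chain = CompositeBijection([
        ScalarMultiplicationBijection(x_shape=(1, 4, 4), value=(1 - 2 * lam) / 256.),
        ScalarAdditionBijection(x_shape=(1, 4, 4), value=lam),
        LogitBijection(x_shape=(1, 4, 4)),
    ], 'x-to-z')
    x = torch.randint(0, 256, (2, 1, 4, 4)).float()
    z = chain.x_to_z(x)['z']
    assert torch.isfinite(z).all()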
class RationalQuadraticSplineBijection(Bijection):
    def __init__(self, num_input_channels, flow):
        shape = (num_input_channels,)
        super().__init__(x_shape=shape, z_shape=shape)
        self.flow = flow

    def _x_to_z(self, x, **kwargs):
        z, log_jac = self.flow(x)
        return {'z': z, 'log-jac': log_jac.view(x.shape[0], 1)}

    def _z_to_x(self, z, **kwargs):
        x, log_jac = self.flow.inverse(z)
        return {'x': x, 'log-jac': log_jac.view(z.shape[0], 1)}
class CoupledRationalQuadraticSplineBijection(RationalQuadraticSplineBijection):
    def __init__(self, num_input_channels, num_hidden_layers, num_hidden_channels, num_bins,
                 tail_bound, activation, dropout_probability, reverse_mask):
        def transform_net_create_fn(in_features, out_features):
            return ResidualNet(
                in_features=in_features,
                out_features=out_features,
                context_features=None,
                hidden_features=num_hidden_channels,
                num_blocks=num_hidden_layers,
                activation=activation(),
                dropout_probability=dropout_probability,
                use_batch_norm=False
            )

        super().__init__(
            num_input_channels=num_input_channels,
            flow=PiecewiseRationalQuadraticCouplingTransform(
                mask=create_alternating_binary_mask(num_input_channels, even=reverse_mask),
                transform_net_create_fn=transform_net_create_fn,
                num_bins=num_bins,
                tails='linear',
                tail_bound=tail_bound,
                apply_unconditional_transform=True
            )
        )
class AutoregressiveRationalQuadraticSplineBijection(RationalQuadraticSplineBijection):
    def __init__(self, num_input_channels, num_hidden_layers, num_hidden_channels, num_bins,
                 tail_bound, activation, dropout_probability):
        super().__init__(
            num_input_channels=num_input_channels,
            flow=MaskedPiecewiseRationalQuadraticAutoregressiveTransform(
                features=num_input_channels,
                hidden_features=num_hidden_channels,
                context_features=None,
                num_bins=num_bins,
                tails='linear',
                tail_bound=tail_bound,
                num_blocks=num_hidden_layers,
                use_residual_blocks=True,
                random_mask=False,
                activation=activation(),
                dropout_probability=dropout_probability,
                use_batch_norm=False
            )
        )
class ODEVelocityFunction(ODEnet):
    def __init__(self, hidden_dims, x_input_shape, nonlinearity, num_u_channels=0,
                 strides=None, conv=False, layer_type='concatsquash'):
        super().__init__(
            hidden_dims=hidden_dims,
            input_shape=x_input_shape,
            strides=strides,
            conv=conv,
            layer_type=layer_type,
            nonlinearity=nonlinearity
        )

        if num_u_channels > 0:
            # Widen the first layer so that u can be concatenated onto the state
            layer_0_class = self.layers[0].__class__
            self.layers[0] = layer_0_class(x_input_shape[0] + num_u_channels, hidden_dims[0])

        self.num_u_channels = num_u_channels

    def set_u(self, u):
        assert self.num_u_channels > 0
        self._u = u

    def forward(self, t, y):
        if self.num_u_channels > 0:
            y = torch.cat((y, self._u), 1)
        return super().forward(t, y)
class FFJORDBijection(Bijection):
    _VELOCITY_NONLINEARITY = 'tanh'
    _DIVERGENCE_METHOD = 'brute_force'
    _SOLVER = 'dopri5'
    _INTEGRATION_TIME = 0.5

    def __init__(self, x_shape, velocity_hidden_channels, num_u_channels, relative_tolerance, absolute_tolerance):
        super().__init__(x_shape=x_shape, z_shape=x_shape)

        self.diffeq = ODEVelocityFunction(
            hidden_dims=tuple(velocity_hidden_channels),
            x_input_shape=x_shape,
            nonlinearity=self._VELOCITY_NONLINEARITY,
            num_u_channels=num_u_channels
        )

        odefunc = ODEfunc(
            diffeq=self.diffeq,
            divergence_fn=self._DIVERGENCE_METHOD,
            residual=False,
            rademacher=False
        )

        self.cnf = CNF(
            odefunc=odefunc,
            T=self._INTEGRATION_TIME,
            train_T=False,
            regularization_fns=None,
            solver=self._SOLVER,
            atol=absolute_tolerance,
            rtol=relative_tolerance
        )

    def _get_nfes(self):
        return torch.tensor(self.cnf.odefunc.num_evals())

    def _evolve_ODE(self, input_state, reverse, **kwargs):
        if 'u' in kwargs:
            self.diffeq.set_u(kwargs['u'])

        init_log_jac = input_state.new_zeros(input_state.shape[0], 1)
        output_state, neg_log_jac = self.cnf(input_state, init_log_jac, reverse=reverse)
        return output_state, -neg_log_jac

    def _x_to_z(self, x, **kwargs):
        z, log_jac = self._evolve_ODE(input_state=x, reverse=False, **kwargs)
        return {'z': z, 'log-jac': log_jac, 'nfes': self._get_nfes()}

    def _z_to_x(self, z, **kwargs):
        x, log_jac = self._evolve_ODE(input_state=z, reverse=True, **kwargs)
        return {'x': x, 'log-jac': log_jac}
class ResidualFlowBijection(Bijection):
    def __init__(self, x_shape, lipschitz_net, reduce_memory):
        super().__init__(x_shape=x_shape, z_shape=x_shape)
        self.block = self._get_iresblock(net=lipschitz_net, reduce_memory=reduce_memory)

    def _x_to_z(self, x, **kwargs):
        z, neg_log_jac = self.block(x=x, logpx=0.)
        return {'z': z, 'log-jac': -neg_log_jac}

    def _z_to_x(self, z, **kwargs):
        x, neg_log_jac = self.block.inverse(y=z, logpy=0.)
        return {'x': x, 'log-jac': -neg_log_jac}

    def _get_iresblock(self, net, reduce_memory):
        return iResBlock(
            nnet=net,
            brute_force=False,
            n_power_series=None,
            neumann_grad=reduce_memory,
            n_dist='geometric',
            geom_p=0.5,
            lamb=-1.,
            n_exact_terms=2,
            n_samples=1,
            exact_trace=False,
            grad_in_forward=reduce_memory
        )
class SumOfSquaresPolynomialBijection(Bijection):
    def __init__(self, num_input_channels, hidden_channels, activation, num_polynomials, polynomial_degree):
        super().__init__(x_shape=(num_input_channels,), z_shape=(num_input_channels,))

        arn = AutoRegressiveNN(
            input_dim=int(num_input_channels),
            hidden_dims=hidden_channels,
            param_dims=[(polynomial_degree + 1) * num_polynomials],
            nonlinearity=activation()
        )

        self.flow = Polynomial(
            autoregressive_nn=arn,
            input_dim=int(num_input_channels),
            count_degree=polynomial_degree,
            count_sum=num_polynomials
        )

    def _x_to_z(self, x, **kwargs):
        z = self.flow._call(x)
        # Relies on Polynomial caching the Jacobian from the preceding _call
        log_jac = self.flow.log_abs_det_jacobian(None, None).view(x.shape[0], 1)
        return {'z': z, 'log-jac': log_jac}
class BernoulliConditionalDensity(ConditionalDensity):
    def __init__(self, logit_net):
        super().__init__()
        self.logit_net = logit_net

    def _log_prob(self, inputs, cond_inputs):
        logits = self.logit_net(cond_inputs)
        log_probs = dist.bernoulli.Bernoulli(logits=logits).log_prob(inputs)
        return {'log-prob': log_probs.flatten(start_dim=1).sum(dim=1, keepdim=True)}

    def _sample(self, cond_inputs, detach_params, detach_samples):
        logits = self.logit_net(cond_inputs)
        samples = dist.bernoulli.Bernoulli(logits=logits).sample()

        if detach_params:
            logits = logits.detach()
        if detach_samples:
            samples = samples.detach()

        # Fix: score the samples *after* any detaching, so that detach_params
        # actually severs the gradient path through the logits
        log_probs = dist.bernoulli.Bernoulli(logits=logits).log_prob(samples)
        log_probs = log_probs.flatten(start_dim=1).sum(dim=1, keepdim=True)

        return {'sample': samples, 'log-prob': log_probs}
def concrete_log_prob(u, alphas, lam):
    assert alphas.shape == u.shape

    flat_u = u.flatten(start_dim=1)
    flat_alphas = alphas.flatten(start_dim=1)
    _, dim = flat_u.shape

    # Normalising constant of the Concrete density:
    # log((dim - 1)!) + (dim - 1) * log(lam)
    const_term = np.sum(np.log(np.arange(1, dim))) + (dim - 1) * np.log(lam)

    log_denominator = torch.logsumexp(torch.log(flat_alphas) - lam * torch.log(flat_u), dim=1, keepdim=True)
    log_numerator = torch.log(flat_alphas) - (lam + 1) * torch.log(flat_u)
    log_product_quotient = torch.sum(log_numerator - log_denominator, dim=1, keepdim=True)

    return const_term + log_product_quotient
def concrete_sample(alphas, lam):
    standard_gumbel = torch.distributions.gumbel.Gumbel(torch.zeros_like(alphas), torch.ones_like(alphas))
    gumbels = standard_gumbel.sample()
    log_numerator = (torch.log(alphas) + gumbels) / lam
    log_denominator = torch.logsumexp(log_numerator, dim=1, keepdim=True)
    return torch.exp(log_numerator - log_denominator)
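# Quick check that concrete_sample returns points on the probability simplex
# and that concrete_log_prob evaluates finitely on them. Illustrative only;
# the alpha and lambda values are arbitrary.
def _demo_concrete():
    alphas = torch.rand(3, 5) + 0.1
    samples = concrete_sample(alphas, lam=0.5)
    assert torch.allclose(samples.sum(dim=1), torch.ones(3), atol=1e-5)
    assert torch.isfinite(concrete_log_prob(samples, alphas, lam=0.5)).all()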
class ConcreteConditionalDensity(ConditionalDensity):
    def __init__(self, log_alpha_map, lam):
        super().__init__()
        self.log_alpha_map = log_alpha_map
        self.lam = lam

    def _log_prob(self, inputs, cond_inputs):
        return {'log-prob': concrete_log_prob(inputs, self._alphas(cond_inputs), self.lam)}

    def _sample(self, cond_inputs, detach_params, detach_samples):
        alphas = self._alphas(cond_inputs)
        samples = concrete_sample(alphas, self.lam)

        if detach_params:
            alphas = alphas.detach()
        if detach_samples:
            samples = samples.detach()

        log_probs = concrete_log_prob(samples, alphas, self.lam)

        return {'log-prob': log_probs, 'sample': samples}

    def _alphas(self, cond_inputs):
        return torch.exp(self.log_alpha_map(cond_inputs))
class ConditionalDensity(nn.Module):
    def forward(self, mode, *args, **kwargs):
        if mode == 'log-prob':
            return self._log_prob(*args, **kwargs)
        elif mode == 'sample':
            return self._sample(*args, **kwargs)
        else:
            assert False, f'Invalid mode {mode}'

    def log_prob(self, inputs, cond_inputs):
        return self('log-prob', inputs, cond_inputs)

    def sample(self, cond_inputs, detach_params=False, detach_samples=False):
        return self('sample', cond_inputs, detach_params=detach_params, detach_samples=detach_samples)

    def _log_prob(self, inputs, cond_inputs):
        raise NotImplementedError

    def _sample(self, cond_inputs, detach_params, detach_samples):
        raise NotImplementedError
class DiagonalGaussianConditionalDensity(ConditionalDensity):
    def __init__(self, coupler):
        super().__init__()
        self.coupler = coupler

    def _log_prob(self, inputs, cond_inputs):
        means, stddevs = self._means_and_stddevs(cond_inputs)
        return {'log-prob': diagonal_gaussian_log_prob(inputs, means, stddevs)}

    def _sample(self, cond_inputs, detach_params, detach_samples):
        means, stddevs = self._means_and_stddevs(cond_inputs)
        samples = diagonal_gaussian_sample(means, stddevs)

        if detach_params:
            means = means.detach()
            stddevs = stddevs.detach()
        if detach_samples:
            samples = samples.detach()

        log_probs = diagonal_gaussian_log_prob(samples, means, stddevs)

        return {'sample': samples, 'log-prob': log_probs}

    def _means_and_stddevs(self, cond_inputs):
        result = self.coupler(cond_inputs)
        return result['shift'], torch.exp(result['log-scale'])
class CIFDensity(Density):
    def __init__(self, prior, p_u_density, bijection, q_u_density):
        super().__init__()
        self.bijection = bijection
        self.prior = prior
        self.p_u_density = p_u_density
        self.q_u_density = q_u_density

    def p_parameters(self):
        return [*self.bijection.parameters(), *self.p_u_density.parameters(), *self.prior.p_parameters()]

    def q_parameters(self):
        result = list(self.q_u_density.parameters())
        prior_q_params = list(self.prior.q_parameters())
        result += prior_q_params
        if prior_q_params:
            result += list(self.bijection.parameters())
        return result

    def _elbo(self, x, detach_q_params, detach_q_samples):
        # Fix: use distinct names for the three intermediate results, so that
        # 'bijection-info' refers to the bijection output rather than being
        # clobbered by the subsequent p_u log-prob
        q_result = self.q_u_density.sample(cond_inputs=x, detach_params=detach_q_params, detach_samples=detach_q_samples)
        u = q_result['sample']
        log_q_u = q_result['log-prob']

        bijection_result = self.bijection.x_to_z(x, u=u)
        z = bijection_result['z']
        log_jac = bijection_result['log-jac']

        log_p_u = self.p_u_density.log_prob(inputs=u, cond_inputs=z)['log-prob']

        prior_dict = self.prior('elbo', z, detach_q_params=detach_q_params, detach_q_samples=detach_q_samples)

        return {
            'log-p': log_jac + log_p_u + prior_dict['log-p'],
            'log-q': log_q_u + prior_dict['log-q'],
            'bijection-info': bijection_result,
            'prior-dict': prior_dict
        }

    def _fix_random_u(self):
        fixed_prior, z = self.prior._fix_random_u()
        z = z.unsqueeze(0)
        u = self.p_u_density.sample(z)['sample']
        fixed_bijection = self.bijection.condition(u.squeeze(0))
        new_z = fixed_bijection.z_to_x(z)['x'].squeeze(0)
        return FlowDensity(prior=fixed_prior, bijection=fixed_bijection), new_z

    def fix_u(self, u):
        fixed_prior = self.prior.fix_u(u=u[1:])
        fixed_bijection = self.bijection.condition(u[0])
        return FlowDensity(prior=fixed_prior, bijection=fixed_bijection)

    def _sample(self, num_samples):
        z = self.prior.sample(num_samples)
        u = self.p_u_density.sample(z)['sample']
        return self.bijection.z_to_x(z, u=u)['x']

    def _fixed_sample(self, noise):
        z = self.prior.fixed_sample(noise=noise)
        u = self.p_u_density.sample(z)['sample']
        return self.bijection.z_to_x(z, u=u)['x']
class Density(nn.Module):
    def forward(self, mode, *args, **kwargs):
        if mode == 'elbo':
            return self._elbo(*args, **kwargs)
        elif mode == 'sample':
            return self._sample(*args, **kwargs)
        elif mode == 'fixed-sample':
            return self._fixed_sample(*args, **kwargs)
        else:
            assert False, f'Invalid mode {mode}'

    def p_parameters(self):
        raise NotImplementedError

    def q_parameters(self):
        raise NotImplementedError

    def fix_random_u(self):
        fixed_density, _ = self._fix_random_u()
        return fixed_density

    def fix_u(self, u):
        raise NotImplementedError

    def elbo(self, x, num_importance_samples, detach_q_params=False, detach_q_samples=False):
        result = self(
            'elbo',
            x.repeat_interleave(num_importance_samples, dim=0),
            detach_q_params=detach_q_params,
            detach_q_samples=detach_q_samples
        )

        output_shape = (x.shape[0], num_importance_samples, 1)
        log_p = result['log-p'].view(output_shape)
        log_q = result['log-q'].view(output_shape)
        log_w = log_p - log_q

        return {'log-p': log_p, 'log-q': log_q, 'log-w': log_w}

    def sample(self, num_samples):
        return self('sample', num_samples)

    def fixed_sample(self, noise=None):
        return self('fixed-sample', noise)

    def _fix_random_u(self):
        raise NotImplementedError

    def _elbo(self, x, detach_q_params, detach_q_samples):
        raise NotImplementedError

    def _sample(self, num_samples):
        raise NotImplementedError

    def _fixed_sample(self, noise):
        raise NotImplementedError
class FlowDensity(Density):
    def __init__(self, prior, bijection):
        super().__init__()
        self.bijection = bijection
        self.prior = prior

    def p_parameters(self):
        return [*self.bijection.parameters(), *self.prior.p_parameters()]

    def q_parameters(self):
        return self.prior.q_parameters()

    def _fix_random_u(self):
        fixed_prior, z = self.prior._fix_random_u()
        new_z = self.bijection.z_to_x(z.unsqueeze(0))['x'].squeeze(0)
        return FlowDensity(bijection=self.bijection, prior=fixed_prior), new_z

    def fix_u(self, u):
        fixed_prior = self.prior.fix_u(u=u)
        return FlowDensity(bijection=self.bijection, prior=fixed_prior)

    def _elbo(self, x, detach_q_params, detach_q_samples):
        result = self.bijection.x_to_z(x)
        prior_dict = self.prior('elbo', result['z'], detach_q_params=detach_q_params, detach_q_samples=detach_q_samples)
        return {
            'log-p': prior_dict['log-p'] + result['log-jac'],
            'log-q': prior_dict['log-q'],
            'bijection-info': result,
            'prior-dict': prior_dict
        }

    def _sample(self, num_samples):
        z = self.prior.sample(num_samples)
        return self.bijection.z_to_x(z)['x']

    def _fixed_sample(self, noise):
        z = self.prior.fixed_sample(noise=noise)
        return self.bijection.z_to_x(z)['x']
def diagonal_gaussian_log_prob(w, means, stddevs):
    assert means.shape == stddevs.shape == w.shape

    flat_w = w.flatten(start_dim=1)
    flat_means = means.flatten(start_dim=1)
    flat_vars = stddevs.flatten(start_dim=1) ** 2
    _, dim = flat_w.shape

    const_term = -0.5 * dim * np.log(2 * np.pi)
    log_det_terms = -0.5 * torch.sum(torch.log(flat_vars), dim=1, keepdim=True)
    product_terms = -0.5 * torch.sum((flat_w - flat_means) ** 2 / flat_vars, dim=1, keepdim=True)

    return const_term + log_det_terms + product_terms
def diagonal_gaussian_sample(means, stddevs):
    return stddevs * torch.randn_like(means) + means
def diagonal_gaussian_entropy(stddevs):
    flat_stddevs = stddevs.flatten(start_dim=1)
    _, dim = flat_stddevs.shape
    return torch.sum(torch.log(flat_stddevs), dim=1, keepdim=True) + 0.5 * dim * (1 + np.log(2 * np.pi))
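# Cross-check of the diagonal-Gaussian helpers above against
# torch.distributions.Normal. Illustrative only; the shapes are arbitrary.
def _demo_gaussian_helpers():
    means = torch.randn(4, 3)
    stddevs = torch.rand(4, 3) + 0.5
    w = diagonal_gaussian_sample(means, stddevs)
    ref = torch.distributions.Normal(means, stddevs)
    assert torch.allclose(
        diagonal_gaussian_log_prob(w, means, stddevs),
        ref.log_prob(w).sum(dim=1, keepdim=True),
        atol=1e-5
    )
    assert torch.allclose(
        diagonal_gaussian_entropy(stddevs),
        ref.entropy().sum(dim=1, keepdim=True),
        atol=1e-5
    )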
class DiagonalGaussianDensity(Density):
    def __init__(self, mean, stddev, num_fixed_samples=0):
        super().__init__()
        assert mean.shape == stddev.shape
        self.register_buffer('mean', mean)
        self.register_buffer('stddev', stddev)
        if num_fixed_samples > 0:
            self.register_buffer('_fixed_samples', self.sample(num_fixed_samples))

    @property
    def shape(self):
        return self.mean.shape

    def p_parameters(self):
        return []

    def q_parameters(self):
        return []

    def _fix_random_u(self):
        return self, self.sample(num_samples=1)[0]

    def fix_u(self, u):
        assert not u
        return self

    def _elbo(self, z, detach_q_params, detach_q_samples):
        log_prob = diagonal_gaussian_log_prob(z, self.mean.expand_as(z), self.stddev.expand_as(z))
        return {'log-p': log_prob, 'log-q': z.new_zeros(z.shape[0], 1), 'z': z}

    def _sample(self, num_samples):
        return diagonal_gaussian_sample(
            self.mean.expand(num_samples, *self.shape),
            self.stddev.expand(num_samples, *self.shape)
        )

    def _fixed_sample(self, noise):
        return noise if noise is not None else self._fixed_samples
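# End-to-end sketch: a standard-Gaussian prior plus an affine bijection forms
# a trainable flow, and since log-q is identically zero here, a single
# importance sample already gives the exact log-likelihood. Illustrative only;
# the shapes are arbitrary.
def _demo_flow_density():
    prior = DiagonalGaussianDensity(mean=torch.zeros(2), stddev=torch.ones(2))
    density = FlowDensity(prior=prior, bijection=AffineBijection(x_shape=(2,), per_channel=False))
    x = torch.randn(7, 2)
    log_p = density.elbo(x, num_importance_samples=1)['log-w']
    assert log_p.shape == (7, 1, 1)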
class MarginalDensity(Density):
    def __init__(self, prior: Density, likelihood: ConditionalDensity, approx_posterior: ConditionalDensity):
        super().__init__()
        self.prior = prior
        self.likelihood = likelihood
        self.approx_posterior = approx_posterior

    def p_parameters(self):
        return [*self.prior.parameters(), *self.likelihood.parameters()]

    def q_parameters(self):
        return [*self.approx_posterior.parameters(), *self.prior.q_parameters()]

    def _elbo(self, x, detach_q_params, detach_q_samples):
        approx_posterior = self.approx_posterior.sample(
            cond_inputs=x,
            detach_params=detach_q_params,
            detach_samples=detach_q_samples
        )
        likelihood = self.likelihood.log_prob(inputs=x, cond_inputs=approx_posterior['sample'])
        prior = self.prior(
            'elbo',
            approx_posterior['sample'],
            detach_q_params=detach_q_params,
            detach_q_samples=detach_q_samples
        )

        log_p = likelihood['log-prob'] + prior['log-p']
        log_q = approx_posterior['log-prob'] + prior['log-q']

        return {'log-p': log_p, 'log-q': log_q}

    def _sample(self, num_samples):
        z = self.prior.sample(num_samples)
        return self.likelihood.sample(cond_inputs=z)['sample']

    def _fixed_sample(self, noise):
        z = self.prior.fixed_sample(noise=noise)
        return self.likelihood.sample(cond_inputs=z)['sample']
class SplitDensity(Density):
    def __init__(self, density_1, density_2, dim):
        super().__init__()
        self.density_1 = density_1
        self.density_2 = density_2
        self.dim = dim

    def _elbo(self, x, detach_q_params, detach_q_samples):
        x1, x2 = torch.chunk(x, chunks=2, dim=self.dim)
        result_1 = self.density_1('elbo', x1, detach_q_params=detach_q_params, detach_q_samples=detach_q_samples)
        result_2 = self.density_2('elbo', x2, detach_q_params=detach_q_params, detach_q_samples=detach_q_samples)
        return {
            'log-p': result_1['log-p'] + result_2['log-p'],
            'log-q': result_1['log-q'] + result_2['log-q']
        }

    def _fixed_sample(self, noise):
        if noise is not None:
            raise NotImplementedError('Proper splitting of noise is not yet implemented')
        x1 = self.density_1.fixed_sample(noise=noise)
        x2 = self.density_2.fixed_sample(noise=noise)
        return torch.cat((x1, x2), dim=self.dim)

    def _sample(self, num_samples):
        x1 = self.density_1.sample(num_samples)
        x2 = self.density_2.sample(num_samples)
        return torch.cat((x1, x2), dim=self.dim)
class WrapperDensity(Density):
    def __init__(self, density):
        super().__init__()
        self.density = density

    def p_parameters(self):
        return self.density.p_parameters()

    def q_parameters(self):
        return self.density.q_parameters()

    def elbo(self, x, num_importance_samples, detach_q_params, detach_q_samples):
        return self.density.elbo(
            x,
            num_importance_samples=num_importance_samples,
            detach_q_params=detach_q_params,
            detach_q_samples=detach_q_samples
        )

    def _elbo(self, x, detach_q_params, detach_q_samples):
        assert False, 'Wrapper densities should not be preceded by standard Density layers'

    def _sample(self, num_samples):
        return self.density.sample(num_samples)

    def _fixed_sample(self, noise):
        return self.density.fixed_sample(noise=noise)
class DequantizationDensity(WrapperDensity):
    def elbo(self, x, num_importance_samples, detach_q_params, detach_q_samples):
        # Note: add_ dequantizes in place, mutating the caller's tensor
        return super().elbo(
            x.add_(torch.rand_like(x)),
            num_importance_samples=num_importance_samples,
            detach_q_params=detach_q_params,
            detach_q_samples=detach_q_samples
        )
class BinarizationDensity(WrapperDensity):
    def __init__(self, density, scale):
        super().__init__(density)
        self.scale = scale

    def elbo(self, x, num_importance_samples, detach_q_params, detach_q_samples):
        bernoulli = dist.bernoulli.Bernoulli(probs=x / self.scale)
        return super().elbo(
            bernoulli.sample(),
            num_importance_samples=num_importance_samples,
            detach_q_params=detach_q_params,
            detach_q_samples=detach_q_samples
        )
class PassthroughBeforeEvalDensity(WrapperDensity):
    def __init__(self, density, x):
        super().__init__(density)
        self.register_buffer('x', x)

    def train(self, train_mode=True):
        if not train_mode:
            # Pass a batch of real data through in train mode before switching
            # to eval, so that buffers such as batch-norm running statistics
            # are populated
            self.training = True
            with torch.no_grad():
                self.elbo(self.x, num_importance_samples=1, detach_q_params=False, detach_q_samples=False)
        super().train(train_mode)
class ConstantNetwork(nn.Module):
    def __init__(self, value, fixed):
        super().__init__()
        if fixed:
            self.register_buffer('value', value)
        else:
            self.value = nn.Parameter(value)

    def forward(self, inputs):
        return self.value.expand(inputs.shape[0], *self.value.shape)
class ResidualBlock(nn.Module):
    def __init__(self, num_channels):
        super().__init__()
        self.bn1 = nn.BatchNorm2d(num_channels)
        self.conv1 = self._get_conv3x3(num_channels)
        self.bn2 = nn.BatchNorm2d(num_channels)
        self.conv2 = self._get_conv3x3(num_channels)

    def forward(self, inputs):
        out = self.bn1(inputs)
        out = torch.relu(out)
        out = self.conv1(out)
        out = self.bn2(out)
        out = torch.relu(out)
        out = self.conv2(out)
        return out + inputs

    def _get_conv3x3(self, num_channels):
        return nn.Conv2d(
            in_channels=num_channels,
            out_channels=num_channels,
            kernel_size=3,
            stride=1,
            padding=1,
            bias=False
        )
class ScaledTanh2dModule(nn.Module):
    def __init__(self, module, num_channels):
        super().__init__()
        self.module = module
        self.weights = nn.Parameter(torch.ones(num_channels, 1, 1))
        self.bias = nn.Parameter(torch.zeros(num_channels, 1, 1))

    def forward(self, inputs):
        out = self.module(inputs)
        return self.weights * torch.tanh(out) + self.bias
def get_resnet(num_input_channels, hidden_channels, num_output_channels):
    num_hidden_channels = hidden_channels[0] if hidden_channels else num_output_channels

    layers = [
        nn.Conv2d(
            in_channels=num_input_channels,
            out_channels=num_hidden_channels,
            kernel_size=3,
            stride=1,
            padding=1,
            bias=False
        )
    ]

    for num_hidden_channels in hidden_channels:
        layers.append(ResidualBlock(num_hidden_channels))

    layers += [
        nn.BatchNorm2d(num_hidden_channels),
        nn.ReLU(),
        nn.Conv2d(
            in_channels=num_hidden_channels,
            out_channels=num_output_channels,
            kernel_size=1,
            padding=0,
            bias=True
        )
    ]

    return ScaledTanh2dModule(module=nn.Sequential(*layers), num_channels=num_output_channels)
def get_glow_cnn(num_input_channels, num_hidden_channels, num_output_channels, zero_init_output):
    conv1 = nn.Conv2d(in_channels=num_input_channels, out_channels=num_hidden_channels, kernel_size=3, padding=1, bias=False)
    bn1 = nn.BatchNorm2d(num_hidden_channels)
    conv2 = nn.Conv2d(in_channels=num_hidden_channels, out_channels=num_hidden_channels, kernel_size=1, padding=0, bias=False)
    bn2 = nn.BatchNorm2d(num_hidden_channels)
    conv3 = nn.Conv2d(in_channels=num_hidden_channels, out_channels=num_output_channels, kernel_size=3, padding=1)

    if zero_init_output:
        conv3.weight.data.zero_()
        conv3.bias.data.zero_()

    relu = nn.ReLU()

    return nn.Sequential(conv1, bn1, relu, conv2, bn2, relu, conv3)
def get_mlp(num_input_channels, hidden_channels, num_output_channels, activation, log_softmax_outputs=False):
    layers = []
    prev_num_hidden_channels = num_input_channels
    for num_hidden_channels in hidden_channels:
        layers.append(nn.Linear(prev_num_hidden_channels, num_hidden_channels))
        layers.append(activation())
        prev_num_hidden_channels = num_hidden_channels
    layers.append(nn.Linear(prev_num_hidden_channels, num_output_channels))

    if log_softmax_outputs:
        layers.append(nn.LogSoftmax(dim=1))

    return nn.Sequential(*layers)
class MaskedLinear(nn.Module):
    def __init__(self, input_degrees, output_degrees):
        super().__init__()

        assert len(input_degrees.shape) == len(output_degrees.shape) == 1

        num_input_channels = input_degrees.shape[0]
        num_output_channels = output_degrees.shape[0]

        self.linear = nn.Linear(num_input_channels, num_output_channels)

        mask = output_degrees.view(-1, 1) >= input_degrees
        self.register_buffer('mask', mask.to(self.linear.weight.dtype))

    def forward(self, inputs):
        return F.linear(inputs, self.mask * self.linear.weight, self.linear.bias)
class AutoregressiveMLP(nn.Module):
    def __init__(self, num_input_channels, hidden_channels, num_output_heads, activation):
        super().__init__()
        self.flat_ar_mlp = self._get_flat_ar_mlp(num_input_channels, hidden_channels, num_output_heads, activation)
        self.num_input_channels = num_input_channels
        self.num_output_heads = num_output_heads

    def _get_flat_ar_mlp(self, num_input_channels, hidden_channels, num_output_heads, activation):
        assert num_input_channels >= 2
        assert all([num_input_channels <= d for d in hidden_channels]), 'Random initialisation not yet implemented'

        prev_degrees = torch.arange(1, num_input_channels + 1, dtype=torch.int64)
        layers = []

        for num_hidden_channels in hidden_channels:
            degrees = torch.arange(num_hidden_channels, dtype=torch.int64) % (num_input_channels - 1) + 1
            layers.append(MaskedLinear(prev_degrees, degrees))
            layers.append(activation())
            prev_degrees = degrees

        degrees = torch.arange(num_input_channels, dtype=torch.int64).repeat(num_output_heads)
        layers.append(MaskedLinear(prev_degrees, degrees))

        return nn.Sequential(*layers)

    def forward(self, inputs):
        assert inputs.shape[1:] == (self.num_input_channels,)
        result = self.flat_ar_mlp(inputs)
        return result.view(inputs.shape[0], self.num_output_heads, self.num_input_channels)
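# Check of the autoregressive masking above: output channel i may depend only
# on input channels strictly before i, so perturbing input channel k must
# leave output channels 0..k unchanged. Illustrative only; the sizes are
# arbitrary.
def _demo_autoregressive_mask():
    mlp = AutoregressiveMLP(num_input_channels=4, hidden_channels=[8, 8],
                            num_output_heads=2, activation=nn.Tanh)
    x = torch.randn(1, 4)
    y = mlp(x)
    x2 = x.clone()
    x2[0, 2] += 1.  # perturb input channel 2
    y2 = mlp(x2)
    assert torch.allclose(y[..., :3], y2[..., :3])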
class LipschitzNetwork(nn.Module):
    _MODULES_TO_UPDATE = (InducedNormConv2d, InducedNormLinear)

    def __init__(self, layers, max_train_lipschitz_iters, max_eval_lipschitz_iters, lipschitz_tolerance):
        super().__init__()

        self.layers = layers
        self.net = nn.Sequential(*layers)

        self.max_train_lipschitz_iters = max_train_lipschitz_iters
        self.max_eval_lipschitz_iters = max_eval_lipschitz_iters
        self.lipschitz_tolerance = lipschitz_tolerance

        self.register_forward_pre_hook(self._update_lipschitz_constant)
        self.register_full_backward_hook(self._queue_lipschitz_update)

        self._requires_train_lipschitz_update = True
        self._requires_eval_lipschitz_update = True

    def forward(self, inputs):
        return self.net(inputs)

    def _queue_lipschitz_update(self, *args, **kwargs):
        self._requires_train_lipschitz_update = True
        self._requires_eval_lipschitz_update = True

    def _update_lipschitz_constant(self, *args, **kwargs):
        if self.training:
            if self._requires_train_lipschitz_update:
                self._update_lipschitz(max_iterations=self.max_train_lipschitz_iters)
                self._requires_train_lipschitz_update = False
        elif self._requires_eval_lipschitz_update:
            self._update_lipschitz(max_iterations=self.max_eval_lipschitz_iters)
            self._requires_eval_lipschitz_update = False
            self._requires_train_lipschitz_update = False

    def _update_lipschitz(self, max_iterations):
        for m in self._modules_to_update():
            m.compute_weight(update=True, n_iterations=max_iterations, atol=self.lipschitz_tolerance, rtol=self.lipschitz_tolerance)

    def _modules_to_update(self):
        for m in self.layers:
            if isinstance(m, self._MODULES_TO_UPDATE):
                yield m
def get_lipschitz_mlp(num_input_channels, hidden_channels, num_output_channels, lipschitz_constant,
                      max_train_lipschitz_iters, max_eval_lipschitz_iters, lipschitz_tolerance):
    layers = []
    prev_num_channels = num_input_channels
    for i, num_channels in enumerate(hidden_channels + [num_output_channels]):
        layers += [
            Swish(),
            _get_lipschitz_linear_layer(
                num_input_channels=prev_num_channels,
                num_output_channels=num_channels,
                lipschitz_constant=lipschitz_constant,
                max_lipschitz_iters=max_eval_lipschitz_iters,
                lipschitz_tolerance=lipschitz_tolerance,
                zero_init=(i == len(hidden_channels))
            )
        ]
        prev_num_channels = num_channels

    return LipschitzNetwork(
        layers=layers,
        max_train_lipschitz_iters=max_train_lipschitz_iters,
        max_eval_lipschitz_iters=max_eval_lipschitz_iters,
        lipschitz_tolerance=lipschitz_tolerance
    )
def _get_lipschitz_linear_layer(num_input_channels, num_output_channels, lipschitz_constant,
                                max_lipschitz_iters, lipschitz_tolerance, zero_init):
    return InducedNormLinear(
        in_features=num_input_channels,
        out_features=num_output_channels,
        coeff=lipschitz_constant,
        domain=2,
        codomain=2,
        n_iterations=max_lipschitz_iters,
        atol=lipschitz_tolerance,
        rtol=lipschitz_tolerance,
        zero_init=zero_init
    )
def get_lipschitz_cnn(input_shape, num_hidden_channels, num_output_channels, lipschitz_constant,
                      max_train_lipschitz_iters, max_eval_lipschitz_iters, lipschitz_tolerance):
    assert len(input_shape) == 3
    num_input_channels = input_shape[0]

    conv1 = _get_lipschitz_conv_layer(
        num_input_channels=num_input_channels,
        num_output_channels=num_hidden_channels,
        kernel_size=3,
        padding=1,
        lipschitz_constant=lipschitz_constant,
        max_lipschitz_iters=max_eval_lipschitz_iters,
        lipschitz_tolerance=lipschitz_tolerance
    )

    conv2 = _get_lipschitz_conv_layer(
        num_input_channels=num_hidden_channels,
        num_output_channels=num_hidden_channels,
        kernel_size=1,
        padding=0,
        lipschitz_constant=lipschitz_constant,
        max_lipschitz_iters=max_eval_lipschitz_iters,
        lipschitz_tolerance=lipschitz_tolerance
    )

    conv3 = _get_lipschitz_conv_layer(
        num_input_channels=num_hidden_channels,
        num_output_channels=num_output_channels,
        kernel_size=3,
        padding=1,
        lipschitz_constant=lipschitz_constant,
        max_lipschitz_iters=max_eval_lipschitz_iters,
        lipschitz_tolerance=lipschitz_tolerance
    )

    layers = [Swish(), conv1, Swish(), conv2, Swish(), conv3]

    # Dummy forward pass through the layers before wrapping them
    dummy_inputs = torch.empty(1, *input_shape)
    nn.Sequential(*layers)(dummy_inputs)

    return LipschitzNetwork(
        layers=layers,
        max_train_lipschitz_iters=max_train_lipschitz_iters,
        max_eval_lipschitz_iters=max_eval_lipschitz_iters,
        lipschitz_tolerance=lipschitz_tolerance
    )
def _get_lipschitz_conv_layer(num_input_channels, num_output_channels, kernel_size, padding,
                              lipschitz_constant, max_lipschitz_iters, lipschitz_tolerance):
    assert max_lipschitz_iters is not None or lipschitz_tolerance is not None

    return InducedNormConv2d(
        in_channels=num_input_channels,
        out_channels=num_output_channels,
        kernel_size=kernel_size,
        stride=1,
        padding=padding,
        bias=True,
        coeff=lipschitz_constant,
        domain=2,
        codomain=2,
        n_iterations=max_lipschitz_iters,
        atol=lipschitz_tolerance,
        rtol=lipschitz_tolerance
    )
def get_density(schema, x_train):
    x_shape = x_train.shape[1:]

    if schema[0]['type'] == 'passthrough-before-eval':
        num_points = schema[0]['num_passthrough_data_points']
        x_idxs = torch.randperm(x_train.shape[0])[:num_points]
        return PassthroughBeforeEvalDensity(
            density=get_density_recursive(schema[1:], x_shape),
            x=x_train[x_idxs]
        )

    else:
        return get_density_recursive(schema, x_shape)
def get_density_recursive(schema, x_shape):
    if not schema:
        return get_standard_gaussian_density(x_shape=x_shape)

    layer_config = schema[0]
    schema_tail = schema[1:]

    if layer_config['type'] == 'dequantization':
        return DequantizationDensity(
            density=get_density_recursive(schema=schema_tail, x_shape=x_shape)
        )

    elif layer_config['type'] == 'binarize':
        return BinarizationDensity(
            density=get_density_recursive(schema=schema_tail, x_shape=x_shape),
            scale=layer_config['scale']
        )

    elif layer_config['type'] == 'split':
        split_x_shape = (x_shape[0] // 2, *x_shape[1:])
        return SplitDensity(
            density_1=get_density_recursive(schema=schema_tail, x_shape=split_x_shape),
            density_2=get_standard_gaussian_density(x_shape=split_x_shape),
            dim=1
        )

    elif layer_config['type'] == 'passthrough-before-eval':
        assert False, '`passthrough-before-eval` must occur as the first item in a schema'

    elif layer_config['type'] in ['bernoulli-likelihood', 'gaussian-likelihood']:
        return get_marginal_density(layer_config=layer_config, schema_tail=schema_tail, x_shape=x_shape)

    else:
        return get_bijection_density(layer_config=layer_config, schema_tail=schema_tail, x_shape=x_shape)
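# Hedged illustration: a minimal schema accepted by get_density above. It
# dequantizes, flattens, and stacks one MADE layer in front of the standard
# Gaussian prior (the empty-schema base case). Key names follow the branches
# above; the set of valid activation strings depends on get_activation, and
# 'tanh' is assumed here.
_EXAMPLE_SCHEMA = [
    {'type': 'dequantization'},
    {'type': 'flatten'},
    {'type': 'made', 'hidden_channels': [64, 64], 'activation': 'tanh'},
]
# density = get_density(schema=_EXAMPLE_SCHEMA, x_train=x_train)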
def get_marginal_density(layer_config, schema_tail, x_shape):
    likelihood, z_shape = get_likelihood(layer_config, schema_tail, x_shape)
    prior = get_density_recursive(schema_tail, z_shape)
    approx_posterior = DiagonalGaussianConditionalDensity(
        coupler=get_coupler(
            input_shape=x_shape,
            num_channels_per_output=layer_config['num_z_channels'],
            config=layer_config['q_coupler']
        )
    )
    return MarginalDensity(prior=prior, likelihood=likelihood, approx_posterior=approx_posterior)
def get_likelihood(layer_config, schema_tail, x_shape):
    z_shape = (layer_config['num_z_channels'], *x_shape[1:])

    if layer_config['type'] == 'gaussian-likelihood':
        likelihood = DiagonalGaussianConditionalDensity(
            coupler=get_coupler(
                input_shape=z_shape,
                num_channels_per_output=x_shape[0],
                config=layer_config['p_coupler']
            )
        )

    elif layer_config['type'] == 'bernoulli-likelihood':
        likelihood = BernoulliConditionalDensity(
            logit_net=get_net(
                input_shape=z_shape,
                num_output_channels=x_shape[0],
                net_config=layer_config['logit_net']
            )
        )

    else:
        assert False, f"Invalid layer type `{layer_config['type']}'"

    return likelihood, z_shape
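# Hedged example of a latent-variable layer config dispatched to
# get_marginal_density above. The nested coupler configs use the get_coupler
# format defined below; the net configs themselves (consumed by get_net,
# defined elsewhere) are left as placeholders.
_EXAMPLE_VAE_LAYER = {
    'type': 'gaussian-likelihood',
    'num_z_channels': 16,
    'p_coupler': {'independent_nets': False, 'shift_log_scale_net': ...},
    'q_coupler': {'independent_nets': False, 'shift_log_scale_net': ...},
}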
def get_bijection_density(layer_config, schema_tail, x_shape):
    bijection = get_bijection(layer_config=layer_config, x_shape=x_shape)
    prior = get_density_recursive(schema=schema_tail, x_shape=bijection.z_shape)

    if layer_config.get('num_u_channels', 0) == 0:
        return FlowDensity(bijection=bijection, prior=prior)

    else:
        return CIFDensity(
            bijection=bijection,
            prior=prior,
            p_u_density=get_conditional_density(
                num_u_channels=layer_config['num_u_channels'],
                coupler_config=layer_config['p_coupler'],
                x_shape=x_shape
            ),
            q_u_density=get_conditional_density(
                num_u_channels=layer_config['num_u_channels'],
                coupler_config=layer_config['q_coupler'],
                x_shape=x_shape
            )
        )
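# Hedged example: setting num_u_channels > 0 on a bijection layer upgrades it
# from a plain FlowDensity step to a CIF step, with p(u|z) and q(u|x)
# Gaussians built from the couplers named here; the 'cond-affine' bijection
# consumes u through its st_coupler. Net configs are placeholders as above.
_EXAMPLE_CIF_LAYER = {
    'type': 'cond-affine',
    'num_u_channels': 2,
    'st_coupler': {'independent_nets': False, 'shift_log_scale_net': ...},
    'p_coupler': {'independent_nets': False, 'shift_log_scale_net': ...},
    'q_coupler': {'independent_nets': False, 'shift_log_scale_net': ...},
}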
def get_uniform_density(x_shape):
    return FlowDensity(bijection=LogitBijection(x_shape=x_shape).inverse(), prior=UniformDensity(x_shape))
def get_standard_gaussian_density(x_shape):
    return DiagonalGaussianDensity(
        mean=torch.zeros(x_shape),
        stddev=torch.ones(x_shape),
        num_fixed_samples=64
    )
def get_bijection(layer_config, x_shape):
    if layer_config['type'] == 'acl':
        return get_acl_bijection(config=layer_config, x_shape=x_shape)

    elif layer_config['type'] == 'squeeze':
        return Squeeze2dBijection(x_shape=x_shape, factor=layer_config['factor'])

    elif layer_config['type'] == 'logit':
        return LogitBijection(x_shape=x_shape)

    elif layer_config['type'] == 'sigmoid':
        return LogitBijection(x_shape=x_shape).inverse()

    elif layer_config['type'] == 'tanh':
        return TanhBijection(x_shape=x_shape)

    elif layer_config['type'] == 'scalar-mult':
        return ScalarMultiplicationBijection(x_shape=x_shape, value=layer_config['value'])

    elif layer_config['type'] == 'scalar-add':
        return ScalarAdditionBijection(x_shape=x_shape, value=layer_config['value'])

    elif layer_config['type'] == 'flatten':
        return ViewBijection(x_shape=x_shape, z_shape=(int(np.prod(x_shape)),))

    elif layer_config['type'] == 'made':
        assert len(x_shape) == 1
        return MADEBijection(
            num_input_channels=x_shape[0],
            hidden_channels=layer_config['hidden_channels'],
            activation=get_activation(layer_config['activation'])
        )

    elif layer_config['type'] == 'batch-norm':
        return BatchNormBijection(
            x_shape=x_shape,
            per_channel=layer_config['per_channel'],
            apply_affine=layer_config['apply_affine'],
            momentum=layer_config['momentum']
        )

    elif layer_config['type'] == 'act-norm':
        return ActNormBijection(x_shape=x_shape)

    elif layer_config['type'] == 'affine':
        return AffineBijection(x_shape=x_shape, per_channel=layer_config['per_channel'])

    elif layer_config['type'] == 'cond-affine':
        return ConditionalAffineBijection(
            x_shape=x_shape,
            coupler=get_coupler(
                input_shape=(layer_config['num_u_channels'], *x_shape[1:]),
                num_channels_per_output=x_shape[0],
                config=layer_config['st_coupler']
            )
        )

    elif layer_config['type'] == 'flip':
        return FlipBijection(x_shape=x_shape, dim=1)

    elif layer_config['type'] == 'invconv':
        if layer_config['lu']:
            return LUInvertible1x1ConvBijection(x_shape=x_shape)
        else:
            return BruteForceInvertible1x1ConvBijection(x_shape=x_shape)

    elif layer_config['type'] == 'linear':
        assert len(x_shape) == 1
        return LULinearBijection(num_input_channels=x_shape[0])

    elif layer_config['type'] == 'rand-channel-perm':
        return RandomChannelwisePermutationBijection(x_shape=x_shape)

    elif layer_config['type'] == 'sos':
        assert len(x_shape) == 1
        return SumOfSquaresPolynomialBijection(
            num_input_channels=x_shape[0],
            hidden_channels=layer_config['hidden_channels'],
            activation=get_activation(layer_config['activation']),
            num_polynomials=layer_config['num_polynomials'],
            polynomial_degree=layer_config['polynomial_degree']
        )

    elif layer_config['type'] == 'nsf-ar':
        assert len(x_shape) == 1
        return AutoregressiveRationalQuadraticSplineBijection(
            num_input_channels=x_shape[0],
            num_hidden_layers=layer_config['num_hidden_layers'],
            num_hidden_channels=layer_config['num_hidden_channels'],
            num_bins=layer_config['num_bins'],
            tail_bound=layer_config['tail_bound'],
            activation=get_activation(layer_config['activation']),
            dropout_probability=layer_config['dropout_probability']
        )

    elif layer_config['type'] == 'nsf-c':
        assert len(x_shape) == 1
        return CoupledRationalQuadraticSplineBijection(
            num_input_channels=x_shape[0],
            num_hidden_layers=layer_config['num_hidden_layers'],
            num_hidden_channels=layer_config['num_hidden_channels'],
            num_bins=layer_config['num_bins'],
            tail_bound=layer_config['tail_bound'],
            activation=get_activation(layer_config['activation']),
            dropout_probability=layer_config['dropout_probability'],
            reverse_mask=layer_config['reverse_mask']
        )

    elif layer_config['type'] == 'bnaf':
        assert len(x_shape) == 1
        return BlockNeuralAutoregressiveBijection(
            num_input_channels=x_shape[0],
            num_hidden_layers=layer_config['num_hidden_layers'],
            hidden_channels_factor=layer_config['hidden_channels_factor'],
            activation=layer_config['activation'],
            residual=layer_config['residual']
        )

    elif layer_config['type'] == 'ode':
        assert len(x_shape) == 1
        return FFJORDBijection(
            x_shape=x_shape,
            velocity_hidden_channels=layer_config['hidden_channels'],
            relative_tolerance=layer_config['numerical_tolerance'],
            absolute_tolerance=layer_config['numerical_tolerance'],
            num_u_channels=layer_config['num_u_channels']
        )

    elif layer_config['type'] == 'planar':
        assert len(x_shape) == 1
        return PlanarBijection(num_input_channels=x_shape[0])

    elif layer_config['type'] == 'cond-planar':
        assert len(x_shape) == 1
        return ConditionalPlanarBijection(
            num_input_channels=x_shape[0],
            num_u_channels=layer_config['num_u_channels'],
            cond_hidden_channels=layer_config['cond_hidden_channels'],
            cond_activation=get_activation(layer_config['cond_activation'])
        )

    elif layer_config['type'] == 'resblock':
        return ResidualFlowBijection(
            x_shape=x_shape,
            lipschitz_net=get_lipschitz_net(
                input_shape=x_shape,
                num_output_channels=x_shape[0],
                config=layer_config['net']
            ),
            reduce_memory=layer_config['reduce_memory']
        )

    else:
        assert False, f"Invalid layer type `{layer_config['type']}'"
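# Hedged illustration: composing branches above into a Glow-style step for
# image inputs -- squeeze, act-norm, then an LU-decomposed 1x1 convolution.
# This would be spliced into a schema like _EXAMPLE_SCHEMA earlier.
_EXAMPLE_GLOW_STEP = [
    {'type': 'squeeze', 'factor': 2},
    {'type': 'act-norm'},
    {'type': 'invconv', 'lu': True},
]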
def get_acl_bijection(config, x_shape):
    num_x_channels = x_shape[0]
    num_u_channels = config['num_u_channels']

    if config['mask_type'] == 'checkerboard':
        return Checkerboard2dAffineCouplingBijection(
            x_shape=x_shape,
            coupler=get_coupler(
                input_shape=(num_x_channels + num_u_channels, *x_shape[1:]),
                num_channels_per_output=num_x_channels,
                config=config['coupler']
            ),
            reverse_mask=config['reverse_mask']
        )

    else:
        def coupler_factory(num_passthrough_channels):
            return get_coupler(
                input_shape=(num_passthrough_channels + num_u_channels, *x_shape[1:]),
                num_channels_per_output=num_x_channels - num_passthrough_channels,
                config=config['coupler']
            )

        if config['mask_type'] == 'alternating-channel':
            return AlternatingChannelwiseAffineCouplingBijection(
                x_shape=x_shape,
                coupler_factory=coupler_factory,
                reverse_mask=config['reverse_mask']
            )

        elif config['mask_type'] == 'split-channel':
            return SplitChannelwiseAffineCouplingBijection(
                x_shape=x_shape,
                coupler_factory=coupler_factory,
                reverse_mask=config['reverse_mask']
            )

        else:
            assert False, f"Invalid mask type {config['mask_type']}"
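# Hedged example of an affine-coupling config consumed above: alternating
# channel mask, no continuous index, and a single shared shift/log-scale net
# (placeholder get_net config, format defined elsewhere).
_EXAMPLE_ACL_CONFIG = {
    'type': 'acl',
    'mask_type': 'alternating-channel',
    'reverse_mask': False,
    'num_u_channels': 0,
    'coupler': {'independent_nets': False, 'shift_log_scale_net': ...},
}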
def get_conditional_density(num_u_channels, coupler_config, x_shape):
    return DiagonalGaussianConditionalDensity(
        coupler=get_coupler(
            input_shape=x_shape,
            num_channels_per_output=num_u_channels,
            config=coupler_config
        )
    )
def get_coupler(input_shape, num_channels_per_output, config):
    if config['independent_nets']:
        return get_coupler_with_independent_nets(
            input_shape=input_shape,
            num_channels_per_output=num_channels_per_output,
            shift_net_config=config['shift_net'],
            log_scale_net_config=config['log_scale_net']
        )

    else:
        return get_coupler_with_shared_net(
            input_shape=input_shape,
            num_channels_per_output=num_channels_per_output,
            net_config=config['shift_log_scale_net']
        )
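# Hedged illustration of the two coupler config shapes dispatched on above.
# The nested values are get_net configs (defined elsewhere), left as
# placeholders here.
_SHARED_COUPLER_CONFIG = {'independent_nets': False, 'shift_log_scale_net': ...}
_INDEPENDENT_COUPLER_CONFIG = {'independent_nets': True, 'shift_net': ..., 'log_scale_net': ...}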
def get_coupler_with_shared_net(input_shape, num_channels_per_output, net_config):
    return ChunkedSharedCoupler(
        shift_log_scale_net=get_net(
            input_shape=input_shape,
            num_output_channels=2 * num_channels_per_output,
            net_config=net_config
        )
    )
def get_coupler_with_independent_nets(input_shape, num_channels_per_output, shift_net_config, log_scale_net_config):
    return IndependentCoupler(
        shift_net=get_net(
            input_shape=input_shape,
            num_output_channels=num_channels_per_output,
            net_config=shift_net_config
        ),
        log_scale_net=get_net(
            input_shape=input_shape,
            num_output_channels=num_channels_per_output,
            net_config=log_scale_net_config
        )
    )