code
stringlengths
17
6.64M
def get_net(input_shape, num_output_channels, net_config):
    """Construct a network of the type named in ``net_config['type']``.

    `input_shape` is the per-datapoint shape; its first entry is treated as
    the channel count. Returns a callable mapping inputs to
    `num_output_channels` output channels.
    """
    net_type = net_config['type']
    num_input_channels = input_shape[0]

    if net_type == 'mlp':
        assert len(input_shape) == 1
        return get_mlp(
            num_input_channels=num_input_channels,
            hidden_channels=net_config['hidden_channels'],
            num_output_channels=num_output_channels,
            activation=get_activation(net_config['activation']),
        )

    if net_type == 'resnet':
        assert len(input_shape) == 3
        return get_resnet(
            num_input_channels=num_input_channels,
            hidden_channels=net_config['hidden_channels'],
            num_output_channels=num_output_channels,
        )

    if net_type == 'glow-cnn':
        assert len(input_shape) == 3
        return get_glow_cnn(
            num_input_channels=num_input_channels,
            num_hidden_channels=net_config['num_hidden_channels'],
            num_output_channels=num_output_channels,
            zero_init_output=net_config['zero_init_output'],
        )

    if net_type == 'constant':
        # Constant output broadcast over the non-channel dimensions.
        value = torch.full(
            (num_output_channels, *input_shape[1:]),
            net_config['value'],
            dtype=torch.get_default_dtype(),
        )
        return ConstantNetwork(value=value, fixed=net_config['fixed'])

    if net_type == 'identity':
        assert num_output_channels == num_input_channels
        return lambda x: x

    assert False, f"Invalid net type {net_config['type']}"
def get_activation(name):
    """Map an activation name ('tanh' or 'relu') to its torch.nn class."""
    activations = {'tanh': nn.Tanh, 'relu': nn.ReLU}
    assert name in activations, f'Invalid activation {name}'
    return activations[name]
def get_lipschitz_net(input_shape, num_output_channels, config):
    """Construct a Lipschitz-constrained network (CNN or MLP) from `config`.

    Raises AssertionError for an unknown ``config['type']``.
    """
    if config['type'] == 'cnn':
        return get_lipschitz_cnn(
            input_shape=input_shape,
            num_hidden_channels=config['num_hidden_channels'],
            num_output_channels=num_output_channels,
            lipschitz_constant=config['lipschitz_constant'],
            max_train_lipschitz_iters=config['max_train_lipschitz_iters'],
            max_eval_lipschitz_iters=config['max_test_lipschitz_iters'],
            lipschitz_tolerance=config['lipschitz_tolerance'],
        )
    elif config['type'] == 'mlp':
        assert len(input_shape) == 1
        return get_lipschitz_mlp(
            num_input_channels=input_shape[0],
            hidden_channels=config['hidden_channels'],
            num_output_channels=num_output_channels,
            lipschitz_constant=config['lipschitz_constant'],
            max_train_lipschitz_iters=config['max_train_lipschitz_iters'],
            max_eval_lipschitz_iters=config['max_test_lipschitz_iters'],
            lipschitz_tolerance=config['lipschitz_tolerance'],
        )
    else:
        # Bug fix: previously formatted config['net'], which is not a key, so
        # an invalid type raised KeyError instead of this assertion message.
        assert False, f"Invalid Lipschitz net type {config['type']}"
class AverageMetric(Metric):
    # Ignite metric that averages every entry of the `metrics` dict found in
    # the engine output, element-wise over everything seen since reset().

    # Tells ignite which keys to extract from `engine.state.output`.
    _required_output_keys = ['metrics']

    def reset(self):
        # Per-key running totals and element counts.
        self._sums = Counter()
        self._num_examples = Counter()

    def update(self, output):
        (metrics,) = output
        for (k, v) in metrics.items():
            # Accumulate sum and number of scalar entries so compute() can
            # return a per-element mean for each key.
            self._sums[k] += torch.sum(v)
            self._num_examples[k] += torch.numel(v)

    def compute(self):
        return {k: (v / self._num_examples[k]) for (k, v) in self._sums.items()}

    def completed(self, engine):
        # Merge the averaged values into the engine's metrics dict.
        engine.state.metrics = {**engine.state.metrics, **self.compute()}

    def attach(self, engine):
        # NOTE(review): unlike the stock Metric.attach, `completed` fires on
        # every ITERATION_COMPLETED (not EPOCH_COMPLETED), so running
        # averages are visible after each iteration.
        engine.add_event_handler(Events.EPOCH_STARTED, self.started)
        engine.add_event_handler(Events.ITERATION_COMPLETED, self.iteration_completed)
        engine.add_event_handler(Events.ITERATION_COMPLETED, self.completed)
class Trainer():
    # Orchestrates training, validation and testing via three ignite Engines,
    # with per-parameter-group optimisers/schedulers, gradient clipping,
    # early stopping, checkpointing and scalar logging through `writer`.

    # How often (in iterations) to log losses / grad norms / learning rates.
    _STEPS_PER_LOSS_WRITE = 10
    _STEPS_PER_GRAD_WRITE = 10
    _STEPS_PER_LR_WRITE = 10

    def __init__(self, module, device, train_metrics, train_loader, opts, lr_schedulers, max_epochs, max_grad_norm, test_metrics, test_loader, epochs_per_test, early_stopping, valid_loss, valid_loader, max_bad_valid_epochs, visualizer, writer, should_checkpoint_latest, should_checkpoint_best_valid, checkpoint_to_load):
        self._module = module
        self._device = device
        self._train_metrics = train_metrics
        self._train_loader = train_loader
        # `opts` and `lr_schedulers` are dicts keyed by parameter-group name.
        self._opts = opts
        self._lr_schedulers = lr_schedulers
        self._max_epochs = max_epochs
        self._max_grad_norm = max_grad_norm
        self._test_metrics = test_metrics
        self._test_loader = test_loader
        self._epochs_per_test = epochs_per_test
        self._valid_loss = valid_loss
        self._valid_loader = valid_loader
        self._max_bad_valid_epochs = max_bad_valid_epochs
        self._best_valid_loss = float('inf')
        self._num_bad_valid_epochs = 0
        self._visualizer = visualizer
        self._writer = writer
        self._should_checkpoint_best_valid = should_checkpoint_best_valid
        self._trainer = Engine(self._train_batch)
        AverageMetric().attach(self._trainer)
        ProgressBar(persist=True).attach(self._trainer, list(self._opts.keys()))
        self._trainer.add_event_handler(Events.ITERATION_COMPLETED, TerminateOnNan())
        self._trainer.add_event_handler(Events.ITERATION_COMPLETED, self._log_training_info)
        if early_stopping:
            self._validator = Engine(self._validate_batch)
            AverageMetric().attach(self._validator)
            ProgressBar(persist=False, desc='Validating').attach(self._validator)
            self._trainer.add_event_handler(Events.EPOCH_COMPLETED, self._validate)
        self._tester = Engine(self._test_batch)
        AverageMetric().attach(self._tester)
        ProgressBar(persist=False, desc='Testing').attach(self._tester)
        self._trainer.add_event_handler(Events.EPOCH_COMPLETED, self._test_and_log)
        if should_checkpoint_latest:
            self._trainer.add_event_handler(Events.EPOCH_COMPLETED, (lambda _: self._save_checkpoint('latest')))
        # A missing checkpoint is not an error: training just starts fresh.
        try:
            self._load_checkpoint(checkpoint_to_load)
        except FileNotFoundError:
            print(f"Did not find `{checkpoint_to_load}' checkpoint.", file=sys.stderr)

    def train(self):
        self._trainer.run(data=self._train_loader, max_epochs=self._max_epochs)

    def test(self):
        self._module.eval()
        return self._tester.run(data=self._test_loader).metrics

    def _train_batch(self, engine, batch):
        # One optimisation step: each named loss is backpropagated with only
        # its own parameter group requiring grad, then all optimisers step.
        self._module.train()
        (x, _) = batch
        x = x.to(self._device)
        for (param_name, opt) in self._opts.items():
            self._set_requires_grad(param_name, True)
            opt.zero_grad()
        all_values = self._train_metrics(self._module, x)
        for (param_name, loss) in all_values['losses'].items():
            self._isolate_params(param_name)
            loss.backward()
            self._clip_grad_norms(param_name)
        for (param_name, opt) in self._opts.items():
            opt.step()
            self._lr_schedulers[param_name].step()
        return {'metrics': all_values['losses']}

    def _isolate_params(self, param_name):
        # Enable grad only for `param_name`'s parameters.
        for other_param_name in self._opts:
            self._set_requires_grad(other_param_name, False)
        self._set_requires_grad(param_name, True)

    def _set_requires_grad(self, param_name, requires_grad):
        for param in self._iter_params(param_name):
            param.requires_grad = requires_grad

    def _clip_grad_norms(self, param_name):
        if (self._max_grad_norm is not None):
            # NOTE: clips each parameter's norm individually, not the joint
            # norm of the whole group.
            for param in self._iter_params(param_name):
                torch.nn.utils.clip_grad_norm_(param, self._max_grad_norm)

    def _iter_params(self, param_name):
        # Yield every parameter managed by the optimiser for `param_name`.
        for group in self._opts[param_name].param_groups:
            for param in group['params']:
                (yield param)

    @torch.no_grad()
    def _test_and_log(self, engine):
        epoch = engine.state.epoch
        # Run the test set every `epochs_per_test` epochs (including epoch 1).
        if (((epoch - 1) % self._epochs_per_test) == 0):
            for (k, v) in self.test().items():
                self._writer.write_scalar(f'test/{k}', v, global_step=engine.state.epoch)
                # Preserve the state that produced a non-finite metric.
                if (not torch.isfinite(v)):
                    self._save_checkpoint(tag='nan_during_test')
            self._visualizer.visualize(self._module, epoch)

    def _test_batch(self, engine, batch):
        (x, _) = batch
        x = x.to(self._device)
        return {'metrics': self._test_metrics(self._module, x)}

    @torch.no_grad()
    def _validate(self, engine):
        # Early-stopping logic: track the best validation loss and terminate
        # after `max_bad_valid_epochs` epochs without improvement.
        self._module.eval()
        state = self._validator.run(data=self._valid_loader)
        valid_loss = state.metrics['loss']
        if (valid_loss < self._best_valid_loss):
            print(f'Best validation loss {valid_loss} after epoch {engine.state.epoch}')
            self._num_bad_valid_epochs = 0
            self._best_valid_loss = valid_loss
            if self._should_checkpoint_best_valid:
                self._save_checkpoint(tag='best_valid')
        else:
            if (not torch.isfinite(valid_loss)):
                self._save_checkpoint(tag='nan_during_validation')
            self._num_bad_valid_epochs += 1
            if (self._num_bad_valid_epochs > self._max_bad_valid_epochs):
                print(f'No validation improvement after {self._num_bad_valid_epochs} epochs. Terminating.')
                self._trainer.terminate()

    def _validate_batch(self, engine, batch):
        (x, _) = batch
        x = x.to(self._device)
        return {'metrics': {'loss': self._valid_loss(self._module, x)}}

    def _log_training_info(self, engine):
        i = engine.state.iteration
        if ((i % self._STEPS_PER_LOSS_WRITE) == 0):
            for (k, v) in engine.state.output['metrics'].items():
                self._writer.write_scalar(f'train/{k}', v, global_step=i)
        if ((i % self._STEPS_PER_GRAD_WRITE) == 0):
            for param_name in self._opts:
                self._writer.write_scalar(f'train/grad-norm-{param_name}', self._get_grad_norm(param_name), global_step=i)
        if ((i % self._STEPS_PER_LR_WRITE) == 0):
            for param_name in self._opts:
                self._writer.write_scalar(f'train/lr-{param_name}', self._get_lr(param_name), global_step=i)

    def _get_grad_norm(self, param_name):
        # Joint L2 norm over all gradients in the parameter group.
        norm = 0
        for param in self._iter_params(param_name):
            if (param.grad is not None):
                norm += (param.grad.norm().item() ** 2)
        return np.sqrt(norm)

    def _get_lr(self, param_name):
        # Assumes exactly one param_group per optimiser (unpack enforces it).
        (param_group,) = self._opts[param_name].param_groups
        return param_group['lr']

    def _save_checkpoint(self, tag):
        checkpoint = {'epoch': self._trainer.state.epoch, 'iteration': self._trainer.state.iteration, 'module_state_dict': self._module.state_dict(), 'opt_state_dicts': {param_name: opt.state_dict() for (param_name, opt) in self._opts.items()}, 'lr_scheduler_state_dicts': self._get_lr_scheduler_state_dicts(), 'best_valid_loss': self._best_valid_loss, 'num_bad_valid_epochs': self._num_bad_valid_epochs}
        self._writer.write_checkpoint(tag, checkpoint)

    def _get_lr_scheduler_state_dicts(self):
        # Silence the PyTorch warning about saving scheduler state without
        # the optimiser; optimiser state is saved separately above.
        with warnings.catch_warnings():
            warnings.filterwarnings('ignore', message='Please also save or load the state of the optimizer when saving or loading the scheduler.')
            return {param_name: lr_scheduler.state_dict() for (param_name, lr_scheduler) in self._lr_schedulers.items()}

    def _load_checkpoint(self, tag):
        checkpoint = self._writer.load_checkpoint(tag, device=self._device)

        # Epoch/iteration counters must be restored after the engine starts,
        # so defer that part to the STARTED event.
        @self._trainer.on(Events.STARTED)
        def resume_trainer_state(engine):
            engine.state.epoch = checkpoint['epoch']
            engine.state.iteration = checkpoint['iteration']
        self._module.load_state_dict(checkpoint['module_state_dict'])
        for (param_name, state_dict) in checkpoint['opt_state_dicts'].items():
            self._opts[param_name].load_state_dict(state_dict)
        for (param_name, state_dict) in checkpoint['lr_scheduler_state_dicts'].items():
            self._lr_schedulers[param_name].load_state_dict(state_dict)
        self._best_valid_loss = checkpoint['best_valid_loss']
        self._num_bad_valid_epochs = checkpoint['num_bad_valid_epochs']
        print(f"Loaded checkpoint `{tag}' after epoch {checkpoint['epoch']}", file=sys.stderr)
class Tee():
    """File-like object duplicating every write to two underlying files."""

    def __init__(self, primary_file, secondary_file):
        self.primary_file = primary_file
        self.secondary_file = secondary_file
        # Mirror the primary file's encoding so callers can treat us like it.
        self.encoding = self.primary_file.encoding

    def isatty(self):
        return self.primary_file.isatty()

    def fileno(self):
        return self.primary_file.fileno()

    def write(self, data):
        # Text-mode sinks cannot accept bytes, so decode first.
        if isinstance(data, bytes):
            data = data.decode()
        for sink in (self.primary_file, self.secondary_file):
            sink.write(data)

    def flush(self):
        for sink in (self.primary_file, self.secondary_file):
            sink.flush()
class Writer():
    # Wraps a TensorBoard SummaryWriter: namespaces tags under `tag_group`,
    # tees stdout/stderr into files inside the log directory, and saves /
    # loads checkpoints atomically.

    # Keep references to the original streams so constructing several Writers
    # does not nest Tee objects.
    _STDOUT = sys.stdout
    _STDERR = sys.stderr

    def __init__(self, logdir, make_subdir, tag_group):
        if make_subdir:
            # Place this run in a timestamped subdirectory of `logdir`.
            os.makedirs(logdir, exist_ok=True)
            timestamp = f"{datetime.datetime.now().strftime('%b%d_%H-%M-%S')}"
            logdir = os.path.join(logdir, timestamp)
        self._writer = SummaryWriter(logdir=logdir)
        assert (logdir == self._writer.logdir)
        self._logdir = logdir
        self._tag_group = tag_group
        # Line-buffer the log files so output appears promptly on disk.
        LINE_BUFFERING = 1
        sys.stdout = Tee(primary_file=self._STDOUT, secondary_file=open(os.path.join(logdir, 'stdout'), 'a', buffering=LINE_BUFFERING))
        sys.stderr = Tee(primary_file=self._STDERR, secondary_file=open(os.path.join(logdir, 'stderr'), 'a', buffering=LINE_BUFFERING))

    def write_scalar(self, tag, scalar_value, global_step=None):
        self._writer.add_scalar(self._tag(tag), scalar_value, global_step=global_step)

    def write_image(self, tag, img_tensor, global_step=None):
        self._writer.add_image(self._tag(tag), img_tensor, global_step=global_step)

    def write_figure(self, tag, figure, global_step=None):
        self._writer.add_figure(self._tag(tag), figure, global_step=global_step)

    def write_hparams(self, hparam_dict=None, metric_dict=None):
        self._writer.add_hparams(hparam_dict=hparam_dict, metric_dict=metric_dict)

    def write_json(self, tag, data):
        text = json.dumps(data, indent=4)
        # Indent every line by 4 spaces so TensorBoard renders the JSON as a
        # code block; also persist the raw JSON next to the event files.
        self._writer.add_text(self._tag(tag), ((4 * ' ') + text.replace('\n', ('\n' + (4 * ' ')))))
        json_path = os.path.join(self._logdir, f'{tag}.json')
        with open(json_path, 'w') as f:
            f.write(text)

    def write_textfile(self, tag, text):
        path = os.path.join(self._logdir, f'{tag}.txt')
        with open(path, 'w') as f:
            f.write(text)

    def write_checkpoint(self, tag, data):
        os.makedirs(self._checkpoints_dir, exist_ok=True)
        checkpoint_path = self._checkpoint_path(tag)
        # Write to a temp file then atomically rename, so a crash mid-save
        # cannot corrupt an existing checkpoint.
        tmp_checkpoint_path = os.path.join(os.path.dirname(checkpoint_path), f'{os.path.basename(checkpoint_path)}.tmp')
        torch.save(data, tmp_checkpoint_path)
        os.replace(tmp_checkpoint_path, checkpoint_path)

    def load_checkpoint(self, tag, device):
        return torch.load(self._checkpoint_path(tag), map_location=device)

    def _checkpoint_path(self, tag):
        return os.path.join(self._checkpoints_dir, f'{tag}.pt')

    @property
    def _checkpoints_dir(self):
        return os.path.join(self._logdir, 'checkpoints')

    def _tag(self, tag):
        # Prefix every tag with the group, e.g. 'loss' -> 'train/loss'.
        return f'{self._tag_group}/{tag}'
class DummyWriter(Writer):
    """Writer stand-in that discards all output but can still load checkpoints."""

    def __init__(self, logdir):
        # Deliberately skip Writer.__init__: no SummaryWriter, no stream tee.
        self._logdir = logdir

    def write_scalar(self, tag, scalar_value, global_step=None):
        pass

    def write_image(self, tag, img_tensor, global_step=None):
        pass

    def write_figure(self, tag, figure, global_step=None):
        pass

    def write_hparams(self, hparam_dict=None, metric_dict=None):
        pass

    def write_json(self, tag, data):
        pass

    def write_textfile(self, tag, text):
        pass

    def write_checkpoint(self, tag, data):
        pass

    def load_checkpoint(self, tag, device):
        # Without a log directory there is nothing to load from.
        if self._logdir is None:
            raise FileNotFoundError
        return super().load_checkpoint(tag, device)
def get_config_group(dataset):
    """Return the name of the registered config group containing `dataset`."""
    matches = (g for (g, data) in CONFIG_GROUPS.items() if dataset in data['datasets'])
    found = next(matches, None)
    assert found is not None, f"Dataset `{dataset}' not found"
    return found
def get_datasets():
    """List every dataset registered across all config groups."""
    return [d for items in CONFIG_GROUPS.values() for d in items['datasets']]
def get_models():
    """List every model name registered across all config groups."""
    return [m for items in CONFIG_GROUPS.values() for m in items['model_configs']]
def get_base_config(dataset, use_baseline):
    """Build the base config for `dataset` via its group's base-config function."""
    group_data = CONFIG_GROUPS[get_config_group(dataset)]
    return group_data['base_config'](dataset, use_baseline)
def get_model_config(dataset, model, use_baseline):
    """Build the model-specific config for `model` on `dataset`."""
    group_data = CONFIG_GROUPS[get_config_group(dataset)]
    model_config_fn = group_data['model_configs'][model]
    return model_config_fn(dataset, model, use_baseline)
def get_config(dataset, model, use_baseline):
    """Merge base and model configs; for baselines, strip all conditioning-net
    settings and force the no-latent (u-channel) configuration."""
    config = {**get_base_config(dataset, use_baseline), **get_model_config(dataset, model, use_baseline)}
    if use_baseline:
        for key in ('s_nets', 't_nets', 'st_nets'):
            config.pop(key, None)
        for prefix in ('p', 'q'):
            for suffix in ('', '_mu', '_sigma'):
                config.pop(f'{prefix}{suffix}_nets', None)
        config = {
            **config,
            'num_u_channels': 0,
            'use_cond_affine': False,
            'pure_cond_affine': False,
            'num_valid_importance_samples': 1,
            'num_test_importance_samples': 1,
        }
    assert 'model' not in config, "Should not specify `model' in config"
    assert 'dataset' not in config, "Should not specify `dataset' in config"
    return {'model': model, 'dataset': dataset, **config}
def expand_grid_generator(config):
    """Lazily yield every concrete config reachable by choosing one value for
    each GridParams entry, recursing into nested dicts."""
    if not config:
        yield {}
        return
    (head_key, head_val), *tail = config.items()
    for partial in expand_grid_generator(dict(tail)):
        if isinstance(head_val, GridParams):
            for choice in head_val:
                yield {head_key: choice, **partial}
        elif isinstance(head_val, dict):
            for sub_config in expand_grid_generator(head_val):
                yield {head_key: sub_config, **partial}
        else:
            yield {head_key: head_val, **partial}
def expand_grid(config):
    """Materialize expand_grid_generator's output into a list."""
    return [*expand_grid_generator(config)]
def group(group, datasets):
    """Register a new config group named `group` owning `datasets`.

    Subsequent @base / @provides decorators attach configs to this group.
    Raises AssertionError if the group already exists or a dataset is
    already owned by another group.
    """
    global CURRENT_CONFIG_GROUP
    assert group not in CONFIG_GROUPS, f"Already exists group `{group}'"
    for dataset in datasets:
        for (existing_group, group_data) in CONFIG_GROUPS.items():
            # Bug fix: the message previously reported the *new* group name
            # instead of the group that already owns the dataset.
            assert dataset not in group_data['datasets'], f"Dataset `{dataset}' already registered in group `{existing_group}'"
    CONFIG_GROUPS[group] = {'datasets': datasets, 'base_config': None, 'model_configs': {}}
    CURRENT_CONFIG_GROUP = group
def base(f):
    """Decorator registering `f` as the current group's base-config function."""
    current = CONFIG_GROUPS[CURRENT_CONFIG_GROUP]
    assert current['base_config'] is None, 'Already exists a base config'
    current['base_config'] = f
    return f
def provides(*models):
    """Decorator factory registering a config function under each name in `models`."""
    def store_and_return(f):
        assert CURRENT_CONFIG_GROUP is not None, 'Must register a config group first'
        model_configs = CONFIG_GROUPS[CURRENT_CONFIG_GROUP]['model_configs']
        for m in models:
            assert m not in model_configs, f"Already exists model `{m}' in group `{CURRENT_CONFIG_GROUP}'"
            model_configs[m] = f
        return f
    return store_and_return
class GridParams():
    """Marks a set of values to be expanded into a hyperparameter grid."""

    def __init__(self, *values):
        self.values = values

    def __iter__(self):
        yield from self.values

    def __repr__(self):
        inner = ', '.join(str(v) for v in self.values)
        return f'{self.__class__.__name__}({inner})'
@base
def config(dataset, use_baseline):
    """Base config shared by datasets in the current group (no baseline variant)."""
    assert not use_baseline, 'Cannot use baseline model for this config'
    return dict(
        pure_cond_affine=False,
        dequantize=False,
        batch_norm=False,
        act_norm=False,
        max_epochs=2000,
        max_grad_norm=None,
        early_stopping=True,
        max_bad_valid_epochs=50,
        train_batch_size=1000,
        valid_batch_size=1000,
        test_batch_size=10000,
        opt='adam',
        lr=0.01,
        lr_schedule='none',
        weight_decay=0.0,
        epochs_per_test=5,
        train_objective='iwae',
        num_train_importance_samples=10,
        num_valid_importance_samples=10,
        num_test_importance_samples=100,
    )
@provides('vae')
def vae(dataset, model, use_baseline):
    """Gaussian VAE model config."""
    return dict(
        schema_type='gaussian-vae',
        use_cond_affine=False,
        num_z_channels=1,
        p_mu_nets=[],
        p_sigma_nets='learned-constant',
        q_nets=[10, 10],
    )
@base
def config(dataset, use_baseline):
    """Base config shared by datasets in the current group."""
    return dict(
        num_u_channels=1,
        use_cond_affine=True,
        pure_cond_affine=False,
        dequantize=True,
        act_norm=False,
        batch_norm=True,
        batch_norm_apply_affine=use_baseline,
        batch_norm_use_running_averages=True,
        batch_norm_momentum=0.1,
        lr_schedule='none',
        max_bad_valid_epochs=50,
        max_grad_norm=None,
        max_epochs=1000,
        epochs_per_test=1,
        early_stopping=True,
        train_objective='iwae',
        num_train_importance_samples=1,
        num_valid_importance_samples=5,
        num_test_importance_samples=10,
    )
@provides('bernoulli-vae')
def bernoulli_vae(dataset, model, use_baseline):
    """Bernoulli VAE model config (no baseline variant)."""
    assert not use_baseline, 'Cannot use baseline model for this config'
    return dict(
        schema_type='bernoulli-vae',
        dequantize=False,
        binarize_scale=255,
        logit_net=[200] * 2,
        q_nets=[200] * 2,
        num_z_channels=50,
        train_batch_size=100,
        valid_batch_size=500,
        test_batch_size=500,
        opt='adam',
        lr=0.0001,
        weight_decay=0.0,
    )
@provides('realnvp')
def realnvp(dataset, model, use_baseline):
    """Multiscale RealNVP config; logit-transform parameters depend on dataset."""
    if use_baseline:
        g_hidden = [64] * 8
    else:
        g_hidden = [64] * 4
    config = dict(
        schema_type='multiscale-realnvp',
        g_hidden_channels=g_hidden,
        st_nets=[8] * 2,
        p_nets=[64] * 2,
        q_nets=[64] * 2,
        train_batch_size=100,
        valid_batch_size=500,
        test_batch_size=500,
        opt='adam',
        lr=0.0001,
        weight_decay=0.0,
    )
    if dataset in ('cifar10', 'svhn'):
        config['logit_tf_lambda'] = 0.05
        config['logit_tf_scale'] = 256
    elif dataset in ('mnist', 'fashion-mnist'):
        config['logit_tf_lambda'] = 1e-06
        config['logit_tf_scale'] = 256
    return config
@provides('glow')
def glow(dataset, model, use_baseline):
    """Glow config; only supports 3x32x32 image datasets."""
    assert dataset in ['cifar10', 'svhn'], 'Currently only implemented for images of size 3x32x32'
    warnings.warn('Glow may quickly diverge for certain random seeds - if this happens just retry. This behaviour appears to be consistent with that in https://github.com/openai/glow and https://github.com/y0ast/Glow-PyTorch')
    if use_baseline:
        config = dict(
            num_scales=3,
            num_steps_per_scale=32,
            g_num_hidden_channels=512,
            valid_batch_size=500,
            test_batch_size=500,
        )
    else:
        config = dict(
            num_scales=2,
            num_steps_per_scale=32,
            g_num_hidden_channels=256,
            st_nets=64,
            p_nets=128,
            q_nets=128,
            valid_batch_size=100,
            test_batch_size=100,
        )
    config.update(dict(
        schema_type='glow',
        early_stopping=False,
        train_batch_size=64,
        opt='adamax',
        lr=0.0005,
        weight_decay=0.1 if dataset in ['cifar10'] else 0.0,
        centering_tf_scale=256,
    ))
    return config
@provides('resflow-small')
def resflow(dataset, model, use_baseline):
    """Small multiscale residual-flow config."""
    logit_lambdas = {'mnist': 1e-06, 'fashion-mnist': 1e-06, 'cifar10': 0.05, 'svhn': 0.05}
    return dict(
        schema_type='multiscale-resflow',
        train_batch_size=64,
        valid_batch_size=128,
        test_batch_size=128,
        epochs_per_test=5,
        opt='adam',
        lr=0.001,
        weight_decay=0.0,
        logit_tf_lambda=logit_lambdas[dataset],
        logit_tf_scale=256,
        batch_norm=False,
        act_norm=True,
        reduce_memory=True,
        scales=[4] * 3,
        num_hidden_channels=128,
        lipschitz_constant=0.98,
        max_train_lipschitz_iters=None,
        max_test_lipschitz_iters=None,
        lipschitz_tolerance=0.001,
        num_output_fc_blocks=4,
        output_fc_hidden_channels=[64] * 2,
        st_nets=[32] * 2,
        p_nets=[32] * 2,
        q_nets=[32] * 2,
    )
def get_schema(config):
    """Assemble the full layer schema: base density layers, optional
    conditional affines, p/q coupler settings, preprocessing, and the
    configured replacement for 'normalise' placeholder layers."""
    layers = get_base_schema(config=config)
    if config['pure_cond_affine']:
        assert config['use_cond_affine']
        layers = remove_non_normalise_layers(schema=layers)
    if config['use_cond_affine']:
        assert config['num_u_channels'] > 0
        layers = add_cond_affine_before_each_normalise(schema=layers, config=config)
    layers = apply_pq_coupler_config_settings(schema=layers, config=config)
    layers = get_preproc_schema(config=config) + layers
    # Batch norm and act norm are mutually exclusive replacements.
    assert not (config['batch_norm'] and config['act_norm'])
    if config['batch_norm']:
        layers = replace_normalise_with_batch_norm(schema=layers, config=config)
    elif config['act_norm']:
        layers = replace_normalise_with_act_norm(schema=layers)
    else:
        layers = remove_normalise_layers(schema=layers)
    return layers
def get_preproc_schema(config):
    """Build the preprocessing layers: optional dequantization and
    binarization, then either a logit or a centering transform."""
    schema = [{'type': 'dequantization'}] if config['dequantize'] else []
    if config.get('binarize_scale') is not None:
        schema += get_binarize_schema(config['binarize_scale'])
    has_logit_tf = (config.get('logit_tf_lambda') is not None) and (config.get('logit_tf_scale') is not None)
    if has_logit_tf:
        assert config.get('centering_tf_scale') is None
        schema += get_logit_tf_schema(lam=config['logit_tf_lambda'], scale=config['logit_tf_scale'])
    elif config.get('centering_tf_scale') is not None:
        assert config.get('logit_tf_lambda') is None
        assert config.get('logit_tf_scale') is None
        schema += get_centering_tf_schema(scale=config['centering_tf_scale'])
    return schema
def get_base_schema(config):
    """Dispatch to the schema builder matching ``config['schema_type']``.

    Builders are wrapped in lambdas so that config keys are only read for
    the selected schema type.
    """
    ty = config['schema_type']
    builders = {
        'multiscale-realnvp': lambda: get_multiscale_realnvp_schema(coupler_hidden_channels=config['g_hidden_channels']),
        'flat-realnvp': lambda: get_flat_realnvp_schema(config=config),
        'maf': lambda: get_maf_schema(num_density_layers=config['num_density_layers'], hidden_channels=config['ar_map_hidden_channels']),
        'sos': lambda: get_sos_schema(num_density_layers=config['num_density_layers'], hidden_channels=config['g_hidden_channels'], num_polynomials_per_layer=config['num_polynomials_per_layer'], polynomial_degree=config['polynomial_degree']),
        'nsf': lambda: get_nsf_schema(config=config),
        'bnaf': lambda: get_bnaf_schema(num_density_layers=config['num_density_layers'], num_hidden_layers=config['num_hidden_layers'], activation=config['activation'], hidden_channels_factor=config['hidden_channels_factor']),
        'glow': lambda: get_glow_schema(num_scales=config['num_scales'], num_steps_per_scale=config['num_steps_per_scale'], coupler_num_hidden_channels=config['g_num_hidden_channels'], lu_decomposition=True),
        'ffjord': lambda: get_ffjord_schema(num_density_layers=config['num_density_layers'], velocity_hidden_channels=config['hidden_channels'], numerical_tolerance=config['numerical_tolerance'], num_u_channels=config['num_u_channels']),
        'planar': lambda: get_planar_schema(config=config),
        'cond-affine': lambda: get_cond_affine_schema(config=config),
        'affine': lambda: get_affine_schema(config=config),
        'flat-resflow': lambda: get_flat_resflow_schema(config=config),
        'multiscale-resflow': lambda: get_multiscale_resflow_schema(config=config),
        'bernoulli-vae': lambda: get_bernoulli_vae_schema(config=config),
        'gaussian-vae': lambda: get_gaussian_vae_schema(config=config),
    }
    assert ty in builders, f"Invalid schema type `{ty}'"
    return builders[ty]()
def remove_non_normalise_layers(schema):
    """Keep only the 'normalise' layers of `schema`."""
    return list(filter(lambda layer: layer['type'] == 'normalise', schema))
def remove_normalise_layers(schema):
    """Drop every 'normalise' layer from `schema`."""
    kept = []
    for layer in schema:
        if layer['type'] != 'normalise':
            kept.append(layer)
    return kept
def replace_normalise_with_batch_norm(schema, config):
    """Replace each 'normalise' layer with a batch-norm layer.

    With running averages enabled the configured momentum is used; otherwise
    momentum is 1.0 and a passthrough warm-up layer is prepended so batch
    statistics get populated before evaluation.
    """
    if config['batch_norm_use_running_averages']:
        new_schema = []
        momentum = config['batch_norm_momentum']
    else:
        new_schema = [{'type': 'passthrough-before-eval', 'num_passthrough_data_points': 100000}]
        momentum = 1.0
    # Fix: `apply_affine` was previously computed but never used; the dict
    # literal re-read the config key directly.
    apply_affine = config['batch_norm_apply_affine']
    for layer in schema:
        if layer['type'] == 'normalise':
            new_schema.append({'type': 'batch-norm', 'per_channel': True, 'momentum': momentum, 'apply_affine': apply_affine})
        else:
            new_schema.append(layer)
    return new_schema
def replace_normalise_with_act_norm(schema):
    """Replace each 'normalise' layer with an 'act-norm' layer."""
    return [
        {'type': 'act-norm'} if layer['type'] == 'normalise' else layer
        for layer in schema
    ]
def add_cond_affine_before_each_normalise(schema, config):
    """Insert a conditional-affine layer before every 'normalise' layer,
    tracking whether a 'flatten' layer has already been passed."""
    result = []
    seen_flatten = False
    for layer in schema:
        layer_type = layer['type']
        if layer_type == 'flatten':
            seen_flatten = True
        if layer_type == 'normalise':
            result.append(get_cond_affine_layer(config, seen_flatten))
        result.append(layer)
    return result
def apply_pq_coupler_config_settings(schema, config):
    """Attach p/q coupler configs to every layer with latent (u) channels."""
    result = []
    seen_flatten = False
    for layer in schema:
        if layer['type'] == 'flatten':
            seen_flatten = True
        if layer.get('num_u_channels', 0) > 0:
            layer = {
                **layer,
                'p_coupler': get_p_coupler_config(config, seen_flatten),
                'q_coupler': get_q_coupler_config(config, seen_flatten),
            }
        result.append(layer)
    return result
def get_binarize_schema(scale):
    """Single-layer schema that binarizes inputs at the given scale."""
    layer = {'type': 'binarize', 'scale': scale}
    return [layer]
def get_logit_tf_schema(lam, scale):
    """Logit preprocessing layers: x -> logit(lam + (1 - 2*lam) * x / scale)."""
    mult = (1 - (2 * lam)) / scale
    return [
        {'type': 'scalar-mult', 'value': mult},
        {'type': 'scalar-add', 'value': lam},
        {'type': 'logit'},
    ]
def get_centering_tf_schema(scale):
    """Centering preprocessing layers: x -> x / scale - 0.5."""
    return [
        {'type': 'scalar-mult', 'value': 1 / scale},
        {'type': 'scalar-add', 'value': -0.5},
    ]
def get_cond_affine_layer(config, flattened):
    """Build a conditional-affine layer with its shift/log-scale coupler."""
    return {
        'type': 'cond-affine',
        'num_u_channels': config['num_u_channels'],
        'st_coupler': get_st_coupler_config(config, flattened),
    }
def get_st_coupler_config(config, flattened):
    """Coupler config for the shift ('t') / log-scale ('s') networks."""
    return get_coupler_config(
        shift_prefix='t',
        log_scale_prefix='s',
        shift_log_scale_prefix='st',
        config=config,
        flattened=flattened,
    )
def get_p_coupler_config(config, flattened):
    """Coupler config for the prior ('p') mean / sigma networks."""
    return get_coupler_config(
        shift_prefix='p_mu',
        log_scale_prefix='p_sigma',
        shift_log_scale_prefix='p',
        config=config,
        flattened=flattened,
    )
def get_q_coupler_config(config, flattened):
    """Coupler config for the approximate posterior ('q') mean / sigma networks."""
    return get_coupler_config(
        shift_prefix='q_mu',
        log_scale_prefix='q_sigma',
        shift_log_scale_prefix='q',
        config=config,
        flattened=flattened,
    )
def get_coupler_config(shift_prefix, log_scale_prefix, shift_log_scale_prefix, config, flattened):
    """Resolve coupler net settings from `config`: either two independent nets
    (shift + log-scale) or one shared shift-log-scale net."""
    shift_key = f'{shift_prefix}_nets'
    log_scale_key = f'{log_scale_prefix}_nets'
    shift_log_scale_key = f'{shift_log_scale_prefix}_nets'
    has_split_nets = (shift_key in config) and (log_scale_key in config)
    has_shared_net = shift_log_scale_key in config
    if has_split_nets:
        assert not has_shared_net, 'Over-specified coupler config'
        return {
            'independent_nets': True,
            'shift_net': get_coupler_net_config(config[shift_key], flattened),
            'log_scale_net': get_coupler_net_config(config[log_scale_key], flattened),
        }
    if has_shared_net:
        assert (shift_key not in config) and (log_scale_key not in config), 'Over-specified coupler config'
        return {
            'independent_nets': False,
            'shift_log_scale_net': get_coupler_net_config(config[shift_log_scale_key], flattened),
        }
    assert False, f"Must specify either `{shift_log_scale_key}', or both `{shift_key}' and `{log_scale_key}'"
def get_coupler_net_config(net_spec, flattened):
    """Translate a shorthand net spec into a full net config.

    Accepts 'fixed-constant' / 'learned-constant' / 'identity', a list of
    hidden channel counts, or a single int. After flattening, MLPs are used;
    before flattening, convolutional nets are used.
    """
    if net_spec in ('fixed-constant', 'learned-constant'):
        return {'type': 'constant', 'value': 0, 'fixed': net_spec == 'fixed-constant'}
    if net_spec == 'identity':
        return {'type': 'identity'}
    if isinstance(net_spec, list):
        if flattened:
            return {'type': 'mlp', 'activation': 'tanh', 'hidden_channels': net_spec}
        return {'type': 'resnet', 'hidden_channels': net_spec}
    if isinstance(net_spec, int):
        if flattened:
            return {'type': 'mlp', 'activation': 'tanh', 'hidden_channels': [net_spec] * 2}
        return {'type': 'glow-cnn', 'num_hidden_channels': net_spec, 'zero_init_output': True}
    assert False, f'Invalid net specifier {net_spec}'
def get_multiscale_realnvp_schema(coupler_hidden_channels):
    """Multiscale RealNVP schema: checkerboard ACLs, squeeze, channel-split
    ACLs, split, more checkerboard ACLs; each ACL is followed by 'normalise'."""
    def acl(mask_type, reverse_mask):
        return {'type': 'acl', 'mask_type': mask_type, 'reverse_mask': reverse_mask}

    base_schema = [
        acl('checkerboard', False),
        acl('checkerboard', True),
        acl('checkerboard', False),
        {'type': 'squeeze', 'factor': 2},
        acl('split-channel', True),
        acl('split-channel', False),
        acl('split-channel', True),
        {'type': 'split'},
        acl('checkerboard', False),
        acl('checkerboard', True),
        acl('checkerboard', False),
        acl('checkerboard', True),
    ]
    schema = []
    for layer in base_schema:
        if layer['type'] == 'acl':
            # Fresh coupler dict per layer so nothing is aliased across layers.
            schema.append({
                **layer,
                'num_u_channels': 0,
                'coupler': {
                    'independent_nets': False,
                    'shift_log_scale_net': {'type': 'resnet', 'hidden_channels': coupler_hidden_channels},
                },
            })
            schema.append({'type': 'normalise'})
        else:
            schema.append(layer)
    return schema
def get_glow_schema(num_scales, num_steps_per_scale, coupler_num_hidden_channels, lu_decomposition):
    """Glow schema: per scale, (a split after the first scale,) a squeeze,
    then `num_steps_per_scale` steps of normalise / invertible conv / ACL."""
    schema = []
    for scale_index in range(num_scales):
        if scale_index > 0:
            schema.append({'type': 'split'})
        schema.append({'type': 'squeeze', 'factor': 2})
        for _ in range(num_steps_per_scale):
            schema.append({'type': 'normalise'})
            schema.append({'type': 'invconv', 'lu': lu_decomposition})
            schema.append({
                'type': 'acl',
                'mask_type': 'split-channel',
                'reverse_mask': False,
                'coupler': {
                    'independent_nets': False,
                    'shift_log_scale_net': {
                        'type': 'glow-cnn',
                        'num_hidden_channels': coupler_num_hidden_channels,
                        'zero_init_output': True,
                    },
                },
                'num_u_channels': 0,
            })
    return schema
def get_flat_realnvp_schema(config):
    """Flat RealNVP schema: flatten, then alternating-channel ACLs each
    followed by 'normalise'; coupler nets shared or independent per config."""
    if config['coupler_shared_nets']:
        coupler_config = {
            'independent_nets': False,
            'shift_log_scale_net': {'type': 'mlp', 'hidden_channels': config['coupler_hidden_channels'], 'activation': 'tanh'},
        }
    else:
        coupler_config = {
            'independent_nets': True,
            'shift_net': {'type': 'mlp', 'hidden_channels': config['coupler_hidden_channels'], 'activation': 'relu'},
            'log_scale_net': {'type': 'mlp', 'hidden_channels': config['coupler_hidden_channels'], 'activation': 'tanh'},
        }
    layers = [{'type': 'flatten'}]
    for i in range(config['num_density_layers']):
        layers.append({
            'type': 'acl',
            'mask_type': 'alternating-channel',
            'reverse_mask': (i % 2) != 0,
            'coupler': coupler_config,
            'num_u_channels': 0,
        })
        layers.append({'type': 'normalise'})
    return layers
def get_maf_schema(num_density_layers, hidden_channels):
    """MAF schema: flatten, then MADE + normalise per layer with a 'flip'
    between successive layers."""
    result = [{'type': 'flatten'}]
    for i in range(num_density_layers):
        if i:
            result.append({'type': 'flip'})
        result.append({'type': 'made', 'hidden_channels': hidden_channels, 'activation': 'tanh'})
        result.append({'type': 'normalise'})
    return result
def get_sos_schema(num_density_layers, hidden_channels, num_polynomials_per_layer, polynomial_degree):
    """SOS-flow schema: flatten, then SOS + normalise per layer, with a 'flip'
    between successive layers."""
    result = [{'type': 'flatten'}]
    for i in range(num_density_layers):
        if i:
            result.append({'type': 'flip'})
        result.append({
            'type': 'sos',
            'hidden_channels': hidden_channels,
            'activation': 'tanh',
            'num_polynomials': num_polynomials_per_layer,
            'polynomial_degree': polynomial_degree,
        })
        result.append({'type': 'normalise'})
    return result
def get_nsf_schema(config):
    """Neural spline flow schema: permutation (+ optional linear) blocks
    around each spline layer, each spline followed by 'normalise'."""
    def perm_block():
        # A random channel permutation, optionally followed by a linear layer
        # (skipped when config explicitly sets use_linear to a falsy value).
        if ('use_linear' in config) and (not config['use_linear']):
            return [{'type': 'rand-channel-perm'}]
        return [{'type': 'rand-channel-perm'}, {'type': 'linear'}]

    autoregressive = config['autoregressive']
    result = [{'type': 'flatten'}]
    for i in range(config['num_density_layers']):
        result += perm_block()
        layer = {
            'type': 'nsf-ar' if autoregressive else 'nsf-c',
            'num_hidden_channels': config['num_hidden_channels'],
            'num_hidden_layers': config['num_hidden_layers'],
            'num_bins': config['num_bins'],
            'tail_bound': config['tail_bound'],
            'activation': 'relu',
            'dropout_probability': config['dropout_probability'],
        }
        if not autoregressive:
            # Coupling layers alternate which half is transformed.
            layer['reverse_mask'] = (i % 2) == 0
        result.append(layer)
        result.append({'type': 'normalise'})
    result += perm_block()
    return result
def get_bnaf_schema(num_density_layers, num_hidden_layers, activation, hidden_channels_factor):
    """Build the layer schema for a block neural autoregressive flow (BNAF)."""
    schema = [{'type': 'flatten'}]
    last_idx = num_density_layers - 1
    for idx in range(num_density_layers):
        if idx > 0:
            schema.append({'type': 'flip'})
        schema.append({
            'type': 'bnaf',
            'num_hidden_layers': num_hidden_layers,
            'hidden_channels_factor': hidden_channels_factor,
            'activation': activation,
            # Every BNAF layer except the final one uses a residual connection.
            'residual': idx < last_idx,
        })
        schema.append({'type': 'normalise'})
    return schema
def get_ffjord_schema(num_density_layers, velocity_hidden_channels, numerical_tolerance, num_u_channels):
    """Build the layer schema for FFJORD: a flatten followed by a stack of
    identical ODE layers."""
    ode_layer = {
        'type': 'ode',
        'hidden_channels': velocity_hidden_channels,
        'numerical_tolerance': numerical_tolerance,
        'num_u_channels': num_u_channels,
    }
    # One fresh dict per layer so schema entries remain independent.
    return [{'type': 'flatten'}] + [dict(ode_layer) for _ in range(num_density_layers)]
def get_planar_schema(config):
    """Build the layer schema for (conditional) planar flows.

    With no u channels a plain planar layer is used; otherwise a conditional
    planar layer parameterised by a tanh conditioning net.
    """
    if config['num_u_channels'] == 0:
        flow_layer = {'type': 'planar'}
    else:
        flow_layer = {
            'type': 'cond-planar',
            'num_u_channels': config['num_u_channels'],
            'cond_hidden_channels': config['cond_hidden_channels'],
            'cond_activation': 'tanh',
        }

    schema = [{'type': 'flatten'}]
    for _ in range(config['num_density_layers']):
        schema += [flow_layer, {'type': 'normalise'}]
    return schema
def get_cond_affine_schema(config):
    """Build the schema for the pure conditional-affine model: a flatten
    followed by one normalise entry per density layer."""
    schema = [{'type': 'flatten'}]
    for _ in range(config['num_density_layers']):
        schema.append({'type': 'normalise'})
    return schema
def get_affine_schema(config):
    """Build the schema for a stack of full (non-per-channel) affine layers
    applied to flattened inputs."""
    affine_layer = {'type': 'affine', 'per_channel': False}
    return [{'type': 'flatten'}] + [affine_layer] * config['num_density_layers']
def get_flat_resflow_schema(config):
    """Build the schema for a flat (fully-connected) residual flow:
    resblock + normalise pairs after a flatten."""
    schema = [{'type': 'flatten'}]
    for _ in range(config['num_density_layers']):
        schema.append({
            'type': 'resblock',
            'net': {'type': 'mlp', 'hidden_channels': config['hidden_channels']},
        })
        schema.append({'type': 'normalise'})
    # Copy the Lipschitz-control settings onto every resblock's net config.
    add_lipschitz_config_to_resblocks(schema, config)
    return schema
def get_multiscale_resflow_schema(config):
    """Build the schema for a multiscale (convolutional) residual flow.

    Each scale contributes a squeeze (the first scale gets a normalise
    instead) followed by CNN resblocks; the model then flattens and ends
    with a stack of fully-connected resblocks.
    """
    schema = []

    for scale_idx, num_blocks in enumerate(config['scales']):
        if scale_idx == 0:
            schema.append({'type': 'normalise'})
        else:
            schema.append({'type': 'squeeze', 'factor': 2})
        for _ in range(num_blocks):
            schema.append({
                'type': 'resblock',
                'net': {'type': 'cnn', 'num_hidden_channels': config['num_hidden_channels']},
            })
            schema.append({'type': 'normalise'})

    schema.append({'type': 'flatten'})
    for _ in range(config['num_output_fc_blocks']):
        schema.append({
            'type': 'resblock',
            'net': {'type': 'mlp', 'hidden_channels': config['output_fc_hidden_channels']},
        })
        schema.append({'type': 'normalise'})

    # Copy the Lipschitz-control settings onto every resblock's net config.
    add_lipschitz_config_to_resblocks(schema, config)
    return schema
def add_lipschitz_config_to_resblocks(schema, config):
    """Copy the Lipschitz-control settings from `config` into every resblock
    layer of `schema`, in place.

    Each resblock's `net` sub-config receives the four Lipschitz keys, and
    the resblock itself receives `reduce_memory`.
    """
    lipschitz_keys = (
        'lipschitz_constant',
        'max_train_lipschitz_iters',
        'max_test_lipschitz_iters',
        'lipschitz_tolerance',
    )
    for layer in schema:
        if layer['type'] != 'resblock':
            continue
        for key in lipschitz_keys:
            layer['net'][key] = config[key]
        layer['reduce_memory'] = config['reduce_memory']
def get_bernoulli_vae_schema(config):
    """Build the schema for a VAE with a Bernoulli likelihood over flattened inputs."""
    likelihood_layer = {
        'type': 'bernoulli-likelihood',
        'num_z_channels': config['num_z_channels'],
        'logit_net': {
            'type': 'mlp',
            'activation': 'tanh',
            'hidden_channels': config['logit_net'],
        },
        'q_coupler': get_q_coupler_config(config, flattened=True),
    }
    return [{'type': 'flatten'}, likelihood_layer]
def get_gaussian_vae_schema(config):
    """Build the schema for a VAE with a Gaussian likelihood over flattened inputs."""
    likelihood_layer = {
        'type': 'gaussian-likelihood',
        'num_z_channels': config['num_z_channels'],
        'p_coupler': get_p_coupler_config(config, flattened=True),
        'q_coupler': get_q_coupler_config(config, flattened=True),
    }
    return [{'type': 'flatten'}, likelihood_layer]
@base
def config(dataset, use_baseline):
    """Shared base config for the tabular-dataset experiments."""
    num_u_channels = {
        'gas': 2,
        'power': 2,
        'hepmass': 5,
        'miniboone': 10,
        'bsds300': 15,
    }[dataset]

    return {
        'num_u_channels': num_u_channels,
        'use_cond_affine': True,
        'pure_cond_affine': False,
        'dequantize': False,
        'act_norm': False,
        'batch_norm': True,
        # Only the baseline applies the learned affine transform in batch norm.
        'batch_norm_apply_affine': use_baseline,
        'batch_norm_use_running_averages': False,
        'early_stopping': True,
        'train_batch_size': 1000,
        'valid_batch_size': 5000,
        'test_batch_size': 5000,
        'opt': 'adam',
        'lr': 0.001,
        'lr_schedule': 'none',
        'weight_decay': 0.0,
        'max_bad_valid_epochs': 50,
        'max_epochs': 2000,
        'max_grad_norm': None,
        'epochs_per_test': 5,
        'train_objective': 'iwae',
        'num_train_importance_samples': 1,
        'num_valid_importance_samples': 5,
        'num_test_importance_samples': 10,
    }
@provides('resflow')
def resflow(dataset, model, use_baseline):
    """Residual-flow config for the tabular datasets."""
    config = {
        'schema_type': 'flat-resflow',
        'num_density_layers': 10,
        'hidden_channels': [128] * 4,
        'lipschitz_constant': 0.9,
        'max_train_lipschitz_iters': 5,
        'max_test_lipschitz_iters': 200,
        'lipschitz_tolerance': None,
        'reduce_memory': False,
        'act_norm': False,
        'batch_norm': False,
        'st_nets': [10] * 2,
        'p_nets': [10] * 2,
        'q_nets': [10] * 2,
    }
    if not use_baseline:
        # The CIF variant evaluates with smaller batches.
        config['valid_batch_size'] = 1000
        config['test_batch_size'] = 1000
    return config
@provides('cond-affine')
def cond_affine(dataset, model, use_baseline):
    """Pure conditional-affine config (CIF-only; no baseline counterpart)."""
    assert not use_baseline, 'Cannot use baseline model for this config'
    return {
        'schema_type': 'cond-affine',
        'num_density_layers': 10,
        'batch_norm': False,
        'st_nets': [128] * 2,
        'p_nets': [128] * 2,
        # Grid-search over small and large q nets.
        'q_nets': GridParams([10] * 2, [100] * 4),
    }
@provides('linear-cond-affine-like-resflow')
def linear_cond_affine_like_resflow(dataset, model, use_baseline):
    """Conditional-affine config with fixed-constant s and identity t nets
    (CIF-only), sized per dataset."""
    assert not use_baseline, 'Cannot use baseline model for this config'
    assert dataset != 'bsds300', 'BSDS300 has not yet been tested'

    # Per-dataset latent width — TODO confirm against dataset dimensionality.
    num_u_channels = {'miniboone': 43, 'hepmass': 21, 'gas': 8, 'power': 6}[dataset]

    return {
        'schema_type': 'cond-affine',
        'num_density_layers': 10,
        'num_u_channels': num_u_channels,
        'batch_norm': False,
        's_nets': 'fixed-constant',
        't_nets': 'identity',
        'p_nets': [128] * 4,
        'q_nets': GridParams([10] * 2, [100] * 4),
        'valid_batch_size': 1000,
        'test_batch_size': 1000,
    }
@provides('nonlinear-cond-affine-like-resflow')
def nonlinear_cond_affine_like_resflow(dataset, model, use_baseline):
    """Conditional-affine config with fixed-constant s and learned MLP t nets
    (CIF-only), sized per dataset."""
    assert not use_baseline, 'Cannot use baseline model for this config'
    assert dataset != 'bsds300', 'BSDS300 has not yet been tested'

    # Per-dataset latent width — TODO confirm against dataset dimensionality.
    num_u_channels = {'miniboone': 43, 'hepmass': 21, 'gas': 8, 'power': 6}[dataset]

    return {
        'schema_type': 'cond-affine',
        'num_density_layers': 10,
        'num_u_channels': num_u_channels,
        'batch_norm': False,
        's_nets': 'fixed-constant',
        't_nets': [128] * 2,
        'p_nets': [128] * 2,
        'q_nets': GridParams([10] * 2, [100] * 4),
        'valid_batch_size': 1000,
        'test_batch_size': 1000,
    }
@provides('maf')
def maf(dataset, model, use_baseline):
    """MAF config for the tabular datasets.

    Fix: previously an unrecognised dataset fell through both branches and
    crashed later with a confusing NameError on `config`; it now fails fast
    with an explicit message (matching the style of the NSF provider).
    """
    if dataset in ['gas', 'power']:
        config = {
            'num_density_layers': 10,
            'ar_map_hidden_channels': [200] * 2 if use_baseline else [100] * 2,
            'st_nets': [100] * 2,
            'p_nets': [200] * 2,
            'q_nets': [200] * 2,
        }
    elif dataset in ['hepmass', 'miniboone', 'bsds300']:
        config = {
            'num_density_layers': 10,
            'ar_map_hidden_channels': [512] * 2,
            'st_nets': [128] * 2,
            'p_nets': [128] * 2,
            'q_nets': [128] * 2,
        }
    else:
        assert False, f'Invalid dataset {dataset}'

    config['schema_type'] = 'maf'
    # Only the baseline uses batch norm between layers.
    config['batch_norm'] = use_baseline

    if dataset == 'bsds300':
        config['lr'] = 0.0001

    return config
@provides('realnvp')
def realnvp(dataset, model, use_baseline):
    """RealNVP config for the tabular datasets (shared coupler nets)."""
    return {
        'schema_type': 'flat-realnvp',
        'num_density_layers': 20,
        'coupler_shared_nets': True,
        'coupler_hidden_channels': [1024] * 2,
        'st_nets': [100] * 2,
        'p_nets': [100] * 2,
        'q_nets': [100] * 2,
    }
@provides('sos')
def sos(dataset, model, use_baseline):
    """Sum-of-squares flow config (baseline only)."""
    assert use_baseline, 'A CIF version of this config has not yet been tested'
    return {
        'schema_type': 'sos',
        'num_density_layers': 8,
        'g_hidden_channels': [200] * 2,
        'num_polynomials_per_layer': 5,
        'polynomial_degree': 4,
        'lr': 0.001,
        'opt': 'sgd',
    }
@provides('nsf-ar')
def nsf(dataset, model, use_baseline):
    """Autoregressive NSF config for the tabular datasets.

    The number of epochs is derived from a fixed training-step budget and
    the dataset size / batch size.
    """
    common = {
        'schema_type': 'nsf',
        'autoregressive': True,
        'num_density_layers': 10,
        'tail_bound': 3,
        'batch_norm': False,
        'opt': 'adam',
        'lr_schedule': 'cosine',
        'weight_decay': 0.0,
        'early_stopping': False,
        'max_grad_norm': 5,
        'valid_batch_size': 5000,
        'test_batch_size': 5000,
        'epochs_per_test': 5,
    }

    if dataset in ['power', 'gas', 'hepmass', 'bsds300']:
        dropout = {'power': 0.0, 'gas': 0.1, 'hepmass': 0.2, 'bsds300': 0.2}[dataset]
        dset_size = {'power': 1615917, 'gas': 852174, 'hepmass': 315123, 'bsds300': 1000000}[dataset]
        batch_size = 512
        train_steps = 400000
        per_dataset = {
            'lr': 0.0005,
            'num_hidden_layers': 2,
            'num_hidden_channels': 512 if dataset == 'bsds300' else 256,
            'num_bins': 8,
            'dropout_probability': dropout,
            'st_nets': [100] * 3,
            'p_nets': [200] * 3,
            'q_nets': [10] * 2,
        }
    elif dataset == 'miniboone':
        dset_size = 29556
        batch_size = 64
        train_steps = 250000
        per_dataset = {
            'lr': 0.0003,
            'num_hidden_layers': 1,
            'num_hidden_channels': 64,
            'num_bins': 4,
            'dropout_probability': 0.2,
            'st_nets': [25] * 3,
            'p_nets': [50] * 3,
            'q_nets': [10] * 2,
        }
    else:
        assert False, f'Invalid dataset {dataset}'

    # Convert the step budget into a (rounded) number of epochs.
    steps_per_epoch = dset_size // batch_size
    epochs = int(train_steps / steps_per_epoch + 0.5)

    return {**common, **per_dataset, 'max_epochs': epochs, 'train_batch_size': batch_size}
@base
def config(dataset, use_baseline):
    """Shared base config for this experiment group.

    CIF models get a single u channel and a conditional-affine layer; the
    baseline disables the conditional-affine layer.
    """
    return {
        'num_u_channels': 1,
        'use_cond_affine': not use_baseline,
        'pure_cond_affine': False,
        'dequantize': False,
        'batch_norm': False,
        'act_norm': False,
        'max_epochs': 2000,
        'max_grad_norm': None,
        'early_stopping': True,
        'max_bad_valid_epochs': 250,
        'train_batch_size': 1000,
        'valid_batch_size': 1000,
        'test_batch_size': 10000,
        'opt': 'adam',
        'lr': 0.001,
        'lr_schedule': 'none',
        'weight_decay': 0.0,
        'epochs_per_test': 5,
        'train_objective': 'iwae',
        'num_train_importance_samples': 1,
        'num_valid_importance_samples': 10,
        'num_test_importance_samples': 100,
    }
@provides('resflow')
def resflow(dataset, model, use_baseline):
    """Residual-flow config; effectively disables the epoch/early-stop limits."""
    config = {
        'schema_type': 'flat-resflow',
        'num_density_layers': 10,
        'hidden_channels': [128] * 4,
        'lipschitz_constant': 0.9,
        'max_train_lipschitz_iters': 5,
        'max_test_lipschitz_iters': 200,
        'lipschitz_tolerance': None,
        'reduce_memory': True,
        'st_nets': [10] * 2,
        'p_nets': [10] * 2,
        'q_nets': [10] * 2,
        'max_epochs': 20000,
        'max_bad_valid_epochs': 20000,
    }
    if not use_baseline:
        # The CIF variant evaluates with a smaller test batch.
        config['test_batch_size'] = 1000
    return config
@provides('affine')
def affine(dataset, model, use_baseline):
    """Plain stacked-affine baseline (no CIF variant)."""
    assert use_baseline, 'Must use baseline model for this config'
    return {
        'schema_type': 'affine',
        'num_density_layers': 10,
    }
@provides('maf')
def maf(dataset, model, use_baseline):
    """MAF config; the CIF variant uses fewer density layers."""
    num_layers = 20 if use_baseline else 5
    return {
        'schema_type': 'maf',
        'num_density_layers': num_layers,
        'ar_map_hidden_channels': [50] * 4,
        'st_nets': [10] * 2,
        'p_nets': [50] * 4,
        'q_nets': [50] * 4,
    }
@provides('maf-grid')
def maf_grid(dataset, model, use_baseline):
    """MAF config with grid-searched autoregressive-map and st net sizes."""
    num_layers = 20 if use_baseline else 5
    return {
        'schema_type': 'maf',
        'num_density_layers': num_layers,
        'ar_map_hidden_channels': GridParams([10] * 2, [50] * 4),
        'num_u_channels': 2,
        'st_nets': GridParams([10] * 2, [50] * 4),
        'p_nets': [10] * 2,
        'q_nets': [50] * 4,
    }
@provides('cond-affine-shallow-grid', 'cond-affine-deep-grid')
def cond_affine_grid(dataset, model, use_baseline):
    """Conditional-affine grid configs trading depth against per-layer net size.

    The deep variant uses 5 layers with base-sized nets; the shallow variant
    uses 1 layer with nets scaled up by the same factor of 5.
    """
    assert not use_baseline, 'Cannot use baseline model for this config'

    if 'deep' in model:
        num_layers, net_factor = 5, 1
    else:
        num_layers, net_factor = 1, 5

    return {
        'schema_type': 'cond-affine',
        'num_density_layers': num_layers,
        'num_u_channels': 2,
        'st_nets': GridParams([10] * 2 * net_factor, [50] * 4 * net_factor),
        'p_nets': [10] * 2 * net_factor,
        'q_nets': [50] * 4 * net_factor,
    }
@provides('dlgm-deep', 'dlgm-shallow')
def dlgm_deep(dataset, model, use_baseline):
    """DLGM configs: reuse the matching cond-affine grid config, but replace
    the learned st nets with a fixed-constant s and identity t."""
    assert not use_baseline, 'Cannot use baseline model for this config'

    base_model = 'cond-affine-deep-grid' if 'deep' in model else 'cond-affine-shallow-grid'
    config = cond_affine_grid(dataset=dataset, model=base_model, use_baseline=False)

    del config['st_nets']
    config['s_nets'] = 'fixed-constant'
    config['t_nets'] = 'identity'

    return config
@provides('realnvp')
def realnvp(dataset, model, use_baseline):
    """Single-density-layer RealNVP config with shared coupler nets."""
    return {
        'schema_type': 'flat-realnvp',
        'num_density_layers': 1,
        'coupler_shared_nets': True,
        'coupler_hidden_channels': [10] * 2,
        'use_cond_affine': True,
        'st_nets': [10] * 2,
        'p_nets': [10] * 2,
        'q_nets': [10] * 2,
    }
@provides('sos')
def sos(dataset, model, use_baseline):
    """Sum-of-squares flow config; the CIF variant uses one fewer layer."""
    num_layers = 3 if use_baseline else 2
    return {
        'schema_type': 'sos',
        'num_density_layers': num_layers,
        'g_hidden_channels': [40] * 2,
        'num_polynomials_per_layer': 2,
        'polynomial_degree': 4,
        'st_nets': [40] * 2,
        'p_nets': [40] * 4,
        'q_nets': [40] * 4,
    }
@provides('planar')
def planar(dataset, model, use_baseline):
    """Planar-flow config (conditional-affine layers explicitly disabled)."""
    return {
        'schema_type': 'planar',
        'num_density_layers': 10,
        'use_cond_affine': False,
        'cond_hidden_channels': [10] * 2,
        'p_nets': [50] * 4,
        'q_nets': [10] * 2,
    }
@provides('nsf-ar')
def nsf(dataset, model, use_baseline):
    """Autoregressive NSF config without linear layers between splines."""
    return {
        'schema_type': 'nsf',
        'autoregressive': True,
        'use_linear': False,
        'max_grad_norm': 5,
        'num_density_layers': 5,
        'num_bins': 8,
        'num_hidden_channels': 256,
        'num_hidden_layers': 2,
        'tail_bound': 3,
        'dropout_probability': 0.0,
        'lr_schedule': 'cosine',
        'lr': 0.0005,
        'max_epochs': 1000,
        'st_nets': [10] * 2,
        'p_nets': [10] * 2,
        'q_nets': [10] * 2,
    }
@provides('bnaf')
def bnaf(dataset, model, use_baseline):
    """BNAF config; the CIF variant slightly shrinks the hidden-channel factor."""
    factor = 50 if use_baseline else 45
    return {
        'schema_type': 'bnaf',
        'num_density_layers': 1,
        'num_hidden_layers': 2,
        'hidden_channels_factor': factor,
        'activation': 'soft-leaky-relu',
        'st_nets': [24] * 2,
        'p_nets': [24] * 3,
        'q_nets': [24] * 3,
        'test_batch_size': 1000,
    }
@provides('ffjord')
def ffjord(dataset, model, use_baseline):
    """Config for FFJORD.

    NOTE(review): deliberately disabled — the unconditional raise makes the
    return below unreachable; the dict is kept only as a record of the
    intended hyperparameters.
    """
    raise NotImplementedError('Currently broken; require changes in experiment.py')
    return {'schema_type': 'ffjord', 'num_density_layers': 1, 'hidden_channels': ([64] * 3), 'numerical_tolerance': 1e-05, 'st_nets': ([24] * 2), 'p_nets': ([24] * 3), 'q_nets': ([24] * 3)}
def parse_config_arg(key_value):
    """Parse a single `key=value` command-line override into a (key, value) pair.

    The value is interpreted as a Python literal where possible (numbers,
    booleans, lists, ...); otherwise it is kept as a plain string.

    Fix: `ast.literal_eval` raises SyntaxError (not ValueError) on values
    that are not even parseable expressions, e.g. `msg=hello world` or
    `a=b=c`; previously these crashed instead of falling back to a string.
    """
    assert '=' in key_value, "Must specify config items with format `key=value'"

    k, v = key_value.split('=', maxsplit=1)

    assert k, "Config item can't have empty key"
    assert v, "Config item can't have empty value"

    try:
        v = ast.literal_eval(v)
    except (ValueError, SyntaxError):
        # Not a Python literal: keep the raw string (e.g. `opt=adam`).
        pass

    return k, v
def test_cif_realnvp_config():
    """The CIF RealNVP MNIST config must match this frozen snapshot exactly."""
    expected = {
        'schema_type': 'multiscale-realnvp',
        'use_cond_affine': True,
        'pure_cond_affine': False,
        'g_hidden_channels': [64, 64, 64, 64],
        'num_u_channels': 1,
        'st_nets': [8, 8],
        'p_nets': [64, 64],
        'q_nets': [64, 64],
        'early_stopping': True,
        'train_batch_size': 100,
        'valid_batch_size': 500,
        'test_batch_size': 500,
        'opt': 'adam',
        'lr': 0.0001,
        'weight_decay': 0.0,
        'logit_tf_lambda': 1e-06,
        'logit_tf_scale': 256,
        'dequantize': True,
        'act_norm': False,
        'batch_norm': True,
        'batch_norm_apply_affine': False,
        'batch_norm_use_running_averages': True,
        'batch_norm_momentum': 0.1,
        'lr_schedule': 'none',
        'max_bad_valid_epochs': 50,
        'max_grad_norm': None,
        'max_epochs': 1000,
        'epochs_per_test': 1,
        'train_objective': 'iwae',
        'num_train_importance_samples': 1,
        'num_valid_importance_samples': 5,
        'num_test_importance_samples': 10,
    }
    actual = get_config(dataset='mnist', model='realnvp', use_baseline=False)
    assert actual == expected
def test_baseline_realnvp_config():
    """The baseline RealNVP MNIST config must match this frozen snapshot exactly."""
    expected = {
        'schema_type': 'multiscale-realnvp',
        'use_cond_affine': False,
        'pure_cond_affine': False,
        'g_hidden_channels': [64, 64, 64, 64, 64, 64, 64, 64],
        'num_u_channels': 0,
        'early_stopping': True,
        'train_batch_size': 100,
        'valid_batch_size': 500,
        'test_batch_size': 500,
        'opt': 'adam',
        'lr': 0.0001,
        'weight_decay': 0.0,
        'logit_tf_lambda': 1e-06,
        'logit_tf_scale': 256,
        'dequantize': True,
        'act_norm': False,
        'batch_norm': True,
        'batch_norm_apply_affine': True,
        'batch_norm_use_running_averages': True,
        'batch_norm_momentum': 0.1,
        'lr_schedule': 'none',
        'max_bad_valid_epochs': 50,
        'max_grad_norm': None,
        'max_epochs': 1000,
        'epochs_per_test': 1,
        'train_objective': 'iwae',
        'num_train_importance_samples': 1,
        'num_valid_importance_samples': 1,
        'num_test_importance_samples': 1,
    }
    actual = get_config(dataset='mnist', model='realnvp', use_baseline=True)
    assert actual == expected
class TestDiagonalGaussianDensity(unittest.TestCase):
    """Checks DiagonalGaussianDensity against scipy's multivariate normal."""

    def setUp(self):
        # Random mean over a multi-axis event shape.
        self.shape = (10, 4, 2)
        self.mean = torch.rand(self.shape)
        # stddev lands in [1, 2): strictly positive and bounded away from zero.
        self.stddev = (1 + (torch.rand(self.shape) ** 2))
        self.density = DiagonalGaussianDensity(self.mean, self.stddev, num_fixed_samples=64)
        # Reference density: scipy takes the diagonal covariance as a flat vector.
        flat_mean = self.mean.flatten().numpy()
        flat_vars = (self.stddev ** 2).flatten().numpy()
        self.scipy_density = stats.multivariate_normal(mean=flat_mean, cov=flat_vars)

    def test_elbo(self):
        # The ELBO's 'log-w' output is compared elementwise against scipy's
        # exact log-pdf (single importance sample).
        batch_size = 1000
        num_importance_samples = 1
        noise = torch.rand(batch_size, *self.shape)
        with torch.no_grad():
            log_prob = self.density.elbo(noise, num_importance_samples=num_importance_samples)['log-w']
        flat_noise = noise.flatten(start_dim=1).numpy()
        scipy_log_prob = self.scipy_density.logpdf(flat_noise).reshape(batch_size, 1, num_importance_samples)
        self.assertEqual(log_prob.shape, scipy_log_prob.shape)
        self.assertLessEqual(abs((log_prob.numpy() - scipy_log_prob).max()), 0.0001)

    def test_samples(self):
        # Draw a large sample and check its empirical moments.
        num_samples = 100000
        samples = self.density.sample(num_samples)
        self.assertEqual(samples.shape, (num_samples, *self.shape))
        flat_samples = samples.flatten(start_dim=1)
        flat_mean = self.mean.flatten()
        flat_stddev = self.stddev.flatten()
        self._assert_moments_accurate(flat_samples, flat_mean, flat_stddev)

    def _assert_moments_accurate(self, flat_samples, flat_mean, flat_stddev):
        # Compare the first `num_moments` empirical moments per dimension
        # against the exact normal moments, tolerating a small fraction of
        # per-dimension deviations larger than `eps`.
        num_moments = 4
        eps = 0.5
        (_, dim) = flat_samples.shape
        tot_errors = 0
        tot_trials = 0
        for m in range(1, (num_moments + 1)):
            moments = torch.mean((flat_samples ** m), dim=0)
            for i in range(dim):
                tot_trials += 1
                ground_truth = stats.norm.moment(m, loc=flat_mean[i], scale=flat_stddev[i])
                if ((ground_truth - moments[i]).abs() > eps):
                    tot_errors += 1
        # Allow at most 5% of (moment, dimension) checks to fail.
        self.assertLess((tot_errors / tot_trials), 0.05)
class TestDiagonalGaussianConditionalDensity(unittest.TestCase):
    """Checks DiagonalGaussianConditionalDensity log-probs and samples against scipy."""

    def setUp(self):
        dim = 25
        cond_dim = 15
        self.shape = (dim,)
        self.cond_shape = (cond_dim,)
        # The coupler maps the conditioning input to concatenated
        # (shift, log-scale) outputs of width 2*dim.
        self.mean_log_std_map = ChunkedSharedCoupler(shift_log_scale_net=get_mlp(num_input_channels=cond_dim, hidden_channels=[10, 10, 10], num_output_channels=(2 * dim), activation=nn.Tanh))
        self.density = DiagonalGaussianConditionalDensity(self.mean_log_std_map)

    def test_log_prob(self):
        batch_size = 100
        inputs = torch.rand(batch_size, *self.shape)
        cond_inputs = torch.rand(batch_size, *self.cond_shape)
        with torch.no_grad():
            log_prob = self.density.log_prob(inputs, cond_inputs)['log-prob']
        # Recompute the same quantity with scipy from the coupler's outputs.
        mean_log_std = self.mean_log_std_map(cond_inputs)
        means = mean_log_std['shift']
        stds = torch.exp(mean_log_std['log-scale'])
        scipy_log_probs = stats.norm.logpdf(inputs, loc=means, scale=stds)
        # Sum per-dimension log-probs to get the joint log-prob per item.
        scipy_log_prob = scipy_log_probs.reshape((batch_size, (- 1))).sum(axis=1, keepdims=True)
        self.assertLessEqual(abs((log_prob.numpy() - scipy_log_prob).max()), 0.0001)

    def test_samples(self):
        batch_size = 10
        num_samples = 10000
        num_moments = 2
        cond_inputs = torch.rand(batch_size, *self.cond_shape)
        with torch.no_grad():
            # Repeat each condition so we draw num_samples per condition.
            result = self.density.sample(cond_inputs.repeat_interleave(num_samples, dim=0))
        mean_log_std = self.mean_log_std_map(cond_inputs)
        samples = result['sample']
        self.assertEqual(samples.shape, ((batch_size * num_samples), *self.shape))
        samples = samples.view(batch_size, num_samples, *self.shape)
        means = mean_log_std['shift'].flatten()
        stds = torch.exp(mean_log_std['log-scale']).flatten()
        # Compare empirical moments (per condition and dimension) against
        # exact normal moments.
        for m in range(1, (num_moments + 1)):
            moments = torch.mean((samples ** m), dim=1)
            ground_truth = torch.empty_like(moments)
            for (i, x) in enumerate(moments.flatten()):
                ground_truth.view((- 1))[i] = stats.norm.moment(m, loc=means[i], scale=stds[i])
            errs = (moments - ground_truth).abs()
            self.assertLess(errs.max(), 0.5)
            self.assertLess(errs.mean(), 0.05)
class TestCIFDensity(unittest.TestCase):
    """Shape check for the ELBO output of a CIFDensity."""

    def test_log_prob_format(self):
        batch_size = 1000
        x_dim = 40
        input_shape = (x_dim,)
        u_dim = 15
        num_importance_samples = 5
        # Standard-normal prior over x.
        prior = DiagonalGaussianDensity(mean=torch.zeros(input_shape), stddev=torch.ones(input_shape))
        p_u_density = self._u_density(u_dim, x_dim)
        # Affine bijection of x whose parameters are produced from u by a
        # shared shift/log-scale MLP coupler.
        bijection = ConditionalAffineBijection(x_shape=input_shape, coupler=get_coupler(input_shape=(u_dim,), num_channels_per_output=x_dim, config={'independent_nets': False, 'shift_log_scale_net': {'type': 'mlp', 'hidden_channels': [40, 30], 'activation': 'tanh'}}))
        q_u_density = self._u_density(u_dim, x_dim)
        density = CIFDensity(prior=prior, p_u_density=p_u_density, bijection=bijection, q_u_density=q_u_density)
        x = torch.rand(batch_size, *input_shape)
        elbo = density.elbo(x, num_importance_samples=num_importance_samples)['log-w']
        # Expect one log-weight per (batch item, importance sample).
        self.assertEqual(elbo.shape, (batch_size, num_importance_samples, 1))

    def _u_density(self, u_dim, x_dim):
        # Gaussian conditional density over u, parameterised by an MLP that
        # maps x to concatenated (shift, log-scale) of width 2*u_dim.
        return DiagonalGaussianConditionalDensity(coupler=ChunkedSharedCoupler(shift_log_scale_net=get_mlp(num_input_channels=x_dim, hidden_channels=[10, 10, 10], num_output_channels=(2 * u_dim), activation=nn.Tanh)))
class TestConcreteConditionalDensity(unittest.TestCase):
    """Smoke test for sampling from a ConcreteConditionalDensity."""

    def setUp(self):
        self.shape = (25,)
        self.cond_shape = (5,)
        # Random positive temperature parameter.
        self.lam = torch.exp(torch.rand(1))
        # MLP mapping the conditioning input to per-dimension log-alphas.
        self.log_alpha_map = get_mlp(num_input_channels=np.prod(self.cond_shape), hidden_channels=[10, 10, 10], num_output_channels=np.prod(self.shape), activation=nn.Tanh)
        self.density = ConcreteConditionalDensity(self.log_alpha_map, self.lam)

    def test_samples(self):
        batch_size = 10
        num_samples = 10000
        num_moments = 2  # NOTE(review): unused — no moment check is actually performed here
        cond_inputs = torch.rand(batch_size, *self.cond_shape)
        with torch.no_grad():
            # num_samples draws per condition; only the output shape is checked.
            samples = self.density.sample(cond_inputs.repeat_interleave(num_samples, dim=0))['sample']
        self.assertEqual(samples.shape, ((batch_size * num_samples), *self.shape))
def load_schema(name):
    """Load the reference schema stored at tests/schemas/<name>.json."""
    schema_path = Path('tests') / 'schemas' / f'{name}.json'
    with schema_path.open('r') as f:
        return json.load(f)
def test_baseline_multiscale_realnvp_schema():
    """The baseline multiscale RealNVP schema must match the stored JSON snapshot."""
    cfg = get_config(dataset='mnist', model='realnvp', use_baseline=True)
    assert get_schema(cfg) == load_schema('realnvp_schema')
def test_cif_multiscale_realnvp_schema():
    """The CIF multiscale RealNVP schema must match the stored JSON snapshot."""
    cfg = get_config(dataset='mnist', model='realnvp', use_baseline=False)
    assert get_schema(cfg) == load_schema('cif_realnvp_schema')
def download(url, dirpath):
    """Download `url` into `dirpath`, printing a progress bar, and return the
    local file path.

    The filename is taken from the last URL path segment.

    Fix: both the HTTP response and the output file are now managed by
    `with` blocks, so they are closed even if the transfer fails partway
    through (previously both handles leaked on error).
    """
    filename = url.split('/')[(- 1)]
    filepath = os.path.join(dirpath, filename)

    block_sz = 8192
    status_width = 70

    with urllib.request.urlopen(url) as u, open(filepath, 'wb') as f:
        filesize = int(u.headers['Content-Length'])
        print(('Downloading: %s Bytes: %s' % (filename, filesize)))

        downloaded = 0
        while True:
            buf = u.read(block_sz)
            if (not buf):
                print('')
                break
            print('', end='\r')
            downloaded += len(buf)
            f.write(buf)
            # Render a fixed-width progress bar plus a percentage.
            bar = (('=' * int(((float(downloaded) / filesize) * status_width))) + '>')
            status = ((('[%-' + str((status_width + 1))) + 's] %3.2f%%') % (bar, ((downloaded * 100.0) / filesize)))
            print(status, end='')
            sys.stdout.flush()

    return filepath
def download_file_from_google_drive(id, destination):
    """Download a Google Drive file identified by `id` to `destination`."""
    URL = 'https://docs.google.com/uc?export=download'
    session = requests.Session()

    response = session.get(URL, params={'id': id}, stream=True)

    # Large files trigger a virus-scan warning page; when its confirmation
    # cookie is present, re-request with the token to get the real file.
    token = get_confirm_token(response)
    if token:
        response = session.get(URL, params={'id': id, 'confirm': token}, stream=True)

    save_response_content(response, destination)
def get_confirm_token(response):
    """Return the value of Google Drive's `download_warning*` cookie from
    `response`, or None if no such cookie is present."""
    return next(
        (value for key, value in response.cookies.items() if key.startswith('download_warning')),
        None,
    )
def save_response_content(response, destination, chunk_size=(32 * 1024)):
    """Stream an HTTP response body to `destination`, with a tqdm progress bar."""
    total_size = int(response.headers.get('content-length', 0))
    with open(destination, 'wb') as out_file:
        chunks = response.iter_content(chunk_size)
        for chunk in tqdm(chunks, total=total_size, unit='B', unit_scale=True, desc=destination):
            # Skip keep-alive chunks, which arrive empty.
            if chunk:
                out_file.write(chunk)
def unzip(filepath):
    """Extract a zip archive into its containing directory, then delete the archive."""
    print(('Extracting: ' + filepath))
    target_dir = os.path.dirname(filepath)
    with zipfile.ZipFile(filepath) as archive:
        archive.extractall(target_dir)
    os.remove(filepath)
def download_celeb_a(dirpath):
    """Download and unpack the CelebA images into `dirpath`/celebA.

    No-op when the target directory already exists; reuses a previously
    downloaded zip if present.
    """
    data_dir = 'celebA'
    if os.path.exists(os.path.join(dirpath, data_dir)):
        print('Found Celeb-A - skip')
        return

    filename = 'img_align_celeba.zip'
    drive_id = '0B7EVK8r0v71pZjFTYXZWM3FlRnM'
    save_path = os.path.join(dirpath, filename)

    if os.path.exists(save_path):
        print('[*] {} already exists'.format(save_path))
    else:
        download_file_from_google_drive(drive_id, save_path)

    with zipfile.ZipFile(save_path) as zf:
        # The archive contains a single top-level directory.
        zip_dir = zf.namelist()[0]
        zf.extractall(dirpath)
    os.remove(save_path)
    # Rename the extracted directory to the canonical data_dir name.
    os.rename(os.path.join(dirpath, zip_dir), os.path.join(dirpath, data_dir))
def _list_categories(tag):
    """Fetch the list of LSUN categories for `tag` from the LSUN server.

    Fix: the urlopen response is now closed via a `with` block (previously
    the connection handle leaked).
    """
    url = ('http://lsun.cs.princeton.edu/htbin/list.cgi?tag=' + tag)
    with urllib.request.urlopen(url) as f:
        return json.loads(f.read())