code stringlengths 17 6.64M |
|---|
class DiagonalGaussianDensity(Density):
    """Fully-factorised Gaussian density with fixed (non-trainable) mean/stddev.

    Both statistics are registered as buffers, so they move with .to()/.cuda()
    but contribute no learnable parameters.
    """

    def __init__(self, mean, stddev, num_fixed_samples=0):
        super().__init__()
        assert mean.shape == stddev.shape
        self.register_buffer('mean', mean)
        self.register_buffer('stddev', stddev)
        if num_fixed_samples > 0:
            # Cache a batch of samples up front so repeated fixed_sample() calls
            # reuse the same noise (e.g. for consistent visualisations).
            self.register_buffer('_fixed_samples', self.sample(num_fixed_samples))

    @property
    def shape(self):
        return self.mean.shape

    def p_parameters(self):
        # Mean/stddev are buffers, not parameters, so nothing is trainable here.
        return []

    def q_parameters(self):
        return []

    def _fix_random_u(self):
        return (self, self.sample(num_samples=1)[0])

    def fix_u(self, u):
        # This density has no u-channels, so only an empty u is accepted.
        assert not u
        return self

    def _elbo(self, z, detach_q_params, detach_q_samples):
        broadcast_mean = self.mean.expand_as(z)
        broadcast_stddev = self.stddev.expand_as(z)
        return {
            'log-p': diagonal_gaussian_log_prob(z, broadcast_mean, broadcast_stddev),
            # Exact density: the "posterior" term is identically zero.
            'log-q': z.new_zeros((z.shape[0], 1)),
            'z': z,
        }

    def _sample(self, num_samples):
        batch_shape = (num_samples, *self.shape)
        return diagonal_gaussian_sample(self.mean.expand(*batch_shape), self.stddev.expand(*batch_shape))

    def _fixed_sample(self, noise):
        if noise is not None:
            return noise
        return self._fixed_samples
|
class MarginalDensity(Density):
    """VAE-style marginal density: prior p(z), likelihood p(x|z) and an
    approximate posterior q(z|x) used to form the ELBO."""

    def __init__(self, prior: Density, likelihood: ConditionalDensity, approx_posterior: ConditionalDensity):
        super().__init__()
        self.prior = prior
        self.likelihood = likelihood
        self.approx_posterior = approx_posterior

    def p_parameters(self):
        # Generative-path parameters: prior and likelihood.
        return [*self.prior.parameters(), *self.likelihood.parameters()]

    def q_parameters(self):
        # Inference-path parameters: this layer's posterior plus any q-params
        # nested inside the prior.
        return [*self.approx_posterior.parameters(), *self.prior.q_parameters()]

    def _elbo(self, x, detach_q_params, detach_q_samples):
        posterior_result = self.approx_posterior.sample(cond_inputs=x, detach_params=detach_q_params, detach_samples=detach_q_samples)
        z = posterior_result['sample']
        likelihood_result = self.likelihood.log_prob(inputs=x, cond_inputs=z)
        prior_result = self.prior('elbo', z, detach_q_params=detach_q_params, detach_q_samples=detach_q_samples)
        return {
            'log-p': likelihood_result['log-prob'] + prior_result['log-p'],
            'log-q': posterior_result['log-prob'] + prior_result['log-q'],
        }

    def _sample(self, num_samples):
        return self.likelihood.sample(cond_inputs=self.prior.sample(num_samples))['sample']

    def _fixed_sample(self, noise):
        return self.likelihood.sample(cond_inputs=self.prior.fixed_sample(noise=noise))['sample']
|
class SplitDensity(Density):
    """Splits the input into two halves along `dim` and models each half with
    its own density; log-probabilities are summed."""

    def __init__(self, density_1, density_2, dim):
        super().__init__()
        self.density_1 = density_1
        self.density_2 = density_2
        self.dim = dim

    def _elbo(self, x, detach_q_params, detach_q_samples):
        x1, x2 = torch.chunk(x, chunks=2, dim=self.dim)
        elbo_1 = self.density_1('elbo', x1, detach_q_params=detach_q_params, detach_q_samples=detach_q_samples)
        elbo_2 = self.density_2('elbo', x2, detach_q_params=detach_q_params, detach_q_samples=detach_q_samples)
        return {
            'log-p': elbo_1['log-p'] + elbo_2['log-p'],
            'log-q': elbo_1['log-q'] + elbo_2['log-q'],
        }

    def _fixed_sample(self, noise):
        if noise is not None:
            raise NotImplementedError('Proper splitting of noise is not yet implemented')
        # noise is None from here on; each half draws its own cached samples.
        half_1 = self.density_1.fixed_sample(noise=noise)
        half_2 = self.density_2.fixed_sample(noise=noise)
        return torch.cat((half_1, half_2), dim=self.dim)

    def _sample(self, num_samples):
        half_1 = self.density_1.sample(num_samples)
        half_2 = self.density_2.sample(num_samples)
        return torch.cat((half_1, half_2), dim=self.dim)
|
class WrapperDensity(Density):
    """Base class for densities that delegate everything to a single wrapped
    density, optionally pre-processing the inputs in `elbo` (see subclasses)."""
    def __init__(self, density):
        super().__init__()
        self.density = density
    def p_parameters(self):
        return self.density.p_parameters()
    def q_parameters(self):
        return self.density.q_parameters()
    def elbo(self, x, num_importance_samples, detach_q_params, detach_q_samples):
        # Subclasses override this to transform x before delegating.
        return self.density.elbo(x, num_importance_samples=num_importance_samples, detach_q_params=detach_q_params, detach_q_samples=detach_q_samples)
    def _elbo(self, x, detach_q_params, detach_q_samples):
        # Wrappers intercept the public elbo() entry point; reaching the internal
        # _elbo means a wrapper was placed below a standard Density layer.
        assert False, 'Wrapper Densities should not be preceded by standard Density layers'
    def _sample(self, num_samples):
        return self.density.sample(num_samples)
    def _fixed_sample(self, noise):
        return self.density.fixed_sample(noise=noise)
|
class DequantizationDensity(WrapperDensity):
    """Adds uniform [0, 1) dequantization noise to (integer-valued) inputs
    before delegating the ELBO to the wrapped density."""
    def elbo(self, x, num_importance_samples, detach_q_params, detach_q_samples):
        # Bug fix: the original used x.add_(torch.rand_like(x)), which mutated
        # the caller's tensor in place — e.g. the buffer that
        # PassthroughBeforeEvalDensity feeds through elbo() would accumulate
        # noise on every eval switch. Use an out-of-place add instead.
        return super().elbo(
            x + torch.rand_like(x),
            num_importance_samples=num_importance_samples,
            detach_q_params=detach_q_params,
            detach_q_samples=detach_q_samples,
        )
|
class BinarizationDensity(WrapperDensity):
    """Stochastically binarizes inputs before the ELBO, treating x / scale as
    per-element Bernoulli probabilities."""

    def __init__(self, density, scale):
        super().__init__(density)
        self.scale = scale

    def elbo(self, x, num_importance_samples, detach_q_params, detach_q_samples):
        binarized = dist.bernoulli.Bernoulli(probs=(x / self.scale)).sample()
        return super().elbo(binarized, num_importance_samples=num_importance_samples, detach_q_params=detach_q_params, detach_q_samples=detach_q_samples)
|
class PassthroughBeforeEvalDensity(WrapperDensity):
    # Runs one forward pass on a stored data batch whenever the module is
    # switched to eval mode. Presumably this refreshes data-dependent state
    # (e.g. BatchNorm/ActNorm statistics) before evaluation — TODO confirm.
    def __init__(self, density, x):
        super().__init__(density)
        # Stored as a buffer (not a Parameter) so it follows .to()/.cuda()
        # without being trained.
        self.register_buffer('x', x)
    def train(self, train_mode=True):
        if (not train_mode):
            # Temporarily force training mode so the pass below behaves like a
            # training forward; super().train(train_mode) then sets the real mode.
            self.training = True
            with torch.no_grad():
                self.elbo(self.x, num_importance_samples=1, detach_q_params=False, detach_q_samples=False)
        super().train(train_mode)
|
class ConstantNetwork(nn.Module):
    """Ignores its input values entirely and returns a constant tensor,
    expanded along a leading batch dimension taken from the input.

    When `fixed` is True the constant is a buffer (non-trainable); otherwise
    it is a learnable nn.Parameter.
    """

    def __init__(self, value, fixed):
        super().__init__()
        if fixed:
            # Buffer: moves with .to()/.cuda() but receives no gradients.
            self.register_buffer('value', value)
        else:
            self.value = nn.Parameter(value)

    def forward(self, inputs):
        batch_size = inputs.shape[0]
        return self.value.expand(batch_size, *self.value.shape)
|
class ResidualBlock(nn.Module):
    """Pre-activation residual block: two (BN -> ReLU -> 3x3 conv) stages
    followed by an identity skip connection. Channel count is preserved."""

    def __init__(self, num_channels):
        super().__init__()
        self.bn1 = nn.BatchNorm2d(num_channels)
        self.conv1 = self._get_conv3x3(num_channels)
        self.bn2 = nn.BatchNorm2d(num_channels)
        self.conv2 = self._get_conv3x3(num_channels)

    def forward(self, inputs):
        branch = self.conv1(torch.relu(self.bn1(inputs)))
        branch = self.conv2(torch.relu(self.bn2(branch)))
        return branch + inputs

    def _get_conv3x3(self, num_channels):
        # bias=False: each conv is preceded by batch norm, which absorbs any bias.
        return nn.Conv2d(in_channels=num_channels, out_channels=num_channels, kernel_size=3, stride=1, padding=1, bias=False)
|
class ScaledTanh2dModule(nn.Module):
    """Wraps a module and applies a per-channel affine-scaled tanh to its
    output: w * tanh(module(x)) + b, with w initialised to 1 and b to 0."""

    def __init__(self, module, num_channels):
        super().__init__()
        self.module = module
        # Shapes (C, 1, 1) broadcast over the spatial dimensions.
        self.weights = nn.Parameter(torch.ones(num_channels, 1, 1))
        self.bias = nn.Parameter(torch.zeros(num_channels, 1, 1))

    def forward(self, inputs):
        squashed = torch.tanh(self.module(inputs))
        return self.weights * squashed + self.bias
|
def get_resnet(num_input_channels, hidden_channels, num_output_channels):
    """Builds a pre-activation ResNet: a 3x3 stem conv, one ResidualBlock per
    entry of `hidden_channels`, then BN -> ReLU -> 1x1 conv, all wrapped in a
    per-channel scaled tanh.

    NOTE(review): each ResidualBlock preserves its channel count, so the layer
    stack only lines up when all entries of `hidden_channels` are equal to the
    first — confirm with callers.
    """
    stem_channels = hidden_channels[0] if hidden_channels else num_output_channels
    layers = [nn.Conv2d(in_channels=num_input_channels, out_channels=stem_channels, kernel_size=3, stride=1, padding=1, bias=False)]
    layers.extend(ResidualBlock(block_channels) for block_channels in hidden_channels)
    # Mirrors the original's reuse of the loop variable: the head operates on
    # the last hidden width (or the stem width when there are no hidden layers).
    head_channels = hidden_channels[-1] if hidden_channels else stem_channels
    layers += [
        nn.BatchNorm2d(head_channels),
        nn.ReLU(),
        nn.Conv2d(in_channels=head_channels, out_channels=num_output_channels, kernel_size=1, padding=0, bias=True),
    ]
    return ScaledTanh2dModule(module=nn.Sequential(*layers), num_channels=num_output_channels)
|
def get_glow_cnn(num_input_channels, num_hidden_channels, num_output_channels, zero_init_output):
    """Glow-style 3-layer CNN: 3x3 conv -> BN -> ReLU -> 1x1 conv -> BN -> ReLU -> 3x3 conv.

    With `zero_init_output`, the final conv's weight and bias start at zero so
    the network initially outputs zeros (the usual Glow coupling initialisation).
    """
    conv_in = nn.Conv2d(in_channels=num_input_channels, out_channels=num_hidden_channels, kernel_size=3, padding=1, bias=False)
    conv_mid = nn.Conv2d(in_channels=num_hidden_channels, out_channels=num_hidden_channels, kernel_size=1, padding=0, bias=False)
    conv_out = nn.Conv2d(in_channels=num_hidden_channels, out_channels=num_output_channels, kernel_size=3, padding=1)
    if zero_init_output:
        conv_out.weight.data.zero_()
        conv_out.bias.data.zero_()
    return nn.Sequential(
        conv_in,
        nn.BatchNorm2d(num_hidden_channels),
        nn.ReLU(),
        conv_mid,
        nn.BatchNorm2d(num_hidden_channels),
        nn.ReLU(),
        conv_out,
    )
|
def get_mlp(num_input_channels, hidden_channels, num_output_channels, activation, log_softmax_outputs=False):
    """Builds a fully-connected network with `activation()` after every hidden
    layer, a final un-activated Linear, and optionally a LogSoftmax over dim 1."""
    widths = [num_input_channels, *hidden_channels]
    layers = []
    for in_width, out_width in zip(widths[:-1], widths[1:]):
        layers += [nn.Linear(in_width, out_width), activation()]
    layers.append(nn.Linear(widths[-1], num_output_channels))
    if log_softmax_outputs:
        layers.append(nn.LogSoftmax(dim=1))
    return nn.Sequential(*layers)
|
class MaskedLinear(nn.Module):
    """Linear layer with a MADE-style mask: output unit j only receives input
    units whose degree is <= degree(j), enforcing an autoregressive structure."""

    def __init__(self, input_degrees, output_degrees):
        super().__init__()
        assert input_degrees.dim() == output_degrees.dim() == 1
        self.linear = nn.Linear(input_degrees.shape[0], output_degrees.shape[0])
        # mask[j, i] = 1 iff output degree j can see input degree i.
        connectivity = output_degrees.unsqueeze(-1) >= input_degrees
        self.register_buffer('mask', connectivity.to(self.linear.weight.dtype))

    def forward(self, inputs):
        return F.linear(inputs, self.mask * self.linear.weight, self.linear.bias)
|
class AutoregressiveMLP(nn.Module):
    """MADE-style autoregressive MLP with several output heads: for every head,
    output channel i depends only on input channels strictly before i."""

    def __init__(self, num_input_channels, hidden_channels, num_output_heads, activation):
        super().__init__()
        self.flat_ar_mlp = self._get_flat_ar_mlp(num_input_channels, hidden_channels, num_output_heads, activation)
        self.num_input_channels = num_input_channels
        self.num_output_heads = num_output_heads

    def _get_flat_ar_mlp(self, num_input_channels, hidden_channels, num_output_heads, activation):
        assert num_input_channels >= 2
        assert all(num_input_channels <= d for d in hidden_channels), 'Random initialisation not yet implemented'
        # Input degrees 1..n.
        prev_degrees = torch.arange(1, num_input_channels + 1, dtype=torch.int64)
        layers = []
        for width in hidden_channels:
            # Hidden degrees cycle through 1..n-1 so every hidden unit can feed
            # at least one output.
            degrees = (torch.arange(width, dtype=torch.int64) % (num_input_channels - 1)) + 1
            layers += [MaskedLinear(prev_degrees, degrees), activation()]
            prev_degrees = degrees
        # Output degrees 0..n-1 (repeated per head): channel i only sees
        # strictly earlier inputs.
        final_degrees = torch.arange(num_input_channels, dtype=torch.int64).repeat(num_output_heads)
        layers.append(MaskedLinear(prev_degrees, final_degrees))
        return nn.Sequential(*layers)

    def forward(self, inputs):
        assert inputs.shape[1:] == (self.num_input_channels,)
        flat_outputs = self.flat_ar_mlp(inputs)
        return flat_outputs.view(inputs.shape[0], self.num_output_heads, self.num_input_channels)
|
class LipschitzNetwork(nn.Module):
    # Sequential stack of Lipschitz-constrained layers. Before every forward
    # pass, the induced-norm layers' spectral estimates are refreshed (power
    # iteration via compute_weight); after every backward pass the estimates
    # are marked stale so the next forward refreshes them again.
    _MODULES_TO_UPDATE = (InducedNormConv2d, InducedNormLinear)
    def __init__(self, layers, max_train_lipschitz_iters, max_eval_lipschitz_iters, lipschitz_tolerance):
        super().__init__()
        # Kept both as a list (for _modules_to_update) and as the Sequential used in forward.
        self.layers = layers
        self.net = nn.Sequential(*layers)
        self.max_train_lipschitz_iters = max_train_lipschitz_iters
        self.max_eval_lipschitz_iters = max_eval_lipschitz_iters
        self.lipschitz_tolerance = lipschitz_tolerance
        # Refresh estimates lazily: just before forward, and only when flagged dirty.
        self.register_forward_pre_hook(self._update_lipschitz_constant)
        self.register_full_backward_hook(self._queue_lipschitz_update)
        self._requires_train_lipschitz_update = True
        self._requires_eval_lipschitz_update = True
    def forward(self, inputs):
        return self.net(inputs)
    def _queue_lipschitz_update(self, *args, **kwargs):
        # A backward pass changed the weights, so both modes need refreshing.
        self._requires_train_lipschitz_update = True
        self._requires_eval_lipschitz_update = True
    def _update_lipschitz_constant(self, *args, **kwargs):
        if self.training:
            if self._requires_train_lipschitz_update:
                self._update_lipschitz(max_iterations=self.max_train_lipschitz_iters)
                self._requires_train_lipschitz_update = False
        elif self._requires_eval_lipschitz_update:
            # An eval-mode update (typically more iterations) also satisfies
            # the train-mode requirement, hence both flags are cleared.
            self._update_lipschitz(max_iterations=self.max_eval_lipschitz_iters)
            self._requires_eval_lipschitz_update = False
            self._requires_train_lipschitz_update = False
    def _update_lipschitz(self, max_iterations):
        for m in self._modules_to_update():
            m.compute_weight(update=True, n_iterations=max_iterations, atol=self.lipschitz_tolerance, rtol=self.lipschitz_tolerance)
    def _modules_to_update(self):
        # Only the induced-norm layers carry spectral state; activations are skipped.
        for m in self.layers:
            if isinstance(m, self._MODULES_TO_UPDATE):
                (yield m)
|
def get_lipschitz_mlp(num_input_channels, hidden_channels, num_output_channels, lipschitz_constant, max_train_lipschitz_iters, max_eval_lipschitz_iters, lipschitz_tolerance):
    """Swish-activated MLP of induced-norm linear layers wrapped in a
    LipschitzNetwork; only the final layer is zero-initialised.

    NOTE(review): each layer is constructed with the *eval* iteration budget;
    the train/eval budgets are enforced by LipschitzNetwork's hooks.
    """
    all_widths = hidden_channels + [num_output_channels]
    last_index = len(hidden_channels)
    layers = []
    prev_width = num_input_channels
    for i, width in enumerate(all_widths):
        layers.append(Swish())
        layers.append(_get_lipschitz_linear_layer(
            num_input_channels=prev_width,
            num_output_channels=width,
            lipschitz_constant=lipschitz_constant,
            max_lipschitz_iters=max_eval_lipschitz_iters,
            lipschitz_tolerance=lipschitz_tolerance,
            zero_init=(i == last_index),
        ))
        prev_width = width
    return LipschitzNetwork(layers=layers, max_train_lipschitz_iters=max_train_lipschitz_iters, max_eval_lipschitz_iters=max_eval_lipschitz_iters, lipschitz_tolerance=lipschitz_tolerance)
|
def _get_lipschitz_linear_layer(num_input_channels, num_output_channels, lipschitz_constant, max_lipschitz_iters, lipschitz_tolerance, zero_init):
return InducedNormLinear(in_features=num_input_channels, out_features=num_output_channels, coeff=lipschitz_constant, domain=2, codomain=2, n_iterations=max_lipschitz_iters, atol=lipschitz_tolerance, rtol=lipschitz_tolerance, zero_init=zero_init)
|
def get_lipschitz_cnn(input_shape, num_hidden_channels, num_output_channels, lipschitz_constant, max_train_lipschitz_iters, max_eval_lipschitz_iters, lipschitz_tolerance):
    # Builds a 3-conv Lipschitz-constrained CNN (Swish activations) wrapped in
    # a LipschitzNetwork. `input_shape` is (C, H, W).
    # NOTE(review): each conv is built with the *eval* iteration budget; the
    # train/eval budgets are applied by LipschitzNetwork's forward hook.
    assert (len(input_shape) == 3)
    num_input_channels = input_shape[0]
    conv1 = _get_lipschitz_conv_layer(num_input_channels=num_input_channels, num_output_channels=num_hidden_channels, kernel_size=3, padding=1, lipschitz_constant=lipschitz_constant, max_lipschitz_iters=max_eval_lipschitz_iters, lipschitz_tolerance=lipschitz_tolerance)
    conv2 = _get_lipschitz_conv_layer(num_input_channels=num_hidden_channels, num_output_channels=num_hidden_channels, kernel_size=1, padding=0, lipschitz_constant=lipschitz_constant, max_lipschitz_iters=max_eval_lipschitz_iters, lipschitz_tolerance=lipschitz_tolerance)
    conv3 = _get_lipschitz_conv_layer(num_input_channels=num_hidden_channels, num_output_channels=num_output_channels, kernel_size=3, padding=1, lipschitz_constant=lipschitz_constant, max_lipschitz_iters=max_eval_lipschitz_iters, lipschitz_tolerance=lipschitz_tolerance)
    layers = [Swish(), conv1, Swish(), conv2, Swish(), conv3]
    # Dummy forward pass — presumably to initialise the induced-norm layers'
    # input-shape-dependent internal state before wrapping; TODO confirm.
    dummy_inputs = torch.empty(1, *input_shape)
    nn.Sequential(*layers)(dummy_inputs)
    return LipschitzNetwork(layers=layers, max_train_lipschitz_iters=max_train_lipschitz_iters, max_eval_lipschitz_iters=max_eval_lipschitz_iters, lipschitz_tolerance=lipschitz_tolerance)
|
def _get_lipschitz_conv_layer(num_input_channels, num_output_channels, kernel_size, padding, lipschitz_constant, max_lipschitz_iters, lipschitz_tolerance):
    # Single conv layer whose operator norm is constrained to `lipschitz_constant`.
    # The power iteration needs a stopping criterion: an iteration cap, a
    # tolerance, or both.
    assert ((max_lipschitz_iters is not None) or (lipschitz_tolerance is not None))
    return InducedNormConv2d(in_channels=num_input_channels, out_channels=num_output_channels, kernel_size=kernel_size, stride=1, padding=padding, bias=True, coeff=lipschitz_constant, domain=2, codomain=2, n_iterations=max_lipschitz_iters, atol=lipschitz_tolerance, rtol=lipschitz_tolerance)
|
def get_density(schema, x_train):
    """Builds the density described by `schema`, consuming a leading
    `passthrough-before-eval` layer (which captures a random subset of the
    training data) when present."""
    x_shape = x_train.shape[1:]
    head = schema[0]
    if head['type'] != 'passthrough-before-eval':
        return get_density_recursive(schema, x_shape)
    num_points = head['num_passthrough_data_points']
    # Random subset of training points to feed through before each eval.
    chosen_idxs = torch.randperm(x_train.shape[0])[:num_points]
    return PassthroughBeforeEvalDensity(density=get_density_recursive(schema[1:], x_shape), x=x_train[chosen_idxs])
|
def get_density_recursive(schema, x_shape):
    """Recursively builds the density described by `schema` (outermost layer
    first); an empty schema bottoms out at a standard Gaussian."""
    if not schema:
        return get_standard_gaussian_density(x_shape=x_shape)
    head, tail = schema[0], schema[1:]
    layer_type = head['type']
    if layer_type == 'dequantization':
        return DequantizationDensity(density=get_density_recursive(schema=tail, x_shape=x_shape))
    if layer_type == 'binarize':
        return BinarizationDensity(density=get_density_recursive(schema=tail, x_shape=x_shape), scale=head['scale'])
    if layer_type == 'split':
        # Halve the channel dimension; the second half gets a Gaussian.
        split_x_shape = (x_shape[0] // 2, *x_shape[1:])
        return SplitDensity(density_1=get_density_recursive(schema=tail, x_shape=split_x_shape), density_2=get_standard_gaussian_density(x_shape=split_x_shape), dim=1)
    if layer_type == 'passthrough-before-eval':
        assert False, '`passthrough-before-eval` must occur as the first item in a schema'
    if layer_type in ('bernoulli-likelihood', 'gaussian-likelihood'):
        return get_marginal_density(layer_config=head, schema_tail=tail, x_shape=x_shape)
    # Everything else is treated as a bijective (flow/CIF) layer.
    return get_bijection_density(layer_config=head, schema_tail=tail, x_shape=x_shape)
|
def get_marginal_density(layer_config, schema_tail, x_shape):
    """VAE layer: likelihood p(x|z), a recursively-built prior p(z), and a
    diagonal-Gaussian approximate posterior q(z|x)."""
    likelihood, z_shape = get_likelihood(layer_config, schema_tail, x_shape)
    q_coupler = get_coupler(input_shape=x_shape, num_channels_per_output=layer_config['num_z_channels'], config=layer_config['q_coupler'])
    return MarginalDensity(
        prior=get_density_recursive(schema_tail, z_shape),
        likelihood=likelihood,
        approx_posterior=DiagonalGaussianConditionalDensity(coupler=q_coupler),
    )
|
def get_likelihood(layer_config, schema_tail, x_shape):
    """Returns (likelihood p(x|z), z_shape); z keeps x's spatial dims but has
    `num_z_channels` channels."""
    z_shape = (layer_config['num_z_channels'], *x_shape[1:])
    layer_type = layer_config['type']
    if layer_type == 'gaussian-likelihood':
        p_coupler = get_coupler(input_shape=z_shape, num_channels_per_output=x_shape[0], config=layer_config['p_coupler'])
        return DiagonalGaussianConditionalDensity(coupler=p_coupler), z_shape
    if layer_type == 'bernoulli-likelihood':
        logit_net = get_net(input_shape=z_shape, num_output_channels=x_shape[0], net_config=layer_config['logit_net'])
        return BernoulliConditionalDensity(logit_net=logit_net), z_shape
    assert False, f"Invalid layer type `{layer_config['type']}'"
|
def get_bijection_density(layer_config, schema_tail, x_shape):
    """Bijective layer: a plain flow when there are no auxiliary u-channels,
    otherwise a CIF layer with learned p(u|x) and q(u|x)."""
    bijection = get_bijection(layer_config=layer_config, x_shape=x_shape)
    prior = get_density_recursive(schema=schema_tail, x_shape=bijection.z_shape)
    num_u_channels = layer_config.get('num_u_channels', 0)
    if num_u_channels == 0:
        return FlowDensity(bijection=bijection, prior=prior)
    p_u_density = get_conditional_density(num_u_channels=num_u_channels, coupler_config=layer_config['p_coupler'], x_shape=x_shape)
    q_u_density = get_conditional_density(num_u_channels=num_u_channels, coupler_config=layer_config['q_coupler'], x_shape=x_shape)
    return CIFDensity(bijection=bijection, prior=prior, p_u_density=p_u_density, q_u_density=q_u_density)
|
def get_uniform_density(x_shape):
    # Flow whose bijection is the inverse of a logit (i.e. a sigmoid x -> z)
    # with a uniform prior over z.
    return FlowDensity(bijection=LogitBijection(x_shape=x_shape).inverse(), prior=UniformDensity(x_shape))
|
def get_standard_gaussian_density(x_shape):
    # Standard normal N(0, I) over x_shape; caches 64 fixed samples (consumed
    # by fixed_sample()).
    return DiagonalGaussianDensity(mean=torch.zeros(x_shape), stddev=torch.ones(x_shape), num_fixed_samples=64)
|
def get_bijection(layer_config, x_shape):
    # Dispatches on layer_config['type'] to construct a single bijection over
    # x_shape. Types fall into rough groups: coupling/reshaping layers,
    # elementwise maps, normalisations, linear/permutation layers, and
    # 1-d autoregressive / residual flows (which assert len(x_shape) == 1).
    if (layer_config['type'] == 'acl'):
        # Affine coupling layer; mask type handled in get_acl_bijection.
        return get_acl_bijection(config=layer_config, x_shape=x_shape)
    elif (layer_config['type'] == 'squeeze'):
        return Squeeze2dBijection(x_shape=x_shape, factor=layer_config['factor'])
    elif (layer_config['type'] == 'logit'):
        return LogitBijection(x_shape=x_shape)
    elif (layer_config['type'] == 'sigmoid'):
        # Sigmoid implemented as the inverse of the logit bijection.
        return LogitBijection(x_shape=x_shape).inverse()
    elif (layer_config['type'] == 'tanh'):
        return TanhBijection(x_shape=x_shape)
    elif (layer_config['type'] == 'scalar-mult'):
        return ScalarMultiplicationBijection(x_shape=x_shape, value=layer_config['value'])
    elif (layer_config['type'] == 'scalar-add'):
        return ScalarAdditionBijection(x_shape=x_shape, value=layer_config['value'])
    elif (layer_config['type'] == 'flatten'):
        return ViewBijection(x_shape=x_shape, z_shape=(int(np.prod(x_shape)),))
    elif (layer_config['type'] == 'made'):
        assert (len(x_shape) == 1)
        return MADEBijection(num_input_channels=x_shape[0], hidden_channels=layer_config['hidden_channels'], activation=get_activation(layer_config['activation']))
    elif (layer_config['type'] == 'batch-norm'):
        return BatchNormBijection(x_shape=x_shape, per_channel=layer_config['per_channel'], apply_affine=layer_config['apply_affine'], momentum=layer_config['momentum'])
    elif (layer_config['type'] == 'act-norm'):
        return ActNormBijection(x_shape=x_shape)
    elif (layer_config['type'] == 'affine'):
        return AffineBijection(x_shape=x_shape, per_channel=layer_config['per_channel'])
    elif (layer_config['type'] == 'cond-affine'):
        # Affine transform whose parameters are produced from u-channels.
        return ConditionalAffineBijection(x_shape=x_shape, coupler=get_coupler(input_shape=(layer_config['num_u_channels'], *x_shape[1:]), num_channels_per_output=x_shape[0], config=layer_config['st_coupler']))
    elif (layer_config['type'] == 'flip'):
        return FlipBijection(x_shape=x_shape, dim=1)
    elif (layer_config['type'] == 'invconv'):
        # Glow-style invertible 1x1 conv, optionally with an LU parameterisation.
        if layer_config['lu']:
            return LUInvertible1x1ConvBijection(x_shape=x_shape)
        else:
            return BruteForceInvertible1x1ConvBijection(x_shape=x_shape)
    elif (layer_config['type'] == 'linear'):
        assert (len(x_shape) == 1)
        return LULinearBijection(num_input_channels=x_shape[0])
    elif (layer_config['type'] == 'rand-channel-perm'):
        return RandomChannelwisePermutationBijection(x_shape=x_shape)
    elif (layer_config['type'] == 'sos'):
        assert (len(x_shape) == 1)
        return SumOfSquaresPolynomialBijection(num_input_channels=x_shape[0], hidden_channels=layer_config['hidden_channels'], activation=get_activation(layer_config['activation']), num_polynomials=layer_config['num_polynomials'], polynomial_degree=layer_config['polynomial_degree'])
    elif (layer_config['type'] == 'nsf-ar'):
        # Neural spline flow, autoregressive variant.
        assert (len(x_shape) == 1)
        return AutoregressiveRationalQuadraticSplineBijection(num_input_channels=x_shape[0], num_hidden_layers=layer_config['num_hidden_layers'], num_hidden_channels=layer_config['num_hidden_channels'], num_bins=layer_config['num_bins'], tail_bound=layer_config['tail_bound'], activation=get_activation(layer_config['activation']), dropout_probability=layer_config['dropout_probability'])
    elif (layer_config['type'] == 'nsf-c'):
        # Neural spline flow, coupling variant.
        assert (len(x_shape) == 1)
        return CoupledRationalQuadraticSplineBijection(num_input_channels=x_shape[0], num_hidden_layers=layer_config['num_hidden_layers'], num_hidden_channels=layer_config['num_hidden_channels'], num_bins=layer_config['num_bins'], tail_bound=layer_config['tail_bound'], activation=get_activation(layer_config['activation']), dropout_probability=layer_config['dropout_probability'], reverse_mask=layer_config['reverse_mask'])
    elif (layer_config['type'] == 'bnaf'):
        assert (len(x_shape) == 1)
        return BlockNeuralAutoregressiveBijection(num_input_channels=x_shape[0], num_hidden_layers=layer_config['num_hidden_layers'], hidden_channels_factor=layer_config['hidden_channels_factor'], activation=layer_config['activation'], residual=layer_config['residual'])
    elif (layer_config['type'] == 'ode'):
        # FFJORD continuous-time flow; one tolerance is used for both rtol and atol.
        assert (len(x_shape) == 1)
        return FFJORDBijection(x_shape=x_shape, velocity_hidden_channels=layer_config['hidden_channels'], relative_tolerance=layer_config['numerical_tolerance'], absolute_tolerance=layer_config['numerical_tolerance'], num_u_channels=layer_config['num_u_channels'])
    elif (layer_config['type'] == 'planar'):
        assert (len(x_shape) == 1)
        return PlanarBijection(num_input_channels=x_shape[0])
    elif (layer_config['type'] == 'cond-planar'):
        assert (len(x_shape) == 1)
        return ConditionalPlanarBijection(num_input_channels=x_shape[0], num_u_channels=layer_config['num_u_channels'], cond_hidden_channels=layer_config['cond_hidden_channels'], cond_activation=get_activation(layer_config['cond_activation']))
    elif (layer_config['type'] == 'resblock'):
        return ResidualFlowBijection(x_shape=x_shape, lipschitz_net=get_lipschitz_net(input_shape=x_shape, num_output_channels=x_shape[0], config=layer_config['net']), reduce_memory=layer_config['reduce_memory'])
    else:
        assert False, f"Invalid layer type `{layer_config['type']}'"
|
def get_acl_bijection(config, x_shape):
    """Affine coupling layer with one of three masks: 2d checkerboard,
    alternating-channel, or split-channel."""
    num_x_channels = x_shape[0]
    num_u_channels = config['num_u_channels']
    mask_type = config['mask_type']
    if mask_type == 'checkerboard':
        coupler = get_coupler(input_shape=(num_x_channels + num_u_channels, *x_shape[1:]), num_channels_per_output=num_x_channels, config=config['coupler'])
        return Checkerboard2dAffineCouplingBijection(x_shape=x_shape, coupler=coupler, reverse_mask=config['reverse_mask'])

    def coupler_factory(num_passthrough_channels):
        # The coupler sees the passthrough channels plus any u-channels and
        # parameterises the remaining (transformed) channels.
        return get_coupler(input_shape=(num_passthrough_channels + num_u_channels, *x_shape[1:]), num_channels_per_output=num_x_channels - num_passthrough_channels, config=config['coupler'])

    if mask_type == 'alternating-channel':
        return AlternatingChannelwiseAffineCouplingBijection(x_shape=x_shape, coupler_factory=coupler_factory, reverse_mask=config['reverse_mask'])
    if mask_type == 'split-channel':
        return SplitChannelwiseAffineCouplingBijection(x_shape=x_shape, coupler_factory=coupler_factory, reverse_mask=config['reverse_mask'])
    assert False, f"Invalid mask type {config['mask_type']}"
|
def get_conditional_density(num_u_channels, coupler_config, x_shape):
    # Diagonal-Gaussian conditional density over `num_u_channels` channels,
    # parameterised by a coupler that reads inputs of shape x_shape.
    return DiagonalGaussianConditionalDensity(coupler=get_coupler(input_shape=x_shape, num_channels_per_output=num_u_channels, config=coupler_config))
|
def get_coupler(input_shape, num_channels_per_output, config):
    """Builds a shift/log-scale coupler: either two independent nets or a
    single shared net whose output is chunked in two."""
    if not config['independent_nets']:
        return get_coupler_with_shared_net(input_shape=input_shape, num_channels_per_output=num_channels_per_output, net_config=config['shift_log_scale_net'])
    return get_coupler_with_independent_nets(input_shape=input_shape, num_channels_per_output=num_channels_per_output, shift_net_config=config['shift_net'], log_scale_net_config=config['log_scale_net'])
|
def get_coupler_with_shared_net(input_shape, num_channels_per_output, net_config):
    # One net produces 2 * num_channels_per_output channels, which the coupler
    # chunks into shift and log-scale halves.
    return ChunkedSharedCoupler(shift_log_scale_net=get_net(input_shape=input_shape, num_output_channels=(2 * num_channels_per_output), net_config=net_config))
|
def get_coupler_with_independent_nets(input_shape, num_channels_per_output, shift_net_config, log_scale_net_config):
    # Separate networks for the shift and log-scale outputs.
    return IndependentCoupler(shift_net=get_net(input_shape=input_shape, num_output_channels=num_channels_per_output, net_config=shift_net_config), log_scale_net=get_net(input_shape=input_shape, num_output_channels=num_channels_per_output, net_config=log_scale_net_config))
|
def get_net(input_shape, num_output_channels, net_config):
    """Dispatches on net_config['type'] to build a network. Shape conventions:
    'mlp' requires 1-d inputs; 'resnet' and 'glow-cnn' require 3-d (C, H, W)."""
    num_input_channels = input_shape[0]
    net_type = net_config['type']
    if net_type == 'mlp':
        assert len(input_shape) == 1
        return get_mlp(num_input_channels=num_input_channels, hidden_channels=net_config['hidden_channels'], num_output_channels=num_output_channels, activation=get_activation(net_config['activation']))
    if net_type == 'resnet':
        assert len(input_shape) == 3
        return get_resnet(num_input_channels=num_input_channels, hidden_channels=net_config['hidden_channels'], num_output_channels=num_output_channels)
    if net_type == 'glow-cnn':
        assert len(input_shape) == 3
        return get_glow_cnn(num_input_channels=num_input_channels, num_hidden_channels=net_config['num_hidden_channels'], num_output_channels=num_output_channels, zero_init_output=net_config['zero_init_output'])
    if net_type == 'constant':
        # Constant output with the input's spatial dims but num_output_channels channels.
        value = torch.full((num_output_channels, *input_shape[1:]), net_config['value'], dtype=torch.get_default_dtype())
        return ConstantNetwork(value=value, fixed=net_config['fixed'])
    if net_type == 'identity':
        assert num_output_channels == num_input_channels
        return (lambda x: x)
    assert False, f"Invalid net type {net_config['type']}"
|
def get_activation(name):
    """Maps an activation name ('tanh' or 'relu') to its nn.Module class."""
    activations = {'tanh': nn.Tanh, 'relu': nn.ReLU}
    assert name in activations, f'Invalid activation {name}'
    return activations[name]
|
def get_lipschitz_net(input_shape, num_output_channels, config):
    """Builds a Lipschitz-constrained CNN or MLP according to config['type'].

    Bug fix: the failure branch formatted `config['net']` — a key that does not
    exist (the dispatch key is 'type') — so an unknown type raised a confusing
    KeyError instead of the intended AssertionError. It now reports the type.
    """
    if (config['type'] == 'cnn'):
        return get_lipschitz_cnn(input_shape=input_shape, num_hidden_channels=config['num_hidden_channels'], num_output_channels=num_output_channels, lipschitz_constant=config['lipschitz_constant'], max_train_lipschitz_iters=config['max_train_lipschitz_iters'], max_eval_lipschitz_iters=config['max_test_lipschitz_iters'], lipschitz_tolerance=config['lipschitz_tolerance'])
    elif (config['type'] == 'mlp'):
        assert (len(input_shape) == 1)
        return get_lipschitz_mlp(num_input_channels=input_shape[0], hidden_channels=config['hidden_channels'], num_output_channels=num_output_channels, lipschitz_constant=config['lipschitz_constant'], max_train_lipschitz_iters=config['max_train_lipschitz_iters'], max_eval_lipschitz_iters=config['max_test_lipschitz_iters'], lipschitz_tolerance=config['lipschitz_tolerance'])
    else:
        assert False, f"Invalid Lipschitz net type {config['type']}"
|
class AverageMetric(Metric):
    """Ignite metric that keeps a separate running element-wise mean for every
    key of the engine output's 'metrics' dict."""

    _required_output_keys = ['metrics']

    def reset(self):
        self._sums = Counter()
        self._num_examples = Counter()

    def update(self, output):
        (metrics,) = output
        for key, values in metrics.items():
            self._sums[key] += torch.sum(values)
            self._num_examples[key] += torch.numel(values)

    def compute(self):
        return {key: total / self._num_examples[key] for key, total in self._sums.items()}

    def completed(self, engine):
        # Merge rather than overwrite so metrics attached by others survive.
        engine.state.metrics = {**engine.state.metrics, **self.compute()}

    def attach(self, engine):
        engine.add_event_handler(Events.EPOCH_STARTED, self.started)
        engine.add_event_handler(Events.ITERATION_COMPLETED, self.iteration_completed)
        # Unlike the default Metric.attach, `completed` fires every iteration,
        # so engine.state.metrics always holds up-to-date running averages.
        engine.add_event_handler(Events.ITERATION_COMPLETED, self.completed)
|
class Trainer():
_STEPS_PER_LOSS_WRITE = 10
_STEPS_PER_GRAD_WRITE = 10
_STEPS_PER_LR_WRITE = 10
    def __init__(self, module, device, train_metrics, train_loader, opts, lr_schedulers, max_epochs, max_grad_norm, test_metrics, test_loader, epochs_per_test, early_stopping, valid_loss, valid_loader, max_bad_valid_epochs, visualizer, writer, should_checkpoint_latest, should_checkpoint_best_valid, checkpoint_to_load):
        # Orchestrates training/validation/testing via three ignite Engines.
        # `opts` and `lr_schedulers` are dicts keyed by parameter-group name,
        # enabling alternating optimization in _train_batch.
        self._module = module
        self._device = device
        self._train_metrics = train_metrics
        self._train_loader = train_loader
        self._opts = opts
        self._lr_schedulers = lr_schedulers
        self._max_epochs = max_epochs
        # May be None, in which case gradient clipping is disabled.
        self._max_grad_norm = max_grad_norm
        self._test_metrics = test_metrics
        self._test_loader = test_loader
        self._epochs_per_test = epochs_per_test
        self._valid_loss = valid_loss
        self._valid_loader = valid_loader
        self._max_bad_valid_epochs = max_bad_valid_epochs
        # Early-stopping state: best loss seen so far and consecutive non-improving epochs.
        self._best_valid_loss = float('inf')
        self._num_bad_valid_epochs = 0
        self._visualizer = visualizer
        self._writer = writer
        self._should_checkpoint_best_valid = should_checkpoint_best_valid
        self._trainer = Engine(self._train_batch)
        AverageMetric().attach(self._trainer)
        ProgressBar(persist=True).attach(self._trainer, list(self._opts.keys()))
        self._trainer.add_event_handler(Events.ITERATION_COMPLETED, TerminateOnNan())
        self._trainer.add_event_handler(Events.ITERATION_COMPLETED, self._log_training_info)
        if early_stopping:
            # The validator engine only exists when early stopping is enabled;
            # _validate is then run after every epoch.
            self._validator = Engine(self._validate_batch)
            AverageMetric().attach(self._validator)
            ProgressBar(persist=False, desc='Validating').attach(self._validator)
            self._trainer.add_event_handler(Events.EPOCH_COMPLETED, self._validate)
        self._tester = Engine(self._test_batch)
        AverageMetric().attach(self._tester)
        ProgressBar(persist=False, desc='Testing').attach(self._tester)
        self._trainer.add_event_handler(Events.EPOCH_COMPLETED, self._test_and_log)
        if should_checkpoint_latest:
            self._trainer.add_event_handler(Events.EPOCH_COMPLETED, (lambda _: self._save_checkpoint('latest')))
        try:
            # Resume from a prior checkpoint when available; a missing file is
            # reported but not fatal (training starts from scratch).
            self._load_checkpoint(checkpoint_to_load)
        except FileNotFoundError:
            print(f"Did not find `{checkpoint_to_load}' checkpoint.", file=sys.stderr)
    def train(self):
        # Runs the full training loop (validation/testing/checkpointing are
        # driven by the event handlers registered in __init__).
        self._trainer.run(data=self._train_loader, max_epochs=self._max_epochs)
    def test(self):
        # Evaluates the module on the test loader and returns the averaged metrics dict.
        self._module.eval()
        return self._tester.run(data=self._test_loader).metrics
    def _train_batch(self, engine, batch):
        # One alternating-optimization step over all parameter groups:
        # 1. enable grads and zero every optimizer,
        # 2. compute all losses in a single forward pass,
        # 3. backprop each loss into only its own parameter group,
        # 4. step every optimizer and scheduler.
        self._module.train()
        (x, _) = batch
        x = x.to(self._device)
        for (param_name, opt) in self._opts.items():
            self._set_requires_grad(param_name, True)
            opt.zero_grad()
        all_values = self._train_metrics(self._module, x)
        for (param_name, loss) in all_values['losses'].items():
            # Freeze all other groups so this loss's gradients only reach its
            # own parameters.
            self._isolate_params(param_name)
            loss.backward()
            self._clip_grad_norms(param_name)
        for (param_name, opt) in self._opts.items():
            opt.step()
            self._lr_schedulers[param_name].step()
        return {'metrics': all_values['losses']}
def _isolate_params(self, param_name):
for other_param_name in self._opts:
self._set_requires_grad(other_param_name, False)
self._set_requires_grad(param_name, True)
def _set_requires_grad(self, param_name, requires_grad):
for param in self._iter_params(param_name):
param.requires_grad = requires_grad
def _clip_grad_norms(self, param_name):
if (self._max_grad_norm is not None):
for param in self._iter_params(param_name):
torch.nn.utils.clip_grad_norm_(param, self._max_grad_norm)
def _iter_params(self, param_name):
    """Yield every parameter tensor managed by the named group's optimizer."""
    for group in self._opts[param_name].param_groups:
        yield from group['params']
@torch.no_grad()
def _test_and_log(self, engine):
    """Periodically evaluate on the test set, log metrics, and visualize."""
    epoch = engine.state.epoch
    # Test on the first epoch and every `_epochs_per_test` epochs thereafter.
    if (((epoch - 1) % self._epochs_per_test) == 0):
        for (k, v) in self.test().items():
            self._writer.write_scalar(f'test/{k}', v, global_step=engine.state.epoch)
            # Snapshot the model when a metric degenerates, for post-mortem debugging.
            if (not torch.isfinite(v)):
                self._save_checkpoint(tag='nan_during_test')
        # NOTE(review): visualization assumed to run only on test epochs —
        # indentation was ambiguous in the flattened source; confirm.
        self._visualizer.visualize(self._module, epoch)
def _test_batch(self, engine, batch):
    """Compute test metrics for a single batch (no parameter updates)."""
    (x, _) = batch
    x = x.to(self._device)
    return {'metrics': self._test_metrics(self._module, x)}
@torch.no_grad()
def _validate(self, engine):
    """Run validation after each epoch and apply early-stopping bookkeeping."""
    self._module.eval()
    state = self._validator.run(data=self._valid_loader)
    valid_loss = state.metrics['loss']
    if (valid_loss < self._best_valid_loss):
        print(f'Best validation loss {valid_loss} after epoch {engine.state.epoch}')
        self._num_bad_valid_epochs = 0
        self._best_valid_loss = valid_loss
        if self._should_checkpoint_best_valid:
            self._save_checkpoint(tag='best_valid')
    else:
        # Keep a snapshot when the loss degenerates, for debugging.
        if (not torch.isfinite(valid_loss)):
            self._save_checkpoint(tag='nan_during_validation')
        # Early stopping: terminate after too many epochs without improvement.
        self._num_bad_valid_epochs += 1
        if (self._num_bad_valid_epochs > self._max_bad_valid_epochs):
            print(f'No validation improvement after {self._num_bad_valid_epochs} epochs. Terminating.')
            self._trainer.terminate()
def _validate_batch(self, engine, batch):
    """Compute the validation loss for a single batch."""
    (x, _) = batch
    x = x.to(self._device)
    return {'metrics': {'loss': self._valid_loss(self._module, x)}}
def _log_training_info(self, engine):
    """Write train losses, grad norms and LRs at their configured intervals."""
    i = engine.state.iteration
    if ((i % self._STEPS_PER_LOSS_WRITE) == 0):
        for (k, v) in engine.state.output['metrics'].items():
            self._writer.write_scalar(f'train/{k}', v, global_step=i)
    if ((i % self._STEPS_PER_GRAD_WRITE) == 0):
        for param_name in self._opts:
            self._writer.write_scalar(f'train/grad-norm-{param_name}', self._get_grad_norm(param_name), global_step=i)
    if ((i % self._STEPS_PER_LR_WRITE) == 0):
        for param_name in self._opts:
            self._writer.write_scalar(f'train/lr-{param_name}', self._get_lr(param_name), global_step=i)
def _get_grad_norm(self, param_name):
    """Return the total L2 norm over all gradients in the named group."""
    # Parameters that have not received a gradient yet contribute zero.
    squared_total = sum(
        (tensor.grad.norm().item() ** 2)
        for tensor in self._iter_params(param_name)
        if tensor.grad is not None
    )
    return np.sqrt(squared_total)
def _get_lr(self, param_name):
    """Return the group's current learning rate; assumes exactly one param group."""
    (param_group,) = self._opts[param_name].param_groups
    return param_group['lr']
def _save_checkpoint(self, tag):
    """Persist the full training state (trainer progress, module, optimizers,
    LR schedulers, early-stopping counters) under the given tag."""
    checkpoint = {'epoch': self._trainer.state.epoch, 'iteration': self._trainer.state.iteration, 'module_state_dict': self._module.state_dict(), 'opt_state_dicts': {param_name: opt.state_dict() for (param_name, opt) in self._opts.items()}, 'lr_scheduler_state_dicts': self._get_lr_scheduler_state_dicts(), 'best_valid_loss': self._best_valid_loss, 'num_bad_valid_epochs': self._num_bad_valid_epochs}
    self._writer.write_checkpoint(tag, checkpoint)
def _get_lr_scheduler_state_dicts(self):
    """Return each scheduler's state dict, silencing PyTorch's save warning
    (the optimizer state is saved alongside in `_save_checkpoint`)."""
    with warnings.catch_warnings():
        warnings.filterwarnings('ignore', message='Please also save or load the state of the optimizer when saving or loading the scheduler.')
        return {param_name: lr_scheduler.state_dict() for (param_name, lr_scheduler) in self._lr_schedulers.items()}
def _load_checkpoint(self, tag):
    """Restore module/optimizer/scheduler state from the checkpoint named `tag`.

    Propagates FileNotFoundError from the writer if the checkpoint is missing.
    """
    checkpoint = self._writer.load_checkpoint(tag, device=self._device)
    # The engine's state only exists once the run starts, so epoch/iteration
    # are restored lazily via a STARTED handler.
    @self._trainer.on(Events.STARTED)
    def resume_trainer_state(engine):
        engine.state.epoch = checkpoint['epoch']
        engine.state.iteration = checkpoint['iteration']
    self._module.load_state_dict(checkpoint['module_state_dict'])
    for (param_name, state_dict) in checkpoint['opt_state_dicts'].items():
        self._opts[param_name].load_state_dict(state_dict)
    for (param_name, state_dict) in checkpoint['lr_scheduler_state_dicts'].items():
        self._lr_schedulers[param_name].load_state_dict(state_dict)
    self._best_valid_loss = checkpoint['best_valid_loss']
    self._num_bad_valid_epochs = checkpoint['num_bad_valid_epochs']
    print(f"Loaded checkpoint `{tag}' after epoch {checkpoint['epoch']}", file=sys.stderr)
|
class Tee():
    """Minimal file-like object that duplicates writes to two streams.

    Used to mirror stdout/stderr to a log file while still printing to the
    console. Exposes just enough of the text-IO interface (`encoding`,
    `isatty`, `fileno`, `write`, `flush`) for common callers.
    """
    def __init__(self, primary_file, secondary_file):
        self.primary_file = primary_file
        self.secondary_file = secondary_file
        # Report the primary stream's encoding so callers that inspect it
        # behave as if they were writing to the primary file directly.
        self.encoding = self.primary_file.encoding
    def isatty(self):
        return self.primary_file.isatty()
    def fileno(self):
        return self.primary_file.fileno()
    def write(self, data):
        # Accept bytes for robustness; decode before mirroring.
        if isinstance(data, bytes):
            data = data.decode()
        # Fix: return the primary stream's count, matching io.TextIOBase.write's
        # contract of returning the number of characters written (was None).
        written = self.primary_file.write(data)
        self.secondary_file.write(data)
        return written
    def flush(self):
        self.primary_file.flush()
        self.secondary_file.flush()
|
class Writer():
    """Experiment logger: wraps a TensorBoard SummaryWriter, mirrors
    stdout/stderr into files under the log directory, and saves/loads
    checkpoints atomically."""
    # Captured at class-definition time so the Tee wraps the original streams.
    _STDOUT = sys.stdout
    _STDERR = sys.stderr
    def __init__(self, logdir, make_subdir, tag_group):
        if make_subdir:
            os.makedirs(logdir, exist_ok=True)
            # Timestamped subdirectory keeps separate runs apart.
            timestamp = f"{datetime.datetime.now().strftime('%b%d_%H-%M-%S')}"
            logdir = os.path.join(logdir, timestamp)
        self._writer = SummaryWriter(logdir=logdir)
        # Guard against SummaryWriter silently choosing a different directory.
        assert (logdir == self._writer.logdir)
        self._logdir = logdir
        self._tag_group = tag_group
        # Tee stdout/stderr into files; line buffering so logs appear promptly.
        LINE_BUFFERING = 1
        sys.stdout = Tee(primary_file=self._STDOUT, secondary_file=open(os.path.join(logdir, 'stdout'), 'a', buffering=LINE_BUFFERING))
        sys.stderr = Tee(primary_file=self._STDERR, secondary_file=open(os.path.join(logdir, 'stderr'), 'a', buffering=LINE_BUFFERING))
    def write_scalar(self, tag, scalar_value, global_step=None):
        """Log a scalar under this writer's tag group."""
        self._writer.add_scalar(self._tag(tag), scalar_value, global_step=global_step)
    def write_image(self, tag, img_tensor, global_step=None):
        """Log an image tensor."""
        self._writer.add_image(self._tag(tag), img_tensor, global_step=global_step)
    def write_figure(self, tag, figure, global_step=None):
        """Log a matplotlib figure."""
        self._writer.add_figure(self._tag(tag), figure, global_step=global_step)
    def write_hparams(self, hparam_dict=None, metric_dict=None):
        """Log a hyperparameter/metric pairing (not tag-grouped)."""
        self._writer.add_hparams(hparam_dict=hparam_dict, metric_dict=metric_dict)
    def write_json(self, tag, data):
        """Log `data` as pretty-printed JSON to TensorBoard and to disk."""
        text = json.dumps(data, indent=4)
        # Indent every line by four spaces so TensorBoard renders a code block.
        self._writer.add_text(self._tag(tag), ((4 * ' ') + text.replace('\n', ('\n' + (4 * ' ')))))
        json_path = os.path.join(self._logdir, f'{tag}.json')
        with open(json_path, 'w') as f:
            f.write(text)
    def write_textfile(self, tag, text):
        """Write `text` to `<logdir>/<tag>.txt`."""
        path = os.path.join(self._logdir, f'{tag}.txt')
        with open(path, 'w') as f:
            f.write(text)
    def write_checkpoint(self, tag, data):
        """Atomically save `data` as the checkpoint named `tag`."""
        os.makedirs(self._checkpoints_dir, exist_ok=True)
        checkpoint_path = self._checkpoint_path(tag)
        # Write to a temp file then rename: os.replace is atomic, so a crash
        # mid-save cannot corrupt an existing checkpoint.
        tmp_checkpoint_path = os.path.join(os.path.dirname(checkpoint_path), f'{os.path.basename(checkpoint_path)}.tmp')
        torch.save(data, tmp_checkpoint_path)
        os.replace(tmp_checkpoint_path, checkpoint_path)
    def load_checkpoint(self, tag, device):
        """Load the checkpoint named `tag`, mapping tensors onto `device`."""
        return torch.load(self._checkpoint_path(tag), map_location=device)
    def _checkpoint_path(self, tag):
        return os.path.join(self._checkpoints_dir, f'{tag}.pt')
    @property
    def _checkpoints_dir(self):
        return os.path.join(self._logdir, 'checkpoints')
    def _tag(self, tag):
        # Namespace all tags by the writer's group, e.g. 'loss' -> 'train/loss'.
        return f'{self._tag_group}/{tag}'
|
class DummyWriter(Writer):
    """A no-op Writer for when logging is disabled: all writes are discarded,
    but checkpoints can still be loaded if a log directory is provided."""
    def __init__(self, logdir):
        # Deliberately skips Writer.__init__: no SummaryWriter, no stream teeing.
        self._logdir = logdir
    def write_scalar(self, tag, scalar_value, global_step=None):
        pass
    def write_image(self, tag, img_tensor, global_step=None):
        pass
    def write_figure(self, tag, figure, global_step=None):
        pass
    def write_hparams(self, hparam_dict=None, metric_dict=None):
        pass
    def write_json(self, tag, data):
        pass
    def write_textfile(self, tag, text):
        pass
    def write_checkpoint(self, tag, data):
        pass
    def load_checkpoint(self, tag, device):
        # With no logdir there is nothing to load; mimic a missing file so
        # callers' FileNotFoundError handling applies.
        if (self._logdir is None):
            raise FileNotFoundError
        else:
            return super().load_checkpoint(tag, device)
|
def get_config_group(dataset):
    """Return the name of the config group that registered `dataset`."""
    matches = [name for name, data in CONFIG_GROUPS.items() if dataset in data['datasets']]
    if matches:
        return matches[0]
    assert False, f"Dataset `{dataset}' not found"
|
def get_datasets():
    """Return every dataset registered across all config groups."""
    return [dataset for items in CONFIG_GROUPS.values() for dataset in items['datasets']]
|
def get_models():
    """Return every model name registered across all config groups."""
    return [model for items in CONFIG_GROUPS.values() for model in items['model_configs']]
|
def get_base_config(dataset, use_baseline):
    """Return the group-level base config dict for `dataset`."""
    return CONFIG_GROUPS[get_config_group(dataset)]['base_config'](dataset, use_baseline)
|
def get_model_config(dataset, model, use_baseline):
    """Return the model-specific config dict for (`dataset`, `model`)."""
    group = CONFIG_GROUPS[get_config_group(dataset)]
    return group['model_configs'][model](dataset, model, use_baseline)
|
def get_config(dataset, model, use_baseline):
    """Assemble the full config for (`dataset`, `model`), with model settings
    overriding base settings and baseline overrides applied last."""
    merged = dict(get_base_config(dataset, use_baseline))
    merged.update(get_model_config(dataset, model, use_baseline))
    if use_baseline:
        # Baselines have no auxiliary u-channels, so drop every coupler net spec.
        for key in ('s_nets', 't_nets', 'st_nets'):
            merged.pop(key, None)
        for prefix in ('p', 'q'):
            for suffix in ('', '_mu', '_sigma'):
                merged.pop(f'{prefix}{suffix}_nets', None)
        merged.update({'num_u_channels': 0, 'use_cond_affine': False, 'pure_cond_affine': False, 'num_valid_importance_samples': 1, 'num_test_importance_samples': 1})
    assert ('model' not in merged), "Should not specify `model' in config"
    assert ('dataset' not in merged), "Should not specify `dataset' in config"
    return {'model': model, 'dataset': dataset, **merged}
|
def expand_grid_generator(config):
    """Yield every concrete config in the cross-product implied by any
    GridParams values in `config` (recursing into nested dicts)."""
    if not config:
        yield {}
        return
    (head_key, head_val), *tail = config.items()
    for expanded_tail in expand_grid_generator(dict(tail)):
        if isinstance(head_val, GridParams):
            # One output config per alternative value of this axis.
            for choice in head_val:
                yield {head_key: choice, **expanded_tail}
        elif isinstance(head_val, dict):
            # Nested dicts are expanded recursively.
            for expanded_head in expand_grid_generator(head_val):
                yield {head_key: expanded_head, **expanded_tail}
        else:
            yield {head_key: head_val, **expanded_tail}
|
def expand_grid(config):
    """Materialize every grid-search configuration as a list."""
    return list(expand_grid_generator(config))
|
def group(group, datasets):
    """Register a new config group owning `datasets` and make it current.

    Each dataset may belong to at most one group; registration fails loudly
    on duplicates.
    """
    global CURRENT_CONFIG_GROUP
    assert (group not in CONFIG_GROUPS), f"Already exists group `{group}'"
    for dataset in datasets:
        for (existing_group, group_data) in CONFIG_GROUPS.items():
            # Fix: report the group that actually holds the dataset; previously
            # the message named the group being registered, which is misleading.
            assert (dataset not in group_data['datasets']), f"Dataset `{dataset}' already registered in group `{existing_group}'"
    CONFIG_GROUPS[group] = {'datasets': datasets, 'base_config': None, 'model_configs': {}}
    CURRENT_CONFIG_GROUP = group
|
def base(f):
    """Decorator: register `f` as the base config for the current group."""
    # Fix: match `provides` by failing clearly when no group is registered yet,
    # instead of raising a bare KeyError on a None group name.
    assert (CURRENT_CONFIG_GROUP is not None), 'Must register a config group first'
    assert (CONFIG_GROUPS[CURRENT_CONFIG_GROUP]['base_config'] is None), 'Already exists a base config'
    CONFIG_GROUPS[CURRENT_CONFIG_GROUP]['base_config'] = f
    return f
|
def provides(*models):
    """Decorator factory: register the wrapped function as the config builder
    for each model name in `models`, within the current group."""
    def register(f):
        assert (CURRENT_CONFIG_GROUP is not None), 'Must register a config group first'
        group_models = CONFIG_GROUPS[CURRENT_CONFIG_GROUP]['model_configs']
        for m in models:
            assert (m not in group_models), f"Already exists model `{m}' in group `{CURRENT_CONFIG_GROUP}'"
            group_models[m] = f
        return f
    return register
|
class GridParams():
    """Marker container holding the alternative values of one grid-search axis."""
    def __init__(self, *values):
        self.values = values
    def __iter__(self):
        return iter(self.values)
    def __repr__(self):
        inner = ', '.join(str(v) for v in self.values)
        return f'{self.__class__.__name__}({inner})'
|
@base
def config(dataset, use_baseline):
    """Base config for this group's datasets; no baseline variant exists here."""
    assert (not use_baseline), 'Cannot use baseline model for this config'
    return {'pure_cond_affine': False, 'dequantize': False, 'batch_norm': False, 'act_norm': False, 'max_epochs': 2000, 'max_grad_norm': None, 'early_stopping': True, 'max_bad_valid_epochs': 50, 'train_batch_size': 1000, 'valid_batch_size': 1000, 'test_batch_size': 10000, 'opt': 'adam', 'lr': 0.01, 'lr_schedule': 'none', 'weight_decay': 0.0, 'epochs_per_test': 5, 'train_objective': 'iwae', 'num_train_importance_samples': 10, 'num_valid_importance_samples': 10, 'num_test_importance_samples': 100}
|
@provides('vae')
def vae(dataset, model, use_baseline):
    """Config for a small Gaussian VAE with a learned-constant prior sigma."""
    return {'schema_type': 'gaussian-vae', 'use_cond_affine': False, 'num_z_channels': 1, 'p_mu_nets': [], 'p_sigma_nets': 'learned-constant', 'q_nets': [10, 10]}
|
@base
def config(dataset, use_baseline):
    """Base config for this (image) group; baseline variants get the affine
    batch-norm parameters."""
    return {'num_u_channels': 1, 'use_cond_affine': True, 'pure_cond_affine': False, 'dequantize': True, 'act_norm': False, 'batch_norm': True, 'batch_norm_apply_affine': use_baseline, 'batch_norm_use_running_averages': True, 'batch_norm_momentum': 0.1, 'lr_schedule': 'none', 'max_bad_valid_epochs': 50, 'max_grad_norm': None, 'max_epochs': 1000, 'epochs_per_test': 1, 'early_stopping': True, 'train_objective': 'iwae', 'num_train_importance_samples': 1, 'num_valid_importance_samples': 5, 'num_test_importance_samples': 10}
|
@provides('bernoulli-vae')
def bernoulli_vae(dataset, model, use_baseline):
    """Config for a Bernoulli-likelihood VAE on binarized inputs."""
    assert (not use_baseline), 'Cannot use baseline model for this config'
    return {'schema_type': 'bernoulli-vae', 'dequantize': False, 'binarize_scale': 255, 'logit_net': ([200] * 2), 'q_nets': ([200] * 2), 'num_z_channels': 50, 'train_batch_size': 100, 'valid_batch_size': 500, 'test_batch_size': 500, 'opt': 'adam', 'lr': 0.0001, 'weight_decay': 0.0}
|
@provides('realnvp')
def realnvp(dataset, model, use_baseline):
    """Config for multiscale RealNVP; adds logit preprocessing per dataset."""
    config = {
        'schema_type': 'multiscale-realnvp',
        # Baseline uses a deeper coupler to compensate for having no u-channels.
        'g_hidden_channels': ([64] * 8) if use_baseline else ([64] * 4),
        'st_nets': [8] * 2,
        'p_nets': [64] * 2,
        'q_nets': [64] * 2,
        'train_batch_size': 100,
        'valid_batch_size': 500,
        'test_batch_size': 500,
        'opt': 'adam',
        'lr': 0.0001,
        'weight_decay': 0.0,
    }
    if dataset in ('cifar10', 'svhn'):
        config.update(logit_tf_lambda=0.05, logit_tf_scale=256)
    elif dataset in ('mnist', 'fashion-mnist'):
        config.update(logit_tf_lambda=1e-06, logit_tf_scale=256)
    return config
|
@provides('glow')
def glow(dataset, model, use_baseline):
    """Config for Glow; the baseline uses a larger three-scale model."""
    assert (dataset in ['cifar10', 'svhn']), 'Currently only implemented for images of size 3x32x32'
    warnings.warn('Glow may quickly diverge for certain random seeds - if this happens just retry. This behaviour appears to be consistent with that in https://github.com/openai/glow and https://github.com/y0ast/Glow-PyTorch')
    if use_baseline:
        config = {'num_scales': 3, 'num_steps_per_scale': 32, 'g_num_hidden_channels': 512, 'valid_batch_size': 500, 'test_batch_size': 500}
    else:
        config = {'num_scales': 2, 'num_steps_per_scale': 32, 'g_num_hidden_channels': 256, 'st_nets': 64, 'p_nets': 128, 'q_nets': 128, 'valid_batch_size': 100, 'test_batch_size': 100}
    config.update({
        'schema_type': 'glow',
        'early_stopping': False,
        'train_batch_size': 64,
        'opt': 'adamax',
        'lr': 0.0005,
        'weight_decay': 0.1 if dataset in ['cifar10'] else 0.0,
        'centering_tf_scale': 256,
    })
    return config
|
@provides('resflow-small')
def resflow(dataset, model, use_baseline):
    """Config for a small multiscale residual flow on image datasets."""
    # Logit-transform lambda is dataset-dependent.
    logit_tf_lambda = {'mnist': 1e-06, 'fashion-mnist': 1e-06, 'cifar10': 0.05, 'svhn': 0.05}[dataset]
    return {'schema_type': 'multiscale-resflow', 'train_batch_size': 64, 'valid_batch_size': 128, 'test_batch_size': 128, 'epochs_per_test': 5, 'opt': 'adam', 'lr': 0.001, 'weight_decay': 0.0, 'logit_tf_lambda': logit_tf_lambda, 'logit_tf_scale': 256, 'batch_norm': False, 'act_norm': True, 'reduce_memory': True, 'scales': ([4] * 3), 'num_hidden_channels': 128, 'lipschitz_constant': 0.98, 'max_train_lipschitz_iters': None, 'max_test_lipschitz_iters': None, 'lipschitz_tolerance': 0.001, 'num_output_fc_blocks': 4, 'output_fc_hidden_channels': ([64] * 2), 'st_nets': ([32] * 2), 'p_nets': ([32] * 2), 'q_nets': ([32] * 2)}
|
def get_schema(config):
    """Build the full layer schema for `config`: base density layers, optional
    conditional-affine insertions, p/q coupler settings, preprocessing, and
    the chosen normalisation scheme."""
    schema = get_base_schema(config=config)
    if config['pure_cond_affine']:
        assert config['use_cond_affine']
        # Keep only 'normalise' placeholders; the density then comes purely
        # from the conditional affines inserted below.
        schema = remove_non_normalise_layers(schema=schema)
    if config['use_cond_affine']:
        assert (config['num_u_channels'] > 0)
        schema = add_cond_affine_before_each_normalise(schema=schema, config=config)
    schema = apply_pq_coupler_config_settings(schema=schema, config=config)
    schema = (get_preproc_schema(config=config) + schema)
    # 'normalise' placeholders become exactly one of batch-norm / act-norm,
    # or are removed entirely.
    assert (not (config['batch_norm'] and config['act_norm']))
    if config['batch_norm']:
        schema = replace_normalise_with_batch_norm(schema=schema, config=config)
    elif config['act_norm']:
        schema = replace_normalise_with_act_norm(schema=schema)
    else:
        schema = remove_normalise_layers(schema=schema)
    return schema
|
def get_preproc_schema(config):
    """Build the preprocessing layers implied by `config`: optional
    dequantization, then binarization, then either a logit transform or a
    centering transform (mutually exclusive)."""
    if config['dequantize']:
        schema = [{'type': 'dequantization'}]
    else:
        schema = []
    if (config.get('binarize_scale') is not None):
        schema += get_binarize_schema(config['binarize_scale'])
    if ((config.get('logit_tf_lambda') is not None) and (config.get('logit_tf_scale') is not None)):
        # Logit and centering transforms are alternatives, never combined.
        assert (config.get('centering_tf_scale') is None)
        schema += get_logit_tf_schema(lam=config['logit_tf_lambda'], scale=config['logit_tf_scale'])
    elif (config.get('centering_tf_scale') is not None):
        assert (config.get('logit_tf_lambda') is None)
        assert (config.get('logit_tf_scale') is None)
        schema += get_centering_tf_schema(scale=config['centering_tf_scale'])
    return schema
|
def get_base_schema(config):
    """Dispatch to the schema builder selected by config['schema_type']."""
    # Lambdas keep config-key access lazy, so only the chosen builder's keys
    # are required to be present in `config`.
    builders = {
        'multiscale-realnvp': lambda: get_multiscale_realnvp_schema(coupler_hidden_channels=config['g_hidden_channels']),
        'flat-realnvp': lambda: get_flat_realnvp_schema(config=config),
        'maf': lambda: get_maf_schema(num_density_layers=config['num_density_layers'], hidden_channels=config['ar_map_hidden_channels']),
        'sos': lambda: get_sos_schema(num_density_layers=config['num_density_layers'], hidden_channels=config['g_hidden_channels'], num_polynomials_per_layer=config['num_polynomials_per_layer'], polynomial_degree=config['polynomial_degree']),
        'nsf': lambda: get_nsf_schema(config=config),
        'bnaf': lambda: get_bnaf_schema(num_density_layers=config['num_density_layers'], num_hidden_layers=config['num_hidden_layers'], activation=config['activation'], hidden_channels_factor=config['hidden_channels_factor']),
        'glow': lambda: get_glow_schema(num_scales=config['num_scales'], num_steps_per_scale=config['num_steps_per_scale'], coupler_num_hidden_channels=config['g_num_hidden_channels'], lu_decomposition=True),
        'ffjord': lambda: get_ffjord_schema(num_density_layers=config['num_density_layers'], velocity_hidden_channels=config['hidden_channels'], numerical_tolerance=config['numerical_tolerance'], num_u_channels=config['num_u_channels']),
        'planar': lambda: get_planar_schema(config=config),
        'cond-affine': lambda: get_cond_affine_schema(config=config),
        'affine': lambda: get_affine_schema(config=config),
        'flat-resflow': lambda: get_flat_resflow_schema(config=config),
        'multiscale-resflow': lambda: get_multiscale_resflow_schema(config=config),
        'bernoulli-vae': lambda: get_bernoulli_vae_schema(config=config),
        'gaussian-vae': lambda: get_gaussian_vae_schema(config=config),
    }
    ty = config['schema_type']
    assert (ty in builders), f"Invalid schema type `{ty}'"
    return builders[ty]()
|
def remove_non_normalise_layers(schema):
    """Keep only the 'normalise' placeholder layers of `schema`."""
    return list(filter(lambda layer: layer['type'] == 'normalise', schema))
|
def remove_normalise_layers(schema):
    """Drop every 'normalise' placeholder layer from `schema`."""
    return list(filter(lambda layer: layer['type'] != 'normalise', schema))
|
def replace_normalise_with_batch_norm(schema, config):
    """Substitute each 'normalise' placeholder with a batch-norm layer.

    When running averages are disabled, momentum is forced to 1.0 (use only
    current-batch statistics) and a passthrough stage is prepended so
    statistics can be accumulated before evaluation.
    """
    if config['batch_norm_use_running_averages']:
        new_schema = []
        momentum = config['batch_norm_momentum']
    else:
        new_schema = [{'type': 'passthrough-before-eval', 'num_passthrough_data_points': 100000}]
        momentum = 1.0
    # Fix: use the hoisted local, which was previously assigned but unused.
    apply_affine = config['batch_norm_apply_affine']
    for layer in schema:
        if (layer['type'] == 'normalise'):
            new_schema.append({'type': 'batch-norm', 'per_channel': True, 'momentum': momentum, 'apply_affine': apply_affine})
        else:
            new_schema.append(layer)
    return new_schema
|
def replace_normalise_with_act_norm(schema):
    """Substitute each 'normalise' placeholder with an act-norm layer."""
    return [
        {'type': 'act-norm'} if layer['type'] == 'normalise' else layer
        for layer in schema
    ]
|
def add_cond_affine_before_each_normalise(schema, config):
    """Insert a cond-affine layer immediately before every 'normalise' layer.

    Tracks whether a 'flatten' layer has already occurred, so the inserted
    couplers are configured for flattened (vector) rather than spatial data.
    """
    new_schema = []
    seen_flatten = False
    for layer in schema:
        kind = layer['type']
        if kind == 'flatten':
            seen_flatten = True
        elif kind == 'normalise':
            new_schema.append(get_cond_affine_layer(config, seen_flatten))
        new_schema.append(layer)
    return new_schema
|
def apply_pq_coupler_config_settings(schema, config):
    """Attach p/q coupler configs to every layer with auxiliary u-channels.

    Layers are copied (not mutated); a 'flatten' layer switches subsequent
    couplers to their flattened-data variants.
    """
    result = []
    seen_flatten = False
    for layer in schema:
        if layer['type'] == 'flatten':
            seen_flatten = True
        if layer.get('num_u_channels', 0) > 0:
            layer = dict(layer, p_coupler=get_p_coupler_config(config, seen_flatten), q_coupler=get_q_coupler_config(config, seen_flatten))
        result.append(layer)
    return result
|
def get_binarize_schema(scale):
    """Preprocessing: a single 'binarize' layer with the given scale."""
    return [{'type': 'binarize', 'scale': scale}]
|
def get_logit_tf_schema(lam, scale):
    """Preprocessing: x -> logit(lam + x * (1 - 2*lam) / scale)."""
    return [{'type': 'scalar-mult', 'value': ((1 - (2 * lam)) / scale)}, {'type': 'scalar-add', 'value': lam}, {'type': 'logit'}]
|
def get_centering_tf_schema(scale):
    """Preprocessing: x -> x / scale - 0.5."""
    return [{'type': 'scalar-mult', 'value': (1 / scale)}, {'type': 'scalar-add', 'value': (- 0.5)}]
|
def get_cond_affine_layer(config, flattened):
    """Build one conditional-affine layer with its shift/scale coupler."""
    return {'type': 'cond-affine', 'num_u_channels': config['num_u_channels'], 'st_coupler': get_st_coupler_config(config, flattened)}
|
def get_st_coupler_config(config, flattened):
    """Coupler spec for the cond-affine shift/scale ('t'/'s'/'st' config keys)."""
    return get_coupler_config('t', 's', 'st', config, flattened)
|
def get_p_coupler_config(config, flattened):
    """Coupler spec for the p network ('p_mu'/'p_sigma'/'p' config keys)."""
    return get_coupler_config('p_mu', 'p_sigma', 'p', config, flattened)
|
def get_q_coupler_config(config, flattened):
    """Coupler spec for the q network ('q_mu'/'q_sigma'/'q' config keys)."""
    return get_coupler_config('q_mu', 'q_sigma', 'q', config, flattened)
|
def get_coupler_config(shift_prefix, log_scale_prefix, shift_log_scale_prefix, config, flattened):
    """Resolve a coupler spec from `config`: either two independent nets
    (shift + log-scale) or a single shared shift-log-scale net."""
    shift_key = f'{shift_prefix}_nets'
    log_scale_key = f'{log_scale_prefix}_nets'
    shift_log_scale_key = f'{shift_log_scale_prefix}_nets'
    has_shift = shift_key in config
    has_log_scale = log_scale_key in config
    has_shared = shift_log_scale_key in config
    if has_shift and has_log_scale:
        assert not has_shared, 'Over-specified coupler config'
        return {
            'independent_nets': True,
            'shift_net': get_coupler_net_config(config[shift_key], flattened),
            'log_scale_net': get_coupler_net_config(config[log_scale_key], flattened),
        }
    if has_shared:
        assert not (has_shift or has_log_scale), 'Over-specified coupler config'
        return {
            'independent_nets': False,
            'shift_log_scale_net': get_coupler_net_config(config[shift_log_scale_key], flattened),
        }
    assert False, f"Must specify either `{shift_log_scale_key}', or both `{shift_key}' and `{log_scale_key}'"
|
def get_coupler_net_config(net_spec, flattened):
    """Translate a compact net spec into a concrete net config dict.

    Specs: 'fixed-constant'/'learned-constant' -> constant output net;
    'identity' -> identity net; list of widths -> MLP (flattened data) or
    ResNet; int -> two-hidden-layer MLP (flattened) or Glow CNN.
    """
    if net_spec in ('fixed-constant', 'learned-constant'):
        return {'type': 'constant', 'value': 0, 'fixed': net_spec == 'fixed-constant'}
    if net_spec == 'identity':
        return {'type': 'identity'}
    if isinstance(net_spec, list):
        if flattened:
            return {'type': 'mlp', 'activation': 'tanh', 'hidden_channels': net_spec}
        return {'type': 'resnet', 'hidden_channels': net_spec}
    if isinstance(net_spec, int):
        if flattened:
            return {'type': 'mlp', 'activation': 'tanh', 'hidden_channels': [net_spec] * 2}
        return {'type': 'glow-cnn', 'num_hidden_channels': net_spec, 'zero_init_output': True}
    assert False, f'Invalid net specifier {net_spec}'
|
def get_multiscale_realnvp_schema(coupler_hidden_channels):
    """Multiscale RealNVP schema: checkerboard/split-channel ACL runs with a
    squeeze and a split, each ACL followed by a 'normalise' placeholder."""
    def acl(mask_type, reverse_mask):
        # Every ACL shares the same resnet coupler spec and has no u-channels.
        return {
            'type': 'acl',
            'mask_type': mask_type,
            'reverse_mask': reverse_mask,
            'num_u_channels': 0,
            'coupler': {
                'independent_nets': False,
                'shift_log_scale_net': {'type': 'resnet', 'hidden_channels': coupler_hidden_channels},
            },
        }
    layout = [
        ('checkerboard', False), ('checkerboard', True), ('checkerboard', False),
        'squeeze',
        ('split-channel', True), ('split-channel', False), ('split-channel', True),
        'split',
        ('checkerboard', False), ('checkerboard', True), ('checkerboard', False), ('checkerboard', True),
    ]
    schema = []
    for spec in layout:
        if spec == 'squeeze':
            schema.append({'type': 'squeeze', 'factor': 2})
        elif spec == 'split':
            schema.append({'type': 'split'})
        else:
            schema += [acl(*spec), {'type': 'normalise'}]
    return schema
|
def get_glow_schema(num_scales, num_steps_per_scale, coupler_num_hidden_channels, lu_decomposition):
    """Glow schema: per scale, (a split for scales after the first), a squeeze,
    then `num_steps_per_scale` x ['normalise' placeholder, invconv, ACL]."""
    def flow_step():
        return [
            {'type': 'normalise'},
            {'type': 'invconv', 'lu': lu_decomposition},
            {
                'type': 'acl',
                'mask_type': 'split-channel',
                'reverse_mask': False,
                'coupler': {
                    'independent_nets': False,
                    'shift_log_scale_net': {'type': 'glow-cnn', 'num_hidden_channels': coupler_num_hidden_channels, 'zero_init_output': True},
                },
                'num_u_channels': 0,
            },
        ]
    schema = []
    for scale in range(num_scales):
        if scale > 0:
            schema.append({'type': 'split'})
        schema.append({'type': 'squeeze', 'factor': 2})
        for _ in range(num_steps_per_scale):
            schema += flow_step()
    return schema
|
def get_flat_realnvp_schema(config):
    """Flat RealNVP: flatten, then alternating-channel ACLs each followed by a
    'normalise' placeholder; couplers either share one MLP or use separate
    shift/log-scale MLPs."""
    if config['coupler_shared_nets']:
        coupler = {
            'independent_nets': False,
            'shift_log_scale_net': {'type': 'mlp', 'hidden_channels': config['coupler_hidden_channels'], 'activation': 'tanh'},
        }
    else:
        coupler = {
            'independent_nets': True,
            'shift_net': {'type': 'mlp', 'hidden_channels': config['coupler_hidden_channels'], 'activation': 'relu'},
            'log_scale_net': {'type': 'mlp', 'hidden_channels': config['coupler_hidden_channels'], 'activation': 'tanh'},
        }
    schema = [{'type': 'flatten'}]
    for i in range(config['num_density_layers']):
        # Alternate the mask between consecutive layers.
        schema.append({'type': 'acl', 'mask_type': 'alternating-channel', 'reverse_mask': bool(i % 2), 'coupler': coupler, 'num_u_channels': 0})
        schema.append({'type': 'normalise'})
    return schema
|
def get_maf_schema(num_density_layers, hidden_channels):
    """MAF: flatten, then MADE + 'normalise' per layer, flipping the variable
    order between consecutive layers."""
    schema = [{'type': 'flatten'}]
    for layer_idx in range(num_density_layers):
        if layer_idx:
            schema.append({'type': 'flip'})
        schema.append({'type': 'made', 'hidden_channels': hidden_channels, 'activation': 'tanh'})
        schema.append({'type': 'normalise'})
    return schema
|
def get_sos_schema(num_density_layers, hidden_channels, num_polynomials_per_layer, polynomial_degree):
    """SOS flow: flatten, then SOS polynomial layers with 'normalise'
    placeholders, flipping the variable order between consecutive layers."""
    schema = [{'type': 'flatten'}]
    for layer_idx in range(num_density_layers):
        if layer_idx:
            schema.append({'type': 'flip'})
        schema.append({'type': 'sos', 'hidden_channels': hidden_channels, 'activation': 'tanh', 'num_polynomials': num_polynomials_per_layer, 'polynomial_degree': polynomial_degree})
        schema.append({'type': 'normalise'})
    return schema
|
def get_nsf_schema(config):
    """NSF: flatten; per layer a random permutation (plus an optional linear
    map), then a coupled or autoregressive spline and a 'normalise'
    placeholder; finished by one more permutation block."""
    def perm_block():
        # The linear map is included unless use_linear is explicitly False.
        if ('use_linear' in config) and (not config['use_linear']):
            return [{'type': 'rand-channel-perm'}]
        return [{'type': 'rand-channel-perm'}, {'type': 'linear'}]
    autoregressive = config['autoregressive']
    schema = [{'type': 'flatten'}]
    for i in range(config['num_density_layers']):
        schema += perm_block()
        layer = {
            'type': 'nsf-ar' if autoregressive else 'nsf-c',
            'num_hidden_channels': config['num_hidden_channels'],
            'num_hidden_layers': config['num_hidden_layers'],
            'num_bins': config['num_bins'],
            'tail_bound': config['tail_bound'],
            'activation': 'relu',
            'dropout_probability': config['dropout_probability'],
        }
        if not autoregressive:
            # Coupled variant alternates its mask between layers.
            layer['reverse_mask'] = (i % 2) == 0
        schema.append(layer)
        schema.append({'type': 'normalise'})
    schema += perm_block()
    return schema
|
def get_bnaf_schema(num_density_layers, num_hidden_layers, activation, hidden_channels_factor):
    """BNAF: flatten, then BNAF layers with 'normalise' placeholders, flipping
    between layers; all but the last BNAF layer are residual."""
    schema = [{'type': 'flatten'}]
    last = num_density_layers - 1
    for layer_idx in range(num_density_layers):
        if layer_idx:
            schema.append({'type': 'flip'})
        schema.append({'type': 'bnaf', 'num_hidden_layers': num_hidden_layers, 'hidden_channels_factor': hidden_channels_factor, 'activation': activation, 'residual': layer_idx < last})
        schema.append({'type': 'normalise'})
    return schema
|
def get_ffjord_schema(num_density_layers, velocity_hidden_channels, numerical_tolerance, num_u_channels):
    """FFJORD: flatten followed by `num_density_layers` ODE blocks."""
    # Build a distinct dict per layer so downstream mutation cannot alias them.
    ode_layers = [
        {'type': 'ode', 'hidden_channels': velocity_hidden_channels, 'numerical_tolerance': numerical_tolerance, 'num_u_channels': num_u_channels}
        for _ in range(num_density_layers)
    ]
    return [{'type': 'flatten'}] + ode_layers
|
def get_planar_schema(config):
    """Planar flow: flatten, then planar (or conditional planar, when
    u-channels are present) layers each followed by a 'normalise' placeholder."""
    if config['num_u_channels'] == 0:
        planar_layer = {'type': 'planar'}
    else:
        planar_layer = {'type': 'cond-planar', 'num_u_channels': config['num_u_channels'], 'cond_hidden_channels': config['cond_hidden_channels'], 'cond_activation': 'tanh'}
    schema = [{'type': 'flatten'}]
    for _ in range(config['num_density_layers']):
        schema += [planar_layer, {'type': 'normalise'}]
    return schema
|
def get_cond_affine_schema(config):
    """Pure conditional-affine model: flatten plus 'normalise' placeholders,
    which later steps turn into cond-affine layers."""
    return ([{'type': 'flatten'}] + ([{'type': 'normalise'}] * config['num_density_layers']))
|
def get_affine_schema(config):
    """Plain affine model: flatten plus unconditional affine layers."""
    return ([{'type': 'flatten'}] + ([{'type': 'affine', 'per_channel': False}] * config['num_density_layers']))
|
def get_flat_resflow_schema(config):
    """Flat residual flow: flatten, then MLP resblocks with 'normalise'
    placeholders; Lipschitz settings are copied into each resblock."""
    result = [{'type': 'flatten'}]
    for _ in range(config['num_density_layers']):
        result += [{'type': 'resblock', 'net': {'type': 'mlp', 'hidden_channels': config['hidden_channels']}}, {'type': 'normalise'}]
    add_lipschitz_config_to_resblocks(result, config)
    return result
|
def get_multiscale_resflow_schema(config):
    """Multiscale residual flow: CNN resblocks per scale (squeezing between
    scales), then a flatten and fully-connected output resblocks; Lipschitz
    settings are copied into each resblock."""
    result = []
    for (i, num_blocks) in enumerate(config['scales']):
        if (i == 0):
            result.append({'type': 'normalise'})
        else:
            result.append({'type': 'squeeze', 'factor': 2})
        for j in range(num_blocks):
            result += [{'type': 'resblock', 'net': {'type': 'cnn', 'num_hidden_channels': config['num_hidden_channels']}}, {'type': 'normalise'}]
    result.append({'type': 'flatten'})
    for _ in range(config['num_output_fc_blocks']):
        result += [{'type': 'resblock', 'net': {'type': 'mlp', 'hidden_channels': config['output_fc_hidden_channels']}}, {'type': 'normalise'}]
    add_lipschitz_config_to_resblocks(result, config)
    return result
|
def add_lipschitz_config_to_resblocks(schema, config):
    """Copy Lipschitz and memory settings from `config` into every resblock,
    mutating `schema` in place."""
    lipschitz_keys = ('lipschitz_constant', 'max_train_lipschitz_iters', 'max_test_lipschitz_iters', 'lipschitz_tolerance')
    for layer in schema:
        if layer['type'] != 'resblock':
            continue
        layer['net'].update((key, config[key]) for key in lipschitz_keys)
        layer['reduce_memory'] = config['reduce_memory']
|
def get_bernoulli_vae_schema(config):
    """Bernoulli VAE: flatten, then a Bernoulli likelihood with an MLP logit
    net and a q coupler built from the config's 'q*' keys."""
    return [{'type': 'flatten'}, {'type': 'bernoulli-likelihood', 'num_z_channels': config['num_z_channels'], 'logit_net': {'type': 'mlp', 'activation': 'tanh', 'hidden_channels': config['logit_net']}, 'q_coupler': get_q_coupler_config(config, flattened=True)}]
|
def get_gaussian_vae_schema(config):
    """Gaussian VAE: flatten, then a Gaussian likelihood with p and q couplers
    built from the config's 'p*'/'q*' keys."""
    return [{'type': 'flatten'}, {'type': 'gaussian-likelihood', 'num_z_channels': config['num_z_channels'], 'p_coupler': get_p_coupler_config(config, flattened=True), 'q_coupler': get_q_coupler_config(config, flattened=True)}]
|
@base
def config(dataset, use_baseline):
    """Base config for the tabular (UCI/BSDS) group; u-channel count is
    dataset-dependent."""
    num_u_channels = {'gas': 2, 'power': 2, 'hepmass': 5, 'miniboone': 10, 'bsds300': 15}[dataset]
    return {'num_u_channels': num_u_channels, 'use_cond_affine': True, 'pure_cond_affine': False, 'dequantize': False, 'act_norm': False, 'batch_norm': True, 'batch_norm_apply_affine': use_baseline, 'batch_norm_use_running_averages': False, 'early_stopping': True, 'train_batch_size': 1000, 'valid_batch_size': 5000, 'test_batch_size': 5000, 'opt': 'adam', 'lr': 0.001, 'lr_schedule': 'none', 'weight_decay': 0.0, 'max_bad_valid_epochs': 50, 'max_epochs': 2000, 'max_grad_norm': None, 'epochs_per_test': 5, 'train_objective': 'iwae', 'num_train_importance_samples': 1, 'num_valid_importance_samples': 5, 'num_test_importance_samples': 10}
|
@provides('resflow')
def resflow(dataset, model, use_baseline):
    """Config for a flat residual flow on tabular data."""
    config = {'schema_type': 'flat-resflow', 'num_density_layers': 10, 'hidden_channels': ([128] * 4), 'lipschitz_constant': 0.9, 'max_train_lipschitz_iters': 5, 'max_test_lipschitz_iters': 200, 'lipschitz_tolerance': None, 'reduce_memory': False, 'act_norm': False, 'batch_norm': False, 'st_nets': ([10] * 2), 'p_nets': ([10] * 2), 'q_nets': ([10] * 2)}
    # The non-baseline variant uses smaller eval batches.
    if (not use_baseline):
        config['valid_batch_size'] = 1000
        config['test_batch_size'] = 1000
    return config
|
@provides('cond-affine')
def cond_affine(dataset, model, use_baseline):
    """Config for a pure conditional-affine model; grid-searches over q nets."""
    assert (not use_baseline), 'Cannot use baseline model for this config'
    return {'schema_type': 'cond-affine', 'num_density_layers': 10, 'batch_norm': False, 'st_nets': ([128] * 2), 'p_nets': ([128] * 2), 'q_nets': GridParams(([10] * 2), ([100] * 4))}
|
@provides('linear-cond-affine-like-resflow')
def linear_cond_affine_like_resflow(dataset, model, use_baseline):
    """Cond-affine ablation with linear (identity-shift, constant-scale)
    couplers, sized comparably to the resflow configs."""
    assert (not use_baseline), 'Cannot use baseline model for this config'
    assert (dataset != 'bsds300'), 'BSDS300 has not yet been tested'
    # Dataset-dimension-matched u-channel counts.
    num_u_channels = {'miniboone': 43, 'hepmass': 21, 'gas': 8, 'power': 6}[dataset]
    config = {'schema_type': 'cond-affine', 'num_density_layers': 10, 'num_u_channels': num_u_channels, 'batch_norm': False, 's_nets': 'fixed-constant', 't_nets': 'identity', 'p_nets': ([128] * 4), 'q_nets': GridParams(([10] * 2), ([100] * 4)), 'valid_batch_size': 1000, 'test_batch_size': 1000}
    return config
|
@provides('nonlinear-cond-affine-like-resflow')
def nonlinear_cond_affine_like_resflow(dataset, model, use_baseline):
    """Cond-affine ablation with a nonlinear shift (MLP `t` net) but a fixed
    constant scale, sized comparably to the resflow configs."""
    assert (not use_baseline), 'Cannot use baseline model for this config'
    assert (dataset != 'bsds300'), 'BSDS300 has not yet been tested'
    # Dataset-dimension-matched u-channel counts.
    num_u_channels = {'miniboone': 43, 'hepmass': 21, 'gas': 8, 'power': 6}[dataset]
    config = {'schema_type': 'cond-affine', 'num_density_layers': 10, 'num_u_channels': num_u_channels, 'batch_norm': False, 's_nets': 'fixed-constant', 't_nets': ([128] * 2), 'p_nets': ([128] * 2), 'q_nets': GridParams(([10] * 2), ([100] * 4)), 'valid_batch_size': 1000, 'test_batch_size': 1000}
    return config
|
@provides('maf')
def maf(dataset, model, use_baseline):
    """Config for MAF on the tabular datasets; net sizes depend on dataset."""
    if (dataset in ['gas', 'power']):
        config = {'num_density_layers': 10, 'ar_map_hidden_channels': (([200] * 2) if use_baseline else ([100] * 2)), 'st_nets': ([100] * 2), 'p_nets': ([200] * 2), 'q_nets': ([200] * 2)}
    elif (dataset in ['hepmass', 'miniboone', 'bsds300']):
        config = {'num_density_layers': 10, 'ar_map_hidden_channels': ([512] * 2), 'st_nets': ([128] * 2), 'p_nets': ([128] * 2), 'q_nets': ([128] * 2)}
    else:
        # Fix: previously an unknown dataset fell through to a NameError on
        # `config`; fail with an explicit message instead.
        assert False, f"Unsupported dataset `{dataset}' for MAF"
    config['schema_type'] = 'maf'
    # Only the baseline keeps batch-norm between the MADE layers.
    config['batch_norm'] = use_baseline
    if (dataset == 'bsds300'):
        config['lr'] = 0.0001
    return config
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.