repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
DDoS | DDoS-master/models/srVAE/srVAE.py | from functools import partial
import numpy as np
import torch
import torch.nn as nn
from torchvision import transforms
from .backbone.densenet16x32 import *
from .priors.realnvp import RealNVP
# --------- Utility functions ---------
def get_shape(z_dim):
    """ Given the dimentionality of the latent space,
        re-shape it to an appropriate 3-D tensor of shape (C, 8, 8).

    Args:
        z_dim (int): flat dimensionality of the latent space.

    Returns:
        tuple: (C, H, W) with H == W == 8 and C * 64 == z_dim.

    Raises:
        ValueError: if z_dim is not a positive multiple of 64.
    """
    d = 8
    # z_dim must divide exactly into C x d x d. The original test
    # (`z_dim % d == 0`) accepted sizes like 72 and silently returned a
    # shape whose product differs from z_dim; require divisibility by d*d.
    if (z_dim % (d*d) == 0) and (z_dim // (d*d) > 0):  # cx8x8
        H = W = d
        C = z_dim // (d*d)
        return (C, H, W)
    # `raise "<str>"` is a TypeError in Python 3 -- raise a real exception.
    raise ValueError("Latent space can not mapped to a 3-D tensor. "
                     "Please choose another dimentionality (power of 2).")
# ----- Two Staged VAE -----
class srVAE(nn.Module):
    """
    Super-Resolution Variational Auto-Encoder (srVAE).
    A Two Staged Visual Processing Variational AutoEncoder.
    Author:
    Ioannis Gatopoulos.
    """
    def __init__(self, x_shape, y_shape=(3, 16, 16), u_dim=args.u_dim, z_dim=args.z_dim, prior=args.prior, device="cuda"):
        # NOTE(review): `args` is not among this module's visible imports;
        # presumably supplied elsewhere (e.g. src.utils.args) -- confirm.
        super().__init__()
        self.device = device
        self.x_shape = x_shape
        # y keeps x's channel count; only the spatial size comes from y_shape.
        self.y_shape = (x_shape[0], y_shape[1], y_shape[2])
        # Flat latent sizes are mapped to (C, 8, 8) tensors (see get_shape).
        self.u_shape = get_shape(u_dim)
        self.z_shape = get_shape(z_dim)
        # q(y|x): deterministic "compressed" transformation
        self.compressed_transform = transforms.Compose([
            transforms.ToPILImage(),
            transforms.Resize((self.y_shape[1], self.y_shape[2])),
            transforms.ToTensor()
        ])
        # p(u)
        # NOTE(review): the `prior` argument is never used; the u-prior is
        # always RealNVP regardless of its value -- confirm intent.
        self.p_u = RealNVP(self.u_shape)
        # q(u | y)
        self.q_u = q_u(self.u_shape, self.y_shape)
        # p(z | y)
        self.p_z = p_z(self.z_shape, (self.y_shape, self.u_shape))
        # q(z | x)
        self.q_z = q_z(self.z_shape, self.x_shape)
        # p(y | u)
        self.p_y = p_y(self.y_shape, self.u_shape)
        # p(x | y, z)
        self.p_x = p_x(self.x_shape, (self.y_shape, self.z_shape))
        # likelihood distribution
        # NOTE(review): dmol_loss / sample_from_dmol are not visibly imported
        # here; presumably discretized mixture-of-logistics helpers -- confirm.
        self.recon_loss = partial(dmol_loss)
        self.sample_distribution = partial(sample_from_dmol)
    def compressed_transoformation(self, input):
        """ y = f(x): deterministically downsample every image in the batch
        (via PIL on CPU), then restack the batch on self.device.
        """
        y = []
        for x in input:
            y.append(self.compressed_transform(x.cpu()))
        return torch.stack(y).to(self.device)
    def initialize(self, dataloader):
        """ Data dependent init for weight normalization
        (Automatically done during the first forward pass).
        """
        with torch.no_grad():
            x, _ = next(iter(dataloader))
            x = x.to(self.device)
            output = self.forward(x)
            self.calculate_elbo(x, output)
        return
    @staticmethod
    def reparameterize(z_mean, z_log_var):
        """ z ~ N(z| z_mu, z_logvar) """
        epsilon = torch.randn_like(z_mean)
        return z_mean + torch.exp(0.5*z_log_var)*epsilon
    @torch.no_grad()
    def generate(self, n_samples=20):
        """ Unconditional generation:
        u ~ p(u) -> y ~ p(y|u) -> z ~ p(z|y,u) -> x ~ p(x|y,z).
        Returns (x_hat, y_hat).
        """
        # u ~ p(u)
        u = self.p_u.sample(self.u_shape, n_samples=n_samples, device=self.device).to(self.device)
        # p(y|u)
        y_logits = self.p_y(u)
        y_hat = self.sample_distribution(y_logits, nc=self.y_shape[0])
        # z ~ p(z|y, u)
        z_p_mean, z_p_logvar = self.p_z((y_hat, u))
        z_p = self.reparameterize(z_p_mean, z_p_logvar)
        # x ~ p(x|y,z)
        x_logits = self.p_x((y_hat, z_p))
        x_hat = self.sample_distribution(x_logits, nc=self.x_shape[0])
        return x_hat, y_hat
    @torch.no_grad()
    def reconstruct(self, x, **kwargs):
        """ Run the full model on x; returns (y, y_hat, x_hat). """
        outputs = self.forward(x)
        y_hat = self.sample_distribution(outputs.get('y_logits'), nc=self.y_shape[0])
        x_hat = self.sample_distribution(outputs.get('x_logits'), nc=self.x_shape[0])
        return outputs.get('y'), y_hat, x_hat
    @torch.no_grad()
    def super_resolution(self, y):
        """ Super-resolve a low-resolution y into a full-resolution x sample. """
        # u ~ q(u| y)
        u_q_mean, u_q_logvar = self.q_u(y)
        u_q = self.reparameterize(u_q_mean, u_q_logvar)
        # z ~ p(z|y)
        z_p_mean, z_p_logvar = self.p_z((y, u_q))
        z_p = self.reparameterize(z_p_mean, z_p_logvar)
        # x ~ p(x|y,z)
        x_logits = self.p_x((y, z_p))
        # NOTE(review): unlike generate/reconstruct, no nc= is passed here;
        # presumably sample_from_dmol's default matches x's channels -- confirm.
        x_hat = self.sample_distribution(x_logits)
        return x_hat
    def calculate_elbo(self, x, outputs, **kwargs):
        """ Negative ELBO: -(RE_x + RE_y - KL_u - KL_z) averaged over the
        batch, plus a dict of per-term diagnostics (incl. bits-per-dim).
        """
        # unpack variables
        y, x_logits, y_logits = outputs.get('y'), outputs.get('x_logits'), outputs.get('y_logits')
        u_q, u_q_mean, u_q_logvar = outputs.get('u_q'), outputs.get('u_q_mean'), outputs.get('u_q_logvar')
        z_q, z_q_mean, z_q_logvar = outputs.get('z_q'), outputs.get('z_q_mean'), outputs.get('z_q_logvar')
        z_p_mean, z_p_logvar = outputs.get('z_p_mean'), outputs.get('z_p_logvar')
        # Reconstraction loss
        RE_x = self.recon_loss(x, x_logits, nc=self.x_shape[0])
        RE_y = self.recon_loss(y, y_logits, nc=self.y_shape[0])
        # Regularization loss
        # Monte-Carlo KL(q(u|y) || p(u)) with the RealNVP prior.
        log_p_u = self.p_u.log_p(u_q, dim=1)
        # NOTE(review): log_normal_diag is not visibly imported in this module.
        log_q_u = log_normal_diag(u_q, u_q_mean, u_q_logvar)
        KL_u = log_q_u - log_p_u
        # Monte-Carlo KL(q(z|x) || p(z|y,u)).
        log_p_z = log_normal_diag(z_q, z_p_mean, z_p_logvar)
        log_q_z = log_normal_diag(z_q, z_q_mean, z_q_logvar)
        KL_z = log_q_z - log_p_z
        # Total lower bound loss
        nelbo = - (RE_x + RE_y - KL_u - KL_z).mean()
        diagnostics = {
            "bpd" : (nelbo.item()) / (np.prod(x.shape[1:]) * np.log(2.)),
            "nelbo" : nelbo.item(),
            "RE" : - (RE_x + RE_y).mean().item(),
            "RE_x" : - RE_x.mean().item(),
            "RE_y" : - RE_y.mean().item(),
            "KL" : (KL_z + KL_u).mean().item(),
            "KL_u" : KL_u.mean().item(),
            "KL_z" : KL_z.mean().item(),
        }
        return nelbo, diagnostics
    def forward(self, x, **kwargs):
        """ Forward pass through the inference and the generative model. """
        # y ~ f(x) (determinist)
        y = self.compressed_transoformation(x)
        # u ~ q(u| y)
        u_q_mean, u_q_logvar = self.q_u(y)
        u_q = self.reparameterize(u_q_mean, u_q_logvar)
        # z ~ q(z| x, y)
        z_q_mean, z_q_logvar = self.q_z(x)
        z_q = self.reparameterize(z_q_mean, z_q_logvar)
        # x ~ p(x| y, z)
        x_logits = self.p_x((y, z_q))
        # y ~ p(y| u)
        y_logits = self.p_y(u_q)
        # z ~ p(z| y, u)  (conditional prior over z; conditions on y and u)
        z_p_mean, z_p_logvar = self.p_z((y, u_q))
        return {
            'u_q_mean' : u_q_mean,
            'u_q_logvar' : u_q_logvar,
            'u_q' : u_q,
            'z_q_mean' : z_q_mean,
            'z_q_logvar' : z_q_logvar,
            'z_q' : z_q,
            'z_p_mean' : z_p_mean,
            'z_p_logvar' : z_p_logvar,
            'y' : y,
            'y_logits' : y_logits,
            'x_logits' : x_logits
        }
if __name__ == "__main__":
pass
| 6,789 | 29.3125 | 122 | py |
DDoS | DDoS-master/models/srVAE/backbone/densenet16x32.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from src.modules.nn_layers import *
from src.modules.distributions import n_embenddings
from src.utils.args import args
class q_u(nn.Module):
    """ Encoder q(u|y): maps y to the mean and (clamped) log-variance of u. """
    def __init__(self, output_shape, input_shape):
        super().__init__()
        in_planes = input_shape[0]
        # Two heads (mean, log-variance) share one densely-connected encoder.
        out_planes = output_shape[0] * 2
        self.core_nn = nn.Sequential(
            DenselyEncoder(
                in_channels=in_planes,
                out_channels=out_planes,
                growth_rate=64,
                steps=3,
                scale_factor=1)
        )

    def forward(self, input):
        stats = self.core_nn(input)
        mu, logvar = stats.chunk(2, 1)
        # Clamp the log-variance for numerical stability.
        return mu, F.hardtanh(logvar, min_val=-7, max_val=7.)
class p_y(nn.Module):
    """ Decoder p(y|u): maps a u-latent to logits over y. """
    def __init__(self, output_shape, input_shape):
        super().__init__()
        in_planes = input_shape[0]
        # Output channels sized for the discretized-logistics parameterization.
        out_planes = n_embenddings(output_shape[0])
        self.core_nn = nn.Sequential(
            DenselyDecoder(
                in_channels=in_planes,
                out_channels=out_planes,
                growth_rate=128,
                steps=4,
                scale_factor=1)
        )

    def forward(self, input):
        return self.core_nn(input)
class q_z(nn.Module):
    """ Encoder q(z|x): maps x to the mean and (clamped) log-variance of z. """
    def __init__(self, output_shape, input_shape):
        super().__init__()
        in_planes = input_shape[0]
        # Two heads (mean, log-variance) share one downsampling dense encoder.
        out_planes = output_shape[0] * 2
        self.core_nn = nn.Sequential(
            DenselyEncoder(
                in_channels=in_planes,
                out_channels=out_planes,
                growth_rate=16,
                steps=4,
                scale_factor=2)
        )

    def forward(self, input):
        stats = self.core_nn(input)
        mu, logvar = stats.chunk(2, 1)
        # Clamp the log-variance for numerical stability.
        return mu, F.hardtanh(logvar, min_val=-7, max_val=7.)
class p_z(nn.Module):
    """ Conditional prior p(z| y, u): fuses a y-branch and a u-branch, then
    predicts the mean and (clamped) log-variance of z.
    """
    def __init__(self, output_shape, input_shape):
        super().__init__()
        y_planes, u_planes = input_shape[0][0], input_shape[1][0]
        out_planes = output_shape[0] * 2
        # y-branch: dense encoder followed by an ELU nonlinearity.
        self.y_nn = nn.Sequential(
            DenselyEncoder(
                in_channels=y_planes,
                out_channels=out_planes//2,
                growth_rate=32,
                steps=5,
                scale_factor=1),
            nn.ELU(inplace=True)
        )
        # u-branch: dense network with activation.
        self.u_nn = nn.Sequential(
            DenselyNetwork(
                in_channels=u_planes,
                out_channels=out_planes//2,
                growth_rate=64,
                steps=3,
                blocks=3,
                act=True)
        )
        # Joint trunk over the concatenated branch features.
        self.core_nn = nn.Sequential(
            DenselyNetwork(
                in_channels=out_planes,
                out_channels=out_planes,
                growth_rate=64,
                steps=3,
                blocks=3,
                act=None)
        )

    def forward(self, input):
        y, u = input[0], input[1]
        fused = torch.cat((self.y_nn(y), self.u_nn(u)), 1)
        mu, logvar = self.core_nn(fused).chunk(2, 1)
        # Clamp the log-variance for numerical stability.
        return mu, F.hardtanh(logvar, min_val=-7, max_val=7.)
class p_x(nn.Module):
    """ Decoder p(x| y, z): upsamples z, fuses it with a bilinearly upsampled
    y, and emits logits over x.
    """
    def __init__(self, output_shape, input_shape):
        super().__init__()
        y_planes, z_planes = input_shape[0][0], input_shape[1][0]
        # Output channels sized for the discretized-logistics parameterization.
        out_planes = n_embenddings(output_shape[0])
        # z-branch: upsampling dense decoder.
        self.z_nn = nn.Sequential(
            DenselyDecoder(
                in_channels=z_planes,
                out_channels=out_planes,
                growth_rate=64,
                steps=8,
                scale_factor=2)
        )
        # Joint trunk over [upsampled y (3 ch), z features].
        self.core_nn = nn.Sequential(
            DenselyNetwork(
                in_channels=out_planes + 3,
                out_channels=out_planes,
                growth_rate=64,
                steps=5,
                blocks=3,
                act=None)
        )

    def forward(self, input):
        y, z = input[0], input[1]
        # Bilinearly upsample y to the x resolution (32x32).
        y_up = F.interpolate(y, size=[32, 32], align_corners=False, mode='bilinear')
        fused = torch.cat((y_up, self.z_nn(z)), 1)
        return self.core_nn(fused)
if __name__ == "__main__":
pass
| 4,289 | 23.94186 | 85 | py |
DDoS | DDoS-master/models/srVAE/priors/mog.py | import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
from .prior import Prior
from src.modules.nn_layers import *
from src.modules.distributions import *
from src.utils import args
# Modified vertion of: https://github.com/divymurli/VAEs
class MixtureOfGaussians(Prior):
    """ Learnable, uniformly-weighted Mixture-of-Gaussians prior over a
    latent of shape `z_shape` (flattened internally to `z_dim`).
    """
    def __init__(self, z_shape, num_mixtures=1000):
        super().__init__()
        self.z_shape = z_shape
        self.z_dim = np.prod(z_shape)
        self.k = num_mixtures
        # Mixture of Gaussians prior
        # z_pre packs, per component, the mean and the pre-softplus variance:
        # shape (1, 2*k, z_dim); split later by `gaussian_parameters`.
        self.z_pre = torch.nn.Parameter(torch.randn(1, 2 * self.k, self.z_dim).to(args.device)
                                        / np.sqrt(self.k * self.z_dim))
        # Uniform weighting
        self.pi = torch.nn.Parameter(torch.ones(self.k).to(args.device) / self.k,
                                     requires_grad=False)
    def sample_gaussian(self, m, v):
        """ Element-wise application reparameterization trick to sample from Gaussian
        """
        sample = torch.randn(m.shape).to(args.device)
        z = m + (v**0.5)*sample
        return z
    def log_sum_exp(self, x, dim=0):
        """ Compute the log(sum(exp(x), dim)) in a numerically stable manner
        """
        # Subtract the per-slice max before exponentiating to avoid overflow.
        max_x = torch.max(x, dim)[0]
        new_x = x - max_x.unsqueeze(dim).expand_as(x)
        return max_x + (new_x.exp().sum(dim)).log()
    def log_mean_exp(self, x, dim):
        """ Compute the log(mean(exp(x), dim)) in a numerically stable manner
        """
        return self.log_sum_exp(x, dim) - np.log(x.size(dim))
    def log_normal(self, x, m, v):
        """ Computes the elem-wise log probability of a Gaussian and then sum over the
        last dim. Basically we're assuming all dims are batch dims except for the
        last dim.
        """
        const = -0.5 * x.size(-1) * torch.log(2*torch.tensor(np.pi))
        log_det = -0.5 * torch.sum(torch.log(v), dim = -1)
        log_exp = -0.5 * torch.sum((x - m)**2/v, dim = -1)
        log_prob = const + log_det + log_exp
        return log_prob
    def log_normal_mixture(self, z, m, v):
        """ Computes log probability of a uniformly-weighted Gaussian mixture.
        """
        # (B, z_dim) -> (B, 1, z_dim) so it broadcasts against the k components.
        z = z.view(z.shape[0], 1, -1)
        log_probs = self.log_normal(z, m, v)
        log_prob = self.log_mean_exp(log_probs, 1)
        return log_prob
    def gaussian_parameters(self, h, dim=-1):
        """ Split h into (mean, variance); softplus keeps the variance positive. """
        # NOTE(review): `F` (torch.nn.functional) is not imported explicitly in
        # this module -- presumably re-exported by a star import; confirm.
        m, h = torch.split(h, h.size(dim) // 2, dim=dim)
        v = F.softplus(h) + 1e-8
        return m, v
    def sample(self, n_samples=1, **kwargs):
        """ Draw n_samples: pick components ~ Categorical(pi), then sample them. """
        idx = torch.distributions.categorical.Categorical(self.pi).sample((n_samples,))
        m, v = self.gaussian_parameters(self.z_pre.squeeze(0), dim=0)
        m, v = m[idx], v[idx]
        z_samples = self.sample_gaussian(m, v)
        return z_samples.view(z_samples.shape[0], *self.z_shape)
    def log_p(self, z, **kwargs):
        """ Log-density of z under the mixture (delegates to forward). """
        return self.forward(z)
    def forward(self, z, dim=None, **kwargs):
        """
        Computes the mixture of Gaussian prior
        """
        m, v = self.gaussian_parameters(self.z_pre, dim=1)
        log_p_z = self.log_normal_mixture(z, m, v)
        return log_p_z
    def __str__(self):
        return "MixtureOfGaussians"
if __name__ == "__main__":
pass
| 3,267 | 32.010101 | 94 | py |
DDoS | DDoS-master/models/srVAE/priors/prior.py | import torch
import torch.nn as nn
class Prior(nn.Module):
    """ Abstract base class for latent priors.

    Subclasses must implement `sample`, `forward` and `__str__`;
    `log_p` delegates to `forward`.
    """
    def __init__(self):
        super().__init__()
    def sample(self, **kwargs):
        """ Draw samples from the prior. """
        raise NotImplementedError
    def log_p(self, input, **kwargs):
        """ Log-density of `input` under the prior. """
        # Bug fix: the original returned `self.forward(z)` where `z` is
        # undefined (NameError); forward the actual argument instead.
        return self.forward(input)
    def forward(self, input, **kwargs):
        raise NotImplementedError
    def __str__(self):
        raise NotImplementedError
if __name__ == "__main__":
pass
| 420 | 16.541667 | 39 | py |
DDoS | DDoS-master/models/srVAE/priors/standard_normal.py | import math
import torch
class StandardNormal:
    """ Isotropic standard Normal N(0, I) over tensors of shape z_shape. """
    def __init__(self, z_shape):
        self.z_shape = z_shape

    def sample(self, n_samples=1, **kwargs):
        """ Draw i.i.d. samples of shape (n_samples, *z_shape). """
        return torch.randn((n_samples, *self.z_shape))

    def log_p(self, z, **kwargs):
        """ Log-density of z (delegates to forward). """
        return self.forward(z)

    def forward(self, z, **kwargs):
        """ Outputs the log p(z), summed over all non-batch dims. """
        per_elem = z.pow(2) + math.log(2. * math.pi)
        flat = per_elem.view(z.size(0), -1)
        return -0.5 * flat.sum(dim=1)

    def __call__(self, z, **kwargs):
        return self.forward(z, **kwargs)

    def __str__(self):
        return "StandardNormal"
if __name__ == "__main__":
pass
| 681 | 21 | 67 | py |
DDoS | DDoS-master/models/srVAE/priors/realnvp/distributions/mog.py | import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
from src.modules.nn_layers import *
from src.modules.distributions import *
from src.utils import args
class MixtureOfGaussians(nn.Module):
    """ Learnable, uniformly-weighted Mixture-of-Gaussians distribution over a
    latent of shape `z_shape` (flattened internally to `z_dim`).
    """
    def __init__(self, z_shape, num_mixtures=10):
        super().__init__()
        self.z_shape = z_shape
        self.z_dim = np.prod(z_shape)
        self.k = num_mixtures
        # Mixture of Gaussians prior
        # z_pre packs, per component, the mean and the pre-softplus variance:
        # shape (1, 2*k, z_dim); split later by `gaussian_parameters`.
        self.z_pre = torch.nn.Parameter(torch.randn(1, 2 * self.k, self.z_dim).to(args.device)
                                        / np.sqrt(self.k * self.z_dim))
        # Uniform weighting
        self.pi = torch.nn.Parameter(torch.ones(self.k).to(args.device) / self.k,
                                     requires_grad=False)
    def sample_gaussian(self, m, v):
        """ Element-wise application reparameterization trick to sample from Gaussian
        """
        sample = torch.randn(m.shape).to(args.device)
        z = m + (v**0.5)*sample
        return z
    def log_sum_exp(self, x, dim=0):
        """ Compute the log(sum(exp(x), dim)) in a numerically stable manner
        """
        # Subtract the per-slice max before exponentiating to avoid overflow.
        max_x = torch.max(x, dim)[0]
        new_x = x - max_x.unsqueeze(dim).expand_as(x)
        return max_x + (new_x.exp().sum(dim)).log()
    def log_mean_exp(self, x, dim):
        """ Compute the log(mean(exp(x), dim)) in a numerically stable manner
        """
        return self.log_sum_exp(x, dim) - np.log(x.size(dim))
    def log_normal(self, x, m, v):
        """ Computes the elem-wise log probability of a Gaussian and then sum over the
        last dim. Basically we're assuming all dims are batch dims except for the
        last dim.
        """
        const = -0.5 * x.size(-1) * torch.log(2*torch.tensor(np.pi))
        log_det = -0.5 * torch.sum(torch.log(v), dim = -1)
        log_exp = -0.5 * torch.sum((x - m)**2/v, dim = -1)
        log_prob = const + log_det + log_exp
        return log_prob
    def log_normal_mixture(self, z, m, v):
        """ Computes log probability of a uniformly-weighted Gaussian mixture.
        """
        # (B, z_dim) -> (B, 1, z_dim) so it broadcasts against the k components.
        z = z.view(z.shape[0], 1, -1)
        log_probs = self.log_normal(z, m, v)
        log_prob = self.log_mean_exp(log_probs, 1)
        return log_prob
    def gaussian_parameters(self, h, dim=-1):
        """ Split h into (mean, variance); softplus keeps the variance positive. """
        # NOTE(review): `F` (torch.nn.functional) is not imported explicitly in
        # this module -- presumably re-exported by a star import; confirm.
        m, h = torch.split(h, h.size(dim) // 2, dim=dim)
        v = F.softplus(h) + 1e-8
        return m, v
    def sample(self, n_samples=1, **kwargs):
        """ Draw n_samples: pick components ~ Categorical(pi), then sample them. """
        idx = torch.distributions.categorical.Categorical(self.pi).sample((n_samples,))
        m, v = self.gaussian_parameters(self.z_pre.squeeze(0), dim=0)
        m, v = m[idx], v[idx]
        z_samples = self.sample_gaussian(m, v)
        return z_samples.view(z_samples.shape[0], *self.z_shape)
    def log_p(self, z, **kwargs):
        """ Log-density of z under the mixture (delegates to forward). """
        return self.forward(z)
    def forward(self, z, dim=None, **kwargs):
        """
        Computes the mixture of Gaussian prior
        """
        m, v = self.gaussian_parameters(self.z_pre, dim=1)
        log_p_z = self.log_normal_mixture(z, m, v)
        return log_p_z
    def __str__(self):
        return "MixtureOfGaussians"
if __name__ == "__main__":
pass
| 3,185 | 32.536842 | 94 | py |
DDoS | DDoS-master/models/srVAE/priors/realnvp/distributions/standard_normal.py | import math
import torch
import torch.nn as nn
class StandardNormal:
    """
    Isotropic Standard Normal distribution.
    """
    def __init__(self, z_shape):
        self.z_shape = z_shape

    def sample(self, n_samples=1, **kwargs):
        """ Draw i.i.d. N(0, I) samples of shape (n_samples, *z_shape). """
        return torch.randn((n_samples, *self.z_shape))

    def log_p(self, z, **kwargs):
        """ Log-density of z (delegates to forward). """
        return self.forward(z)

    def forward(self, z, **kwargs):
        """ Outputs the log p(z), summed over all non-batch dims. """
        per_elem = z.pow(2) + math.log(2. * math.pi)
        flat = per_elem.view(z.size(0), -1)
        return -0.5 * flat.sum(dim=1)

    def __call__(self, z, **kwargs):
        return self.forward(z, **kwargs)

    def __str__(self):
        return "StandardNormal"
if __name__ == "__main__":
pass
| 764 | 20.857143 | 67 | py |
DDoS | DDoS-master/models/srVAE/priors/realnvp/util/array_util.py | import torch
import torch.nn.functional as F
def squeeze_2x2(x, reverse=False, alt_order=False):
    """For each spatial position, a sub-volume of shape `1x1x(N^2 * C)`,
    reshape into a sub-volume of shape `NxNxC`, where `N = block_size`.
    Adapted from:
        https://github.com/tensorflow/models/blob/master/research/real_nvp/real_nvp_utils.py
    See Also:
        - TensorFlow nn.depth_to_space: https://www.tensorflow.org/api_docs/python/tf/nn/depth_to_space
        - Figure 3 of RealNVP paper: https://arxiv.org/abs/1605.08803
    Args:
        x (torch.Tensor): Input tensor of shape (B, C, H, W).
        reverse (bool): Whether to do a reverse squeeze (unsqueeze).
        alt_order (bool): Whether to use alternate ordering.
    Returns:
        torch.Tensor: squeezed tensor of shape (B, 4C, H/2, W/2), or the
        unsqueezed (B, C/4, 2H, 2W) tensor when `reverse` is True.
    Raises:
        ValueError: if the channel/spatial dims are not divisible as required.
    """
    block_size = 2
    if alt_order:
        n, c, h, w = x.size()
        if reverse:
            if c % 4 != 0:
                raise ValueError('Number of channels must be divisible by 4, got {}.'.format(c))
            c //= 4
        else:
            if h % 2 != 0:
                raise ValueError('Height must be divisible by 2, got {}.'.format(h))
            if w % 2 != 0:
                # Bug fix: the message previously said "divisible by 4"
                # although the check (correctly) tests divisibility by 2.
                raise ValueError('Width must be divisible by 2, got {}.'.format(w))
        # Defines permutation of input channels (shape is (4, 1, 2, 2)).
        squeeze_matrix = torch.tensor([[[[1., 0.], [0., 0.]]],
                                       [[[0., 0.], [0., 1.]]],
                                       [[[0., 1.], [0., 0.]]],
                                       [[[0., 0.], [1., 0.]]]],
                                      dtype=x.dtype,
                                      device=x.device)
        # Build a fixed (non-learned) conv kernel that performs the squeeze.
        perm_weight = torch.zeros((4 * c, c, 2, 2), dtype=x.dtype, device=x.device)
        for c_idx in range(c):
            slice_0 = slice(c_idx * 4, (c_idx + 1) * 4)
            slice_1 = slice(c_idx, c_idx + 1)
            perm_weight[slice_0, slice_1, :, :] = squeeze_matrix
        # Group output channels by sub-position rather than by input channel.
        shuffle_channels = torch.tensor([c_idx * 4 for c_idx in range(c)]
                                        + [c_idx * 4 + 1 for c_idx in range(c)]
                                        + [c_idx * 4 + 2 for c_idx in range(c)]
                                        + [c_idx * 4 + 3 for c_idx in range(c)])
        perm_weight = perm_weight[shuffle_channels, :, :, :]
        if reverse:
            x = F.conv_transpose2d(x, perm_weight, stride=2)
        else:
            x = F.conv2d(x, perm_weight, stride=2)
    else:
        b, c, h, w = x.size()
        x = x.permute(0, 2, 3, 1)
        if reverse:
            if c % 4 != 0:
                raise ValueError('Number of channels {} is not divisible by 4'.format(c))
            x = x.view(b, h, w, c // 4, 2, 2)
            x = x.permute(0, 1, 4, 2, 5, 3)
            x = x.contiguous().view(b, 2 * h, 2 * w, c // 4)
        else:
            if h % 2 != 0 or w % 2 != 0:
                raise ValueError('Expected even spatial dims HxW, got {}x{}'.format(h, w))
            x = x.view(b, h // 2, 2, w // 2, 2, c)
            x = x.permute(0, 1, 3, 5, 2, 4)
            x = x.contiguous().view(b, h // 2, w // 2, c * 4)
        x = x.permute(0, 3, 1, 2)
    return x
def checkerboard_mask(height, width, reverse=False, dtype=torch.float32,
                      device=None, requires_grad=False):
    """Get a checkerboard mask, such that no two adjacent entries have the
    same value. In the non-reversed mask, the top-left entry is 0.
    Args:
        height (int): Number of rows in the mask.
        width (int): Number of columns in the mask.
        reverse (bool): If True, reverse the mask (i.e., make top-left entry 1).
            Useful for alternating masks in RealNVP.
        dtype (torch.dtype): Data type of the tensor.
        device (torch.device): Device on which to construct the tensor.
        requires_grad (bool): Whether the tensor requires gradient.
    Returns:
        mask (torch.tensor): Checkerboard mask of shape (1, 1, height, width).
    """
    grid = [[(row + col) % 2 for col in range(width)] for row in range(height)]
    mask = torch.tensor(grid, dtype=dtype, device=device, requires_grad=requires_grad)
    if reverse:
        mask = 1 - mask
    # Reshape to (1, 1, H, W) so it broadcasts against (B, C, H, W) tensors.
    return mask.view(1, 1, height, width)
| 4,369 | 40.226415 | 103 | py |
DDoS | DDoS-master/models/srVAE/priors/realnvp/util/norm_util.py | import functools
import torch
import torch.nn as nn
def get_norm_layer(norm_type='instance'):
    """Return a constructor for the requested 2d normalization layer.

    Args:
        norm_type (str): 'batch' (affine BatchNorm2d) or 'instance'
            (non-affine InstanceNorm2d).

    Raises:
        NotImplementedError: for any other norm_type.
    """
    if norm_type == 'batch':
        return functools.partial(nn.BatchNorm2d, affine=True)
    if norm_type == 'instance':
        return functools.partial(nn.InstanceNorm2d, affine=False)
    raise NotImplementedError('Invalid normalization type: {}'.format(norm_type))
def get_param_groups(net, weight_decay, norm_suffix='weight_g', verbose=False):
    """Get two parameter groups from `net`: One named "normalized" which will
    override the optimizer with `weight_decay`, and one named "unnormalized"
    which will inherit all hyperparameters from the optimizer.
    Args:
        net (torch.nn.Module): Network to get parameters from
        weight_decay (float): Weight decay to apply to normalized weights.
        norm_suffix (str): Suffix to select weights that should be normalized.
            For WeightNorm, using 'weight_g' normalizes the scale variables.
        verbose (bool): Print out number of normalized and unnormalized parameters.
    """
    norm_params, unnorm_params = [], []
    for name, param in net.named_parameters():
        # Route each parameter by whether its name carries the norm suffix.
        target = norm_params if name.endswith(norm_suffix) else unnorm_params
        target.append(param)
    param_groups = [
        {'name': 'normalized', 'params': norm_params, 'weight_decay': weight_decay},
        {'name': 'unnormalized', 'params': unnorm_params},
    ]
    if verbose:
        print('{} normalized parameters'.format(len(norm_params)))
        print('{} unnormalized parameters'.format(len(unnorm_params)))
    return param_groups
class WNConv2d(nn.Module):
    """Weight-normalized 2d convolution.
    Args:
        in_channels (int): Number of channels in the input.
        out_channels (int): Number of channels in the output.
        kernel_size (int): Side length of each convolutional kernel.
        padding (int): Padding to add on edges of input.
        bias (bool): Use bias in the convolution operation.
    """
    def __init__(self, in_channels, out_channels, kernel_size, padding, bias=True):
        super(WNConv2d, self).__init__()
        conv = nn.Conv2d(in_channels, out_channels, kernel_size,
                         padding=padding, bias=bias)
        # Reparameterize the kernel into magnitude/direction via weight norm.
        self.conv = nn.utils.weight_norm(conv)

    def forward(self, x):
        return self.conv(x)
class BatchNormStats2d(nn.Module):
    """Compute BatchNorm2d normalization statistics: `mean` and `var`.
    Useful for keeping track of sum of log-determinant of Jacobians in flow models.
    Args:
        num_features (int): Number of features in the input (i.e., `C` in `(N, C, H, W)`).
        eps (float): Added to the denominator for numerical stability.
        decay (float): The value used for the running_mean and running_var computation.
            Different from conventional momentum, see `nn.BatchNorm2d` for more.
    """
    def __init__(self, num_features, eps=1e-5, decay=0.1):
        super(BatchNormStats2d, self).__init__()
        self.eps = eps
        self.register_buffer('running_mean', torch.zeros(num_features))
        self.register_buffer('running_var', torch.ones(num_features))
        self.decay = decay

    def forward(self, x, training):
        """Return per-channel (mean, var), each expanded to x's shape.

        Args:
            x (torch.Tensor): Input of shape (N, C, H, W).
            training (bool): If True, use batch statistics and update the
                running buffers; otherwise use the running statistics.
        """
        # Get mean and variance per channel
        if training:
            # Flatten each channel across batch and spatial dims.
            channels = x.transpose(0, 1).contiguous().view(x.size(1), -1)
            used_mean, used_var = channels.mean(-1), channels.var(-1)
            curr_mean, curr_var = used_mean, used_var
            # Update variables (exponential moving average of batch stats).
            self.running_mean = self.running_mean - self.decay * (self.running_mean - curr_mean)
            self.running_var = self.running_var - self.decay * (self.running_var - curr_var)
        else:
            used_mean = self.running_mean
            used_var = self.running_var
        # Bug fix: the original `used_var += self.eps` mutated the
        # `running_var` buffer in place on every eval call (used_var aliases
        # the buffer there), inflating it by eps each time. Add out-of-place.
        used_var = used_var + self.eps
        # Reshape to (N, C, H, W)
        used_mean = used_mean.view(1, x.size(1), 1, 1).expand_as(x)
        used_var = used_var.view(1, x.size(1), 1, 1).expand_as(x)
        return used_mean, used_var
| 4,052 | 37.971154 | 96 | py |
DDoS | DDoS-master/models/srVAE/priors/realnvp/model/real_nvp.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from .coupling_layer import CouplingLayer, MaskType
from ..util import squeeze_2x2
from ..distributions import StandardNormal
# Modified vertion of: https://github.com/chrischute/real-nvp
class RealNVP(nn.Module):
    """RealNVP Model
    Codebase from Chris Chute:
    https://github.com/chrischute/real-nvp
    Based on the paper:
    "Density estimation using Real NVP"
    by Laurent Dinh, Jascha Sohl-Dickstein, and Samy Bengio
    (https://arxiv.org/abs/1605.08803).
    Args:
        num_scales (int): Number of scales in the RealNVP model.
        in_channels (int): Number of channels in the input.
        mid_channels (int): Number of channels in the intermediate layers.
        num_blocks (int): Number of residual blocks in the s and t network of
        `Coupling` layers.
    """
    def __init__(self, input_shape, mid_channels=64, num_blocks=5, num_scales=2, prior='std_normal'):
        super().__init__()
        self.flows = _RealNVP(0, num_scales, input_shape[0], mid_channels, num_blocks)
        # self.nbits = 8.
        if prior=='std_normal':
            self.prior = StandardNormal(input_shape)
        elif prior=='mog':
            # NOTE(review): MixtureOfGaussians is not imported in this module
            # (only StandardNormal is); prior='mog' would raise NameError.
            self.prior = MixtureOfGaussians(input_shape)
    @torch.no_grad()
    def sample(self, z_shape, n_samples, device, **kwargs):
        """Sample from RealNVP model.
        Args:
            z_shape (tuple): accepted for interface compatibility; not used
                here (the prior already knows its shape).
            n_samples (int): Number of samples to generate.
            device (torch.device): Device to use.
        """
        # Sample in latent space, then invert the flow back to data space.
        z = self.prior.sample(n_samples).to(device)
        x, _ = self.forward(z, reverse=True)
        return x
    def log_p(self, x, **kwargs):
        """ returns the log likelihood.
        """
        # log p(x) = log p(z) + sum of log|det J| accumulated by the flow.
        z, sldj = self.forward(x, reverse=False)
        ll = (self.prior.log_p(z) + sldj)
        # prior_ll = -0.5 * (z ** 2 + np.log(2 * np.pi))
        # prior_ll = prior_ll.flatten(1).sum(-1) - np.log(2**self.nbits) * np.prod(z.size()[1:])
        # ll = prior_ll + sldj
        # ll = ll.mean()
        return ll
    def forward(self, x, reverse=False):
        """Run the flow.

        Returns:
            (x, sldj): transformed tensor and the summed log-determinant
            (initialized to 0 in the forward direction; stays None in reverse).
        """
        sldj = None
        if not reverse:
            sldj = 0 # we do not quintize !
            # quintize !
            # x = (x * (2**self.nbits - 1) + torch.rand_like(x)) / (2**self.nbits)
        x, sldj = self.flows(x, sldj, reverse)
        return x, sldj
class _RealNVP(nn.Module):
    """Recursive builder for a `RealNVP` model.
    Each `_RealNVPBuilder` corresponds to a single scale in `RealNVP`,
    and the constructor is recursively called to build a full `RealNVP` model.
    Args:
        scale_idx (int): Index of current scale.
        num_scales (int): Number of scales in the RealNVP model.
        in_channels (int): Number of channels in the input.
        mid_channels (int): Number of channels in the intermediate layers.
        num_blocks (int): Number of residual blocks in the s and t network of
        `Coupling` layers.
    """
    def __init__(self, scale_idx, num_scales, in_channels, mid_channels, num_blocks):
        super(_RealNVP, self).__init__()
        self.is_last_block = scale_idx == num_scales - 1
        # Checkerboard couplings with alternating masks at this resolution.
        self.in_couplings = nn.ModuleList([
            CouplingLayer(in_channels, mid_channels, num_blocks, MaskType.CHECKERBOARD, reverse_mask=False),
            CouplingLayer(in_channels, mid_channels, num_blocks, MaskType.CHECKERBOARD, reverse_mask=True),
            CouplingLayer(in_channels, mid_channels, num_blocks, MaskType.CHECKERBOARD, reverse_mask=False)
        ])
        if self.is_last_block:
            # Final scale gets a fourth checkerboard coupling instead of
            # channel-wise couplings and a recursive sub-block.
            self.in_couplings.append(
                CouplingLayer(in_channels, mid_channels, num_blocks, MaskType.CHECKERBOARD, reverse_mask=True))
        else:
            # After squeezing, channels quadruple -> channel-wise couplings.
            self.out_couplings = nn.ModuleList([
                CouplingLayer(4 * in_channels, 2 * mid_channels, num_blocks, MaskType.CHANNEL_WISE, reverse_mask=False),
                CouplingLayer(4 * in_channels, 2 * mid_channels, num_blocks, MaskType.CHANNEL_WISE, reverse_mask=True),
                CouplingLayer(4 * in_channels, 2 * mid_channels, num_blocks, MaskType.CHANNEL_WISE, reverse_mask=False)
            ])
            # Recurse: half the (squeezed) channels continue to the next scale.
            self.next_block = _RealNVP(scale_idx + 1, num_scales, 2 * in_channels, 2 * mid_channels, num_blocks)
    def forward(self, x, sldj, reverse=False):
        """Apply this scale's couplings (and recurse), in either direction.

        The reverse path mirrors the forward path exactly: each step below is
        the inverse of the corresponding forward step, applied in the
        opposite order.
        """
        if reverse:
            if not self.is_last_block:
                # Re-squeeze -> split -> next block
                x = squeeze_2x2(x, reverse=False, alt_order=True)
                x, x_split = x.chunk(2, dim=1)
                x, sldj = self.next_block(x, sldj, reverse)
                x = torch.cat((x, x_split), dim=1)
                x = squeeze_2x2(x, reverse=True, alt_order=True)
                # Squeeze -> 3x coupling (channel-wise)
                x = squeeze_2x2(x, reverse=False)
                for coupling in reversed(self.out_couplings):
                    x, sldj = coupling(x, sldj, reverse)
                x = squeeze_2x2(x, reverse=True)
            for coupling in reversed(self.in_couplings):
                x, sldj = coupling(x, sldj, reverse)
        else:
            for coupling in self.in_couplings:
                x, sldj = coupling(x, sldj, reverse)
            if not self.is_last_block:
                # Squeeze -> 3x coupling (channel-wise)
                x = squeeze_2x2(x, reverse=False)
                for coupling in self.out_couplings:
                    x, sldj = coupling(x, sldj, reverse)
                x = squeeze_2x2(x, reverse=True)
                # Re-squeeze -> split -> next block
                x = squeeze_2x2(x, reverse=False, alt_order=True)
                x, x_split = x.chunk(2, dim=1)
                x, sldj = self.next_block(x, sldj, reverse)
                x = torch.cat((x, x_split), dim=1)
                x = squeeze_2x2(x, reverse=True, alt_order=True)
        return x, sldj
| 5,949 | 37.636364 | 120 | py |
DDoS | DDoS-master/models/srVAE/priors/realnvp/model/coupling_layer.py | import torch
import torch.nn as nn
from enum import IntEnum
from ..util import checkerboard_mask
from src.modules.nn_layers import *
class MaskType(IntEnum):
    # Alternating-pixel (checkerboard) mask, used at full spatial resolution.
    CHECKERBOARD = 0
    # Mask over channel halves, used after squeezing.
    CHANNEL_WISE = 1
class CouplingLayer(nn.Module):
    """Coupling layer in RealNVP.
    Args:
        in_channels (int): Number of channels in the input.
        mid_channels (int): Number of channels in the `s` and `t` network.
        num_blocks (int): Number of residual blocks in the `s` and `t` network.
        mask_type (MaskType): One of `MaskType.CHECKERBOARD` or `MaskType.CHANNEL_WISE`.
        reverse_mask (bool): Whether to reverse the mask. Useful for alternating masks.
    """
    def __init__(self, in_channels, mid_channels, num_blocks, mask_type, reverse_mask):
        super(CouplingLayer, self).__init__()
        # Save mask info
        self.mask_type = mask_type
        self.reverse_mask = reverse_mask
        if self.mask_type == MaskType.CHANNEL_WISE:
            # Channel-wise coupling transforms only half the channels, so the
            # s/t network operates on half the input channels.
            in_channels //= 2
        # Build scale and translate network
        # NOTE(review): `mid_channels` and `num_blocks` are accepted but not
        # used by this DenseNet-based s/t network -- confirm this is intended.
        growth_rate, steps = 64, 5
        self.st_net = nn.Sequential(
            DenseNetLayer(inplanes=in_channels,
                          growth_rate=growth_rate,
                          steps=steps),
            Conv2d(in_channels + growth_rate*steps, 2*in_channels,
                   kernel_size=3, stride=1, padding=1)
        )
        # Learnable scale for s
        self.rescale = nn.utils.weight_norm(Rescale(in_channels))
    def forward(self, x, sldj=None, reverse=True):
        """Apply (or invert) the affine coupling transform.

        Args:
            x (torch.Tensor): Input of shape (B, C, H, W).
            sldj (torch.Tensor or int or None): Running sum of
                log-determinants. Must not be None when reverse=False, since
                the forward pass accumulates into it with `+=`.
            reverse (bool): If True, invert the transform (sampling path).

        Returns:
            (x, sldj): transformed tensor and updated log-det sum.
        """
        if self.mask_type == MaskType.CHECKERBOARD:
            # Checkerboard mask
            b = checkerboard_mask(x.size(2), x.size(3), self.reverse_mask, device=x.device)
            # Masked half conditions the s/t prediction for the other half.
            x_b = x * b
            st = self.st_net(x_b)
            s, t = st.chunk(2, dim=1)
            # tanh + learnable per-channel rescale keeps s bounded/stable.
            s = self.rescale(torch.tanh(s))
            # Restrict s and t to the complementary (transformed) positions.
            s = s * (1 - b)
            t = t * (1 - b)
            # Scale and translate
            if reverse:
                inv_exp_s = s.mul(-1).exp()
                if torch.isnan(inv_exp_s).any():
                    raise RuntimeError('Scale factor has NaN entries')
                x = x * inv_exp_s - t
            else:
                exp_s = s.exp()
                if torch.isnan(exp_s).any():
                    raise RuntimeError('Scale factor has NaN entries')
                x = (x + t) * exp_s
                # Add log-determinant of the Jacobian
                sldj += s.view(s.size(0), -1).sum(-1)
        else:
            # Channel-wise mask
            if self.reverse_mask:
                x_id, x_change = x.chunk(2, dim=1)
            else:
                x_change, x_id = x.chunk(2, dim=1)
            st = self.st_net(x_id)
            s, t = st.chunk(2, dim=1)
            s = self.rescale(torch.tanh(s))
            # Scale and translate
            if reverse:
                inv_exp_s = s.mul(-1).exp()
                if torch.isnan(inv_exp_s).any():
                    raise RuntimeError('Scale factor has NaN entries')
                x_change = x_change * inv_exp_s - t
            else:
                exp_s = s.exp()
                if torch.isnan(exp_s).any():
                    raise RuntimeError('Scale factor has NaN entries')
                x_change = (x_change + t) * exp_s
                # Add log-determinant of the Jacobian
                sldj += s.view(s.size(0), -1).sum(-1)
            if self.reverse_mask:
                x = torch.cat((x_id, x_change), dim=1)
            else:
                x = torch.cat((x_change, x_id), dim=1)
        return x, sldj
class Rescale(nn.Module):
    """Per-channel rescaling. Need a proper `nn.Module` so we can wrap it
    with `torch.nn.utils.weight_norm`.
    Args:
        num_channels (int): Number of channels in the input.
    """
    def __init__(self, num_channels):
        super(Rescale, self).__init__()
        # One learnable scale per channel, broadcast over H and W.
        self.weight = nn.Parameter(torch.ones(num_channels, 1, 1))

    def forward(self, x):
        return self.weight * x
| 4,031 | 32.04918 | 91 | py |
DDoS | DDoS-master/models/ShuffleUNet/icnr.py | import torch
import torch.nn as nn
def ICNR(tensor, upscale_factor=2, inizializer=nn.init.kaiming_normal_):
    """ICNR initialization for sub-pixel (pixel-shuffle) convolutions.

    Builds a kernel of `tensor`'s shape in which every group of
    `upscale_factor ** 2` output channels shares one initialized sub-kernel,
    so a following pixel shuffle starts out free of checkerboard artifacts.
    """
    scale_sq = upscale_factor ** 2
    # Initialize one sub-kernel with 1/scale_sq of the output channels.
    sub_shape = [int(tensor.shape[0] / scale_sq)] + list(tensor.shape[1:])
    sub = inizializer(torch.zeros(sub_shape))
    sub = sub.transpose(0, 1)
    flat = sub.contiguous().view(sub.shape[0], sub.shape[1], -1)
    # Tile each sub-kernel scale_sq times along the flattened spatial axis.
    tiled = flat.repeat(1, 1, scale_sq)
    swapped_shape = [tensor.shape[1]] + [tensor.shape[0]] + list(tensor.shape[2:])
    kernel = tiled.contiguous().view(swapped_shape)
    return kernel.transpose(0, 1)
| 708 | 31.227273 | 87 | py |
DDoS | DDoS-master/models/ShuffleUNet/pixel_shuffle.py | import torch.nn as nn
from . import icnr
def _pixel_shuffle(input, upscale_factor):
r"""Rearranges elements in a Tensor of shape :math:`(N, C, d_{1}, d_{2}, ..., d_{n})` to a
tensor of shape :math:`(N, C/(r^n), d_{1}*r, d_{2}*r, ..., d_{n}*r)`.
Where :math:`n` is the dimensionality of the data.
See :class:`~torch.nn.PixelShuffle` for details.
Args:
input (Variable): Input
upscale_factor (int): factor to increase spatial resolution by
Examples::
# 1D example
#>>> input = torch.Tensor(1, 4, 8)
#>>> output = F.pixel_shuffle(input, 2)
#>>> print(output.size())
torch.Size([1, 2, 16])
# 2D example
#>>> input = torch.Tensor(1, 9, 8, 8)
#>>> output = F.pixel_shuffle(input, 3)
#>>> print(output.size())
torch.Size([1, 1, 24, 24])
# 3D example
#>>> input = torch.Tensor(1, 8, 16, 16, 16)
#>>> output = F.pixel_shuffle(input, 2)
#>>> print(output.size())
torch.Size([1, 1, 32, 32, 32])
"""
input_size = list(input.size())
dimensionality = len(input_size) - 2
input_size[1] //= (upscale_factor ** dimensionality)
output_size = [dim * upscale_factor for dim in input_size[2:]]
input_view = input.contiguous().view(
input_size[0], input_size[1],
*(([upscale_factor] * dimensionality) + input_size[2:])
)
indicies = list(range(2, 2 + 2 * dimensionality))
indicies = indicies[1::2] + indicies[0::2]
shuffle_out = input_view.permute(0, 1, *(indicies[::-1])).contiguous()
return shuffle_out.view(input_size[0], input_size[1], *output_size)
class PixelShuffle(nn.Module):
    """Convolution followed by an N-D pixel shuffle with upscale factor 2.

    The convolution weights are ICNR-initialised so that the shuffle starts
    out as a nearest-neighbour upsampling.

    Args:
        in_c (int): input channels.
        out_c (int): conv output channels (divided by 2**d by the shuffle).
        kernel (int): convolution kernel size.
        stride (int): convolution stride.
        bias (bool): whether the convolution has a bias term.
        d (int): data dimensionality; 3 selects Conv3d, anything else Conv2d.
    """

    def __init__(self, in_c, out_c, kernel, stride, bias=True, d=3):
        super(PixelShuffle, self).__init__()
        conv_cls = nn.Conv3d if d == 3 else nn.Conv2d
        self.conv = conv_cls(in_c, out_c, kernel_size=kernel, stride=stride,
                             bias=bias, padding=kernel // 2)
        # ICNR initialisation to avoid checkerboard artefacts.
        self.icnr_weights = icnr.ICNR(self.conv.weight, 2)
        self.conv.weight.data.copy_(self.icnr_weights)

    def forward(self, x):
        return _pixel_shuffle(self.conv(x), 2)
| 2,276 | 35.142857 | 111 | py |
DDoS | DDoS-master/models/ShuffleUNet/net.py | import sys
import torch
import torch.nn as nn
from . import pixel_shuffle, pixel_unshuffle
# -------------------------------------------------------------------------------------------------------------------------------------------------##
class _double_conv(nn.Module):
"""
Double Convolution Block
"""
def __init__(self, in_channels, out_channels, k_size, stride, bias=True, conv_layer=nn.Conv3d):
super(_double_conv, self).__init__()
self.conv_1 = conv_layer(in_channels=in_channels, out_channels=out_channels, kernel_size=k_size,
stride=stride, padding=k_size // 2, bias=bias)
self.conv_2 = conv_layer(in_channels=out_channels, out_channels=out_channels, kernel_size=k_size,
stride=stride, padding=k_size // 2, bias=bias)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
x = self.conv_1(x)
x = self.relu((x))
x = self.conv_2(x)
x = self.relu((x))
return x
class _conv_decomp(nn.Module):
"""
Convolutional Decomposition Block
"""
def __init__(self, in_channels, out_channels, k_size, stride, bias=True, conv_layer=nn.Conv3d):
super(_conv_decomp, self).__init__()
self.conv1 = conv_layer(in_channels=in_channels, out_channels=out_channels, kernel_size=k_size,
stride=stride, padding=k_size // 2, bias=bias)
self.conv2 = conv_layer(in_channels=in_channels, out_channels=out_channels, kernel_size=k_size,
stride=stride, padding=k_size // 2, bias=bias)
self.conv3 = conv_layer(in_channels=in_channels, out_channels=out_channels, kernel_size=k_size,
stride=stride, padding=k_size // 2, bias=bias)
self.conv4 = conv_layer(in_channels=in_channels, out_channels=out_channels, kernel_size=k_size,
stride=stride, padding=k_size // 2, bias=bias)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
x1 = self.conv1(x)
x1 = self.relu((x1))
x2 = self.conv2(x)
x2 = self.relu((x2))
x3 = self.conv3(x)
x3 = self.relu((x3))
x4 = self.conv4(x)
x4 = self.relu((x4))
return x1, x2, x3, x4
class _concat(nn.Module):
"""
Skip-Addition block
"""
def __init__(self):
super(_concat, self).__init__()
def forward(self, e1, e2, e3, e4, d1, d2, d3, d4):
self.X1 = e1 + d1
self.X2 = e2 + d2
self.X3 = e3 + d3
self.X4 = e4 + d4
x = torch.cat([self.X1, self.X2, self.X3, self.X4], dim=1)
return x
# -------------------------------------------------------------------------------------------------------------------------------------------------##
class ShuffleUNet(nn.Module):
    """U-Net variant using pixel-unshuffle for downsampling, pixel-shuffle for
    upsampling, and learned convolutional decompositions as skip connections.

    Fixes over the previous revision: the final ``return`` line had been
    corrupted by stray metadata text, and a no-op ``num_features =
    num_features`` assignment was removed.

    Args:
        d (int): data dimensionality (3 for volumes, 2 for images).
        in_ch (int): number of input channels.
        num_features (int): feature maps at the first level (doubled per level).
        n_levels (int): number of encoder/decoder levels.
        out_ch (int): number of output channels.
        kernel_size (int): convolution kernel size.
        stride (int): convolution stride.
    """

    def __init__(self, d=3, in_ch=1, num_features=64, n_levels=3, out_ch=1, kernel_size=3, stride=1):
        super(ShuffleUNet, self).__init__()
        self.n_levels = n_levels
        # Feature widths per level: num_features, 2*num_features, 4*num_features, ...
        filters = [num_features]
        for _ in range(n_levels):
            filters.append(filters[-1] * 2)
        if d == 3:
            conv_layer = nn.Conv3d
            # Channel expansion handed to PixelShuffle; the N-D shuffle divides
            # channels by 2**d, so this factor lands the result on filters[i].
            ps_fact = (2 ** 2)
        elif d == 2:
            conv_layer = nn.Conv2d
            ps_fact = 2
        else:
            sys.exit("Invalid d")
        # Input stem
        self.conv_inp = _double_conv(in_ch, filters[0], kernel_size, stride, conv_layer=conv_layer)
        # Contraction path: decomposition -> pixel unshuffle -> double conv
        self.wave_down = nn.ModuleList()
        self.pix_unshuff = nn.ModuleList()
        self.conv_enc = nn.ModuleList()
        for i in range(0, n_levels):
            self.wave_down.append(_conv_decomp(filters[i], filters[i], kernel_size, stride, conv_layer=conv_layer))
            self.pix_unshuff.append(pixel_unshuffle.PixelUnshuffle(num_features * (2**i), num_features * (2**i), kernel_size, stride, d=d))
            self.conv_enc.append(_double_conv(filters[i], filters[i+1], kernel_size, stride, conv_layer=conv_layer))
        # Expansion path: pixel shuffle -> decomposition -> skip-add/concat -> double conv
        self.cat = _concat()
        self.pix_shuff = nn.ModuleList()
        self.wave_up = nn.ModuleList()
        self.convup = nn.ModuleList()
        for i in range(n_levels-1, -1, -1):
            self.pix_shuff.append(pixel_shuffle.PixelShuffle(num_features * (2**(i+1)), num_features * (2**(i+1)) * ps_fact, kernel_size, stride, d=d))
            self.wave_up.append(_conv_decomp(filters[i], filters[i], kernel_size, stride, conv_layer=conv_layer))
            # 5*filters[i] inputs: 4 concatenated decomposition sums + 1 encoder skip.
            self.convup.append(_double_conv(filters[i] * 5, filters[i], kernel_size, stride, conv_layer=conv_layer))
        # Final 1x1 projection to the output channels.
        self.out = conv_layer(filters[0], out_ch, kernel_size=1, stride=1, padding=0, bias=True)
        # He (Kaiming) initialisation for every convolution.
        # NOTE(review): this loop also overwrites the ICNR initialisation done
        # inside PixelShuffle/PixelUnshuffle — confirm this is intended.
        for m in self.modules():
            if isinstance(m, conv_layer):
                weight = nn.init.kaiming_normal_(m.weight, nonlinearity='relu')
                m.weight.data.copy_(weight)
                if m.bias is not None:
                    m.bias.data.zero_()

    def forward(self, x):
        """Run the encoder/decoder and return the output projection."""
        encs = [self.conv_inp(x)]
        waves = []
        for i in range(self.n_levels):
            waves.append(self.wave_down[i](encs[-1]))
            # Downsample only the last decomposition branch.
            _tmp = self.pix_unshuff[i](waves[-1][-1])
            encs.append(self.conv_enc[i](_tmp))
        dec = encs.pop()
        for i in range(self.n_levels):
            _tmp = self.pix_shuff[i](dec)
            # Tuple concatenation: 4 upsampled branches followed by the 4
            # stored encoder branches of the matching level.
            _tmp_waves = self.wave_up[i](_tmp) + waves.pop()
            _tmp_cat = self.cat(*_tmp_waves)
            dec = self.convup[i](torch.cat([encs.pop(), _tmp_cat], dim=1))
        return self.out(dec)
DDoS | DDoS-master/models/ShuffleUNet/pixel_unshuffle.py | import torch.nn as nn
from . import icnr
class _double_conv_3d(nn.Module):
"""
Convolution Block
"""
def __init__(self, in_channels, out_channels, k_size, stride, bias=True):
super(_double_conv_3d, self).__init__()
self.conv = nn.Sequential(
nn.Conv3d(in_channels=in_channels, out_channels=out_channels, kernel_size=k_size,
stride=stride, padding=k_size//2, bias=bias),
nn.BatchNorm3d(num_features=out_channels),
nn.ReLU(inplace=True),
nn.Conv3d(in_channels=out_channels, out_channels=out_channels, kernel_size=k_size,
stride=stride, padding=k_size//2, bias=bias),
nn.BatchNorm3d(num_features=out_channels),
nn.ReLU(inplace=True)
)
def forward(self, x):
x = self.conv(x)
return x
class _double_conv_2d(nn.Module):
"""
Convolution Block
"""
def __init__(self, in_channels, out_channels, k_size, stride, bias=True):
super(_double_conv_2d, self).__init__()
self.conv = nn.Sequential(
nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=k_size,
stride=stride, padding=k_size//2, bias=bias),
nn.BatchNorm2d(num_features=out_channels),
nn.ReLU(inplace=True),
nn.Conv2d(in_channels=out_channels, out_channels=out_channels, kernel_size=k_size,
stride=stride, padding=k_size//2, bias=bias),
nn.BatchNorm2d(num_features=out_channels),
nn.ReLU(inplace=True)
)
def forward(self, x):
x = self.conv(x)
return x
def _pixel_unshuffle_3d(input, upscale_factor):
r"""Rearranges elements in a Tensor of shape :math:(C, rH, rW) to a
tensor of shape :math:(*, r^2C, H, W).
written by: Zhaoyi Yan, https://github.com/Zhaoyi-Yan
and Kai Zhang, https://github.com/cszn/FFDNet
01/01/2019
"""
batch_size, channels, depth, in_height, in_width = input.size()
depth_final = depth // upscale_factor
out_height = in_height // upscale_factor
out_width = in_width // upscale_factor
input_view = input.contiguous().view(
batch_size, channels, depth_final, upscale_factor, out_height, upscale_factor,
out_width, upscale_factor)
channels *= upscale_factor ** 3
unshuffle_out = input_view.permute(0, 1, 3, 5, 7, 2, 4, 6).contiguous()
return unshuffle_out.view(batch_size, channels, depth_final, out_height, out_width)
def _pixel_unshuffle_2d(input, upscale_factor):
r"""Rearranges elements in a Tensor of shape :math:(C, rH, rW) to a
tensor of shape :math:(*, r^2C, H, W).
written by: Zhaoyi Yan, https://github.com/Zhaoyi-Yan
and Kai Zhang, https://github.com/cszn/FFDNet
01/01/2019
"""
batch_size, channels, in_height, in_width = input.size()
out_height = in_height // upscale_factor
out_width = in_width // upscale_factor
input_view = input.contiguous().view(
batch_size, channels, out_height, upscale_factor,
out_width, upscale_factor)
channels *= upscale_factor ** 2
unshuffle_out = input_view.permute(0, 1, 3, 5, 2, 4).contiguous()
return unshuffle_out.view(batch_size, channels, out_height, out_width)
class PixelUnshuffle(nn.Module):
    """ICNR-initialised convolution, pixel unshuffle, then a double-conv that
    maps the unshuffled channels back down to ``out_c``.

    Args:
        in_c (int): input channels.
        out_c (int): output channels.
        kernel (int): convolution kernel size.
        stride (int): convolution stride.
        bias (bool): whether the convolutions have bias terms.
        d (int): data dimensionality; 3 selects the 3-D path, else 2-D.
    """

    def __init__(self, in_c, out_c, kernel, stride, bias=True, d=3):
        super(PixelUnshuffle, self).__init__()
        if d == 3:
            conv_cls = nn.Conv3d
            dbl_conv = _double_conv_3d
            self.pu = _pixel_unshuffle_3d
            factor = 8   # unshuffle multiplies channels by 2**3
        else:
            conv_cls = nn.Conv2d
            dbl_conv = _double_conv_2d
            self.pu = _pixel_unshuffle_2d
            factor = 4   # unshuffle multiplies channels by 2**2
        self.conv = conv_cls(in_c, out_c, kernel_size=kernel, stride=stride,
                             bias=bias, padding=kernel // 2)
        self.down_conv = dbl_conv(out_c * factor, out_c, kernel, stride, bias)
        # ICNR initialisation of the convolution weights.
        self.icnr_weights = icnr.ICNR(self.conv.weight, 2)
        self.conv.weight.data.copy_(self.icnr_weights)

    def forward(self, x):
        return self.down_conv(self.pu(self.conv(x), 2))
| 4,185 | 37.054545 | 111 | py |
DDoS | DDoS-master/visualisation/num4trilinear.py | from glob import glob
import torch
from tqdm import tqdm
import os
import nibabel as nib
import numpy as np
import pandas as pd
import torch.nn.functional as F
from utils.utilities import calc_metircs
# Evaluate plain trilinear interpolation as a super-resolution baseline:
# upsample each undersampled volume to the fully-sampled grid and collect
# image-quality metrics into a CSV.
# Fix over the previous revision: the final to_csv line had been corrupted
# by stray metadata text appended to it.

# Roots of the fully-sampled (ground-truth) and undersampled dynamic test sets.
fully_root = "/mnt/MEMoRIAL/MEMoRIAL_SharedStorage_M1.2+4+7/Chompunuch/PhD/Data/3DDynTest/MickAbdomen3DDyn/DynProtocol3/Filtered/hrTestDynConST"
under_root = "/mnt/MEMoRIAL/MEMoRIAL_SharedStorage_M1.2+4+7/Chompunuch/PhD/Data/3DDynTest/MickAbdomen3DDyn/DynProtocol3/Filtered/usTestDynConST"
interp = "trilinear"

files = sorted(glob(f"{under_root}/**/*.nii.gz", recursive=True))
metrics = []
for f in tqdm(files):
    # Keep only centre-mask, unpadded volumes that are not the TP00 baseline.
    if ("Center" not in f and "Centre" not in f) or "TP00" in f or "WoPad" not in f:
        continue
    # Derive the matching fully-sampled path: same tree, minus the
    # undersampling-factor directory (3rd path component from the end).
    fully_parts = f.replace(under_root, fully_root).split(os.path.sep)
    undersampling = fully_parts[-3]
    del fully_parts[-3]
    f_fully = os.path.sep.join(fully_parts)
    vol_under = np.array(nib.load(f).get_fdata())
    vol_fully = np.array(nib.load(f_fully).get_fdata())
    # Max-normalise both volumes before comparison.
    vol_under /= vol_under.max()
    vol_fully /= vol_fully.max()
    # Trilinear upsampling of the undersampled volume onto the reference grid.
    vol_under = F.interpolate(torch.from_numpy(vol_under).unsqueeze(0).unsqueeze(0), size=vol_fully.shape, mode=interp, align_corners=False).squeeze().numpy()
    inp_metrics, inp_ssimMAP = calc_metircs(vol_fully, vol_under, tag="ZPad")
    inp_metrics["file"] = fully_parts[-2] + "_" + fully_parts[-1]
    inp_metrics["subject"] = fully_parts[-6] + "_" + fully_parts[-5]
    inp_metrics["undersampling"] = undersampling + "WoPad"
    inp_metrics["model"] = interp.capitalize()
    inp_metrics["DiffZPad"] = np.std(vol_fully - vol_under)
    metrics.append(inp_metrics)

df = pd.DataFrame.from_dict(metrics)
df.to_csv(f"{os.path.dirname(under_root)}/noprevnorm_metrics_{interp}.csv")
DDoS | DDoS-master/utils/elastic_transform.py | #!/usr/bin/env python
'''
Purpose :
'''
from numbers import Number
from typing import Optional, Tuple, Union
import numpy as np
import torch
import torch as th
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
__author__ = "Kartik Prabhu, Mahantesh Pattadkal, and Soumick Chatterjee"
__copyright__ = "Copyright 2022, Faculty of Computer Science, Otto von Guericke University Magdeburg, Germany"
__credits__ = ["Kartik Prabhu", "Mahantesh Pattadkal", "Soumick Chatterjee"]
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "Soumick Chatterjee"
__email__ = "soumick.chatterjee@ovgu.de"
__status__ = "Production"
from torchio.utils import to_tuple
try:
from torchio import RandomElasticDeformation
except:
from torchio.transforms.augmentation import RandomElasticDeformation
from torch.cuda.amp import autocast
from airlab import utils as tu
from airlab.transformation.pairwise import _KernelTransformation
from airlab.transformation.utils import compute_grid
from airlab.utils import kernelFunction as utils
SPLINE_ORDER = 3
"""
Warp image with displacement
* input(tensor) : input of shape (N, C, H_\text{in}, W_\text{in})(N,C,H,W) (4-D case)(N,C,D,H ,W) (5-D case)
* grid(tensor): flow-field of shape (N, H_\text{out}, W_\text{out}, 2)(N,H ,W,2) (4-D case) or (N, D, H, W, 3)(N,D,H,W,3) (5-D case)
* mult = true if batched input
"""
def warp_image(image, displacement, multi=False):
    """Warp ``image`` with a dense ``displacement`` field via grid sampling.

    Args:
        image: input tensor; with ``multi=True`` a batched (N, C, ...) tensor,
            otherwise an unbatched volume that gets batch/channel dims added.
        displacement: flow field added to the identity sampling grid.
        multi: True when the input is batched.

    Returns:
        The warped image of shape (N, C, ...).
    """
    full_size = image.size()
    # NOTE(review): for the unbatched path this is the first *spatial* dim,
    # not a batch size — confirm intended usage.
    n_batch = full_size[0]
    # For batched input the sampling grid covers only the spatial dims.
    spatial_size = full_size[2:] if multi else full_size
    sample_grid = compute_grid(spatial_size, dtype=image.dtype, device=image.device)
    sample_grid = displacement + sample_grid
    sample_grid = torch.cat([sample_grid] * n_batch, dim=0)
    if multi:
        return F.grid_sample(image, sample_grid)
    # Unbatched input: add batch and channel dimensions before sampling.
    return F.grid_sample(image.unsqueeze(0).unsqueeze(0), sample_grid)
"""
Base class for kernel transformations
"""
class _ParameterizedKernelTransformation(_KernelTransformation):
    """Kernel transformation whose control-point parameters can be randomly
    initialised from ``rnd_grid_params`` instead of starting at zero.

    Args:
        image_size: spatial size of the image the transformation acts on.
        rnd_grid_params: optional dict with keys ``max_displacement`` and
            ``num_locked_borders`` used to draw a random coarse field; when
            None the control-point parameters are initialised to zero.
        diffeomorphic: whether to use the diffeomorphic formulation.
        dtype: parameter dtype.
        device: parameter device.
    """

    def __init__(self, image_size, rnd_grid_params=None, diffeomorphic=False, dtype=th.float32, device='cpu'):
        super(_ParameterizedKernelTransformation, self).__init__(image_size, diffeomorphic, dtype, device)
        self.rnd_grid_params = rnd_grid_params

    def get_coarse_field(self,
                         grid_shape,
                         max_displacement,
                         num_locked_borders,
                         ):
        """Draw a random coarse displacement field of shape
        (1, dim, *grid_shape) with per-axis maximum displacement and the
        outermost ``num_locked_borders`` control points zeroed.
        """
        coarse_field = th.rand(self._dim, *grid_shape)  # [0, 1)
        coarse_field -= 0.5  # [-0.5, 0.5)
        coarse_field *= 2  # [-1, 1]
        # NOTE(review): iterates a fixed 3 axes; for 2-D fields
        # (self._dim == 2) this would index out of range — confirm the
        # class is only used with 3-D data.
        for dimension in range(3):
            # [-max_displacement, max_displacement)
            coarse_field[dimension, ...] *= max_displacement[dimension]
        # Set displacement to 0 at the borders
        for i in range(num_locked_borders):
            coarse_field[:, i, :] = 0
            coarse_field[:, -1 - i, :] = 0
            coarse_field[:, :, i] = 0
            coarse_field[:, :, -1 - i] = 0
        return coarse_field.unsqueeze(0)

    def _initialize(self):
        """Compute the control-point grid, padding/cropping bookkeeping, and
        create the (optionally random) transformation parameters.
        """
        # Number of control points needed to cover the image at this stride.
        cp_grid = np.ceil(np.divide(self._image_size, self._stride)).astype(dtype=int)

        # new image size after convolution
        inner_image_size = np.multiply(self._stride, cp_grid) - (self._stride - 1)

        # add one control point at each side
        cp_grid = cp_grid + 2

        # image size with additional control points
        new_image_size = np.multiply(self._stride, cp_grid) - (self._stride - 1)

        # center image between control points
        image_size_diff = inner_image_size - self._image_size
        image_size_diff_floor = np.floor((np.abs(image_size_diff)/2))*np.sign(image_size_diff)

        self._crop_start = image_size_diff_floor + np.remainder(image_size_diff, 2)*np.sign(image_size_diff)
        self._crop_end = image_size_diff_floor

        # create transformation parameters
        if self.rnd_grid_params is None:
            # Zero-initialised parameters (identity transformation).
            cp_grid = [1, self._dim] + cp_grid.tolist()
            self.trans_parameters = Parameter(th.Tensor(*cp_grid))
            self.trans_parameters.data.fill_(0)
        else:
            # Random coarse field drawn from the supplied parameters.
            self.trans_parameters = Parameter(self.get_coarse_field(cp_grid, self.rnd_grid_params['max_displacement'], self.rnd_grid_params['num_locked_borders']))

        # copy to gpu if needed
        self.to(dtype=self._dtype, device=self._device)

        # convert to integer
        self._padding = self._padding.astype(dtype=int).tolist()
        self._stride = self._stride.astype(dtype=int).tolist()

        self._crop_start = self._crop_start.astype(dtype=int)
        self._crop_end = self._crop_end.astype(dtype=int)

        size = [1, 1] + new_image_size.astype(dtype=int).tolist()
        self._displacement_tmp = th.empty(*size, dtype=self._dtype, device=self._device)

        size = [1, 1] + self._image_size.astype(dtype=int).tolist()
        self._displacement = th.empty(*size, dtype=self._dtype, device=self._device)
"""
bspline kernel transformation
"""
class ParameterizedBsplineTransformation(_ParameterizedKernelTransformation):
    """B-spline kernel transformation with optional random control-grid
    parameters (see :class:`_ParameterizedKernelTransformation`).

    Args:
        image_size: spatial size of the image the transformation acts on.
        sigma: control-point spacing per axis; becomes the kernel stride.
        rnd_grid_params: optional random-field parameters (see base class).
        diffeomorphic: whether to use the diffeomorphic formulation.
        order: B-spline order of the interpolation kernel.
        dtype: parameter dtype.
        device: parameter device.
    """

    def __init__(self, image_size, sigma, rnd_grid_params=None, diffeomorphic=False, order=2, dtype=th.float32, device='cpu'):
        super(ParameterizedBsplineTransformation, self).__init__(image_size, rnd_grid_params, diffeomorphic, dtype, device)

        self._stride = np.array(sigma)

        # compute bspline kernel
        self._kernel = utils.bspline_kernel(sigma, dim=self._dim, order=order, asTensor=True, dtype=dtype)

        self._padding = (np.array(self._kernel.size()) - 1) / 2

        # Add batch/channel dims, then replicate the kernel across dimensions.
        self._kernel.unsqueeze_(0).unsqueeze_(0)
        self._kernel = self._kernel.expand(self._dim, *((np.ones(self._dim + 1, dtype=int)*-1).tolist()))
        self._kernel = self._kernel.to(dtype=dtype, device=self._device)

        # Base-class hook: builds the control-point parameters.
        self._initialize()
class RandomElasticDeformation(nn.Module):
    """Random dense elastic deformation driven by a coarse B-spline grid.

    A random coarse displacement (one vector per control point) is
    interpolated with a cubic B-spline kernel to a dense diffeomorphic
    displacement field, which is then used to warp the input images.

    Fixes over the previous revision: the implicitly concatenated error
    message was missing a space ("controlpoints"), and the final ``return``
    line had been corrupted by stray metadata text.

    Args:
        num_control_points: control points per spatial axis (each must be an
            integer greater than 3).
        max_displacement: maximum absolute displacement at each control point
            per axis (each must be >= 0).
        locked_borders: number of border control points fixed to zero
            displacement (0, 1 or 2).

    Raises:
        ValueError: on invalid control points, displacements, or border count.
    """

    def __init__(
            self,
            num_control_points: Union[int, Tuple[int, int, int]] = 7,
            max_displacement: Union[float, Tuple[float, float, float]] = 7.5,
            locked_borders: int = 2,
            ):
        super().__init__()
        self.num_control_points = to_tuple(num_control_points, length=3)
        self.parse_control_points(self.num_control_points)
        self.max_displacement = to_tuple(max_displacement, length=3)
        self.parse_max_displacement(self.max_displacement)
        self.num_locked_borders = locked_borders
        if locked_borders not in (0, 1, 2):
            raise ValueError('locked_borders must be 0, 1, or 2')
        if locked_borders == 2 and 4 in self.num_control_points:
            message = (
                'Setting locked_borders to 2 and using less than 5 control'
                ' points results in an identity transform. Lock fewer borders'
                ' or use more control points.'
            )
            raise ValueError(message)
        # Forwarded to the B-spline transformation at call time.
        self.bspline_params = {'max_displacement': self.max_displacement, 'num_locked_borders': self.num_locked_borders}

    @staticmethod
    def parse_control_points(
            num_control_points: Tuple[int, int, int],
            ) -> None:
        """Validate that every axis has an integer number of control points > 3."""
        for axis, number in enumerate(num_control_points):
            if not isinstance(number, int) or number < 4:
                message = (
                    f'The number of control points for axis {axis} must be'
                    f' an integer greater than 3, not {number}'
                )
                raise ValueError(message)

    @staticmethod
    def parse_max_displacement(
            max_displacement: Tuple[float, float, float],
            ) -> None:
        """Validate that every axis has a non-negative maximum displacement."""
        for axis, number in enumerate(max_displacement):
            if not isinstance(number, Number) or number < 0:
                message = (
                    'The maximum displacement at each control point'
                    f' for axis {axis} must be'
                    f' a number greater or equal to 0, not {number}'
                )
                raise ValueError(message)

    def forward(self, images):
        """Warp ``images`` (shape [N, C, D, H, W] per the batched path of
        ``warp_image``) with a freshly drawn random deformation.

        Returns:
            Tuple of (warped images, displacement field, inverse displacement).
        """
        bspline_transform = ParameterizedBsplineTransformation(images.size()[2:],  # ignore batch and channel dim
                                                               sigma=self.num_control_points,
                                                               rnd_grid_params=self.bspline_params,
                                                               diffeomorphic=True,
                                                               order=SPLINE_ORDER,
                                                               device=images.device)
        displacement = bspline_transform.get_displacement()
        inv_displacement = bspline_transform.get_inverse_displacement()
        warped_images = warp_image(images, displacement, multi=True)
        return warped_images, displacement, inv_displacement
DDoS | DDoS-master/utils/datasets_dyn.py | # from __future__ import self.logger.debug_function, division
import fnmatch
import glob
import os
import sys
from random import randint, random, seed
import nibabel
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
import torchvision.transforms as transforms
from torch.utils.data import Dataset
from utils.customutils import createCenterRatioMask, performUndersampling
__author__ = "Soumick Chatterjee"
__copyright__ = "Copyright 2022, Faculty of Computer Science, Otto von Guericke University Magdeburg, Germany"
__credits__ = ["Soumick Chatterjee", "Chompunuch Sarasaen"]
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "Soumick Chatterjee"
__email__ = "soumick.chatterjee@ovgu.de"
__status__ = "Production"
torch.manual_seed(2020)
np.random.seed(2020)
seed(2020)
class SRDataset(Dataset):
    def __init__(self, logger, patch_size, dir_path, label_dir_path, stride_depth=16, stride_length=32, stride_width=32,
                 Size=4000, fly_under_percent=None, patch_size_us=None, return_coords=False, pad_patch=True, pre_interpolate=None, norm_data=True, pre_load=False, dyn=True, noncumulative=False):
        """Build the patch index for paired undersampled/fully-sampled NIfTI
        volumes.

        Scans ``dir_path`` (undersampled inputs) and pairs every file with its
        counterpart under ``label_dir_path`` (fully-sampled labels), records
        per-file metadata and all patch start indices in a DataFrame, and —
        when ``dyn`` — links each time point to the previous time point's
        ground truth.

        Args:
            logger: logger used for debug messages.
            patch_size: cubic patch edge length; -1 means whole volumes.
            dir_path: root of the (undersampled) input volumes.
            label_dir_path: root of the fully-sampled label volumes.
            stride_depth/stride_length/stride_width: patch strides (label grid).
            Size: optional cap on the number of index entries (random subset).
            fly_under_percent: if set, undersample on the fly with a centre
                mask of this ratio (mutually exclusive with patch_size_us).
            patch_size_us: patch size on the undersampled grid, when the input
                volumes are stored at a lower resolution.
            return_coords: whether __getitem__ should return patch coordinates.
            pad_patch: whether patches are padded.
            pre_interpolate: optional interpolation mode applied beforehand.
            norm_data: whether volumes are max-normalised.
            pre_load: keep full volumes in memory instead of lazy NIfTI reads.
            dyn: enable dynamic (time-point) pairing.
            noncumulative: when dynamic, always pair against the first time
                point instead of the immediately preceding one.
        """
        self.patch_size = patch_size #-1 = full vol
        self.stride_depth = stride_depth
        self.stride_length = stride_length
        self.stride_width = stride_width
        self.size = Size
        self.logger = logger
        self.fly_under_percent = fly_under_percent #if None, then use already undersampled data. Gets priority over patch_size_us. They are both mutually exclusive
        self.return_coords = return_coords
        self.pad_patch = pad_patch
        self.pre_interpolate = pre_interpolate
        # Derive strides on the undersampled grid from the resolution ratio.
        if patch_size == patch_size_us:
            patch_size_us = None
        if patch_size != -1 and patch_size_us is not None:
            stride_length_us = stride_length // (patch_size//patch_size_us)
            stride_width_us = stride_width // (patch_size//patch_size_us)
            self.stride_length_us = stride_length_us
            self.stride_width_us = stride_width_us
        elif patch_size == -1:
            patch_size_us = None
        if self.fly_under_percent is not None:
            patch_size_us = None
        self.patch_size_us = patch_size_us #If already downsampled data is supplied, then this can be used. Calculate already based on the downsampling size.
        self.norm_data = norm_data
        self.pre_load = pre_load
        self.dyn = dyn
        self.noncumulative = noncumulative
        self.pre_loaded_lbl = {}
        self.pre_loaded_img = {}
        if not self.norm_data:
            print("No Norm") #TODO remove

        # Constants: column names of the patch-index DataFrame.
        self.IMAGE_FILE_NAME = "imageFilename"
        self.IMAGE_FILE_SHAPE = "imageFileShape"
        self.IMAGE_FILE_MAXVAL = "imageFileMaxVal"
        self.LABEL_FILE_NAME = "labelFilename"
        self.LABEL_FILE_SHAPE = "labelFileShape"
        self.LABEL_FILE_MAXVAL = "labelFileMaxVal"
        self.LABEL_PREV_FILE_NAME = "labelPrevFilename"
        self.LABEL_PREV_FILE_SHAPE = "labelPrevFileShape"
        self.LABEL_PREV_FILE_MAXVAL = "labelPrevFileMaxVal"
        self.STARTINDEX_DEPTH = "startIndex_depth"
        self.STARTINDEX_LENGTH = "startIndex_length"
        self.STARTINDEX_WIDTH = "startIndex_width"
        self.STARTINDEX_DEPTH_US = "startIndex_depth_us"
        self.STARTINDEX_LENGTH_US = "startIndex_length_us"
        self.STARTINDEX_WIDTH_US = "startIndex_width_us"

        self.trans = transforms.ToTensor() # used to convert tiffimagefile to tensor
        dataDict = { self.IMAGE_FILE_NAME: [], self.IMAGE_FILE_SHAPE: [], self.IMAGE_FILE_MAXVAL:[], self.LABEL_FILE_NAME: [], self.LABEL_FILE_SHAPE: [], self.LABEL_FILE_MAXVAL:[], self.STARTINDEX_DEPTH: [],self.STARTINDEX_LENGTH: [],self.STARTINDEX_WIDTH: [],
                    self.STARTINDEX_DEPTH_US: [],self.STARTINDEX_LENGTH_US: [],self.STARTINDEX_WIDTH_US: []}
        column_names = [ self.IMAGE_FILE_NAME, self.IMAGE_FILE_SHAPE, self.IMAGE_FILE_MAXVAL, self.LABEL_FILE_NAME, self.LABEL_FILE_SHAPE, self.LABEL_FILE_MAXVAL, self.STARTINDEX_DEPTH, self.STARTINDEX_LENGTH,self.STARTINDEX_WIDTH,
                        self.STARTINDEX_DEPTH_US, self.STARTINDEX_LENGTH_US,self.STARTINDEX_WIDTH_US]
        self.data = pd.DataFrame(columns=column_names)

        # Collect all input NIfTI volumes (both extensions).
        files_us = glob.glob(dir_path+'/**/*.nii', recursive = True)
        files_us += glob.glob(dir_path+'/**/*.nii.gz', recursive = True)
        for imageFileName in files_us:
            labelFileName = imageFileName.replace(dir_path[:-1], label_dir_path[:-1]) #[:-1] is needed to remove the trailing slash for shitty windows
            if imageFileName == labelFileName:
                sys.exit('Input and Output save file')
            if not(os.path.isfile(imageFileName) and os.path.isfile(labelFileName)):
                #trick to include the other file extension
                if labelFileName.endswith('.nii.nii.gz'):
                    labelFileName = labelFileName.replace('.nii.nii.gz', '.nii.gz')
                elif labelFileName.endswith('.nii.gz'):
                    labelFileName = labelFileName.replace('.nii.gz', '.nii')
                else:
                    labelFileName = labelFileName.replace('.nii', '.nii.gz')
                #check again, after replacing the file extension
                if not(os.path.isfile(imageFileName) and os.path.isfile(labelFileName)):
                    self.logger.debug("skipping file as label for the corresponding image doesn't exist :"+ str(imageFileName))
                    continue
            # Read headers/data of both volumes; shapes are (L, W, D[, C]).
            imageFile = nibabel.load(imageFileName) # shape (Length X Width X Depth X Channels)
            header_shape_us = imageFile.header.get_data_shape()
            imageFile_data = imageFile.get_data()
            imageFile_max = imageFile_data.max()
            labelFile = nibabel.load(labelFileName) # shape (Length X Width X Depth X Channels) - changed to label file name as input image can have different (lower) size
            header_shape = labelFile.header.get_data_shape()
            labelFile_data = labelFile.get_data()
            labelFile_max = labelFile_data.max()
            self.logger.debug(header_shape)
            n_depth,n_length,n_width = header_shape[2],header_shape[0],header_shape[1] # gives depth which is no. of slices
            n_depth_us,n_length_us,n_width_us = header_shape_us[2],header_shape_us[0],header_shape_us[1] # gives depth which is no. of slices
            if self.pre_load:
                self.pre_loaded_img[imageFileName] = imageFile_data
                self.pre_loaded_lbl[labelFileName] = labelFile_data
            if patch_size!=1 and (n_depth<patch_size or n_length<patch_size or n_width<patch_size):
                self.logger.debug("skipping file because of its size being less than the patch size :"+ str(imageFileName))
                continue

            ############ Following the fully sampled size
            if patch_size != -1:
                depth_i = 0
                ranger_depth = int((n_depth-patch_size)/stride_depth)+1
                for depth_index in range(ranger_depth if n_depth%patch_size==0 else ranger_depth+1): # iterate through the whole image voxel, and extract patch
                    length_i = 0
                    # self.logger.debug("depth")
                    # self.logger.debug(depth_i)
                    ranger_length = int((n_length-patch_size)/stride_length)+1
                    for length_index in range(ranger_length if n_length%patch_size==0 else ranger_length+1):
                        width_i = 0
                        # self.logger.debug("length")
                        # self.logger.debug(length_i)
                        ranger_width = int((n_width - patch_size)/stride_width)+1
                        for width_index in range(ranger_width if n_width%patch_size==0 else ranger_width+1):
                            # self.logger.debug("width")
                            # self.logger.debug(width_i)
                            dataDict[self.IMAGE_FILE_NAME].append(imageFileName)
                            dataDict[self.IMAGE_FILE_SHAPE].append(header_shape_us)
                            dataDict[self.IMAGE_FILE_MAXVAL].append(imageFile_max)
                            dataDict[self.LABEL_FILE_NAME].append(labelFileName)
                            dataDict[self.LABEL_FILE_SHAPE].append(header_shape)
                            dataDict[self.LABEL_FILE_MAXVAL].append(labelFile_max)
                            dataDict[self.STARTINDEX_DEPTH].append(depth_i)
                            dataDict[self.STARTINDEX_LENGTH].append(length_i)
                            dataDict[self.STARTINDEX_WIDTH].append(width_i)
                            if patch_size_us is None: #data is zero padded
                                dataDict[self.STARTINDEX_DEPTH_US].append(depth_i)
                                dataDict[self.STARTINDEX_LENGTH_US].append(length_i)
                                dataDict[self.STARTINDEX_WIDTH_US].append(width_i)
                            width_i += stride_width
                        length_i += stride_length
                    depth_i += stride_depth
            else:
                # Whole-volume mode: a single entry per file, all indices zero.
                dataDict[self.IMAGE_FILE_NAME].append(imageFileName)
                dataDict[self.IMAGE_FILE_SHAPE].append(header_shape_us)
                dataDict[self.IMAGE_FILE_MAXVAL].append(imageFile_max)
                dataDict[self.LABEL_FILE_NAME].append(labelFileName)
                dataDict[self.LABEL_FILE_SHAPE].append(header_shape)
                dataDict[self.LABEL_FILE_MAXVAL].append(labelFile_max)
                dataDict[self.STARTINDEX_DEPTH].append(0)
                dataDict[self.STARTINDEX_LENGTH].append(0)
                dataDict[self.STARTINDEX_WIDTH].append(0)
                dataDict[self.STARTINDEX_DEPTH_US].append(0)
                dataDict[self.STARTINDEX_LENGTH_US].append(0)
                dataDict[self.STARTINDEX_WIDTH_US].append(0)

            ############ Following the undersampled size, only if patch_size_us has been provied
            if patch_size_us is not None:
                depth_i = 0
                ranger_depth = int((n_depth_us-patch_size_us)/stride_depth)+1
                for depth_index in range(ranger_depth if n_depth_us%patch_size_us==0 else ranger_depth+1): # iterate through the whole image voxel, and extract patch
                    length_i = 0
                    # self.logger.debug("depth")
                    # self.logger.debug(depth_i)
                    ranger_length = int((n_length_us-patch_size_us)/stride_length_us)+1
                    for length_index in range(ranger_length if n_length_us%patch_size_us==0 else ranger_length+1):
                        width_i = 0
                        # self.logger.debug("length")
                        # self.logger.debug(length_i)
                        ranger_width = int((n_width_us - patch_size_us)/stride_width_us)+1
                        for width_index in range(ranger_width if n_width_us%patch_size_us==0 else ranger_width+1):
                            # self.logger.debug("width")
                            # self.logger.debug(width_i)
                            dataDict[self.STARTINDEX_DEPTH_US].append(depth_i)
                            dataDict[self.STARTINDEX_LENGTH_US].append(length_i)
                            dataDict[self.STARTINDEX_WIDTH_US].append(width_i)
                            width_i += stride_width_us
                        length_i += stride_length_us
                    depth_i += stride_depth

        self.data = pd.DataFrame.from_dict(dataDict)
        self.logger.debug(len(self.data))

        if self.dyn:
            # Dynamic mode: link each time point (TP directory) to the ground
            # truth of the previous time point of the same subject/file.
            inp_dicts, files_inp = self._process_TPs(files_us)
            files_gt = glob.glob(label_dir_path+'/**/*.nii', recursive = True)
            files_gt += glob.glob(label_dir_path+'/**/*.nii.gz', recursive = True)
            gt_dicts, _ = self._process_TPs(files_gt)
            tp_dicts = []
            for filename in files_inp:
                inp_files = [d for d in inp_dicts if filename in d['filename']]
                gt_files = [d for d in gt_dicts if filename in d['filename']]
                # NOTE(review): list(set(...)) does not guarantee ordering;
                # pop(0) as "first time point" relies on CPython's int-set
                # iteration order — confirm (e.g. sorted(set(...)) would be safe).
                tps = list(set(dic["tp"] for dic in inp_files))
                tp_prev = tps.pop(0)
                for tp in tps:
                    # inp_tp_prev = [d for d in inp_files if tp_prev == d['tp']]
                    gt_tp_prev = [d for d in gt_files if tp_prev == d['tp']]
                    inp_tp = [d for d in inp_files if tp == d['tp']]
                    # gt_tp = [d for d in gt_files if tp == d['tp']]
                    # Cumulative mode advances the "previous" pointer; the
                    # noncumulative mode always pairs with the first TP.
                    tp_prev = tp if not self.noncumulative else tp_prev
                    gt_tp_prev_datum = self.data[self.data[self.LABEL_FILE_NAME] == gt_tp_prev[0]['path']]
                    tp_dict = {
                        self.LABEL_PREV_FILE_NAME: gt_tp_prev[0]['path'],
                        self.LABEL_PREV_FILE_MAXVAL: gt_tp_prev_datum[self.LABEL_FILE_MAXVAL].iloc[0],
                        self.LABEL_PREV_FILE_SHAPE: gt_tp_prev_datum[self.LABEL_FILE_SHAPE].iloc[0],
                        # "inp_tp_prev": inp_tp_prev[0]['path'],
                        # "gt": gt_tp[0]['path'],
                        "inp_tpkey": inp_tp[0]['path'],
                        "subject_filename": filename,
                        "tpID":tp
                    }
                    tp_dicts.append(tp_dict)
            self.tp_data = pd.DataFrame.from_dict(tp_dicts)
            # Attach previous-TP info to every patch row of the matching input file.
            self.data = pd.merge(self.tp_data, self.data, how="left", left_on="inp_tpkey", right_on=self.IMAGE_FILE_NAME)

        if Size is not None and len(self.data) > Size:
            self.logger.debug('Dataset is larger tham supplied size. Choosing s subset randomly of size '+str(Size))
            self.data = self.data.sample(n = Size, replace = False, random_state=2020)

        if patch_size!=-1 and fly_under_percent is not None:
            # Fixed centre-ratio k-space mask for on-the-fly undersampling.
            self.mask = createCenterRatioMask(np.zeros((patch_size,patch_size,patch_size)), fly_under_percent)
def _process_TPs(self, files):
f_dicts = []
for f in files:
f_info = {"path": f}
f_parts = os.path.normpath(f).split(os.sep)
tp = fnmatch.filter(f_parts, "TP*")[0]
f_info["filename"] = "_".join(f_parts[f_parts.index(tp)+1:])
f_info["tp"] = int(tp[2:])
f_dicts.append(f_info)
f_dicts = sorted(f_dicts, key=lambda k: k['tp'])
filenames = list(set(dic["filename"] for dic in f_dicts))
return f_dicts, filenames
    def __len__(self):
        """Return the number of patch entries in the dataset index."""
        return len(self.data)
def __getitem__(self, index):
imageFile_max = self.data.iloc[index][self.IMAGE_FILE_MAXVAL]
labelFile_max = self.data.iloc[index][self.LABEL_FILE_MAXVAL]
if self.pre_load:
groundTruthImages = self.pre_loaded_lbl[self.data.iloc[index][self.LABEL_FILE_NAME]]
groundTruthImages_handler = groundTruthImages
else:
groundTruthImages = nibabel.load(self.data.iloc[index][self.LABEL_FILE_NAME])
groundTruthImages_handler = groundTruthImages.dataobj
startIndex_depth = self.data.iloc[index][self.STARTINDEX_DEPTH]
startIndex_length = self.data.iloc[index][self.STARTINDEX_LENGTH]
startIndex_width = self.data.iloc[index][self.STARTINDEX_WIDTH]
start_coords = [(startIndex_depth, startIndex_length, startIndex_width)]
if self.patch_size_us is not None:
startIndex_depth_us = self.data.iloc[index][self.STARTINDEX_DEPTH_US]
startIndex_length_us = self.data.iloc[index][self.STARTINDEX_LENGTH_US]
startIndex_width_us = self.data.iloc[index][self.STARTINDEX_WIDTH_US]
start_coords = start_coords + [(startIndex_depth_us, startIndex_length_us, startIndex_width_us)]
if self.patch_size != -1:
if len(groundTruthImages.shape) == 4: #don't know why, but an additional dim is noticed in some of the fully-sampled NIFTIs
target_voxel = groundTruthImages_handler[startIndex_length:startIndex_length+self.patch_size, startIndex_width:startIndex_width+self.patch_size, 0, startIndex_depth:startIndex_depth+self.patch_size]#.squeeze()
else:
target_voxel = groundTruthImages_handler[startIndex_length:startIndex_length+self.patch_size, startIndex_width:startIndex_width+self.patch_size, startIndex_depth:startIndex_depth+self.patch_size]#.squeeze()
else:
if len(groundTruthImages.shape) == 4: #don't know why, but an additional dim is noticed in some of the fully-sampled NIFTIs
target_voxel = groundTruthImages_handler[:, :, 0, :]#.squeeze()
else:
target_voxel = groundTruthImages_handler[...]#.squeeze()
if self.fly_under_percent is not None:
if self.patch_size != -1:
voxel = abs(performUndersampling(np.array(target_voxel).copy(), mask=self.mask, zeropad=False))
voxel = voxel[...,::2] #2 for 25% - harcoded. TODO fix it
else:
mask = createCenterRatioMask(target_voxel, self.fly_under_percent)
voxel = abs(performUndersampling(np.array(target_voxel).copy(), mask=mask, zeropad=False))
voxel = voxel[...,::2] #2 for 25% - harcoded. TODO fix it
else:
if self.pre_load:
images = self.pre_loaded_img[self.data.iloc[index][self.IMAGE_FILE_NAME]]
images_handler = images
else:
images = nibabel.load(self.data.iloc[index][self.IMAGE_FILE_NAME])
images_handler = images.dataobj
images = nibabel.load(self.data.iloc[index][self.IMAGE_FILE_NAME])
if self.patch_size_us is not None:
voxel = images_handler[startIndex_length_us:startIndex_length_us+self.patch_size_us, startIndex_width_us:startIndex_width_us+self.patch_size_us, startIndex_depth_us:startIndex_depth_us+self.patch_size]#.squeeze()
else:
if self.patch_size != -1 and self.pre_interpolate is None:
voxel = images_handler[startIndex_length:startIndex_length+self.patch_size, startIndex_width:startIndex_width+self.patch_size, startIndex_depth:startIndex_depth+self.patch_size]#.squeeze()
else:
voxel = images_handler[...]
target_slices = np.moveaxis(np.array(target_voxel), -1, 0).astype( np.float32) # get slices in range, convert to array, change axis of depth (because nibabel gives LXWXD, but we need in DXLXW)
slices = np.moveaxis(np.array(voxel),-1, 0).astype(np.float32) #get slices in range, convert to array, change axis of depth (because nibabel gives LXWXD, but we need in DXLXW)
patch = torch.from_numpy(slices)
# patch = patch/torch.max(patch)# normalisation
if self.pre_interpolate:
patch = F.interpolate(patch.unsqueeze(0).unsqueeze(0), size=tuple(np.roll(groundTruthImages.shape, 1)), mode=self.pre_interpolate, align_corners=False).squeeze()
if self.patch_size != -1:
patch = patch[startIndex_depth:startIndex_depth+self.patch_size, startIndex_length:startIndex_length+self.patch_size, startIndex_width:startIndex_width+self.patch_size]
if self.norm_data:
patch = patch/imageFile_max# normalisation
targetPatch = torch.from_numpy(target_slices)
# targetPatch = targetPatch/torch.max(targetPatch)
if self.norm_data:
targetPatch = targetPatch/labelFile_max
if self.dyn:
if self.pre_load:
prevTPImages = self.pre_loaded_lbl[self.data.iloc[index][self.LABEL_PREV_FILE_NAME]]
prevTPImages_handler = prevTPImages
else:
prevTPImages = nibabel.load(self.data.iloc[index][self.LABEL_PREV_FILE_NAME])
prevTPImages_handler = prevTPImages.dataobj
if self.patch_size != -1:
if len(prevTPImages.shape) == 4: #don't know why, but an additional dim is noticed in some of the fully-sampled NIFTIs
prevTP_voxel = prevTPImages_handler[startIndex_length:startIndex_length+self.patch_size, startIndex_width:startIndex_width+self.patch_size, 0, startIndex_depth:startIndex_depth+self.patch_size]#.squeeze()
else:
prevTP_voxel = prevTPImages_handler[startIndex_length:startIndex_length+self.patch_size, startIndex_width:startIndex_width+self.patch_size, startIndex_depth:startIndex_depth+self.patch_size]#.squeeze()
else:
if len(prevTPImages.shape) == 4: #don't know why, but an additional dim is noticed in some of the fully-sampled NIFTIs
prevTP_voxel = prevTPImages_handler[:, :, 0, :]#.squeeze()
else:
prevTP_voxel = prevTPImages_handler[...]#.squeeze()
prevTP_slices = np.moveaxis(np.array(prevTP_voxel), -1, 0).astype(np.float32)
prevTPPatch = torch.from_numpy(prevTP_slices)
# prevTPPatch = prevTPPatch/torch.max(prevTPPatch)
if self.norm_data:
prevTPPatch = prevTPPatch/self.data.iloc[index][self.LABEL_PREV_FILE_MAXVAL]
#to deal the patches which has smaller size
if self.pad_patch:
pad = ()
for dim in range(len(targetPatch.shape)):
pad_needed = self.patch_size - targetPatch.shape[dim]
pad_dim = (pad_needed//2, pad_needed-(pad_needed//2))
pad += pad_dim
if self.patch_size_us is None and self.fly_under_percent is None:
pad_us = pad
else:
pad_us = ()
if self.patch_size_us is None and self.fly_under_percent is not None:
real_patch_us = int(self.patch_size * (self.fly_under_percent*2)) #TODO: works for 25%, but not sure about others. Need to fix the logic
else:
real_patch_us = self.patch_size_us
for dim in range(len(patch.shape)):
pad_needed = real_patch_us - patch.shape[dim]
pad_dim = (pad_needed//2, pad_needed-(pad_needed//2))
pad_us += pad_dim
patch = F.pad(patch, pad_us[::-1]) #tuple has to be reveresed before using it for padding. As the tuple contains in DHW manner, and input is needed as WHD mannger
targetPatch = F.pad(targetPatch, pad[::-1])
if self.dyn:
prevTPPatch = F.pad(prevTPPatch, pad[::-1])
else:
pad = None
if self.dyn:
patch = torch.stack([prevTPPatch, patch])
else:
patch = patch.unsqueeze(0)
targetPatch = targetPatch.unsqueeze(0)
if self.return_coords is True:
lblfilename = self.data.iloc[index][self.LABEL_FILE_NAME]
return patch, targetPatch, np.array(start_coords), os.path.basename(os.path.dirname(lblfilename)) +"_"+os.path.basename(lblfilename), np.array([(self.data.iloc[index][self.LABEL_FILE_SHAPE]), (self.data.iloc[index][self.IMAGE_FILE_SHAPE])]), np.array(pad[::-1]) if pad is not None else -1
else:
return patch, targetPatch
# DATASET_FOLDER = "/nfs1/schatter/Chimp/data_3D_sr/"
# DATASET_FOLDER = r"S:\MEMoRIAL_SharedStorage_M1.2+4+7\Data\Skyra\unet_3D_sr"
# US_Folder = 'Center25Mask'
# patch_size=64
# import logging
# logger = logging.getLogger('x')
# traindataset = SRDataset(logger, patch_size, DATASET_FOLDER + '/usVal/' + US_Folder + '/', DATASET_FOLDER + '/hrVal/', stride_depth =64,
# stride_length=64, stride_width=64,Size =10, patch_size_us=None, return_coords=True)
# train_loader = torch.utils.data.DataLoader(traindataset, batch_size=8, shuffle=True)
# for epoch in range(3):
# for batch_index, (local_batch, local_labels) in enumerate(train_loader):
# self.logger.debug(str(epoch) + " "+ str(batch_index))
| 24,444 | 54.938215 | 300 | py |
DDoS | DDoS-master/utils/data.py | import fnmatch
import os
import random
from glob import glob
import numpy as np
import torch
import torchio as tio
from torchio.data.io import read_image
from .motion import MotionCorrupter
__author__ = "Soumick Chatterjee"
__copyright__ = "Copyright 2022, Faculty of Computer Science, Otto von Guericke University Magdeburg, Germany"
__credits__ = ["Soumick Chatterjee"]
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "Soumick Chatterjee"
__email__ = "soumick.chatterjee@ovgu.de"
__status__ = "Production"
def create_trainDS(path, p=1, **kwargs):
    """Build a TorchIO training dataset of NIfTI volumes with on-the-fly motion corruption.

    path: root directory searched recursively for .nii / .nii.gz files.
    p: probability of applying the corruption to each subject.
    kwargs: forwarded verbatim to MotionCorrupter.
    """
    nii_paths = glob(path+"/**/*.nii", recursive=True) + glob(path+"/**/*.nii.gz", recursive=True)
    subjects = [
        tio.Subject(im=tio.ScalarImage(f), filename=os.path.basename(f))
        for f in nii_paths
    ]
    corrupter = MotionCorrupter(**kwargs)
    pipeline = tio.Compose([tio.Lambda(corrupter.perform, p=p)])
    return tio.SubjectsDataset(subjects, transform=pipeline)
def create_trainDS_precorrupt(path_gt, path_corrupt, p=1, norm_mode=0):
    """Build a TorchIO training dataset that pairs ground-truth NIfTIs with
    pre-computed corrupted counterparts (loaded lazily by ReadCorrupted).

    path_gt: root of the fully-sampled volumes.
    path_corrupt: root of the pre-corrupted volumes, matched by filename.
    p: probability of applying the swap-in transform; norm_mode forwarded as-is.
    """
    gt_paths = glob(path_gt+"/**/*.nii", recursive=True) + glob(path_gt+"/**/*.nii.gz", recursive=True)
    subjects = [
        tio.Subject(im=tio.ScalarImage(f), filename=os.path.basename(f))
        for f in gt_paths
    ]
    pipeline = tio.Compose([ReadCorrupted(path_corrupt=path_corrupt, p=p, norm_mode=norm_mode)])
    return tio.SubjectsDataset(subjects, transform=pipeline)
def createTIODS(path_gt, path_corrupt, is_infer=False, p=1, transforms=None, **kwargs):
    """Create a TorchIO dataset pairing ground-truth and corrupted volumes.

    When ``path_corrupt`` is falsy, the ground truth is corrupted on the fly
    with ``MotionCorrupter(**kwargs)`` at probability ``p``.

    Fix: ``transforms`` previously defaulted to a shared mutable list (``[]``)
    that this function appends to, so transforms added on one call leaked into
    every later call. It now defaults to None and a fresh list is created per
    call; callers passing their own list see unchanged behavior.
    """
    if transforms is None:
        transforms = []
    files_gt = glob(path_gt+"/**/*.nii", recursive=True) + glob(path_gt+"/**/*.nii.gz", recursive=True)
    if path_corrupt:
        files_inp = glob(path_corrupt+"/**/*.nii", recursive=True) + glob(path_corrupt+"/**/*.nii.gz", recursive=True)
        corruptFly = False
    else:
        # No corrupted data supplied: corrupt the ground truth on the fly.
        files_inp = files_gt.copy()
        corruptFly = True
    subjects = []
    for file in files_inp:
        filename = os.path.basename(file)
        gt_files = [f for f in files_gt if filename in f]
        if len(gt_files) > 0:
            gt_path = gt_files[0]
            files_gt.remove(gt_path)  # each GT may be matched at most once
            subjects.append(tio.Subject(
                gt=tio.ScalarImage(gt_path),
                inp=tio.ScalarImage(file),
                filename=filename,
                tag="CorruptNGT",
            ))
    if corruptFly:
        moco = MotionCorrupter(**kwargs)
        transforms.append(tio.Lambda(moco.perform, p = p))
    transform = tio.Compose(transforms)
    subjects_dataset = tio.SubjectsDataset(subjects, transform=transform)
    return subjects_dataset
def __process_TPs(files):
    """Split file paths on their ``TP<n>`` directory component.

    Returns ``(records, filenames)``: per-file dicts with 'path', 'filename'
    and integer 'tp', sorted by time point, plus the de-duplicated filenames
    (set-derived, so order unspecified).
    """
    records = []
    for path in files:
        parts = os.path.normpath(path).split(os.sep)
        tp_dir = fnmatch.filter(parts, "TP*")[0]
        records.append({
            "path": path,
            # Everything below the TP directory joined into one identifier.
            "filename": "_".join(parts[parts.index(tp_dir) + 1:]),
            "tp": int(tp_dir[2:]),
        })
    records.sort(key=lambda rec: rec["tp"])
    names = list({rec["filename"] for rec in records})
    return records, names
class ProcessTIOSubsTPs():
    """TorchIO-style callable that prepends the previous time-point's ground
    truth to the current input as an extra channel (channel dim 0)."""

    def __init__(self):
        pass

    def __call__(self, subject):
        previous_gt = subject['gt_tp_prev'][tio.DATA]
        current_inp = subject['inp'][tio.DATA]
        subject["inp"][tio.DATA] = torch.cat((previous_gt, current_inp), dim=0)
        return subject
def createTIODynDS(path_gt, path_corrupt, is_infer=False, p=1, transforms=None, **kwargs):
    """Build a longitudinal (dynamic) TorchIO dataset of consecutive time-point pairs.

    Each subject holds the previous TP's ground truth/input alongside the
    current TP's; ``ProcessTIOSubsTPs`` (appended at the end of the pipeline)
    later concatenates the previous GT onto the input channels. When
    ``path_corrupt`` is falsy, corruption is applied on the fly.

    Fix: ``transforms`` previously defaulted to a shared mutable list (``[]``)
    that this function appends to, so transforms accumulated across calls. It
    now defaults to None with a fresh list created per call.
    """
    if transforms is None:
        transforms = []
    files_gt = glob(path_gt+"/**/*.nii", recursive=True) + glob(path_gt+"/**/*.nii.gz", recursive=True)
    if path_corrupt:
        files_inp = glob(path_corrupt+"/**/*.nii", recursive=True) + glob(path_corrupt+"/**/*.nii.gz", recursive=True)
        corruptFly = False
    else:
        files_inp = files_gt.copy()
        corruptFly = True
    subjects = []
    inp_dicts, files_inp = __process_TPs(files_inp)
    gt_dicts, _ = __process_TPs(files_gt)
    for filename in files_inp:
        inp_files = [d for d in inp_dicts if filename in d['filename']]
        gt_files = [d for d in gt_dicts if filename in d['filename']]
        # NOTE(review): list(set(...)) does not guarantee TP order — confirm
        # whether tps should be sorted before pairing consecutive time points.
        tps = list(set(dic["tp"] for dic in inp_files))
        tp_prev = tps.pop(0)
        for tp in tps:
            inp_tp_prev = [d for d in inp_files if tp_prev == d['tp']]
            gt_tp_prev = [d for d in gt_files if tp_prev == d['tp']]
            inp_tp = [d for d in inp_files if tp == d['tp']]
            gt_tp = [d for d in gt_files if tp == d['tp']]
            tp_prev = tp
            if len(gt_tp_prev) > 0 and len(gt_tp) > 0:
                subjects.append(tio.Subject(
                    gt_tp_prev=tio.ScalarImage(gt_tp_prev[0]['path']),
                    inp_tp_prev=tio.ScalarImage(inp_tp_prev[0]['path']),
                    gt=tio.ScalarImage(gt_tp[0]['path']),
                    inp=tio.ScalarImage(inp_tp[0]['path']),
                    filename=filename,
                    tp=tp,
                    tag="CorruptNGT",
                ))
            else:
                print("Warning: Not Implemented if GT is missing. Skipping Sub-TP.")
                continue
    if corruptFly:
        moco = MotionCorrupter(**kwargs)
        transforms.append(tio.Lambda(moco.perform, p = p))
    transforms.append(ProcessTIOSubsTPs())
    transform = tio.Compose(transforms)
    subjects_dataset = tio.SubjectsDataset(subjects, transform=transform)
    return subjects_dataset
def create_patchDS(train_subs, val_subs, patch_size, patch_qlen, patch_per_vol, inference_strides):
    """Wrap subject datasets into patch-level datasets.

    Training subjects become a tio.Queue of uniformly sampled patches;
    validation subjects become a concatenation of per-subject GridSamplers
    whose overlap is patch_size minus the inference stride. Either side may
    be None, in which case None is returned for it.
    """
    train_queue, val_queue = None, None
    if train_subs is not None:
        train_queue = tio.Queue(
            subjects_dataset=train_subs,
            max_length=patch_qlen,
            samples_per_volume=patch_per_vol,
            sampler=tio.data.UniformSampler(patch_size),
            num_workers=0,
            start_background=True,
        )
    if val_subs is not None:
        overlap = np.subtract(patch_size, inference_strides)
        grid_samplers = [
            tio.inference.GridSampler(val_subs[i], patch_size, overlap)
            for i in range(len(val_subs))
        ]
        val_queue = torch.utils.data.ConcatDataset(grid_samplers)
    return train_queue, val_queue
class ReadCorrupted(tio.transforms.Transform):
    """TorchIO transform that loads a randomly chosen pre-corrupted volume
    matching the subject's filename and stacks it onto the clean image as a
    second channel (optionally normalising both first)."""

    def __init__(self, path_corrupt, p=1, norm_mode=0):
        super().__init__(p=p)
        self.path_corrupt = path_corrupt
        # 0: no normalisation, 1: divide by max, 2: min-max scaling.
        self.norm_mode = norm_mode

    def apply_transform(self, subject):
        corrupted_query = subject.filename.split(".")[0] + "*"
        candidates = glob(self.path_corrupt + "/**/" + corrupted_query, recursive=True)
        corrupt_path = random.choice(candidates)
        transformed, _ = read_image(corrupt_path)
        vol = subject['im'][tio.DATA].float()
        transformed = transformed.float()
        if self.norm_mode == 1:
            vol = vol / vol.max()
            transformed = transformed / transformed.max()
        elif self.norm_mode == 2:
            vol = (vol - vol.min()) / (vol.max() - vol.min())
            transformed = (transformed - transformed.min()) / (transformed.max() - transformed.min())
        # Channel 0: clean volume, channel 1: corrupted counterpart.
        subject['im'][tio.DATA] = torch.cat([vol, transformed], 0)
        return subject
| 7,985 | 38.93 | 118 | py |
DDoS | DDoS-master/utils/interpnorm_vols.py | import os
import random
from glob import glob
import nibabel as nib
import numpy as np
import torch
import torch.nn.functional as F
from tqdm import tqdm
__author__ = "Soumick Chatterjee"
__copyright__ = "Copyright 2022, Faculty of Computer Science, Otto von Guericke University Magdeburg, Germany"
__credits__ = ["Soumick Chatterjee", "Chompunuch Sarasaen"]
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "Soumick Chatterjee"
__email__ = "soumick.chatterjee@ovgu.de"
__status__ = "Production"
# Input roots: undersampled volumes (without zero-padding) and the matching
# fully-sampled ground truth.
path_woZPad = "/project/schatter/Chimp/Data/usCHAOSWoT2/Center6p25MaskWoPad"
path_GT = "/project/schatter/Chimp/Data/hrCHAOS"
# Output roots: trilinearly-interpolated + normalised undersampled volumes,
# and max-normalised ground truth.
outpath_interpNorm = "/project/schatter/Chimp/Data/usCHAOSWoT2/Center6p25MaskWoPad_Tri_Norm"
outpath_GTNorm = "/project/schatter/Chimp/Data/hrCHAOS_Norm"
# Recursively collect every NIfTI volume under each root.
files = glob(path_woZPad+"/**/*.nii", recursive=True) + glob(path_woZPad+"/**/*.nii.gz", recursive=True)
files_gt = glob(path_GT+"/**/*.nii", recursive=True) + glob(path_GT+"/**/*.nii.gz", recursive=True)
def SaveNIFTI(data, path):
    """Save *data* as a NIfTI file at *path* (identity affine), creating
    parent directories as needed.

    Fix: guard ``os.makedirs`` against an empty dirname — ``os.path.split``
    returns '' for bare filenames and ``os.makedirs('')`` raises
    FileNotFoundError.
    """
    parent = os.path.split(path)[0]
    if parent:
        os.makedirs(parent, exist_ok=True)
    nib.save(nib.Nifti1Image(data, np.eye(4)), path)
# For each undersampled volume: normalise the matching ground truth by its
# maximum and save it, then trilinearly upsample the undersampled volume to
# the GT grid, normalise it by its own (pre-interpolation) maximum, and save.
for us_path in tqdm(files):
    fname = os.path.basename(us_path)
    gt_path = [g for g in files_gt if fname in g][0]
    gt = nib.load(gt_path).dataobj[...]
    gt_max = gt.max()
    gt = (gt.astype(np.float32)) / gt_max
    SaveNIFTI(gt, gt_path.replace(path_GT, outpath_GTNorm))
    us_vol = nib.load(us_path).dataobj[...]
    us_max = us_vol.max()
    us_tensor = torch.from_numpy(us_vol.astype(np.float32))
    us_tensor = F.interpolate(us_tensor.unsqueeze(0).unsqueeze(0), mode="trilinear", size=gt.shape).squeeze()
    SaveNIFTI((us_tensor / us_max).numpy(), us_path.replace(path_woZPad, outpath_interpNorm))
| 1,778 | 35.306122 | 110 | py |
DDoS | DDoS-master/utils/utilities.py | import os
from copy import deepcopy
from statistics import median
import random
import nibabel as nib
import numpy as np
import torch
import torch.nn.functional as F
import torchcomplex.nn.functional as cF
import torchio as tio
import torchvision.utils as vutils
from scipy import ndimage
import wandb
from pynufft import NUFFT
from sewar.full_ref import ssim as SSIM2DCalc
from sewar.full_ref import uqi as UQICalc
from skimage.metrics import (normalized_root_mse, peak_signal_noise_ratio,
structural_similarity)
from torchcomplex.utils.signaltools import resample
from tricorder.math.transforms.fourier import fftNc_np, ifftNc_np
from utils.elastic_transform import RandomElasticDeformation, warp_image
__author__ = "Soumick Chatterjee"
__copyright__ = "Copyright 2022, Faculty of Computer Science, Otto von Guericke University Magdeburg, Germany"
__credits__ = ["Soumick Chatterjee", "Chompunuch Sarasaen"]
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "Soumick Chatterjee"
__email__ = "soumick.chatterjee@ovgu.de"
__status__ = "Production"
class Interpolator():
    """Callable that resizes batched (B, C, ...) images with a configurable mode.

    Unknown modes (or None) make the interpolator a no-op. Complex tensors are
    routed through torchcomplex's interpolate; "sinc" uses FFT-based
    resampling on only the spatial axes whose size actually changes.
    """

    def __init__(self, mode=None):
        valid_modes = ("sinc", "nearest", "linear", "bilinear", "bicubic", "trilinear", "area")
        self.mode = mode if mode in valid_modes else None

    def perform_sinc(self, images, out_shape):
        # Resample only the spatial axes that differ from the requested shape;
        # the +2 skips past the batch and channel dimensions.
        axes = np.argwhere(np.equal(images.shape[2:], out_shape) == False).squeeze(1)
        target_sizes = [out_shape[i] for i in axes]
        return resample(images, target_sizes, axis=axes + 2)

    def __call__(self, images, out_shape):
        if self.mode is None:
            return images
        if images.is_complex():
            return cF.interpolate(images, size=out_shape, mode=self.mode)
        if self.mode == "sinc":
            return self.perform_sinc(images, out_shape)
        return F.interpolate(images, size=out_shape, mode=self.mode)
def tensorboard_images(writer, inputs, outputs, targets, epoch, section='train'):
    """Log image grids (first sample, first channel) to TensorBoard for *epoch*.

    The output grid is always logged; input and target grids only when the
    corresponding tensor is not None. Tags are '<section>/output' etc.
    """
    def _log(name, tensor):
        grid = vutils.make_grid(tensor[0, 0, ...], normalize=True, scale_each=True)
        writer.add_image('{}/{}'.format(section, name), grid, epoch)

    _log('output', outputs)
    if inputs is not None:
        _log('input', inputs)
    if targets is not None:
        _log('target', targets)
def SaveNIFTI(data, file_path):
    """Save an array as a NIfTI file (identity affine) using NiBabel.

    Complex-valued input is reduced to its magnitude first.

    Fix: use ``np.iscomplexobj`` (dtype check) instead of
    ``np.iscomplex(data).any()`` — the latter is False for a complex-dtype
    array whose imaginary parts are all zero, which would then be handed to
    nibabel still as complex data.
    """
    if np.iscomplexobj(data):
        data = abs(data)
    nii = nib.Nifti1Image(data, np.eye(4))
    nib.save(nii, file_path)
def sharpTP(vol, alpha=0.5):
    """Unsharp-mask *vol*: add *alpha* times the Gaussian-blur residual (sigma=1)."""
    blurred = ndimage.gaussian_filter(vol, 1)
    detail = vol - blurred
    return vol + alpha * detail
def applyDCS(output, fully, under_mask=None, missing_mask=None, mat=None, isCartesian=True, norm=True):
    """Enforce k-space data consistency on a reconstruction.

    Replaces the acquired (sampled) portion of *output*'s k-space with the
    measured k-space taken from *fully*, keeping the network-predicted values
    only at the unsampled locations.

    output: reconstructed volume (slices stacked on the last axis).
    fully: fully-sampled reference volume with the same layout.
    under_mask / missing_mask: complementary Cartesian sampling masks
        (used only when isCartesian is True).
    mat: for the non-Cartesian path, dict with 'om' (acquired trajectory),
        'invom' (missing trajectory), 'fullom' (their union) and the
        density-compensation weights 'dcfFullRes'.
    norm: max-normalise both volumes first.
        NOTE(review): the in-place ``/=`` mutates the caller's arrays, and the
        non-Cartesian path also writes back into *output* — confirm intended.
    """
    if norm:
        fully /= fully.max()
        output /= output.max()
    if isCartesian:
        # Cartesian DC: measured k-space where sampled, prediction elsewhere.
        fullyK = fftNc_np(fully, axes=(0,1))
        underK = fullyK*under_mask
        outK = fftNc_np(output, axes=(0,1))
        missingK = outK*missing_mask
        finalK = underK+missingK
        return abs(ifftNc_np(finalK, axes=(0,1)))
    else:
        # Non-Cartesian DC via NUFFT on a 2x-oversampled grid, slice by slice.
        om = mat['om']
        invom = mat['invom']
        fullom = mat['fullom']
        dcfFullRes = mat['dcfFullRes'].squeeze()
        imageSize = fully.shape[0]
        baseresolution = imageSize*2
        interpolationSize4NUFFT = 6
        NufftObjOM = NUFFT()
        NufftObjInvOM = NUFFT()
        NufftObjFullOM = NUFFT()
        Nd = (baseresolution, baseresolution) # image size
        Kd = (baseresolution*2, baseresolution*2) # k-space size - TODO: multiply back by 2
        Jd = (interpolationSize4NUFFT, interpolationSize4NUFFT) # interpolation size
        NufftObjOM.plan(om, Nd, Kd, Jd)
        NufftObjInvOM.plan(invom, Nd, Kd, Jd)
        NufftObjFullOM.plan(fullom, Nd, Kd, Jd)
        for slc in range(fully.shape[-1]):
            # Zero-pad each slice into the centre of the oversampled grid.
            oversam_fully = np.zeros((baseresolution,baseresolution), dtype=fully.dtype)
            oversam_fully[imageSize//2:imageSize+imageSize//2,imageSize//2:imageSize+imageSize//2] = fully[...,slc]
            oversam_output = np.zeros((baseresolution,baseresolution), dtype=output.dtype)
            oversam_output[imageSize//2:imageSize+imageSize//2,imageSize//2:imageSize+imageSize//2] = output[...,slc]
            # Acquired spokes come from the reference, missing spokes from the prediction.
            yUnder = NufftObjOM.forward(oversam_fully)
            yMissing = NufftObjInvOM.forward(oversam_output)
            yCorrected = np.concatenate((yUnder,yMissing))
            # Density compensation before the adjoint transform.
            yCorrected = np.multiply(dcfFullRes,yCorrected)
            oversam_output_corrected = NufftObjFullOM.adjoint(yCorrected)
            # Crop back to the original field of view; store the magnitude.
            output_corrected = oversam_output_corrected[imageSize//2:imageSize+imageSize//2,imageSize//2:imageSize+imageSize//2]
            output[...,slc] = abs(output_corrected).astype(fully.dtype)
        return output
def process_DDoS_SRPrev(SRPrev, start_coords, patch_size, pad, lr_imgs):
    """Overwrite channel 0 of every entry of *lr_imgs* with the matching
    patch of the previous time-point's SR volume (depth moved to the front,
    max-normalised, padded with the per-sample pad spec). Returns lr_imgs."""
    sr_max = SRPrev.max()
    for idx in range(lr_imgs.shape[0]):
        depth0, length0, width0 = start_coords[idx][0].numpy()
        if patch_size != -1:
            crop = SRPrev[length0:length0 + patch_size, width0:width0 + patch_size, depth0:depth0 + patch_size]
        else:
            crop = SRPrev[...]
        # nibabel layout is L x W x D; bring depth to the front (D x L x W).
        crop = np.moveaxis(np.array(crop), -1, 0).astype(np.float32)
        prev_patch = torch.from_numpy(crop) / sr_max
        lr_imgs[idx, 0] = F.pad(prev_patch, tuple(pad[idx]))
    return lr_imgs
def process_valBatch(batch):
    """Split a heterogeneous validation batch into stacked input/GT tensors
    plus a per-sample flag telling whether a real ground truth exists.

    Tags: 'CorruptNGT'/'GTOnly' carry separate inp/gt images; 'FlyCorrupt'
    stores [clean, corrupted] channels in 'im' (falling back to the clean
    image when no corrupted channel is present); 'CorruptOnly' has no GT, so
    the input doubles as the target and its flag is set to False.
    """
    inputs, targets, has_gt = [], [], []
    for idx in range(len(batch['tag'])):
        has_gt.append(True)
        tag = batch['tag'][idx]
        if tag == "CorruptNGT" or tag == "GTOnly":
            inputs.append(batch['inp'][tio.DATA][idx, ...])
            targets.append(batch['gt'][tio.DATA][idx, ...])
        elif tag == "FlyCorrupt":
            targets.append(batch['im'][tio.DATA][idx, 0, ...].unsqueeze(1))
            if batch['im'][tio.DATA].shape[1] == 2:
                inputs.append(batch['im'][tio.DATA][idx, 1, ...].unsqueeze(1))
            else:
                # No corrupted channel: fall back to the motion-free image.
                inputs.append(deepcopy(targets[-1]))
        elif tag == "CorruptOnly":
            inputs.append(batch['inp'][tio.DATA][idx, ...])
            targets.append(batch['inp'][tio.DATA][idx, ...])
            has_gt[-1] = False
    return torch.stack(inputs, dim=0), torch.stack(targets, dim=0), has_gt
def getSSIM(gt, out, gt_flag=None, data_range=1):
    """Median SSIM over all (sample, channel) slices whose gt_flag entry is
    truthy. A missing gt_flag means every sample has a valid ground truth."""
    if gt_flag is None:
        gt_flag = np.ones(gt.shape[0])
    scores = [
        structural_similarity(gt[i, j, ...], out[i, j, ...], data_range=data_range)
        for i in range(gt.shape[0]) if bool(gt_flag[i])
        for j in range(gt.shape[1])
    ]
    return median(scores)
def calc_metircs(gt, out, tag):
    """Compute SSIM / NRMSE / PSNR / UQI between *gt* and *out* (data range 1).

    Returns a metrics dict whose keys are suffixed with *tag*, plus the
    full-resolution SSIM map.
    """
    ssim_val, ssim_map = structural_similarity(gt, out, data_range=1, full=True)
    results = {
        "SSIM" + tag: ssim_val,
        "NRMSE" + tag: normalized_root_mse(gt, out),
        "PSNR" + tag: peak_signal_noise_ratio(gt, out, data_range=1),
        "UQI" + tag: UQICalc(gt, out),
    }
    return results, ssim_map
def MinMax(data):
    """Linearly rescale *data* to the [0, 1] range."""
    lo, hi = data.min(), data.max()
    return (data - lo) / (hi - lo)
def convert_image(img, source, target):
    """
    Convert an image from a source format to a target format.

    NOTE(review): the 'pil' branches rely on ``FT`` (presumably
    torchvision.transforms.functional) and the 'imagenet-norm' / 'y-channel'
    branches on ``imagenet_mean``/``imagenet_std`` (and their CUDA variants)
    and ``rgb_weights`` — none of these are defined or imported in this
    module, so those branches would raise NameError if taken; confirm the
    missing imports/constants before using them.

    :param img: image
    :param source: source format, one of 'pil' (PIL image), '[0, 1]' or '[-1, 1]' (pixel value ranges)
    :param target: target format, one of 'pil' (PIL image), '[0, 255]', '[0, 1]', '[-1, 1]' (pixel value ranges),
                   'imagenet-norm' (pixel values standardized by imagenet mean and std.),
                   'y-channel' (luminance channel Y in the YCbCr color format, used to calculate PSNR and SSIM)
    :return: converted image
    """
    assert source in {'pil', '[0, 1]', '[-1, 1]'}, "Cannot convert from source format %s!" % source
    assert target in {'pil', '[0, 255]', '[0, 1]', '[-1, 1]', 'imagenet-norm',
                      'y-channel'}, "Cannot convert to target format %s!" % target

    # Convert from source to [0, 1]
    if source == 'pil':
        img = FT.to_tensor(img)
    elif source == '[0, 1]':
        pass  # already in [0, 1]
    elif source == '[-1, 1]':
        img = (img + 1.) / 2.

    # Convert from [0, 1] to target
    if target == 'pil':
        img = FT.to_pil_image(img)
    elif target == '[0, 255]':
        img = 255. * img
    elif target == '[0, 1]':
        pass  # already in [0, 1]
    elif target == '[-1, 1]':
        img = 2. * img - 1.
    elif target == 'imagenet-norm':
        if img.ndimension() == 3:
            img = (img - imagenet_mean) / imagenet_std
        elif img.ndimension() == 4:
            img = (img - imagenet_mean_cuda) / imagenet_std_cuda
    elif target == 'y-channel':
        # Based on definitions at https://github.com/xinntao/BasicSR/wiki/Color-conversion-in-SR
        # torch.dot() does not work the same way as numpy.dot()
        # So, use torch.matmul() to find the dot product between the last dimension of an 4-D tensor and a 1-D tensor
        img = torch.matmul(255. * img.permute(0, 2, 3, 1)[:, 4:-4, 4:-4, :], rgb_weights) / 255. + 16.
    return img
class ResSaver():
    """Saves network outputs (and optionally inputs) as NIfTI files and,
    when a ground truth is available, computes quality metrics via
    ``calc_metircs``."""

    def __init__(self, out_path, save_inp=False, save_out=True, analyse_out=True, do_norm=False):
        # out_path: root directory; each call writes into out_path/<outfolder>.
        # do_norm: convert volumes from [-1, 1] to [0, 1] before scoring.
        self.out_path = out_path
        self.save_inp = save_inp
        self.do_norm = do_norm
        self.save_out = save_out
        self.analyse_out = analyse_out

    def CalcNSave(self, out, inp, gt, outfolder, already_numpy=False):
        """Save the given volumes under ``out_path/outfolder`` and, if *gt*
        is provided, return the combined metrics dict (None otherwise).

        out/inp/gt are torch tensors unless ``already_numpy`` is True.
        """
        outpath = os.path.join(self.out_path, outfolder)
        os.makedirs(outpath, exist_ok=True)
        if not already_numpy:
            inp = inp.numpy()
            out = out.numpy()
        if self.save_out:
            SaveNIFTI(out, os.path.join(outpath, "out.nii.gz"))
        if self.save_inp:
            SaveNIFTI(inp, os.path.join(outpath, "inp.nii.gz"))
        if gt is not None:
            if not already_numpy:
                gt = gt.numpy()
            if self.do_norm:
                # Map from the tanh output range [-1, 1] to [0, 1] before scoring.
                inp = convert_image(inp, source='[-1, 1]', target='[0, 1]') #inp/inp.max()
                gt = convert_image(gt, source='[-1, 1]', target='[0, 1]') #gt/gt.max()
            if self.analyse_out:
                # NOTE(review): out is range-converted here regardless of
                # do_norm — confirm whether this should be guarded by do_norm
                # like inp/gt above.
                out = convert_image(out, source='[-1, 1]', target='[0, 1]') #out/out.max()
                out_metrics, out_ssimMAP = calc_metircs(gt, out, tag="Out")
                SaveNIFTI(out_ssimMAP, os.path.join(outpath, "ssimMAPOut.nii.gz"))
            else:
                out_metrics = {}
            inp_metrics, inp_ssimMAP = calc_metircs(gt, inp, tag="Inp")
            SaveNIFTI(inp_ssimMAP, os.path.join(outpath, "ssimMAPInp.nii.gz"))
            metrics = {**out_metrics, **inp_metrics}
            return metrics
#The WnB functions are here, but not been tested (even not finished)
def WnB_ArtefactLog_DS(run, datasets, meta={}, names = ["training", "validation", "test"], description="Train-Val(-Test) Split"):
    """Log the dataset split as a W&B artifact, one .npz file per split."""
    artifact = wandb.Artifact(
        "DSSplit",
        type="dataset",
        description=description,
        metadata={"sizes": [len(ds) for ds in datasets], **meta},
    )
    for split_name, ds in zip(names, datasets):
        with artifact.new_file(split_name + ".npz", mode="wb") as fh:
            np.savez(fh, ds=ds)
    run.log_artifact(artifact)
def WnB_ReadArtefact_DS(run, tag="latest", names = ["training", "validation", "test"]):
    """Download the 'DSSplit' artifact and return each split's stored array."""
    artifact = run.use_artifact('DSSplit:' + tag)
    root = artifact.download()
    return [
        np.load(os.path.join(root, split + ".npz"))['ds']
        for split in names
    ]
def WnB_ArtefactLog_Model(run, model, config, description="MyModel"):
    """Serialise *model* to disk and log it (with its config) as a W&B artifact."""
    artifact = wandb.Artifact(
        "Model",
        type="model",
        description=description,
        metadata=dict(config),
    )
    model.save("initialized_model.keras")
    artifact.add_file("initialized_model.keras")
    wandb.save("initialized_model.keras")
    run.log_artifact(artifact)
def WnB_ReadArtefact_Model(run, tag="latest"):
    """Download the logged 'Model' artifact and restore the model and its config.

    NOTE(review): ``keras`` is not imported anywhere in this module, so this
    function would raise NameError if called; the surrounding comment marks
    these W&B helpers as unfinished/untested — confirm the intended import.
    """
    model_artifact = run.use_artifact('Model:'+tag)
    model_dir = model_artifact.download()
    model_path = os.path.join(model_dir, "initialized_model.keras")
    model = keras.models.load_model(model_path)
    model_config = model_artifact.metadata
    return model, model_config
def deformOTF(input_batch):
    """Apply a random on-the-fly elastic deformation to *input_batch* (on GPU)."""
    warp = RandomElasticDeformation(
        num_control_points=random.choice([5, 6, 7]),
        max_displacement=random.uniform(0.7, 2.0),
        locked_borders=2
    )
    warp.cuda()
    deformed, _, _ = warp(input_batch)
    # Elastic warping can produce NaNs near the borders; zero them out.
    deformed = torch.nan_to_num(deformed)
    return deformed
DDoS | DDoS-master/utils/datasets.py | # from __future__ import self.logger.debug_function, division
import glob
import os
import sys
from random import randint, random, seed
import nibabel
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
import torchvision.transforms as transforms
from torch.utils.data import Dataset
from utils.customutils import createCenterRatioMask, performUndersampling
__author__ = "Soumick Chatterjee"
__copyright__ = "Copyright 2022, Faculty of Computer Science, Otto von Guericke University Magdeburg, Germany"
__credits__ = ["Soumick Chatterjee", "Chompunuch Sarasaen"]
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "Soumick Chatterjee"
__email__ = "soumick.chatterjee@ovgu.de"
__status__ = "Production"
# Pin all random number generators (PyTorch, NumPy, Python's random) so that
# patch sampling, shuffling and augmentation are reproducible across runs.
torch.manual_seed(2020)
np.random.seed(2020)
seed(2020)
class SRDataset(Dataset):
def __init__(self,logger, patch_size, dir_path, label_dir_path, stride_depth=16, stride_length=32, stride_width=32,
Size=4000, fly_under_percent=None, patch_size_us=None, return_coords=False, pad_patch=True, pre_interpolate=None, norm_data=True, pre_load=False):
self.patch_size = patch_size #-1 = full vol
self.stride_depth = stride_depth
self.stride_length = stride_length
self.stride_width = stride_width
self.size = Size
self.logger = logger
self.fly_under_percent = fly_under_percent #if None, then use already undersampled data. Gets priority over patch_size_us. They are both mutually exclusive
self.return_coords = return_coords
self.pad_patch = pad_patch
self.pre_interpolate = pre_interpolate
if patch_size == patch_size_us:
patch_size_us = None
if patch_size!=-1 and patch_size_us is not None:
stride_length_us = stride_length // (patch_size//patch_size_us)
stride_width_us = stride_width // (patch_size//patch_size_us)
self.stride_length_us = stride_length_us
self.stride_width_us = stride_width_us
elif patch_size==-1:
patch_size_us = None
if self.fly_under_percent is not None:
patch_size_us = None
self.patch_size_us = patch_size_us #If already downsampled data is supplied, then this can be used. Calculate already based on the downsampling size.
self.norm_data = norm_data
self.pre_load = pre_load
self.pre_loaded_lbl = {}
self.pre_loaded_img = {}
if not self.norm_data:
print("No Norm") #TODO remove
# Constants
self.IMAGE_FILE_NAME = "imageFilename"
self.IMAGE_FILE_SHAPE = "imageFileShape"
self.IMAGE_FILE_MAXVAL = "imageFileMaxVal"
self.LABEL_FILE_NAME = "labelFilename"
self.LABEL_FILE_SHAPE = "labelFileShape"
self.LABEL_FILE_MAXVAL = "labelFileMaxVal"
self.STARTINDEX_DEPTH = "startIndex_depth"
self.STARTINDEX_LENGTH = "startIndex_length"
self.STARTINDEX_WIDTH = "startIndex_width"
self.STARTINDEX_DEPTH_US = "startIndex_depth_us"
self.STARTINDEX_LENGTH_US = "startIndex_length_us"
self.STARTINDEX_WIDTH_US = "startIndex_width_us"
self.trans = transforms.ToTensor() # used to convert tiffimagefile to tensor
dataDict = { self.IMAGE_FILE_NAME: [], self.IMAGE_FILE_SHAPE: [], self.IMAGE_FILE_MAXVAL:[], self.LABEL_FILE_NAME: [], self.LABEL_FILE_SHAPE: [], self.LABEL_FILE_MAXVAL:[], self.STARTINDEX_DEPTH: [],self.STARTINDEX_LENGTH: [],self.STARTINDEX_WIDTH: [],
self.STARTINDEX_DEPTH_US: [],self.STARTINDEX_LENGTH_US: [],self.STARTINDEX_WIDTH_US: []}
column_names = [ self.IMAGE_FILE_NAME, self.IMAGE_FILE_SHAPE, self.IMAGE_FILE_MAXVAL, self.LABEL_FILE_NAME, self.LABEL_FILE_SHAPE, self.LABEL_FILE_MAXVAL, self.STARTINDEX_DEPTH, self.STARTINDEX_LENGTH,self.STARTINDEX_WIDTH,
self.STARTINDEX_DEPTH_US, self.STARTINDEX_LENGTH_US,self.STARTINDEX_WIDTH_US]
self.data = pd.DataFrame(columns=column_names)
files_us = glob.glob(dir_path+'/**/*.nii', recursive = True)
files_us += glob.glob(dir_path+'/**/*.nii.gz', recursive = True)
for imageFileName in files_us:
labelFileName = imageFileName.replace(dir_path[:-1], label_dir_path[:-1]) #[:-1] is needed to remove the trailing slash for shitty windows
if imageFileName == labelFileName:
sys.exit('Input and Output save file')
if not(os.path.isfile(imageFileName) and os.path.isfile(labelFileName)):
#trick to include the other file extension
if labelFileName.endswith('.nii.nii.gz'):
labelFileName = labelFileName.replace('.nii.nii.gz', '.nii.gz')
elif labelFileName.endswith('.nii.gz'):
labelFileName = labelFileName.replace('.nii.gz', '.nii')
else:
labelFileName = labelFileName.replace('.nii', '.nii.gz')
#check again, after replacing the file extension
if not(os.path.isfile(imageFileName) and os.path.isfile(labelFileName)):
self.logger.debug("skipping file as label for the corresponding image doesn't exist :"+ str(imageFileName))
continue
imageFile = nibabel.load(imageFileName) # shape (Length X Width X Depth X Channels)
header_shape_us = imageFile.header.get_data_shape()
imageFile_data = imageFile.get_data()
imageFile_max = imageFile_data.max()
labelFile = nibabel.load(labelFileName) # shape (Length X Width X Depth X Channels) - changed to label file name as input image can have different (lower) size
header_shape = labelFile.header.get_data_shape()
labelFile_data = labelFile.get_data()
labelFile_max = labelFile_data.max()
self.logger.debug(header_shape)
n_depth,n_length,n_width = header_shape[2],header_shape[0],header_shape[1] # gives depth which is no. of slices
n_depth_us,n_length_us,n_width_us = header_shape_us[2],header_shape_us[0],header_shape_us[1] # gives depth which is no. of slices
if self.pre_load:
self.pre_loaded_img[imageFileName] = imageFile_data
self.pre_loaded_lbl[labelFileName] = labelFile_data
if patch_size!=1 and (n_depth<patch_size or n_length<patch_size or n_width<patch_size):
self.logger.debug("skipping file because of its size being less than the patch size :"+ str(imageFileName))
continue
############ Following the fully sampled size
if patch_size != -1:
depth_i =0
ranger_depth = int((n_depth-patch_size)/stride_depth)+1
for depth_index in range(ranger_depth if n_depth%patch_size==0 else ranger_depth+1): # iterate through the whole image voxel, and extract patch
length_i = 0
# self.logger.debug("depth")
# self.logger.debug(depth_i)
ranger_length = int((n_length-patch_size)/stride_length)+1
for length_index in range(ranger_length if n_length%patch_size==0 else ranger_length+1):
width_i = 0
# self.logger.debug("length")
# self.logger.debug(length_i)
ranger_width = int((n_width - patch_size)/stride_width)+1
for width_index in range(ranger_width if n_width%patch_size==0 else ranger_width+1):
# self.logger.debug("width")
# self.logger.debug(width_i)
dataDict[self.IMAGE_FILE_NAME].append(imageFileName)
dataDict[self.IMAGE_FILE_SHAPE].append(header_shape_us)
dataDict[self.IMAGE_FILE_MAXVAL].append(imageFile_max)
dataDict[self.LABEL_FILE_NAME].append(labelFileName)
dataDict[self.LABEL_FILE_SHAPE].append(header_shape)
dataDict[self.LABEL_FILE_MAXVAL].append(labelFile_max)
dataDict[self.STARTINDEX_DEPTH].append(depth_i)
dataDict[self.STARTINDEX_LENGTH].append(length_i)
dataDict[self.STARTINDEX_WIDTH].append(width_i)
if patch_size_us is None: #data is zero padded
dataDict[self.STARTINDEX_DEPTH_US].append(depth_i)
dataDict[self.STARTINDEX_LENGTH_US].append(length_i)
dataDict[self.STARTINDEX_WIDTH_US].append(width_i)
width_i += stride_width
length_i += stride_length
depth_i += stride_depth
else:
dataDict[self.IMAGE_FILE_NAME].append(imageFileName)
dataDict[self.IMAGE_FILE_SHAPE].append(header_shape_us)
dataDict[self.IMAGE_FILE_MAXVAL].append(imageFile_max)
dataDict[self.LABEL_FILE_NAME].append(labelFileName)
dataDict[self.LABEL_FILE_SHAPE].append(header_shape)
dataDict[self.LABEL_FILE_MAXVAL].append(labelFile_max)
dataDict[self.STARTINDEX_DEPTH].append(0)
dataDict[self.STARTINDEX_LENGTH].append(0)
dataDict[self.STARTINDEX_WIDTH].append(0)
dataDict[self.STARTINDEX_DEPTH_US].append(0)
dataDict[self.STARTINDEX_LENGTH_US].append(0)
dataDict[self.STARTINDEX_WIDTH_US].append(0)
############ Following the undersampled size, only if patch_size_us has been provied
if patch_size_us is not None:
depth_i =0
ranger_depth = int((n_depth_us-patch_size_us)/stride_depth)+1
for depth_index in range(ranger_depth if n_depth_us%patch_size_us==0 else ranger_depth+1): # iterate through the whole image voxel, and extract patch
length_i = 0
# self.logger.debug("depth")
# self.logger.debug(depth_i)
ranger_length = int((n_length_us-patch_size_us)/stride_length_us)+1
for length_index in range(ranger_length if n_length_us%patch_size_us==0 else ranger_length+1):
width_i = 0
# self.logger.debug("length")
# self.logger.debug(length_i)
ranger_width = int((n_width_us - patch_size_us)/stride_width_us)+1
for width_index in range(ranger_width if n_width_us%patch_size_us==0 else ranger_width+1):
# self.logger.debug("width")
# self.logger.debug(width_i)
dataDict[self.STARTINDEX_DEPTH_US].append(depth_i)
dataDict[self.STARTINDEX_LENGTH_US].append(length_i)
dataDict[self.STARTINDEX_WIDTH_US].append(width_i)
width_i += stride_width_us
length_i += stride_length_us
depth_i += stride_depth
self.data = pd.DataFrame.from_dict(dataDict)
self.logger.debug(len(self.data))
if Size is not None and len(self.data) > Size:
self.logger.debug('Dataset is larger tham supplied size. Choosing s subset randomly of size '+str(Size))
self.data = self.data.sample(n = Size, replace = False, random_state=2020)
if patch_size!=-1 and fly_under_percent is not None:
self.mask = createCenterRatioMask(np.zeros((patch_size,patch_size,patch_size)), fly_under_percent)
def __len__(self):
return len(self.data)
    def __getitem__(self, index):
        '''
        Return one (undersampled-input, fully-sampled-target) patch pair.

        Column layout of self.data (accessed positionally via iloc):
        imageFilename: 0
        imageFileShape: 1
        imageFileMaxVal: 2
        labelFilename: 3
        labelFileShape: 4
        labelFileMaxVal: 5
        startIndex_depth : 6
        startIndex_length : 7
        startIndex_width : 8
        startIndex_depth_us : 9
        startIndex_length_us : 10
        startIndex_width_us : 11
        '''
        imageFile_max = self.data.iloc[index, 2]
        labelFile_max = self.data.iloc[index, 5]
        # Fully-sampled (label) volume: from the RAM cache if pre_load is on,
        # otherwise via nibabel's lazy dataobj proxy so only the requested
        # patch is actually read from disk.
        if self.pre_load:
            groundTruthImages = self.pre_loaded_lbl[self.data.iloc[index, 3]]
            groundTruthImages_handler = groundTruthImages
        else:
            groundTruthImages = nibabel.load(self.data.iloc[index, 3])
            groundTruthImages_handler = groundTruthImages.dataobj
        startIndex_depth = self.data.iloc[index, 6]
        startIndex_length = self.data.iloc[index, 7]
        startIndex_width = self.data.iloc[index, 8]
        start_coords = [(startIndex_depth, startIndex_length, startIndex_width)]
        if self.patch_size_us is not None:
            startIndex_depth_us = self.data.iloc[index, 9]
            startIndex_length_us = self.data.iloc[index, 10]
            startIndex_width_us = self.data.iloc[index, 11]
            start_coords = start_coords + [(startIndex_depth_us, startIndex_length_us, startIndex_width_us)]
        # Extract the ground-truth patch; patch_size == -1 means "whole volume".
        if self.patch_size != -1:
            if len(groundTruthImages.shape) == 4: #don't know why, but an additional dim is noticed in some of the fully-sampled NIFTIs
                target_voxel = groundTruthImages_handler[startIndex_length:startIndex_length+self.patch_size, startIndex_width:startIndex_width+self.patch_size, 0, startIndex_depth:startIndex_depth+self.patch_size]#.squeeze()
            else:
                target_voxel = groundTruthImages_handler[startIndex_length:startIndex_length+self.patch_size, startIndex_width:startIndex_width+self.patch_size, startIndex_depth:startIndex_depth+self.patch_size]#.squeeze()
        else:
            if len(groundTruthImages.shape) == 4: #don't know why, but an additional dim is noticed in some of the fully-sampled NIFTIs
                target_voxel = groundTruthImages_handler[:, :, 0, :]#.squeeze()
            else:
                target_voxel = groundTruthImages_handler[...]#.squeeze()
        # Input patch: either undersample the ground truth on the fly, or read
        # the pre-undersampled input volume from disk/cache.
        if self.fly_under_percent is not None:
            if self.patch_size != -1:
                voxel = abs(performUndersampling(np.array(target_voxel).copy(), mask=self.mask, zeropad=False))
                voxel = voxel[...,::2] #2 for 25% - harcoded. TODO fix it
            else:
                # Whole-volume mode: build the center mask at the volume's size.
                mask = createCenterRatioMask(target_voxel, self.fly_under_percent)
                voxel = abs(performUndersampling(np.array(target_voxel).copy(), mask=mask, zeropad=False))
                voxel = voxel[...,::2] #2 for 25% - harcoded. TODO fix it
        else:
            if self.pre_load:
                images = self.pre_loaded_img[self.data.iloc[index, 0]]
                images_handler = images
            else:
                images = nibabel.load(self.data.iloc[index, 0])
                images_handler = images.dataobj
            # NOTE(review): this unconditional reload overwrites `images` even
            # when pre_load is active (images_handler is unaffected) — looks
            # like a leftover line; confirm and remove.
            images = nibabel.load(self.data.iloc[index, 0])
            if self.patch_size_us is not None:
                # NOTE(review): the depth slice ends at
                # startIndex_depth_us+self.patch_size — should this be
                # self.patch_size_us like the other two axes? Confirm.
                voxel = images_handler[startIndex_length_us:startIndex_length_us+self.patch_size_us, startIndex_width_us:startIndex_width_us+self.patch_size_us, startIndex_depth_us:startIndex_depth_us+self.patch_size]#.squeeze()
            else:
                if self.patch_size != -1 and self.pre_interpolate is None:
                    voxel = images_handler[startIndex_length:startIndex_length+self.patch_size, startIndex_width:startIndex_width+self.patch_size, startIndex_depth:startIndex_depth+self.patch_size]#.squeeze()
                else:
                    # pre_interpolate needs the full volume before cropping.
                    voxel = images_handler[...]
        target_slices = np.moveaxis(np.array(target_voxel), -1, 0).astype( np.float32) # get slices in range, convert to array, change axis of depth (because nibabel gives LXWXD, but we need in DXLXW)
        slices = np.moveaxis(np.array(voxel),-1, 0).astype(np.float32) #get slices in range, convert to array, change axis of depth (because nibabel gives LXWXD, but we need in DXLXW)
        patch = torch.from_numpy(slices)
        # patch = patch/torch.max(patch)# normalisation
        if self.pre_interpolate:
            # Interpolate the whole input volume up to the ground-truth size,
            # then (if patching) cut the patch out of the interpolated volume.
            patch = F.interpolate(patch.unsqueeze(0).unsqueeze(0), size=tuple(np.roll(groundTruthImages.shape, 1)), mode=self.pre_interpolate, align_corners=False).squeeze()
            if self.patch_size != -1:
                patch = patch[startIndex_depth:startIndex_depth+self.patch_size, startIndex_length:startIndex_length+self.patch_size, startIndex_width:startIndex_width+self.patch_size]
        if self.norm_data:
            patch = patch/imageFile_max# normalisation by the volume-wide maximum
        targetPatch = torch.from_numpy(target_slices)
        # targetPatch = targetPatch/torch.max(targetPatch)
        if self.norm_data:
            targetPatch = targetPatch/labelFile_max
        #to deal the patches which has smaller size: pad both patches up to
        #their nominal sizes, centring the content.
        if self.pad_patch:
            pad = ()
            for dim in range(len(targetPatch.shape)):
                pad_needed = self.patch_size - targetPatch.shape[dim]
                pad_dim = (pad_needed//2, pad_needed-(pad_needed//2))
                pad += pad_dim
            if self.patch_size_us is None and self.fly_under_percent is None:
                pad_us = pad
            else:
                pad_us = ()
                if self.patch_size_us is None and self.fly_under_percent is not None:
                    real_patch_us = int(self.patch_size * (self.fly_under_percent*2)) #TODO: works for 25%, but not sure about others. Need to fix the logic
                else:
                    real_patch_us = self.patch_size_us
                for dim in range(len(patch.shape)):
                    pad_needed = real_patch_us - patch.shape[dim]
                    pad_dim = (pad_needed//2, pad_needed-(pad_needed//2))
                    pad_us += pad_dim
            patch = F.pad(patch, pad_us[::-1]) #tuple has to be reveresed before using it for padding. As the tuple contains in DHW manner, and input is needed as WHD mannger
            targetPatch = F.pad(targetPatch, pad[::-1])
        else:
            pad = None
        if self.return_coords is True:
            return patch, targetPatch, np.array(start_coords), os.path.basename(self.data.iloc[index, 3]), np.array([(self.data.iloc[index, 4]), (self.data.iloc[index, 1])]), np.array(pad[::-1]) if pad is not None else -1
        else:
            return patch, targetPatch
# DATASET_FOLDER = "/nfs1/schatter/Chimp/data_3D_sr/"
# DATASET_FOLDER = r"S:\MEMoRIAL_SharedStorage_M1.2+4+7\Data\Skyra\unet_3D_sr"
# US_Folder = 'Center25Mask'
# patch_size=64
# import logging
# logger = logging.getLogger('x')
# traindataset = SRDataset(logger, patch_size, DATASET_FOLDER + '/usVal/' + US_Folder + '/', DATASET_FOLDER + '/hrVal/', stride_depth =64,
# stride_length=64, stride_width=64,Size =10, patch_size_us=None, return_coords=True)
# train_loader = torch.utils.data.DataLoader(traindataset, batch_size=8, shuffle=True)
# for epoch in range(3):
# for batch_index, (local_batch, local_labels) in enumerate(train_loader):
# self.logger.debug(str(epoch) + " "+ str(batch_index))
| 19,535 | 53.266667 | 261 | py |
DDoS | DDoS-master/utils/motion.py | import math
import multiprocessing.dummy as multiprocessing
import random
from collections import defaultdict
from typing import List
import numpy as np
import SimpleITK as sitk
import torch
import torchio as tio
from scipy.ndimage import affine_transform
from torchio.transforms import Motion, RandomMotion
from torchio.transforms.interpolation import Interpolation
# import multiprocessing
__author__ = "Soumick Chatterjee, Alessandro Sciarra"
__copyright__ = "Copyright 2022, Faculty of Computer Science, Otto von Guericke University Magdeburg, Germany"
__credits__ = ["Soumick Chatterjee", "Alessandro Sciarra"]
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "Soumick Chatterjee"
__email__ = "soumick.chatterjee@ovgu.de"
__status__ = "Production"
class CustomMotion(Motion):
    """Motion transform that restricts the k-space corruption to one axis.

    ``noise_dir`` selects the spectrum axis along which segments of the
    motion-corrupted spectra are mixed in (0, 1 or 2); -1 picks 0 or 1 at
    random per call.
    """

    def __init__(self, noise_dir=2, **kargs):
        super(CustomMotion, self).__init__(**kargs)
        self.noise_dir = noise_dir

    def add_artifact(
            self,
            image: sitk.Image,
            transforms: List[sitk.Euler3DTransform],
            times: np.ndarray,
            interpolation: Interpolation,
    ):
        """Blend spectra of the resampled images segment-wise along noise_dir."""
        resampled = self.resample_images(image, transforms, interpolation)
        # ITK arrays arrive in reversed axis order; transpose to NumPy layout.
        volumes = [sitk.GetArrayViewFromImage(im).transpose() for im in resampled]
        spectra = [self.fourier_transform(vol) for vol in volumes]
        self.sort_spectra(spectra, times)

        mixed = np.empty_like(spectra[0])
        axis = self.noise_dir
        if axis == -1:
            axis = random.randint(0, 1)

        extent = mixed.shape[axis]
        boundaries = (extent * times).astype(int).tolist()
        boundaries.append(extent)

        start = 0
        for spectrum, stop in zip(spectra, boundaries):
            # Copy the [start:stop) segment along the chosen axis only.
            if axis == 0:
                mixed[..., start:stop, :, :] = spectrum[..., start:stop, :, :]
            elif axis == 1:
                mixed[..., start:stop, :] = spectrum[..., start:stop, :]
            else:
                mixed[..., start:stop] = spectrum[..., start:stop]
            start = stop

        corrupted = np.real(self.inv_fourier_transform(mixed))
        return corrupted.astype(np.float32)
class CustomRandomMotion(RandomMotion):
    """RandomMotion variant that forwards a noise direction to CustomMotion."""

    def __init__(self, noise_dir=2, **kwargs):
        super(CustomRandomMotion, self).__init__(**kwargs)
        self.noise_dir = noise_dir

    def apply_transform(self, subject):
        """Sample motion parameters per image and apply a CustomMotion."""
        arguments = defaultdict(dict)
        for name, image in self.get_images_dict(subject).items():
            times, degrees, translation = self.get_params(
                self.degrees_range,
                self.translation_range,
                self.num_transforms,
                is_2d=image.is_2d(),
            )
            arguments['times'][name] = times
            arguments['degrees'][name] = degrees
            arguments['translation'][name] = translation
            arguments['image_interpolation'][name] = self.image_interpolation
        motion = CustomMotion(noise_dir=self.noise_dir, **self.add_include_exclude(arguments))
        return motion(subject)
class RealityMotion():
    """Simulate realistic rigid-body motion by corrupting k-space per PE line.

    For every phase-encoding index a slightly different rigid transform
    (small random rotations/translations drawn from N(mu, sigma), with a
    random sign or zeroing per axis) is applied to the volume, and only that
    index's k-space lines are taken from the moved volume.  The inverse FFT
    of the composite k-space yields the motion-corrupted image.
    """

    def __init__(self, n_threads=4, mu=0, sigma=0.1, random_sigma=True):
        self.n_threads = n_threads  # worker threads for per-line corruption; 0 = serial
        self.mu = mu
        self.sigma = sigma
        self.sigma_limit = sigma  # upper bound when random_sigma is on
        self.random_sigma = random_sigma  # draw sigma ~ U(0, sigma_limit) per call

    def __perform_singlePE(self, idx):
        """Corrupt one phase-encoding index of the shared k-space buffer."""
        # np.random.normal(...) without a size argument yields a scalar float,
        # avoiding the deprecated size-1-array-to-scalar conversion inside
        # math.cos/math.sin.  random.randint(-1, 1) flips the sign or zeroes
        # the parameter (i.e. no motion along that axis for this line).
        rot_x = float(np.random.normal(self.mu, self.sigma)) * random.randint(-1, 1)
        rot_y = float(np.random.normal(self.mu, self.sigma)) * random.randint(-1, 1)
        rot_z = float(np.random.normal(self.mu, self.sigma)) * random.randint(-1, 1)
        tran_x = int(float(np.random.normal(self.mu, self.sigma)) * random.randint(-1, 1))
        tran_y = int(float(np.random.normal(self.mu, self.sigma)) * random.randint(-1, 1))
        tran_z = int(float(np.random.normal(self.mu, self.sigma)) * random.randint(-1, 1))
        temp_vol = self.__rot_tran_3d(self.in_vol, rot_x, rot_y, rot_z, tran_x, tran_y, tran_z)
        temp_k = np.fft.fftn(temp_vol)
        # Take every k-space sample belonging to this phase-encoding index
        # (equivalent to the original slice-by-slice copy loop).
        self.out_k[idx, :, :] = temp_k[idx, :, :]

    def __call__(self, vol):
        """Return the motion-corrupted version of *vol* (same shape/device)."""
        if self.random_sigma:
            self.sigma = random.uniform(0, self.sigma_limit)
        shape = vol.shape
        device = vol.device
        self.in_vol = vol.squeeze().cpu().numpy()
        # NOTE(review): divides by the maximum — an all-zero volume would
        # produce NaNs here; confirm inputs are guaranteed non-zero.
        self.in_vol = self.in_vol / self.in_vol.max()
        self.out_k = np.zeros((self.in_vol.shape)) + 0j
        if self.n_threads > 0:
            # multiprocessing.dummy provides a *thread* pool, so the workers
            # share self.out_k and can write into it directly.  The context
            # manager ensures the pool is terminated (fixes the leak).
            with multiprocessing.Pool(self.n_threads) as pool:
                pool.map(self.__perform_singlePE, range(self.in_vol.shape[0]))
        else:
            for idx in range(self.in_vol.shape[0]):
                self.__perform_singlePE(idx)
        vol = np.abs(np.fft.ifftn(self.out_k))
        vol = torch.from_numpy(vol).view(shape).to(device)
        del self.in_vol, self.out_k
        return vol

    def __x_rotmat(self, theta):
        """Rotation matrix about the x-axis for angle *theta* (radians)."""
        cos_t = math.cos(theta)
        sin_t = math.sin(theta)
        return np.array([[1, 0, 0],
                         [0, cos_t, -sin_t],
                         [0, sin_t, cos_t]])

    def __y_rotmat(self, theta):
        """Rotation matrix about the y-axis for angle *theta* (radians)."""
        cos_t = math.cos(theta)
        sin_t = math.sin(theta)
        return np.array([[cos_t, 0, sin_t],
                         [0, 1, 0],
                         [-sin_t, 0, cos_t]])

    def __z_rotmat(self, theta):
        """Rotation matrix about the z-axis for angle *theta* (radians)."""
        cos_t = math.cos(theta)
        sin_t = math.sin(theta)
        return np.array([[cos_t, -sin_t, 0],
                         [sin_t, cos_t, 0],
                         [0, 0, 1]])

    def __rot_tran_3d(self, J, rot_x, rot_y, rot_z, tran_x, tran_y, tran_z):
        """Apply a rigid rotation+translation to volume *J* and re-normalise."""
        # BUGFIX: rotations compose via matrix multiplication; the original
        # used elementwise '*', which does not yield a valid rotation matrix.
        M = self.__x_rotmat(rot_x) @ self.__y_rotmat(rot_y) @ self.__z_rotmat(rot_z)
        translation = [tran_x, tran_y, tran_z]
        K = affine_transform(J, M, translation, order=1)
        return K / (K.max() + 1e-16)
class MotionCorrupter():
    """Pairs a clean volume with its motion-corrupted twin.

    ``perform`` returns clean and corrupted volumes concatenated along dim 0,
    each optionally normalised according to ``norm_mode``.
    """

    def __init__(self, mode=0, degrees=10, translation=10, num_transforms=2, image_interpolation='linear', norm_mode=0, noise_dir=2, mu=0, sigma=0.1, random_sigma=False, n_threads=4):
        self.mode = mode  # 0: TorchIO's version, 1: custom direction-specific, 2: reality motion
        self.degrees = degrees
        self.translation = translation
        self.num_transforms = num_transforms
        self.image_interpolation = image_interpolation
        self.norm_mode = norm_mode  # 0: no norm, 1: divide by max, 2: min-max
        self.noise_dir = noise_dir  # motion axis, only used by the custom random mode
        self.mu = mu  # reality motion only
        self.sigma = sigma  # reality motion only
        self.random_sigma = random_sigma  # reality motion only: randomise sigma in (0, sigma]
        self.n_threads = n_threads  # reality motion only: worker threads, 0 = serial
        if mode == 0:
            self.corrupter = tio.transforms.RandomMotion(degrees=degrees, translation=translation, num_transforms=num_transforms, image_interpolation=image_interpolation)
        elif mode == 1:
            self.corrupter = CustomRandomMotion(degrees=degrees, translation=translation, num_transforms=num_transforms, image_interpolation=image_interpolation, noise_dir=noise_dir)
        elif mode == 2:
            self.corrupter = RealityMotion(n_threads=n_threads, mu=mu, sigma=sigma, random_sigma=random_sigma)

    def perform(self, vol):
        """Return cat([clean, corrupted], dim=0) with the configured norm."""
        clean = vol.float()
        corrupted = self.corrupter(clean)
        if self.norm_mode == 1:
            clean = clean / clean.max()
            corrupted = corrupted / corrupted.max()
        elif self.norm_mode == 2:
            clean = (clean - clean.min()) / (clean.max() - clean.min())
            corrupted = (corrupted - corrupted.min()) / (corrupted.max() - corrupted.min())
        return torch.cat([clean, corrupted], 0)
| 8,325 | 44.497268 | 183 | py |
DDoS | DDoS-master/utils/padding.py | #parital source: https://github.com/c22n/unet-pytorch
from typing import Tuple, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Function, Variable
from torch.nn.modules.utils import _ntuple
__author__ = "Soumick Chatterjee"
__copyright__ = "Copyright 2022, Soumick Chatterjee & OvGU:ESF:MEMoRIAL"
__credits__ = ["Soumick Chatterjee"]
__license__ = "GPL"
__version__ = "1.0.0"
__email__ = "soumick.chatterjee@ovgu.de"
__status__ = "Production"
def flip(x: Variable, dim: int) -> Variable:
    """Return a copy of *x* reversed along dimension *dim* (negative dims allowed).

    Replaces the hand-rolled Variable-era view/index implementation (which
    dispatched the index tensor's device via a brittle getattr on a string
    tuple) with the built-in ``torch.flip``, which handles device placement
    and negative dimensions natively and produces an identical result.
    """
    return torch.flip(x, [dim])
class ReflectionPad3d(nn.Module):
    """Module wrapper applying ReflectionPadNd over three spatial dimensions."""

    def __init__(self, padding: Union[int, Tuple[int]]):
        super(ReflectionPad3d, self).__init__()
        # Expand a scalar into one (before, after) amount per spatial side.
        self.padding = _ntuple(6)(padding)

    def forward(self, input: Variable) -> Variable:
        return ReflectionPadNd.apply(input, self.padding)

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}({self.padding})"
class ReflectionPadNd(Function):
    """Autograd Function implementing mirror ("same") padding in N dimensions.

    ``pad`` is a flat tuple ``(left_last, right_last, left_prev, right_prev, ...)``
    ordered from the last dimension backwards, as in ``torch.nn.functional.pad``.
    Note: despite the name, the edge value itself is mirrored (symmetric
    padding), unlike torch's 'reflect' mode which skips the edge sample.
    """

    @staticmethod
    def forward(ctx: Function, input: Variable, pad: Tuple[int]) -> Variable:
        ctx.pad = pad
        ctx.input_size = input.size()
        ctx.l_inp = len(input.size())
        # Group the flat pad into per-dimension (before, after) pairs (last
        # dim first), then reverse so pairs line up with the trailing input
        # dimensions in natural order.
        ctx.pad_tup = tuple([(a, b)
                             for a, b in zip(pad[:-1:2], pad[1::2])]
                            [::-1])
        ctx.l_pad = len(ctx.pad_tup)
        ctx.l_diff = ctx.l_inp - ctx.l_pad
        assert ctx.l_inp >= ctx.l_pad
        new_dim = tuple([sum((d,) + ctx.pad_tup[i])
                         for i, d in enumerate(input.size()[-ctx.l_pad:])])
        assert all([d > 0 for d in new_dim]), 'input is too small'

        # Build the output by concatenating mirrored chunks on each side.
        output = input.new(input.size()[:(ctx.l_diff)] + new_dim).zero_()
        c_input = input
        for i, p in zip(range(ctx.l_inp)[-ctx.l_pad:], ctx.pad_tup):
            if p[0] > 0:
                # BUGFIX: the original narrowed with pad[0] (the first flat
                # entry) instead of the per-dimension amount p[0], which
                # produced wrong sizes (and a failing copy_) for any padding
                # that differs between dimensions.
                chunk1 = c_input.narrow(i, 0, p[0]).flip(i)
                c_input = torch.cat((chunk1, c_input), i)
            if p[1] > 0:
                chunk2 = c_input.narrow(i, c_input.shape[i] - p[1], p[1]).flip(i)
                c_input = torch.cat((c_input, chunk2), i)
        output.copy_(c_input)
        return output

    @staticmethod
    def backward(ctx: Function, grad_output: Variable) -> Variable:
        # NOTE(review): this simply crops the padded gradient back to the
        # input size; gradient contributions flowing through the mirrored
        # border cells are discarded rather than accumulated into their
        # source cells — confirm this approximation is intended.
        grad_input = Variable(grad_output.data.new(ctx.input_size).zero_())
        grad_input_slices = [slice(0, x,) for x in ctx.input_size]
        cg_output = grad_output
        for i_s, p in zip(range(ctx.l_inp)[-ctx.l_pad:], ctx.pad_tup):
            if p[0] > 0:
                cg_output = cg_output.narrow(i_s, p[0],
                                             cg_output.size(i_s) - p[0])
            if p[1] > 0:
                cg_output = cg_output.narrow(i_s, 0,
                                             cg_output.size(i_s) - p[1])
        gis = tuple(grad_input_slices)
        grad_input[gis] = cg_output
        return grad_input, None, None
| 3,703 | 36.04 | 80 | py |
DDoS | DDoS-master/utils/pLoss/Resnet2D.py | #!/usr/bin/env python
"""
Original file Resnet2Dv2b14 of NCC1701
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
#from utils.TorchAct.pelu import PELU_oneparam as PELU
__author__ = "Soumick Chatterjee"
__copyright__ = "Copyright 2018, Soumick Chatterjee & OvGU:ESF:MEMoRIAL"
__credits__ = ["Soumick Chatterjee"]
__license__ = "GPL"
__version__ = "1.0.0"
__email__ = "soumick.chatterjee@ovgu.de"
__status__ = "Production"
class ResidualBlock(nn.Module):
    """Residual unit: x + F(x), where F is two reflection-padded 3x3 convs.

    ``relu`` and ``norm`` are module factories (e.g. nn.PReLU, nn.BatchNorm2d)
    so the caller controls activation and normalisation type.
    """

    def __init__(self, in_features, relu, norm):
        super(ResidualBlock, self).__init__()
        layers = [
            nn.ReflectionPad2d(1),
            nn.Conv2d(in_features, in_features, 3),
            norm(in_features),
            relu(),
            nn.Dropout2d(p=0.2),
            nn.ReflectionPad2d(1),
            nn.Conv2d(in_features, in_features, 3),
            norm(in_features),
        ]
        self.conv_block = nn.Sequential(*layers)

    def forward(self, x):
        residual = self.conv_block(x)
        return x + residual
class ResNet(nn.Module):
    """2D encoder / residual-trunk / decoder network.

    Stem 7x7 conv -> strided-conv downsampling -> ``res_blocks`` residual
    blocks -> transposed-conv upsampling -> 7x7 head, optionally followed by
    a Sigmoid.  The default of 14 residual blocks was chosen so the number of
    trainable parameters is close to the number of possible pixel values of a
    256x256 image.
    """

    def __init__(self, in_channels=1, out_channels=1, res_blocks=14, starting_n_features=64, updown_blocks=2, is_relu_leaky=True, final_out_sigmoid=True, do_batchnorm=True):
        super(ResNet, self).__init__()
        act = nn.PReLU if is_relu_leaky else nn.ReLU
        norm = nn.BatchNorm2d if do_batchnorm else nn.InstanceNorm2d

        # Stem: reflection-padded 7x7 convolution.
        layers = [nn.ReflectionPad2d(3),
                  nn.Conv2d(in_channels, starting_n_features, 7),
                  norm(starting_n_features),
                  act()]

        # Downsampling: strided convs, doubling the feature count each step.
        feat = starting_n_features
        for _ in range(updown_blocks):
            layers += [nn.Conv2d(feat, feat * 2, 3, stride=2, padding=1),
                       norm(feat * 2),
                       act()]
            feat *= 2

        # Residual trunk at the bottleneck resolution.
        layers += [ResidualBlock(feat, act, norm) for _ in range(res_blocks)]

        # Upsampling: transposed convs, halving the feature count each step.
        for _ in range(updown_blocks):
            layers += [nn.ConvTranspose2d(feat, feat // 2, 3, stride=2, padding=1, output_padding=1),
                       norm(feat // 2),
                       act()]
            feat //= 2

        # Head: 7x7 convolution back to the requested channel count.
        layers += [nn.ReflectionPad2d(3),
                   nn.Conv2d(starting_n_features, out_channels, 7)]

        # Final activation.
        layers += [nn.Sigmoid()] if final_out_sigmoid else [act()]

        self.model = nn.Sequential(*layers)

    def forward(self, input):
        return self.model(input)
| 3,150 | 31.484536 | 294 | py |
DDoS | DDoS-master/utils/pLoss/VesselSeg_UNet3d_DeepSup.py | # -*- coding: utf-8 -*-
"""
"""
# from __future__ import print_function, division
import torch
import torch.nn as nn
import torch.utils.data
#from Utils.wta import KWinnersTakeAll
__author__ = "Kartik Prabhu, Mahantesh Pattadkal, and Soumick Chatterjee"
__copyright__ = "Copyright 2022, Faculty of Computer Science, Otto von Guericke University Magdeburg, Germany"
__credits__ = ["Kartik Prabhu", "Mahantesh Pattadkal", "Soumick Chatterjee"]
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "Soumick Chatterjee"
__email__ = "soumick.chatterjee@ovgu.de"
__status__ = "Production"
class conv_block(nn.Module):
    """Two stacked 3D conv -> batch-norm -> ReLU stages."""

    def __init__(self, in_channels, out_channels, k_size=3, stride=1, padding=1, bias=True):
        super(conv_block, self).__init__()
        stages = []
        channels = in_channels
        for _ in range(2):
            stages.append(nn.Conv3d(in_channels=channels, out_channels=out_channels,
                                    kernel_size=k_size, stride=stride, padding=padding, bias=bias))
            stages.append(nn.BatchNorm3d(num_features=out_channels))
            stages.append(nn.ReLU(inplace=True))
            channels = out_channels
        self.conv = nn.Sequential(*stages)

    def forward(self, x):
        return self.conv(x)
class conv_block_v2(nn.Module):
    """Two stacked 3D conv -> batch-norm -> ReLU stages (WTA variant).

    Identical to ``conv_block`` while the trailing KWinnersTakeAll layer
    remains commented out in the original implementation.
    """

    def __init__(self, in_channels, out_channels, k_size=3, stride=1, padding=1, bias=True):
        super(conv_block_v2, self).__init__()
        first = [
            nn.Conv3d(in_channels=in_channels, out_channels=out_channels,
                      kernel_size=k_size, stride=stride, padding=padding, bias=bias),
            nn.BatchNorm3d(num_features=out_channels),
            nn.ReLU(inplace=True),
        ]
        second = [
            nn.Conv3d(in_channels=out_channels, out_channels=out_channels,
                      kernel_size=k_size, stride=stride, padding=padding, bias=bias),
            nn.BatchNorm3d(num_features=out_channels),
            nn.ReLU(inplace=True),
            # KWinnersTakeAll(0.02)  # disabled, kept for reference
        ]
        self.conv = nn.Sequential(*(first + second))

    def forward(self, x):
        return self.conv(x)
class up_conv(nn.Module):
    """2x upsampling (nn.Upsample default mode) followed by conv -> BN -> ReLU."""

    def __init__(self, in_channels, out_channels, k_size=3, stride=1, padding=1, bias=True):
        super(up_conv, self).__init__()
        self.up = nn.Sequential(
            nn.Upsample(scale_factor=2),
            nn.Conv3d(in_channels=in_channels, out_channels=out_channels,
                      kernel_size=k_size, stride=stride, padding=padding, bias=bias),
            nn.BatchNorm3d(num_features=out_channels),
            nn.ReLU(inplace=True),
        )

    def forward(self, x):
        return self.up(x)
class U_Net_DeepSup(nn.Module):
    """3D U-Net with deep-supervision outputs at two decoder scales.

    Input: [batch, channel, depth, height, width].
    Returns (out, d3_out, d4_out): the full-resolution prediction plus two
    lower-scale single-channel supervision maps.
    Paper: https://arxiv.org/abs/1505.04597
    """

    def __init__(self, in_ch=1, out_ch=1):
        super(U_Net_DeepSup, self).__init__()
        base = 64
        filters = [base, base * 2, base * 4, base * 8, base * 16]  # 64,128,256,512,1024

        self.Maxpool1 = nn.MaxPool3d(kernel_size=2, stride=2)
        self.Maxpool2 = nn.MaxPool3d(kernel_size=2, stride=2)
        self.Maxpool3 = nn.MaxPool3d(kernel_size=2, stride=2)
        self.Maxpool4 = nn.MaxPool3d(kernel_size=2, stride=2)

        self.Conv1 = conv_block(in_ch, filters[0])
        self.Conv2 = conv_block(filters[0], filters[1])
        self.Conv3 = conv_block(filters[1], filters[2])
        self.Conv4 = conv_block(filters[2], filters[3])
        self.Conv5 = conv_block(filters[3], filters[4])

        # Deep-supervision heads producing single-channel maps.
        self.Conv_d3 = conv_block(filters[1], 1)
        self.Conv_d4 = conv_block(filters[2], 1)

        self.Up5 = up_conv(filters[4], filters[3])
        self.Up_conv5 = conv_block(filters[4], filters[3])
        self.Up4 = up_conv(filters[3], filters[2])
        self.Up_conv4 = conv_block(filters[3], filters[2])
        self.Up3 = up_conv(filters[2], filters[1])
        self.Up_conv3 = conv_block(filters[2], filters[1])
        self.Up2 = up_conv(filters[1], filters[0])
        self.Up_conv2 = conv_block(filters[1], filters[0])

        self.Conv = nn.Conv3d(filters[0], out_ch, kernel_size=1, stride=1, padding=0)
        # self.active = torch.nn.Sigmoid()

    def forward(self, x):
        # Encoder: convolve, then pool; keep each scale as a skip connection.
        enc1 = self.Conv1(x)
        enc2 = self.Conv2(self.Maxpool1(enc1))
        enc3 = self.Conv3(self.Maxpool2(enc2))
        enc4 = self.Conv4(self.Maxpool3(enc3))
        enc5 = self.Conv5(self.Maxpool4(enc4))

        # Decoder: upsample, concatenate the matching skip, convolve.
        dec5 = self.Up_conv5(torch.cat((enc4, self.Up5(enc5)), dim=1))
        dec4 = self.Up_conv4(torch.cat((enc3, self.Up4(dec5)), dim=1))
        d4_out = self.Conv_d4(dec4)
        dec3 = self.Up_conv3(torch.cat((enc2, self.Up3(dec4)), dim=1))
        d3_out = self.Conv_d3(dec3)
        dec2 = self.Up_conv2(torch.cat((enc1, self.Up2(dec3)), dim=1))

        out = self.Conv(dec2)
        return out, d3_out, d4_out
class U_Net_DeepSup_level4(nn.Module):
    """3D U-Net with deep-supervision outputs at three decoder scales.

    Input: [batch, channel, depth, height, width].
    Returns the list [out, d3_out, d4_out, d5_out]: the full-resolution
    prediction plus three lower-scale single-channel supervision maps.
    Paper: https://arxiv.org/abs/1505.04597
    """

    def __init__(self, in_ch=1, out_ch=1):
        super(U_Net_DeepSup_level4, self).__init__()
        base = 64
        filters = [base, base * 2, base * 4, base * 8, base * 16]  # 64,128,256,512,1024

        self.Maxpool1 = nn.MaxPool3d(kernel_size=2, stride=2)
        self.Maxpool2 = nn.MaxPool3d(kernel_size=2, stride=2)
        self.Maxpool3 = nn.MaxPool3d(kernel_size=2, stride=2)
        self.Maxpool4 = nn.MaxPool3d(kernel_size=2, stride=2)

        self.Conv1 = conv_block(in_ch, filters[0])
        self.Conv2 = conv_block(filters[0], filters[1])
        self.Conv3 = conv_block(filters[1], filters[2])
        self.Conv4 = conv_block(filters[2], filters[3])
        self.Conv5 = conv_block(filters[3], filters[4])

        # Deep-supervision heads producing single-channel maps.
        self.Conv_d3 = conv_block(filters[1], 1)
        self.Conv_d4 = conv_block(filters[2], 1)
        self.Conv_d5 = conv_block(filters[3], 1)

        self.Up5 = up_conv(filters[4], filters[3])
        self.Up_conv5 = conv_block(filters[4], filters[3])
        self.Up4 = up_conv(filters[3], filters[2])
        self.Up_conv4 = conv_block(filters[3], filters[2])
        self.Up3 = up_conv(filters[2], filters[1])
        self.Up_conv3 = conv_block(filters[2], filters[1])
        self.Up2 = up_conv(filters[1], filters[0])
        self.Up_conv2 = conv_block(filters[1], filters[0])

        self.Conv = nn.Conv3d(filters[0], out_ch, kernel_size=1, stride=1, padding=0)
        # self.active = torch.nn.Sigmoid()

    def forward(self, x):
        # Encoder: convolve, then pool; keep each scale as a skip connection.
        enc1 = self.Conv1(x)
        enc2 = self.Conv2(self.Maxpool1(enc1))
        enc3 = self.Conv3(self.Maxpool2(enc2))
        enc4 = self.Conv4(self.Maxpool3(enc3))
        enc5 = self.Conv5(self.Maxpool4(enc4))

        # Decoder: upsample, concatenate the matching skip, convolve; emit a
        # supervision map after each of the three deepest decoder stages.
        dec5 = self.Up_conv5(torch.cat((enc4, self.Up5(enc5)), dim=1))
        d5_out = self.Conv_d5(dec5)
        dec4 = self.Up_conv4(torch.cat((enc3, self.Up4(dec5)), dim=1))
        d4_out = self.Conv_d4(dec4)
        dec3 = self.Up_conv3(torch.cat((enc2, self.Up3(dec4)), dim=1))
        d3_out = self.Conv_d3(dec3)
        dec2 = self.Up_conv2(torch.cat((enc1, self.Up2(dec3)), dim=1))

        out = self.Conv(dec2)
        return [out, d3_out, d4_out, d5_out]
class U_Net_DeepSup_level4_wta(nn.Module):
    """3D U-Net with deep-supervision taps at decoder levels 3, 4 and 5.

    Input: [batch, channel(# of channels of each image), depth(# of frames), height, width].
    Paper: https://arxiv.org/abs/1505.04597
    """

    def __init__(self, in_ch=1, out_ch=1):
        super(U_Net_DeepSup_level4_wta, self).__init__()

        base = 64  # TODO: original paper starts with 64
        filters = [base, base * 2, base * 4, base * 8, base * 16]  # 64,128,256,512,1024

        # One 2x max-pool per encoder transition.
        for idx in range(1, 5):
            setattr(self, "Maxpool{}".format(idx), nn.MaxPool3d(kernel_size=2, stride=2))

        # Encoder convolution stages (Conv1..Conv5).
        enc_ch = [in_ch] + filters
        for idx in range(1, 6):
            setattr(self, "Conv{}".format(idx), conv_block_v2(enc_ch[idx - 1], enc_ch[idx]))

        # Single-channel heads for deep supervision (levels 3-5).
        for lvl in (3, 4, 5):
            setattr(self, "Conv_d{}".format(lvl), conv_block_v2(filters[lvl - 2], 1))

        # Decoder: per level, an upsampling module and a fusion conv block.
        for lvl in (5, 4, 3, 2):
            setattr(self, "Up{}".format(lvl), up_conv(filters[lvl - 1], filters[lvl - 2]))
            setattr(self, "Up_conv{}".format(lvl), conv_block(filters[lvl - 1], filters[lvl - 2]))

        self.Conv = nn.Conv3d(filters[0], out_ch, kernel_size=1, stride=1, padding=0)
        # self.active = torch.nn.Sigmoid()

    def forward(self, x):
        """Return [final output, deep-supervision outputs of levels 3, 4, 5]."""
        # Encoder path.
        enc1 = self.Conv1(x)
        enc2 = self.Conv2(self.Maxpool1(enc1))
        enc3 = self.Conv3(self.Maxpool2(enc2))
        enc4 = self.Conv4(self.Maxpool3(enc3))
        enc5 = self.Conv5(self.Maxpool4(enc4))

        # Decoder path with channel-wise skip concatenation.
        dec5 = self.Up_conv5(torch.cat((enc4, self.Up5(enc5)), dim=1))
        sup5 = self.Conv_d5(dec5)

        dec4 = self.Up_conv4(torch.cat((enc3, self.Up4(dec5)), dim=1))
        sup4 = self.Conv_d4(dec4)

        dec3 = self.Up_conv3(torch.cat((enc2, self.Up3(dec4)), dim=1))
        sup3 = self.Conv_d3(dec3)

        dec2 = self.Up_conv2(torch.cat((enc1, self.Up2(dec3)), dim=1))

        return [self.Conv(dec2), sup3, sup4, sup5]
| 13,151 | 29.09611 | 110 | py |
DDoS | DDoS-master/utils/pLoss/perceptual_loss.py | import math
import torch
import torch.nn as nn
import torchvision
# from utils.utils import *
# from pytorch_msssim import SSIM
from .Resnet2D import ResNet
from .simpleunet import UNet
from .VesselSeg_UNet3d_DeepSup import U_Net_DeepSup
__author__ = "Soumick Chatterjee"
__copyright__ = "Copyright 2022, Faculty of Computer Science, Otto von Guericke University Magdeburg, Germany"
__credits__ = ["Soumick Chatterjee"]
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "Soumick Chatterjee"
__email__ = "soumick.chatterjee@ovgu.de"
__status__ = "Production"
class PerceptualLoss(torch.nn.Module): #currently configured for 1 channel only, with datarange as 1 for SSIM
    """Multi-level perceptual loss.

    Splits a chosen backbone into sequential feature ``blocks`` and sums a
    base distance (``loss_type``) between the features of ``input`` and
    ``target`` at up to ``n_level`` depths. All backbone parameters are
    frozen (requires_grad = False), so gradients flow only into the images.
    """
    def __init__(self, device="cuda:0", loss_model="densenet161", n_level=math.inf, resize=None, loss_type="L1", mean=[], std=[]):
        """Build the frozen feature extractor and select the base distance.

        Args:
            device: device string the backbone is moved to.
            loss_model: backbone selector — "resnet2D", "unet2D",
                "unet3Dds", "resnext1012D" or "densenet161".
            n_level: number of feature depths to compare (math.inf = all).
            resize: optional spatial size both images are interpolated to
                before feature extraction; None disables resizing.
            loss_type: "L1", "MultiSSIM", "SSIM3D" or "SSIM2D".
            mean, std: optional per-channel normalisation statistics.
                NOTE(review): these are shared mutable default arguments
                (only read here, so harmless, but None would be safer);
                also the length check below requires len > 1, so length-1
                (single-channel) stats are silently ignored — confirm.
        """
        super(PerceptualLoss, self).__init__()
        blocks = []
        if loss_model == "resnet2D": #TODO: not finished
            # NOTE(review): weights are loaded but no feature block is ever
            # appended in this branch, so forward() would return 0.0 — confirm.
            model = ResNet(in_channels=1, out_channels=1).to(device)
            chk = torch.load(r"./utils/pLoss/ResNet14_IXIT2_Base_d1p75_t0_n10_dir01_5depth_L1Loss_best.pth.tar", map_location=device)
            model.load_state_dict(chk['state_dict'])
        elif loss_model == "unet2D":
            # 2D U-Net encoder; level 1 is the first conv block, each deeper
            # level is (2x avg-pool -> conv block).
            model = UNet(in_channels=1, out_channels=1, depth=5, wf=6, padding=True,
                         batch_norm=False, up_mode='upsample', droprate=0.0, is3D=False,
                         returnBlocks=False, downPath=True, upPath=True).to(device)
            chk = torch.load(r"./utils/pLoss/SimpleU_IXIT2_Base_d1p75_t0_n10_dir01_5depth_L1Loss_best.pth.tar", map_location=device)
            model.load_state_dict(chk['state_dict'])
            blocks.append(model.down_path[0].block.eval())
            if n_level >= 2:
                blocks.append(
                    nn.Sequential(
                        nn.AvgPool2d(2),
                        model.down_path[1].block.eval()
                    )
                )
            if n_level >= 3:
                blocks.append(
                    nn.Sequential(
                        nn.AvgPool2d(2),
                        model.down_path[2].block.eval()
                    )
                )
            if n_level >= 4:
                blocks.append(
                    nn.Sequential(
                        nn.AvgPool2d(2),
                        model.down_path[3].block.eval()
                    )
                )
        elif loss_model == "unet3Dds":
            # 3D U-Net (deep supervision variant) encoder; levels follow the
            # network's own max-pool/conv pairing.
            model = U_Net_DeepSup().to(device)
            chk = torch.load(r"./utils/pLoss/VesselSeg_UNet3d_DeepSup.pth", map_location=device)
            model.load_state_dict(chk['state_dict'])
            blocks.append(model.Conv1.conv.eval())
            if n_level >= 2:
                blocks.append(
                    nn.Sequential(
                        model.Maxpool1.eval(),
                        model.Conv2.conv.eval()
                    )
                )
            if n_level >= 3:
                blocks.append(
                    nn.Sequential(
                        model.Maxpool2.eval(),
                        model.Conv3.conv.eval()
                    )
                )
            if n_level >= 4:
                blocks.append(
                    nn.Sequential(
                        model.Maxpool3.eval(),
                        model.Conv4.conv.eval()
                    )
                )
            if n_level >= 5:
                blocks.append(
                    nn.Sequential(
                        model.Maxpool4.eval(),
                        model.Conv5.conv.eval()
                    )
                )
        elif loss_model == "resnext1012D":
            # ResNeXt-101 backbone, adapted to 1 input channel and 33 classes.
            # NOTE(review): no checkpoint is loaded (commented out below), so
            # this uses randomly initialised features — confirm intended.
            model = torchvision.models.resnext101_32x8d()
            model.conv1 = nn.Conv2d(1, model.conv1.out_channels, kernel_size=model.conv1.kernel_size,
                                    stride=model.conv1.stride, padding=model.conv1.padding, bias=False if model.conv1.bias is None else True)
            model.fc = nn.Linear(in_features=model.fc.in_features, out_features=33, bias=False if model.fc.bias is None else True)
            model.to(device)
            # chk = torch.load(r"./utils/pLoss/ResNet14_IXIT2_Base_d1p75_t0_n10_dir01_5depth_L1Loss_best.pth.tar", map_location=device)
            # model.load_state_dict(chk['state_dict'])
            blocks.append(
                nn.Sequential(
                    model.conv1.eval(),
                    model.bn1.eval(),
                    model.relu.eval(),
                )
            )
            if n_level >= 2:
                blocks.append(
                    nn.Sequential(
                        model.maxpool.eval(),
                        model.layer1.eval()
                    )
                )
            if n_level >= 3:
                blocks.append(model.layer2.eval())
            if n_level >= 4:
                blocks.append(model.layer3.eval())
            if n_level >= 5:
                blocks.append(model.layer4.eval())
        elif loss_model == "densenet161":
            # DenseNet-161 backbone (default), adapted to 1 input channel.
            # NOTE(review): as above, no checkpoint is loaded here — confirm.
            model = torchvision.models.densenet161()
            model.features.conv0 = nn.Conv2d(1, model.features.conv0.out_channels, kernel_size=model.features.conv0.kernel_size,
                                             stride=model.features.conv0.stride, padding=model.features.conv0.padding,
                                             bias=False if model.features.conv0.bias is None else True)
            model.classifier = nn.Linear(in_features=model.classifier.in_features, out_features=33, bias=False if model.classifier.bias is None else True)
            model.to(device)
            # chk = torch.load(r"./utils/pLoss/ResNet14_IXIT2_Base_d1p75_t0_n10_dir01_5depth_L1Loss_best.pth.tar", map_location=device)
            # model.load_state_dict(chk['state_dict'])
            model = model.features
            blocks.append(
                nn.Sequential(
                    model.conv0.eval(),
                    model.norm0.eval(),
                    model.relu0.eval(),
                )
            )
            if n_level >= 2:
                blocks.append(
                    nn.Sequential(
                        model.pool0.eval(),
                        model.denseblock1.eval()
                    )
                )
            if n_level >= 3:
                blocks.append(model.denseblock2.eval())
            if n_level >= 4:
                blocks.append(model.denseblock3.eval())
            if n_level >= 5:
                blocks.append(model.denseblock4.eval())
        # Freeze every backbone parameter: the loss is a fixed feature metric.
        for bl in blocks:
            for params in bl.parameters():
                params.requires_grad = False
        self.blocks = nn.ModuleList(blocks)
        self.transform = nn.functional.interpolate
        if (mean is not None and len(mean) > 1) and (std is not None and len(std) > 1) and (len(mean) == len(std)):
            # NOTE(review): registered as nn.Parameter (trainable by default),
            # so an optimizer given this module's parameters would update the
            # normalisation stats — buffers look more appropriate; confirm.
            self.mean = nn.Parameter(torch.tensor(mean).view(1,len(mean),1,1))
            self.std = nn.Parameter(torch.tensor(std).view(1,len(std),1,1))
        else:
            self.mean = None
            self.std = None
        self.resize = resize
        if loss_type == "L1":
            self.loss_func = torch.nn.functional.l1_loss
        elif loss_type == "MultiSSIM":
            # NOTE(review): MultiSSIM is not defined/imported anywhere in this
            # module — this branch raises NameError if selected; confirm.
            self.loss_func = MultiSSIM(reduction='mean').to(device)
        elif loss_type == "SSIM3D":
            # NOTE(review): the `from pytorch_msssim import SSIM` import at the
            # top of this file is commented out, so the SSIM branches raise
            # NameError if selected — confirm.
            self.loss_func = SSIM(data_range=1, size_average=True, channel=1, spatial_dims=3).to(device)
        elif loss_type == "SSIM2D":
            self.loss_func = SSIM(data_range=1, size_average=True, channel=1, spatial_dims=2).to(device)
    def forward(self, input, target):
        """Return the summed feature-space distance between input and target."""
        if self.mean is not None:
            input = (input-self.mean) / self.std
            target = (target-self.mean) / self.std
        if self.resize:
            # NOTE(review): the interpolation mode for `target` is chosen from
            # len(input.shape) — harmless while both share a rank, but it
            # looks like a copy-paste slip; confirm.
            input = self.transform(input, mode='trilinear' if len(input.shape) == 5 else 'bilinear', size=self.resize, align_corners=False)
            target = self.transform(target, mode='trilinear' if len(input.shape) == 5 else 'bilinear', size=self.resize, align_corners=False)
        loss = 0.0
        x = input
        y = target
        # Features are computed incrementally: each block consumes the
        # previous block's output, and the distance is accumulated per level.
        for block in self.blocks:
            x = block(x)
            y = block(y)
            loss += self.loss_func(x, y)
        return loss
if __name__ == '__main__':
    # Smoke test: perceptual loss between two random single-channel batches.
    # Fix: the original ended with a bare undefined name `sdsd`, which raised
    # a NameError after the loss was computed; print the result instead.
    criterion = PerceptualLoss(resize=None).cuda()
    a = torch.rand(2,1,24,24).cuda()
    b = torch.rand(2,1,24,24).cuda()
    l = criterion(a,b)
    print(l)
| 8,538 | 42.345178 | 154 | py |
DDoS | DDoS-master/utils/pLoss/simpleunet.py | import torch
import torch.nn.functional as F
from torch import nn
__author__ = "Soumick Chatterjee"
__copyright__ = "Copyright 2022, Faculty of Computer Science, Otto von Guericke University Magdeburg, Germany"
__credits__ = ["Soumick Chatterjee", "Chompunuch Sarasaen"]
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "Soumick Chatterjee"
__email__ = "soumick.chatterjee@ovgu.de"
__status__ = "Production"
class UNetConvBlock(nn.Module):
    """Two stacked (3x3 conv -> ReLU [-> batch norm]) stages.

    ``layer_conv`` and ``layer_batchnorm`` are module-level aliases that
    ``UNet.__init__`` installs (2D or 3D variants) via ``globals()``.
    """

    def __init__(self, in_size, out_size, padding, batch_norm):
        super(UNetConvBlock, self).__init__()
        stages = [
            layer_conv(in_size, out_size, kernel_size=3,
                       padding=int(padding)),
            nn.ReLU(),
        ]
        if batch_norm:
            stages.append(layer_batchnorm(out_size))
        stages.extend([
            layer_conv(out_size, out_size, kernel_size=3,
                       padding=int(padding)),
            nn.ReLU(),
        ])
        if batch_norm:
            stages.append(layer_batchnorm(out_size))
        self.block = nn.Sequential(*stages)

    def forward(self, x):
        return self.block(x)
class UNetUpBlock(nn.Module):
    """Upsample, match the skip tensor's spatial size, concatenate, convolve.

    ``layer_convtrans``, ``layer_conv`` and ``interp_mode`` are module-level
    aliases installed by ``UNet.__init__`` via ``globals()``.
    """

    def __init__(self, in_size, out_size, up_mode, padding, batch_norm):
        super(UNetUpBlock, self).__init__()
        if up_mode == 'upconv':
            self.up = layer_convtrans(in_size, out_size, kernel_size=2,
                                      stride=2)
        elif up_mode == 'upsample':
            self.up = nn.Sequential(
                nn.Upsample(mode=interp_mode, scale_factor=2),
                layer_conv(in_size, out_size, kernel_size=1),
            )
        self.conv_block = UNetConvBlock(in_size, out_size, padding, batch_norm)

    def center_crop(self, layer, target_size):
        """Crop a 5-D tensor to `target_size` around its spatial centre.

        Not used by forward() (which resizes with F.interpolate instead);
        kept for API compatibility.
        """
        _, _, layer_depth, layer_height, layer_width = layer.size()
        diff_z = (layer_depth - target_size[0]) // 2
        diff_y = (layer_height - target_size[1]) // 2
        diff_x = (layer_width - target_size[2]) // 2
        return layer[:, :, diff_z:(diff_z + target_size[0]),
                     diff_y:(diff_y + target_size[1]),
                     diff_x:(diff_x + target_size[2])]

    def forward(self, x, bridge):
        upsampled = self.up(x)
        # Resize (rather than crop) so the upsampled tensor and the skip
        # tensor can be concatenated along the channel axis.
        upsampled = F.interpolate(upsampled, size=bridge.shape[2:], mode=interp_mode)
        return self.conv_block(torch.cat([upsampled, bridge], 1))
class UNet(nn.Module):
    """
    Implementation of
    U-Net: Convolutional Networks for Biomedical Image Segmentation
    (Ronneberger et al., 2015)
    https://arxiv.org/abs/1505.04597
    Using the default arguments will yield the exact version used
    in the original paper
    Args:
        in_channels (int): number of input channels
        out_channels (int): number of output channels
        depth (int): depth of the network
        wf (int): number of filters in the first layer is 2**wf
        padding (bool): if True, apply padding such that the input shape
                        is the same as the output.
                        This may introduce artifacts
        batch_norm (bool): Use BatchNorm after layers with an
                           activation function
        up_mode (str): one of 'upconv' or 'upsample'.
                       'upconv' will use transposed convolutions for
                       learned upsampling.
                       'upsample' will use bilinear upsampling.
        droprate (float): Rate of dropout. If undesired, then 0.0
        is3D (bool): If a 3D or 2D version of U-net
        returnBlocks (bool) : If True, it will return the blocks created during downPath. If downPath is False, then it will be ignored
        downPath and upPath (bool): If only the downpath or uppath of the U-Net is needed, make the other one False
    Forward call:
        x (Tensor): Input Tensor
        blocks (list of Tensors): If only upPath is set to True, then this will be used during the forward of the uppath. If not desired, then supply blank list
    """
    def __init__(self, in_channels=1, out_channels=1, depth=3, wf=6, padding=True,
                 batch_norm=False, up_mode='upconv', droprate=0.0, is3D=False,
                 returnBlocks=False, downPath=True, upPath=True):
        super(UNet, self).__init__()
        # Choose the 2D or 3D layer family once, up front.
        layers = {}
        if is3D:
            layers["layer_conv"] = nn.Conv3d
            layers["layer_convtrans"] = nn.ConvTranspose3d
            layers["layer_batchnorm"] = nn.BatchNorm3d
            layers["layer_drop"] = nn.Dropout3d
            layers["func_avgpool"] = F.avg_pool3d
            layers["interp_mode"] = 'trilinear'
        else:
            layers["layer_conv"] = nn.Conv2d
            layers["layer_convtrans"] = nn.ConvTranspose2d
            layers["layer_batchnorm"] = nn.BatchNorm2d
            layers["layer_drop"] = nn.Dropout2d
            layers["func_avgpool"] = F.avg_pool2d
            layers["interp_mode"] = 'bilinear'
        # NOTE(review): this injects layer_conv/layer_batchnorm/... into the
        # *module* globals; UNetConvBlock and UNetUpBlock read these names, so
        # a UNet must be constructed before those classes are usable, and
        # constructing a 2D and a 3D UNet in the same process reconfigures
        # both — confirm this coupling is intended.
        globals().update(layers)
        self.returnBlocks = returnBlocks
        self.do_down = downPath
        self.do_up = upPath
        self.padding = padding
        self.depth = depth
        self.dropout = layer_drop(p=droprate)
        prev_channels = in_channels
        self.down_path = nn.ModuleList()
        for i in range(depth):
            if self.do_down:
                self.down_path.append(UNetConvBlock(prev_channels, 2**(wf+i),
                                                    padding, batch_norm))
            # Channel count advances even when the down path is skipped, so
            # the up path sees the same widths either way.
            prev_channels = 2**(wf+i)
        self.latent_channels = prev_channels
        self.up_path = nn.ModuleList()
        for i in reversed(range(depth - 1)):
            if self.do_up:
                self.up_path.append(UNetUpBlock(prev_channels, 2**(wf+i), up_mode,
                                                padding, batch_norm))
            prev_channels = 2**(wf+i)
        if self.do_up:
            self.last = layer_conv(prev_channels, out_channels, kernel_size=1)
    def forward(self, x, blocks=()):
        """Run the configured down and/or up paths.

        NOTE(review): if a caller passes a list as ``blocks``, the
        ``blocks += (x,)`` below extends that list in place, mutating the
        caller's object — confirm this is intended.
        """
        if self.do_down:
            for i, down in enumerate(self.down_path):
                x = down(x)
                if i != len(self.down_path)-1:
                    blocks += (x,)
                    # Downsample between levels; dropout after pooling.
                    x = func_avgpool(x, 2)
                    x = self.dropout(x)
        if self.do_up:
            for i, up in enumerate(self.up_path):
                # Skip tensors are consumed deepest-first.
                x = up(x, blocks[-i-1])
            x = self.last(x)
        if self.returnBlocks and self.do_down:
            return x, blocks
        else:
            return x
if __name__ == '__main__':
    # Quick smoke test: push one random 2-D batch through a default UNet.
    print('#### Test Case ###')
    from torch.autograd import Variable
    sample = Variable(torch.rand(2,1,64,64)).cuda()
    net = UNet().cuda()
    prediction = net(sample)
    print(prediction.shape)
| 7,121 | 39.237288 | 160 | py |
HIBPool | HIBPool-main/GIB.py | #!/usr/bin/env python
# coding: utf-8
# In[ ]:
from __future__ import print_function
import numpy as np
import pprint as pp
from copy import deepcopy
import pickle
from numbers import Number
from collections import OrderedDict
import itertools
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.nn import functional as F
import torch.optim as optim
from torch.optim.lr_scheduler import ReduceLROnPlateau, LambdaLR
from torch.distributions import constraints
from torch.distributions.normal import Normal
from torch.distributions.multivariate_normal import MultivariateNormal
from torch.distributions.distribution import Distribution
from torch.distributions.utils import broadcast_all
import sys, os
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from pytorch_net.modules import get_Layer, load_layer_dict, Simple_2_Symbolic
from pytorch_net.util import forward, Loss_Fun, get_activation, get_criterion, get_criteria_value, get_optimizer, get_full_struct_param, plot_matrices, get_model_DL, PrecisionFloorLoss, get_list_DL, init_weight
from pytorch_net.util import Early_Stopping, Performance_Monitor, record_data, to_np_array, to_Variable, make_dir, formalize_value, RampupLR, Transform_Label, view_item, load_model, save_model, to_cpu_recur, filter_kwargs
# ## Training functionality:
# In[ ]:
def train(
model,
X = None,
y = None,
train_loader = None,
validation_data = None,
validation_loader = None,
criterion = nn.MSELoss(),
inspect_interval = 10,
isplot = False,
is_cuda = None,
**kwargs
):
"""Training function for generic models. "model" can be a single model or a ordered list of models"""
def get_regularization(model, loss_epoch, **kwargs):
"""Compute regularization."""
reg_dict = kwargs["reg_dict"] if "reg_dict" in kwargs else None
reg = to_Variable([0], is_cuda = is_cuda)
if reg_dict is not None:
for reg_type, reg_coeff in reg_dict.items():
# Setting up regularization strength:
if isinstance(reg_coeff, Number):
reg_coeff_ele = reg_coeff
else:
if loss_epoch < len(reg_coeff):
reg_coeff_ele = reg_coeff[loss_epoch]
else:
reg_coeff_ele = reg_coeff[-1]
# Accumulate regularization:
reg = reg + model.get_regularization(source=[reg_type], mode=reg_mode, **kwargs) * reg_coeff_ele
return reg
if is_cuda is None:
if X is None and y is None:
assert train_loader is not None
is_cuda = train_loader.dataset.tensors[0].is_cuda
else:
is_cuda = X.is_cuda
# Optimization kwargs:
epochs = kwargs["epochs"] if "epochs" in kwargs else 10000
lr = kwargs["lr"] if "lr" in kwargs else 5e-3
lr_rampup_steps = kwargs["lr_rampup"] if "lr_rampup" in kwargs else 200
optim_type = kwargs["optim_type"] if "optim_type" in kwargs else "adam"
optim_kwargs = kwargs["optim_kwargs"] if "optim_kwargs" in kwargs else {}
scheduler_type = kwargs["scheduler_type"] if "scheduler_type" in kwargs else "ReduceLROnPlateau"
gradient_noise = kwargs["gradient_noise"] if "gradient_noise" in kwargs else None
data_loader_apply = kwargs["data_loader_apply"] if "data_loader_apply" in kwargs else None
# Inspection kwargs:
inspect_step = kwargs["inspect_step"] if "inspect_step" in kwargs else None # Whether to inspect each step
inspect_items = kwargs["inspect_items"] if "inspect_items" in kwargs else None
inspect_items_train = get_inspect_items_train(inspect_items)
inspect_functions = kwargs["inspect_functions"] if "inspect_functions" in kwargs else None
if inspect_functions is not None:
for inspect_function_key in inspect_functions:
if inspect_function_key not in inspect_items:
inspect_items.append(inspect_function_key)
inspect_items_interval = kwargs["inspect_items_interval"] if "inspect_items_interval" in kwargs else 1000
inspect_image_interval = kwargs["inspect_image_interval"] if "inspect_image_interval" in kwargs else None
inspect_loss_precision = kwargs["inspect_loss_precision"] if "inspect_loss_precision" in kwargs else 4
callback = kwargs["callback"] if "callback" in kwargs else None
# Saving kwargs:
record_keys = kwargs["record_keys"] if "record_keys" in kwargs else ["loss"]
filename = kwargs["filename"] if "filename" in kwargs else None
if filename is not None:
make_dir(filename)
save_interval = kwargs["save_interval"] if "save_interval" in kwargs else None
save_step = kwargs["save_step"] if "save_step" in kwargs else None
logdir = kwargs["logdir"] if "logdir" in kwargs else None
data_record = {key: [] for key in record_keys}
info_to_save = kwargs["info_to_save"] if "info_to_save" in kwargs else None
if info_to_save is not None:
data_record.update(info_to_save)
patience = kwargs["patience"] if "patience" in kwargs else 20
if patience is not None:
early_stopping_epsilon = kwargs["early_stopping_epsilon"] if "early_stopping_epsilon" in kwargs else 0
early_stopping_monitor = kwargs["early_stopping_monitor"] if "early_stopping_monitor" in kwargs else "loss"
early_stopping = Early_Stopping(patience = patience, epsilon = early_stopping_epsilon, mode = "max" if early_stopping_monitor in ["accuracy"] else "min")
if logdir is not None:
from pytorch_net.logger import Logger
batch_idx = 0
logger = Logger(logdir)
logimages = kwargs["logimages"] if "logimages" in kwargs else None
reg_mode = kwargs["reg_mode"] if "reg_mode" in kwargs else "L1"
if validation_loader is not None:
assert validation_data is None
X_valid, y_valid = None, None
elif validation_data is not None:
X_valid, y_valid = validation_data
else:
X_valid, y_valid = X, y
# Setting up dynamic label noise:
label_noise_matrix = kwargs["label_noise_matrix"] if "label_noise_matrix" in kwargs else None
transform_label = Transform_Label(label_noise_matrix = label_noise_matrix, is_cuda=is_cuda)
# Setting up cotrain optimizer:
co_kwargs = kwargs["co_kwargs"] if "co_kwargs" in kwargs else None
if co_kwargs is not None:
co_optimizer = co_kwargs["co_optimizer"]
co_model = co_kwargs["co_model"]
co_criterion = co_kwargs["co_criterion"] if "co_criterion" in co_kwargs else None
co_multi_step = co_kwargs["co_multi_step"] if "co_multi_step" in co_kwargs else 1
# Get original loss:
if len(inspect_items_train) > 0:
loss_value_train = get_loss(model, train_loader, X, y, criterion=criterion, loss_epoch=-1, transform_label=transform_label, **kwargs)
info_dict_train = prepare_inspection(model, train_loader, X, y, transform_label=transform_label, **kwargs)
if "loss" in record_keys:
record_data(data_record, [loss_value_train], ["loss_tr"])
loss_original = get_loss(model, validation_loader, X_valid, y_valid, criterion=criterion, loss_epoch=-1, transform_label=transform_label, **kwargs)
if "loss" in record_keys:
record_data(data_record, [-1, loss_original], ["iter", "loss"])
if "reg" in record_keys and "reg_dict" in kwargs and len(kwargs["reg_dict"]) > 0:
reg_value = get_regularization(model, loss_epoch=0, **kwargs)
record_data(data_record, [reg_value], ["reg"])
if "param" in record_keys:
record_data(data_record, [model.get_weights_bias(W_source="core", b_source="core")], ["param"])
if "param_grad" in record_keys:
record_data(data_record, [model.get_weights_bias(W_source="core", b_source="core", is_grad=True)], ["param_grad"])
if co_kwargs is not None:
co_loss_original = get_loss(co_model, validation_loader, X_valid, y_valid, criterion=criterion, loss_epoch=-1, transform_label=transform_label, **co_kwargs)
if "co_loss" in record_keys:
record_data(data_record, [co_loss_original], ["co_loss"])
if filename is not None and save_interval is not None:
record_data(data_record, [{}], ["model_dict"])
# Setting up optimizer:
parameters = model.parameters()
num_params = len(list(model.parameters()))
if num_params == 0:
print("No parameters to optimize!")
loss_value = get_loss(model, validation_loader, X_valid, y_valid, criterion = criterion, loss_epoch = -1, transform_label=transform_label, **kwargs)
if "loss" in record_keys:
record_data(data_record, [0, loss_value], ["iter", "loss"])
if "param" in record_keys:
record_data(data_record, [model.get_weights_bias(W_source = "core", b_source = "core")], ["param"])
if "param_grad" in record_keys:
record_data(data_record, [model.get_weights_bias(W_source = "core", b_source = "core", is_grad = True)], ["param_grad"])
if co_kwargs is not None:
co_loss_value = get_loss(co_model, validation_loader, X_valid, y_valid, criterion = criterion, loss_epoch = -1, transform_label=transform_label, **co_kwargs)
record_data(data_record, [co_loss_value], ["co_loss"])
return loss_original, loss_value, data_record
optimizer = get_optimizer(optim_type, lr, parameters, **optim_kwargs) if "optimizer" not in kwargs or ("optimizer" in kwargs and kwargs["optimizer"] is None) else kwargs["optimizer"]
# Initialize inspect_items:
if inspect_items is not None:
print("{}:".format(-1), end = "")
print("\tlr: {0:.3e}\t loss:{1:.{2}f}".format(optimizer.param_groups[0]["lr"], loss_original, inspect_loss_precision), end = "")
info_dict = prepare_inspection(model, validation_loader, X_valid, y_valid, transform_label=transform_label, **kwargs)
if len(inspect_items_train) > 0:
print("\tloss_tr: {0:.{1}f}".format(loss_value_train, inspect_loss_precision), end = "")
info_dict_train = update_key_train(info_dict_train, inspect_items_train)
info_dict.update(info_dict_train)
if "reg" in record_keys and "reg_dict" in kwargs and len(kwargs["reg_dict"]) > 0:
print("\treg:{0:.{1}f}".format(to_np_array(reg_value), inspect_loss_precision), end="")
if len(info_dict) > 0:
for item in inspect_items:
if item in info_dict:
print(" \t{0}: {1:.{2}f}".format(item, info_dict[item], inspect_loss_precision), end = "")
if item in record_keys and item not in ["loss", "reg"]:
record_data(data_record, [to_np_array(info_dict[item])], [item])
if co_kwargs is not None:
co_info_dict = prepare_inspection(co_model, validation_loader, X_valid, y_valid, transform_label=transform_label, **co_kwargs)
if "co_loss" in inspect_items:
co_loss_value = get_loss(co_model, validation_loader, X_valid, y_valid, criterion=criterion, loss_epoch=-1, transform_label=transform_label, **co_kwargs)
print("\tco_loss: {}".format(formalize_value(co_loss_value, inspect_loss_precision)), end="")
if len(co_info_dict) > 0:
for item in inspect_items:
if item in co_info_dict:
print(" \t{0}: {1}".format(item, formalize_value(co_info_dict[item], inspect_loss_precision)), end="")
if item in record_keys and item != "loss":
record_data(data_record, [to_np_array(co_info_dict[item])], [item])
print("\n")
# Setting up gradient noise:
if gradient_noise is not None:
from pytorch_net.util import Gradient_Noise_Scale_Gen
scale_gen = Gradient_Noise_Scale_Gen(epochs=epochs,
gamma=gradient_noise["gamma"], # decay rate
eta=gradient_noise["eta"], # starting variance
gradient_noise_interval_epoch=1,
)
gradient_noise_scale = scale_gen.generate_scale(verbose=True)
# Set up learning rate scheduler:
if scheduler_type is not None:
if scheduler_type == "ReduceLROnPlateau":
scheduler_patience = kwargs["scheduler_patience"] if "scheduler_patience" in kwargs else 40
scheduler_factor = kwargs["scheduler_factor"] if "scheduler_factor" in kwargs else 0.1
scheduler_verbose = kwargs["scheduler_verbose"] if "scheduler_verbose" in kwargs else False
scheduler = ReduceLROnPlateau(optimizer, factor=scheduler_factor, patience=scheduler_patience, verbose=scheduler_verbose)
elif scheduler_type == "LambdaLR":
scheduler_lr_lambda = kwargs["scheduler_lr_lambda"] if "scheduler_lr_lambda" in kwargs else (lambda epoch: 0.97 ** (epoch // 2))
scheduler = LambdaLR(optimizer, lr_lambda=scheduler_lr_lambda)
else:
raise
# Ramping or learning rate for the first lr_rampup_steps steps:
if lr_rampup_steps is not None and train_loader is not None:
scheduler_rampup = RampupLR(optimizer, num_steps=lr_rampup_steps)
if hasattr(train_loader, "dataset"):
data_size = len(train_loader.dataset)
else:
data_size = kwargs["data_size"]
# Initialize logdir:
if logdir is not None:
if logimages is not None:
for tag, image_fun in logimages["image_fun"].items():
image = image_fun(model, logimages["X"], logimages["y"])
logger.log_images(tag, image, -1)
# Training:
to_stop = False
for i in range(epochs + 1):
model.train()
# Updating gradient noise:
if gradient_noise is not None:
hook_handle_list = []
if i % scale_gen.gradient_noise_interval_epoch == 0:
for h in hook_handle_list:
h.remove()
hook_handle_list = []
scale_idx = int(i / scale_gen.gradient_noise_interval_epoch)
if scale_idx >= len(gradient_noise_scale):
current_gradient_noise_scale = gradient_noise_scale[-1]
else:
current_gradient_noise_scale = gradient_noise_scale[scale_idx]
for param_group in optimizer.param_groups:
for param in param_group["params"]:
if param.requires_grad:
h = param.register_hook(lambda grad: grad + Variable(torch.normal(mean=torch.zeros(grad.size()),
std=current_gradient_noise_scale * torch.ones(grad.size()))))
hook_handle_list.append(h)
if X is not None and y is not None:
if optim_type != "LBFGS":
optimizer.zero_grad()
reg = get_regularization(model, loss_epoch=i, **kwargs)
loss = model.get_loss(X, transform_label(y), criterion=criterion, loss_epoch=i, **kwargs) + reg
loss.backward()
optimizer.step()
else:
# "LBFGS" is a second-order optimization algorithm that requires a slightly different procedure:
def closure():
optimizer.zero_grad()
reg = get_regularization(model, loss_epoch=i, **kwargs)
loss = model.get_loss(X, transform_label(y), criterion=criterion, loss_epoch=i, **kwargs) + reg
loss.backward()
return loss
optimizer.step(closure)
# Cotrain step:
if co_kwargs is not None:
if "co_warmup_epochs" not in co_kwargs or "co_warmup_epochs" in co_kwargs and i >= co_kwargs["co_warmup_epochs"]:
for _ in range(co_multi_step):
co_optimizer.zero_grad()
co_reg = get_regularization(co_model, loss_epoch=i, **co_kwargs)
co_loss = co_model.get_loss(X, transform_label(y), criterion=co_criterion, loss_epoch=i, **co_kwargs) + co_reg
co_loss.backward()
co_optimizer.step()
else:
if inspect_step is not None:
info_dict_step = {key: [] for key in inspect_items}
if "loader_process" in kwargs and kwargs["loader_process"] is not None:
train_loader = kwargs["loader_process"]("train")
for k, data_batch in enumerate(train_loader):
if isinstance(data_batch, tuple) or isinstance(data_batch, list):
X_batch, y_batch = data_batch
if data_loader_apply is not None:
X_batch, y_batch = data_loader_apply(X_batch, y_batch)
else:
X_batch, y_batch = data_loader_apply(data_batch)
if optim_type != "LBFGS":
optimizer.zero_grad()
reg = get_regularization(model, loss_epoch=i, **kwargs)
loss = model.get_loss(X_batch, transform_label(y_batch), criterion=criterion, loss_epoch=i, loss_step=k, **kwargs) + reg
loss.backward()
if logdir is not None:
batch_idx += 1
if len(model.info_dict) > 0:
for item in inspect_items:
if item in model.info_dict:
logger.log_scalar(item, model.info_dict[item], batch_idx)
optimizer.step()
else:
def closure():
optimizer.zero_grad()
reg = get_regularization(model, loss_epoch=i, **kwargs)
loss = model.get_loss(X_batch, transform_label(y_batch), criterion=criterion, loss_epoch=i, loss_step=k, **kwargs) + reg
loss.backward()
return loss
if logdir is not None:
batch_idx += 1
if len(model.info_dict) > 0:
for item in inspect_items:
if item in model.info_dict:
logger.log_scalar(item, model.info_dict[item], batch_idx)
optimizer.step(closure)
# Rampup scheduler:
if lr_rampup_steps is not None and i * data_size // len(X_batch) + k < lr_rampup_steps:
scheduler_rampup.step()
# Cotrain step:
if co_kwargs is not None:
if "co_warmup_epochs" not in co_kwargs or "co_warmup_epochs" in co_kwargs and i >= co_kwargs["co_warmup_epochs"]:
for _ in range(co_multi_step):
co_optimizer.zero_grad()
co_reg = get_regularization(co_model, loss_epoch=i, **co_kwargs)
co_loss = co_model.get_loss(X_batch, transform_label(y_batch), criterion=co_criterion, loss_epoch=i, loss_step=k, **co_kwargs) + co_reg
co_loss.backward()
if logdir is not None:
if len(co_model.info_dict) > 0:
for item in inspect_items:
if item in co_model.info_dict:
logger.log_scalar(item, co_model.info_dict[item], batch_idx)
co_optimizer.step()
# Inspect at each step:
if inspect_step is not None:
if k % inspect_step == 0:
print("s{}:".format(k), end = "")
info_dict = prepare_inspection(model, validation_loader, X_valid, y_valid, transform_label=transform_label, **kwargs)
if "loss" in inspect_items:
info_dict_step["loss"].append(loss.item())
print("\tloss: {0:.{1}f}".format(loss.item(), inspect_loss_precision), end="")
if len(info_dict) > 0:
for item in inspect_items:
if item in info_dict:
info_dict_step[item].append(info_dict[item])
print(" \t{0}: {1}".format(item, formalize_value(info_dict[item], inspect_loss_precision)), end = "")
if co_kwargs is not None:
if "co_warmup_epochs" not in co_kwargs or "co_warmup_epochs" in co_kwargs and i >= co_kwargs["co_warmup_epochs"]:
co_info_dict = prepare_inspection(co_model, validation_loader, X_valid, y_valid, transform_label=transform_label, **co_kwargs)
if "co_loss" in inspect_items:
print("\tco_loss: {0:.{1}f}".format(co_loss.item(), inspect_loss_precision), end="")
info_dict_step["co_loss"].append(co_loss.item())
if len(co_info_dict) > 0:
for item in inspect_items:
if item in co_info_dict and item != "co_loss":
info_dict_step[item].append(co_info_dict[item])
print(" \t{0}: {1}".format(item, formalize_value(co_info_dict[item], inspect_loss_precision)), end="")
print()
if k % save_step == 0:
if filename is not None:
pickle.dump(model.model_dict, open(filename[:-2] + "_model.p", "wb"))
if logdir is not None:
# Log values and gradients of the parameters (histogram summary)
# for tag, value in model.named_parameters():
# tag = tag.replace('.', '/')
# logger.log_histogram(tag, to_np_array(value), i)
# logger.log_histogram(tag + '/grad', to_np_array(value.grad), i)
if logimages is not None:
for tag, image_fun in logimages["image_fun"].items():
image = image_fun(model, logimages["X"], logimages["y"])
logger.log_images(tag, image, i)
if i % inspect_interval == 0:
model.eval()
if inspect_items is not None and i % inspect_items_interval == 0 and len(inspect_items_train) > 0:
loss_value_train = get_loss(model, train_loader, X, y, criterion = criterion, loss_epoch = i, transform_label=transform_label, **kwargs)
info_dict_train = prepare_inspection(model, train_loader, X, y, transform_label=transform_label, **kwargs)
loss_value = get_loss(model, validation_loader, X_valid, y_valid, criterion = criterion, loss_epoch = i, transform_label=transform_label, **kwargs)
reg_value = get_regularization(model, loss_epoch = i, **kwargs)
if scheduler_type is not None:
if lr_rampup_steps is None or train_loader is None or (lr_rampup_steps is not None and i * data_size // len(X_batch) + k >= lr_rampup_steps):
if scheduler_type == "ReduceLROnPlateau":
scheduler.step(loss_value)
else:
scheduler.step()
if callback is not None:
assert callable(callback)
callback(model = model,
X = X_valid,
y = y_valid,
iteration = i,
loss = loss_value,
)
if patience is not None:
if early_stopping_monitor == "loss":
to_stop = early_stopping.monitor(loss_value)
else:
info_dict = prepare_inspection(model, validation_loader, X_valid, y_valid, transform_label=transform_label, **kwargs)
to_stop = early_stopping.monitor(info_dict[early_stopping_monitor])
if inspect_items is not None:
if i % inspect_items_interval == 0:
# Get loss:
print("{}:".format(i), end = "")
print("\tlr: {0:.3e}\tloss: {1:.{2}f}".format(optimizer.param_groups[0]["lr"], loss_value, inspect_loss_precision), end = "")
info_dict = prepare_inspection(model, validation_loader, X_valid, y_valid, transform_label=transform_label, **kwargs)
if len(inspect_items_train) > 0:
print("\tloss_tr: {0:.{1}f}".format(loss_value_train, inspect_loss_precision), end = "")
info_dict_train = update_key_train(info_dict_train, inspect_items_train)
info_dict.update(info_dict_train)
if "reg" in inspect_items and "reg_dict" in kwargs and len(kwargs["reg_dict"]) > 0:
print("\treg:{0:.{1}f}".format(to_np_array(reg_value), inspect_loss_precision), end="")
# Print and record:
if len(info_dict) > 0:
for item in inspect_items:
if item + "_val" in info_dict:
print(" \t{0}: {1}".format(item, formalize_value(info_dict[item + "_val"], inspect_loss_precision)), end = "")
if item in record_keys and item not in ["loss", "reg"]:
record_data(data_record, [to_np_array(info_dict[item + "_val"])], [item])
# logger:
if logdir is not None:
for item in inspect_items:
if item + "_val" in info_dict:
logger.log_scalar(item + "_val", info_dict[item + "_val"], i)
# Co_model:
if co_kwargs is not None:
co_loss_value = get_loss(co_model, validation_loader, X_valid, y_valid, criterion = criterion, loss_epoch = i, transform_label=transform_label, **co_kwargs)
co_info_dict = prepare_inspection(co_model, validation_loader, X_valid, y_valid, transform_label=transform_label, **co_kwargs)
if "co_loss" in inspect_items:
print("\tco_loss: {0:.{1}f}".format(co_loss_value, inspect_loss_precision), end="")
if len(co_info_dict) > 0:
for item in inspect_items:
if item + "_val" in co_info_dict:
print(" \t{0}: {1}".format(item, formalize_value(co_info_dict[item + "_val"], inspect_loss_precision)), end="")
if item in record_keys and item != "co_loss":
record_data(data_record, [to_np_array(co_info_dict[item + "_val"])], [item])
if "co_loss" in record_keys:
record_data(data_record, [co_loss_value], ["co_loss"])
# Training metrics:
if inspect_step is not None:
for item in info_dict_step:
if len(info_dict_step[item]) > 0:
print(" \t{0}_s: {1}".format(item, formalize_value(np.mean(info_dict_step[item]), inspect_loss_precision)), end = "")
if item in record_keys and item != "loss":
record_data(data_record, [np.mean(info_dict_step[item])], ["{}_s".format(item)])
# Record loss:
if "loss" in record_keys:
record_data(data_record, [i, loss_value], ["iter", "loss"])
if "reg" in record_keys and "reg_dict" in kwargs and len(kwargs["reg_dict"]) > 0:
record_data(data_record, [reg_value], ["reg"])
if "param" in record_keys:
record_data(data_record, [model.get_weights_bias(W_source = "core", b_source = "core")], ["param"])
if "param_grad" in record_keys:
record_data(data_record, [model.get_weights_bias(W_source = "core", b_source = "core", is_grad = True)], ["param_grad"])
print("\n")
try:
sys.stdout.flush()
except:
pass
if isplot:
if inspect_image_interval is not None and hasattr(model, "plot"):
if i % inspect_image_interval == 0:
if gradient_noise is not None:
print("gradient_noise: {0:.9f}".format(current_gradient_noise_scale))
plot_model(model, data_loader = validation_loader, X = X_valid, y = y_valid, transform_label=transform_label, data_loader_apply=data_loader_apply)
if co_kwargs is not None and "inspect_image_interval" in co_kwargs and co_kwargs["inspect_image_interval"] and hasattr(co_model, "plot"):
if i % co_kwargs["inspect_image_interval"] == 0:
plot_model(co_model, data_loader = validation_loader, X = X_valid, y = y_valid, transform_label=transform_label, data_loader_apply=data_loader_apply)
if save_interval is not None:
if i % save_interval == 0:
record_data(data_record, [model.model_dict], ["model_dict"])
if co_kwargs is not None:
record_data(data_record, [co_model.model_dict], ["co_model_dict"])
if filename is not None:
pickle.dump(data_record, open(filename, "wb"))
if to_stop:
break
loss_value = get_loss(model, validation_loader, X_valid, y_valid, criterion=criterion, loss_epoch=epochs, transform_label=transform_label, **kwargs)
if isplot:
import matplotlib.pylab as plt
for key, item in data_record.items():
if isinstance(item, Number) or len(data_record["iter"]) != len(item):
continue
if key not in ["iter", "model_dict"]:
if key in ["accuracy"]:
plt.figure(figsize = (8,6))
plt.plot(data_record["iter"], data_record[key])
plt.xlabel("epoch")
plt.ylabel(key)
plt.title(key)
plt.show()
else:
plt.figure(figsize = (8,6))
plt.semilogy(data_record["iter"], data_record[key])
plt.xlabel("epoch")
plt.ylabel(key)
plt.title(key)
plt.show()
return loss_original, loss_value, data_record
def train_simple(model, X, y, validation_data = None, inspect_interval = 5, **kwargs):
    """Minimal training loop. "model" can be a single model or an ordered list of models.

    All models' parameters are optimized jointly to minimize
    criterion(forward(model, X), y) plus optional L1 regularization
    (controlled by kwargs["reg_dict"]).

    Args:
        model: a model or list/tuple of models; each must expose .parameters(),
            .get_regularization() and .get_weights_bias(), and be composable
            via the module-level forward() helper.
        X, y: training inputs and targets (X must expose .is_cuda).
        validation_data: optional (X_valid, y_valid) pair; defaults to (X, y).
        inspect_interval: evaluate/record validation metrics every this many epochs.
        **kwargs: training options, e.g. epochs, lr, optim_type, optim_kwargs,
            loss_type, patience, record_keys, scheduler_type, reg_dict, isplot.

    Returns:
        (loss_original, loss_value, data_record): validation loss before
        training, validation loss after training, and the recorded metrics.
    """
    def get_regularization(model, **kwargs):
        # Sum L1 regularization over every sub-model, one term per entry in reg_dict.
        reg_dict = kwargs["reg_dict"] if "reg_dict" in kwargs else None
        reg = to_Variable([0], is_cuda = X.is_cuda)
        for model_ele in model:
            if reg_dict is not None:
                for reg_type, reg_coeff in reg_dict.items():
                    reg = reg + model_ele.get_regularization(source = [reg_type], mode = "L1", **kwargs) * reg_coeff
        return reg
    # Normalize to a list so the rest of the function can iterate uniformly:
    if not(isinstance(model, list) or isinstance(model, tuple)):
        model = [model]
    # Unpack options with defaults:
    epochs = kwargs["epochs"] if "epochs" in kwargs else 2000
    lr = kwargs["lr"] if "lr" in kwargs else 5e-3
    optim_type = kwargs["optim_type"] if "optim_type" in kwargs else "adam"
    optim_kwargs = kwargs["optim_kwargs"] if "optim_kwargs" in kwargs else {}
    loss_type = kwargs["loss_type"] if "loss_type" in kwargs else "mse"
    early_stopping_epsilon = kwargs["early_stopping_epsilon"] if "early_stopping_epsilon" in kwargs else 0
    patience = kwargs["patience"] if "patience" in kwargs else 40
    record_keys = kwargs["record_keys"] if "record_keys" in kwargs else ["loss", "mse", "data_DL", "model_DL"]
    scheduler_type = kwargs["scheduler_type"] if "scheduler_type" in kwargs else "ReduceLROnPlateau"
    loss_precision_floor = kwargs["loss_precision_floor"] if "loss_precision_floor" in kwargs else PrecisionFloorLoss
    # NOTE(review): "autoencoder" is read here but never used below — confirm intent.
    autoencoder = kwargs["autoencoder"] if "autoencoder" in kwargs else None
    data_record = {key: [] for key in record_keys}
    isplot = kwargs["isplot"] if "isplot" in kwargs else False
    if patience is not None:
        early_stopping = Early_Stopping(patience = patience, epsilon = early_stopping_epsilon)
    if validation_data is not None:
        X_valid, y_valid = validation_data
    else:
        X_valid, y_valid = X, y
    # Get original loss:
    criterion = get_criterion(loss_type, loss_precision_floor = loss_precision_floor)
    DL_criterion = Loss_Fun(core = "DLs", loss_precision_floor = loss_precision_floor, DL_sum = True)
    DL_criterion_absolute = Loss_Fun(core = "DLs", loss_precision_floor = PrecisionFloorLoss, DL_sum = True)
    pred_valid = forward(model, X_valid, **kwargs)
    loss_original = to_np_array(criterion(pred_valid, y_valid))
    # Record pre-training metrics under iter -1:
    if "loss" in record_keys:
        record_data(data_record, [-1, loss_original], ["iter","loss"])
    if "mse" in record_keys:
        record_data(data_record, [to_np_array(nn.MSELoss()(pred_valid, y_valid))], ["mse"])
    if "data_DL" in record_keys:
        record_data(data_record, [to_np_array(DL_criterion(pred_valid, y_valid))], ["data_DL"])
    if "data_DL_absolute" in record_keys:
        record_data(data_record, [to_np_array(DL_criterion_absolute(pred_valid, y_valid))], ["data_DL_absolute"])
    if "model_DL" in record_keys:
        record_data(data_record, [get_model_DL(model)], ["model_DL"])
    if "param" in record_keys:
        record_data(data_record, [model[0].get_weights_bias(W_source = "core", b_source = "core")], ["param"])
    if "param_grad" in record_keys:
        record_data(data_record, [model[0].get_weights_bias(W_source = "core", b_source = "core", is_grad = True)], ["param_grad"])
    if "param_collapse_layers" in record_keys:
        # NOTE(review): recorded under the "param" key, not "param_collapse_layers" — confirm this is intentional.
        record_data(data_record, [simplify(deepcopy(model[0]), X, y, "collapse_layers", verbose = 0)[0] .get_weights_bias(W_source = "core", b_source = "core")], ["param"])
    # Setting up optimizer:
    parameters = itertools.chain(*[model_ele.parameters() for model_ele in model])
    num_params = np.sum([[len(list(model_ele.parameters())) for model_ele in model]])
    if num_params == 0:
        # Nothing to optimize: record the current metrics once and return early.
        print("No parameters to optimize!")
        pred_valid = forward(model, X_valid, **kwargs)
        loss_value = to_np_array(criterion(pred_valid, y_valid))
        if "loss" in record_keys:
            record_data(data_record, [0, loss_value], ["iter", "loss"])
        if "mse" in record_keys:
            record_data(data_record, [to_np_array(nn.MSELoss()(pred_valid, y_valid))], ["mse"])
        if "data_DL" in record_keys:
            record_data(data_record, [to_np_array(DL_criterion(pred_valid, y_valid))], ["data_DL"])
        if "data_DL_absolute" in record_keys:
            record_data(data_record, [to_np_array(DL_criterion_absolute(pred_valid, y_valid))], ["data_DL_absolute"])
        if "model_DL" in record_keys:
            record_data(data_record, [get_model_DL(model)], ["model_DL"])
        if "param" in record_keys:
            record_data(data_record, [model[0].get_weights_bias(W_source = "core", b_source = "core")], ["param"])
        if "param_grad" in record_keys:
            record_data(data_record, [model[0].get_weights_bias(W_source = "core", b_source = "core", is_grad = True)], ["param_grad"])
        if "param_collapse_layers" in record_keys:
            record_data(data_record, [simplify(deepcopy(model[0]), X, y, "collapse_layers", verbose = 0)[0] .get_weights_bias(W_source = "core", b_source = "core")], ["param"])
        return loss_original, loss_value, data_record
    optimizer = get_optimizer(optim_type, lr, parameters, **optim_kwargs)
    # Set up learning rate scheduler:
    if scheduler_type is not None:
        if scheduler_type == "ReduceLROnPlateau":
            scheduler_patience = kwargs["scheduler_patience"] if "scheduler_patience" in kwargs else 10
            scheduler_factor = kwargs["scheduler_factor"] if "scheduler_factor" in kwargs else 0.1
            scheduler = ReduceLROnPlateau(optimizer, factor = scheduler_factor, patience = scheduler_patience)
        elif scheduler_type == "LambdaLR":
            scheduler_lr_lambda = kwargs["scheduler_lr_lambda"] if "scheduler_lr_lambda" in kwargs else (lambda epoch: 1 / (1 + 0.01 * epoch))
            scheduler = LambdaLR(optimizer, lr_lambda = scheduler_lr_lambda)
        else:
            raise
    # Training:
    to_stop = False
    for i in range(epochs + 1):
        if optim_type != "LBFGS":
            optimizer.zero_grad()
            pred = forward(model, X, **kwargs)
            reg = get_regularization(model, **kwargs)
            loss = criterion(pred, y) + reg
            loss.backward()
            optimizer.step()
        else:
            # "LBFGS" is a second-order optimization algorithm that requires a slightly different procedure:
            def closure():
                optimizer.zero_grad()
                pred = forward(model, X, **kwargs)
                reg = get_regularization(model, **kwargs)
                loss = criterion(pred, y) + reg
                loss.backward()
                return loss
            optimizer.step(closure)
        if i % inspect_interval == 0:
            # Periodic validation, scheduler step, metric recording, and early stopping:
            pred_valid = forward(model, X_valid, **kwargs)
            loss_value = to_np_array(criterion(pred_valid, y_valid))
            if scheduler_type is not None:
                if scheduler_type == "ReduceLROnPlateau":
                    scheduler.step(loss_value)
                else:
                    scheduler.step()
            if "loss" in record_keys:
                record_data(data_record, [i, loss_value], ["iter", "loss"])
            if "mse" in record_keys:
                record_data(data_record, [to_np_array(nn.MSELoss()(pred_valid, y_valid))], ["mse"])
            if "data_DL" in record_keys:
                record_data(data_record, [to_np_array(DL_criterion(pred_valid, y_valid))], ["data_DL"])
            if "data_DL_absolute" in record_keys:
                record_data(data_record, [to_np_array(DL_criterion_absolute(pred_valid, y_valid))], ["data_DL_absolute"])
            if "model_DL" in record_keys:
                record_data(data_record, [get_model_DL(model)], ["model_DL"])
            if "param" in record_keys:
                record_data(data_record, [model[0].get_weights_bias(W_source = "core", b_source = "core")], ["param"])
            if "param_grad" in record_keys:
                record_data(data_record, [model[0].get_weights_bias(W_source = "core", b_source = "core", is_grad = True)], ["param_grad"])
            if "param_collapse_layers" in record_keys:
                record_data(data_record, [simplify(deepcopy(model[0]), X, y, "collapse_layers", verbose = 0)[0] .get_weights_bias(W_source = "core", b_source = "core")], ["param"])
            if patience is not None:
                to_stop = early_stopping.monitor(loss_value)
        if to_stop:
            break
    # Final validation loss after training:
    pred_valid = forward(model, X_valid, **kwargs)
    loss_value = to_np_array(criterion(pred_valid, y_valid))
    if isplot:
        import matplotlib.pylab as plt
        if "mse" in data_record:
            plt.semilogy(data_record["iter"], data_record["mse"])
            plt.xlabel("epochs")
            plt.title("MSE")
            plt.show()
        if "loss" in data_record:
            plt.plot(data_record["iter"], data_record["loss"])
            plt.xlabel("epochs")
            plt.title("Loss")
            plt.show()
    return loss_original, loss_value, data_record
def load_model_dict_net(model_dict, is_cuda = False):
    """Reconstruct a single (non-composite) network from its serialized model_dict.

    Dispatches on model_dict["type"] to the matching network class, passing the
    stored weights/settings when present. Composite models (ensembles, mixtures,
    models with uncertainty, ...) are handled by load_model_dict() instead.

    Args:
        model_dict: serialized description of the network, as produced by the
            network's own model_dict property.
        is_cuda: whether to place the reconstructed network on GPU.

    Returns:
        The reconstructed network instance.

    Raises:
        Exception: if model_dict["type"] is not recognized.
    """
    net_type = model_dict["type"]
    if net_type.startswith("MLP"):
        return MLP(input_size = model_dict["input_size"],
                   struct_param = model_dict["struct_param"] if "struct_param" in model_dict else None,
                   W_init_list = model_dict["weights"] if "weights" in model_dict else None,
                   b_init_list = model_dict["bias"] if "bias" in model_dict else None,
                   settings = model_dict["settings"] if "settings" in model_dict else {},
                   is_cuda = is_cuda,
                  )
    elif net_type == "Labelmix_MLP":
        model = Labelmix_MLP(input_size=model_dict["input_size"],
                             struct_param=model_dict["struct_param"],
                             idx_label=model_dict["idx_label"] if "idx_label" in model_dict else None,
                             is_cuda=is_cuda,
                            )
        if "state_dict" in model_dict:
            model.load_state_dict(model_dict["state_dict"])
        return model
    elif net_type == "Multi_MLP":
        return Multi_MLP(input_size = model_dict["input_size"],
                         struct_param = model_dict["struct_param"],
                         W_init_list = model_dict["weights"] if "weights" in model_dict else None,
                         b_init_list = model_dict["bias"] if "bias" in model_dict else None,
                         settings = model_dict["settings"] if "settings" in model_dict else {},
                         is_cuda = is_cuda,
                        )
    elif net_type == "Branching_Net":
        return Branching_Net(net_base_model_dict = model_dict["net_base_model_dict"],
                             net_1_model_dict = model_dict["net_1_model_dict"],
                             net_2_model_dict = model_dict["net_2_model_dict"],
                             is_cuda = is_cuda,
                            )
    elif net_type == "Fan_in_MLP":
        return Fan_in_MLP(model_dict_branch1=model_dict["model_dict_branch1"],
                          model_dict_branch2=model_dict["model_dict_branch2"],
                          model_dict_joint=model_dict["model_dict_joint"],
                          is_cuda=is_cuda,
                         )
    elif net_type == "Net_reparam":
        return Net_reparam(model_dict=model_dict["model"],
                           reparam_mode=model_dict["reparam_mode"],
                           is_cuda=is_cuda,
                          )
    elif net_type == "Wide_ResNet":
        model = Wide_ResNet(depth=model_dict["depth"],
                            widen_factor=model_dict["widen_factor"],
                            input_channels=model_dict["input_channels"],
                            output_size=model_dict["output_size"],
                            dropout_rate=model_dict["dropout_rate"],
                            is_cuda=is_cuda,
                           )
        if "state_dict" in model_dict:
            model.load_state_dict(model_dict["state_dict"])
        return model
    elif net_type.startswith("ConvNet"):
        return ConvNet(input_channels = model_dict["input_channels"],
                       struct_param = model_dict["struct_param"],
                       W_init_list = model_dict["weights"] if "weights" in model_dict else None,
                       b_init_list = model_dict["bias"] if "bias" in model_dict else None,
                       settings = model_dict["settings"] if "settings" in model_dict else {},
                       return_indices = model_dict["return_indices"] if "return_indices" in model_dict else False,
                       is_cuda = is_cuda,
                      )
    elif net_type == "Conv_Autoencoder":
        model = Conv_Autoencoder(input_channels_encoder = model_dict["input_channels_encoder"],
                                 input_channels_decoder = model_dict["input_channels_decoder"],
                                 struct_param_encoder = model_dict["struct_param_encoder"],
                                 struct_param_decoder = model_dict["struct_param_decoder"],
                                 settings = model_dict["settings"],
                                 is_cuda = is_cuda,
                                )
        # Restore the encoder/decoder weights if they were serialized:
        if "encoder" in model_dict:
            model.encoder.load_model_dict(model_dict["encoder"])
        if "decoder" in model_dict:
            model.decoder.load_model_dict(model_dict["decoder"])
        return model
    elif net_type == "Conv_Model":
        # Consistency fix: branch on net_type like every other case, and pass the
        # already-computed is_generative flag directly instead of re-reading
        # model_dict through a self-referential ternary.
        is_generative = model_dict["is_generative"] if "is_generative" in model_dict else False
        # A generative Conv_Model has no encoder:
        return Conv_Model(encoder_model_dict = model_dict["encoder_model_dict"] if not is_generative else None,
                          core_model_dict = model_dict["core_model_dict"],
                          decoder_model_dict = model_dict["decoder_model_dict"],
                          latent_size = model_dict["latent_size"],
                          is_generative = is_generative,
                          is_res_block = model_dict["is_res_block"] if "is_res_block" in model_dict else False,
                          is_cuda = is_cuda,
                         )
    else:
        raise Exception("net_type {} not recognized!".format(net_type))
def load_model_dict(model_dict, is_cuda = False):
    """Reconstruct a model (simple or composite) from its serialized model_dict.

    Simple networks are delegated to load_model_dict_net(); the composite
    types (ensembles, mixtures, models with uncertainty) are rebuilt here by
    recursively loading their sub-models.

    Raises:
        Exception: if the type is not handled by any branch.
    """
    net_type = model_dict["type"]
    if net_type not in ["Model_Ensemble", "LSTM", "Model_with_Uncertainty", "Mixture_Model", "Mixture_Gaussian"]:
        return load_model_dict_net(model_dict, is_cuda = is_cuda)
    elif net_type == "Model_Ensemble":
        shared_kwargs = dict(
            num_models = model_dict["num_models"],
            input_size = model_dict["input_size"],
            model_type = model_dict["model_type"],
            output_size = model_dict["output_size"],
            is_cuda = is_cuda,
        )
        # The architecture arguments below are placeholders only; each
        # sub-model is overwritten by the loop that follows.
        if model_dict["model_type"] == "MLP":
            model_ensemble = Model_Ensemble(struct_param = [[1, "Simple_Layer", {}]], **shared_kwargs)
        elif model_dict["model_type"] == "LSTM":
            model_ensemble = Model_Ensemble(hidden_size = 3,
                                            output_struct_param = [[1, "Simple_Layer", {}]],
                                            **shared_kwargs)
        else:
            raise
        for k in range(model_ensemble.num_models):
            sub_model = load_model_dict(model_dict["model_{}".format(k)], is_cuda = is_cuda)
            setattr(model_ensemble, "model_{}".format(k), sub_model)
        return model_ensemble
    elif net_type == "Model_with_Uncertainty":
        return Model_with_Uncertainty(model_pred = load_model_dict(model_dict["model_pred"], is_cuda = is_cuda),
                                      model_logstd = load_model_dict(model_dict["model_logstd"], is_cuda = is_cuda))
    elif net_type == "Mixture_Model":
        return Mixture_Model(model_dict_list = model_dict["model_dict_list"],
                             weight_logits_model_dict = model_dict["weight_logits_model_dict"],
                             num_components = model_dict["num_components"],
                             is_cuda = is_cuda,
                            )
    elif net_type == "Mixture_Gaussian":
        return load_model_dict_Mixture_Gaussian(model_dict, is_cuda = is_cuda)
    else:
        # Reached e.g. for "LSTM", which is listed above but has no loader here.
        raise Exception("net_type {} not recognized!".format(net_type))
## Helper functions:
def get_accuracy(pred, target):
    """Return the fraction of positions where two 1-D arrays agree."""
    assert len(pred.shape) == 1 and len(target.shape) == 1
    assert len(pred) == len(target)
    pred_np, target_np = to_np_array(pred, target)
    num_correct = (pred_np == target_np).sum().astype(float)
    return num_correct / len(pred_np)
def flatten(*tensors):
    """Collapse each tensor to 2-D, preserving only the first (batch) dimension.

    Returns the single flattened tensor when one argument is given,
    otherwise a list of flattened tensors.
    """
    flattened = [t.view(t.size(0), -1) for t in tensors]
    return flattened[0] if len(flattened) == 1 else flattened
def fill_triangular(vec, dim, mode = "lower"):
    """Fill lower- or upper-triangular matrices with the given vectors.

    Bug fix: the original used a lower-triangular (tril) mask for BOTH modes,
    so mode="upper" silently produced lower-triangular matrices. "upper" now
    uses a triu mask.

    Args:
        vec: tensor of shape (num_examples, dim * (dim + 1) // 2); each row is
            written into the triangle in row-major order.
        dim: side length of the square output matrices.
        mode: "lower" or "upper".

    Returns:
        Tensor of shape (num_examples, dim, dim) on vec's device.

    Raises:
        Exception: if mode is not recognized.
    """
    num_examples, size = vec.shape
    assert size == dim * (dim + 1) // 2
    matrix = torch.zeros(num_examples, dim, dim).to(vec.device)
    if mode == "lower":
        mask = torch.tril(torch.ones(dim, dim)) == 1
    elif mode == "upper":
        mask = torch.triu(torch.ones(dim, dim)) == 1
    else:
        raise Exception("mode {} not recognized!".format(mode))
    idx = mask.unsqueeze(0).repeat(num_examples, 1, 1)
    # Boolean-mask assignment writes vec's entries into the True positions
    # in row-major order:
    matrix[idx] = vec.contiguous().view(-1)
    return matrix
def matrix_diag_transform(matrix, fun):
    """Apply 'fun' to the diagonal of each matrix in a batch, leaving off-diagonal entries unchanged.

    The input batch is not modified; a transformed clone is returned.
    """
    batch_size = len(matrix)
    diag_mask = torch.eye(matrix.size(-1)).bool().unsqueeze(0).repeat(batch_size, 1, 1)
    transformed = matrix.clone()
    diagonals = matrix.diagonal(dim1 = 1, dim2 = 2).contiguous().view(-1)
    transformed[diag_mask] = fun(diagonals)
    return transformed
def Zip(*data, **kwargs):
    """Recursive unzipping of a data structure.

    Example: Zip(*[(('a',2), 1), (('b',3), 2), (('c',3), 3), (('d',2), 4)])
        ==> [[['a', 'b', 'c', 'd'], [2, 3, 3, 2]], [1, 2, 3, 4]]
    Each subtree in the original data must be in the form of a tuple.
    In **kwargs, "function" (optional) is applied to each fully unzipped
    leaf list.

    Fixes: removed a dead `import collections` inside the body; replaced the
    verbose membership ternary with dict.get.
    """
    function = kwargs.get("function")
    if len(data) == 1:
        # Nothing to unzip with a single element.
        return data[0]
    data = [list(element) for element in zip(*data)]
    for i, element in enumerate(data):
        if isinstance(element[0], tuple):
            # Tuples mark subtrees: recurse.
            data[i] = Zip(*element, **kwargs)
        elif isinstance(element, list):
            if function is not None:
                data[i] = function(element)
    return data
def get_loss(model, data_loader=None, X=None, y=None, criterion=None, transform_label=None, **kwargs):
    """Get loss using the whole data or data_loader. Return the average validation loss with np.ndarray format.

    Exactly one of (data_loader) or (X, y) must be provided. When a loader is
    used, both the loss and every entry of model.info_dict are averaged over
    the batches, and model.info_dict is overwritten with those averages as a
    side effect.
    """
    max_validation_iter = kwargs["max_validation_iter"] if "max_validation_iter" in kwargs else None
    if transform_label is None:
        transform_label = Transform_Label()
    # A "loader_process" factory, when given, replaces the passed data_loader:
    if "loader_process" in kwargs and kwargs["loader_process"] is not None:
        data_loader = kwargs["loader_process"]("test")
    if data_loader is not None:
        assert X is None and y is None
        loss_record = 0
        count = 0
        # Taking the average of all metrics:
        for j, data_batch in enumerate(data_loader):
            if isinstance(data_batch, tuple) or isinstance(data_batch, list):
                X_batch, y_batch = data_batch
                if "data_loader_apply" in kwargs and kwargs["data_loader_apply"] is not None:
                    X_batch, y_batch = kwargs["data_loader_apply"](X_batch, y_batch)
            else:
                # Non-pair batches require a data_loader_apply to split them:
                X_batch, y_batch = kwargs["data_loader_apply"](data_batch)
            loss_ele = to_np_array(model.get_loss(X_batch, transform_label(y_batch), criterion = criterion, **kwargs))
            if j == 0:
                # Initialize accumulators from the keys of the first batch's info_dict.
                all_info_dict = {key: 0 for key in model.info_dict.keys()}
            loss_record = loss_record + loss_ele
            count += 1
            for key in model.info_dict:
                all_info_dict[key] = all_info_dict[key] + model.info_dict[key]
            # NOTE(review): uses `count > max_validation_iter` here while
            # prepare_inspection/plot_model use `j >= max_validation_iter`,
            # so this loop runs one extra batch relative to those — confirm intended.
            if max_validation_iter is not None and count > max_validation_iter:
                break
        for key in model.info_dict:
            all_info_dict[key] = all_info_dict[key] / count
        loss = loss_record / count
        # Side effect: replace the model's last-batch info_dict with the averages.
        model.info_dict = deepcopy(all_info_dict)
    else:
        assert X is not None and y is not None
        loss = to_np_array(model.get_loss(X, transform_label(y), criterion = criterion, **kwargs))
    return loss
def plot_model(model, data_loader=None, X=None, y=None, transform_label=None, **kwargs):
    """Call model.plot on either the accumulated data_loader contents or the given (X, y).

    Exactly one of (data_loader) or (X, y) must be provided; with a loader,
    up to max_validation_iter + 1 batches are concatenated before plotting.
    """
    data_loader_apply = kwargs.get("data_loader_apply")
    max_validation_iter = kwargs.get("max_validation_iter")
    if transform_label is None:
        transform_label = Transform_Label()
    if kwargs.get("loader_process") is not None:
        data_loader = kwargs["loader_process"]("test")
    if data_loader is None:
        # Direct-data path:
        assert X is not None and y is not None
        model.plot(X, transform_label(y))
        return
    assert X is None and y is None
    X_all, y_all = [], []
    for i, data_batch in enumerate(data_loader):
        if isinstance(data_batch, (tuple, list)):
            X_batch, y_batch = data_batch
            if data_loader_apply is not None:
                X_batch, y_batch = data_loader_apply(X_batch, y_batch)
        else:
            # Non-pair batches must be split by data_loader_apply:
            X_batch, y_batch = data_loader_apply(data_batch)
        X_all.append(X_batch)
        y_all.append(y_batch)
        if max_validation_iter is not None and i >= max_validation_iter:
            break
    if isinstance(X_all[0], torch.Tensor):
        X_all = torch.cat(X_all, 0)
    else:
        # Structured (nested-tuple) inputs: concatenate leaf-wise.
        X_all = Zip(*X_all, function = torch.cat)
    y_all = torch.cat(y_all)
    model.plot(X_all, transform_label(y_all))
def prepare_inspection(model, data_loader=None, X=None, y=None, transform_label=None, **kwargs):
    """Collect the model's inspection metrics over (X, y) or a data_loader.

    With a loader, each metric is averaged over up to max_validation_iter + 1
    batches and returned under a key suffixed with "_val".
    NOTE(review): the direct-(X, y) branch returns the model's keys unchanged
    (no "_val" suffix) — callers appear to rely on this asymmetry; confirm.
    """
    # Optional extra metric callables: {name: fn(model, X, y, **kwargs)}.
    inspect_functions = kwargs["inspect_functions"] if "inspect_functions" in kwargs else None
    max_validation_iter = kwargs["max_validation_iter"] if "max_validation_iter" in kwargs else None
    verbose = kwargs["verbose"] if "verbose" in kwargs else False
    if transform_label is None:
        transform_label = Transform_Label()
    # A "loader_process" factory, when given, replaces the passed data_loader:
    if "loader_process" in kwargs and kwargs["loader_process"] is not None:
        data_loader = kwargs["loader_process"]("test")
    if data_loader is None:
        assert X is not None and y is not None
        all_dict_summary = model.prepare_inspection(X, transform_label(y), **kwargs)
        if inspect_functions is not None:
            for inspect_function_key, inspect_function in inspect_functions.items():
                all_dict_summary[inspect_function_key] = inspect_function(model, X, y, **kwargs)
    else:
        assert X is None and y is None
        all_dict = {}
        for j, data_batch in enumerate(data_loader):
            if verbose is True:
                print("valid step: {}".format(j))
            if isinstance(data_batch, tuple) or isinstance(data_batch, list):
                X_batch, y_batch = data_batch
                if "data_loader_apply" in kwargs and kwargs["data_loader_apply"] is not None:
                    X_batch, y_batch = kwargs["data_loader_apply"](X_batch, y_batch)
            else:
                # Non-pair batches require a data_loader_apply to split them:
                X_batch, y_batch = kwargs["data_loader_apply"](data_batch)
            info_dict = model.prepare_inspection(X_batch, transform_label(y_batch), valid_step=j, **kwargs)
            # Accumulate per-batch values for later averaging:
            for key, item in info_dict.items():
                if key not in all_dict:
                    all_dict[key] = [item]
                else:
                    all_dict[key].append(item)
            if inspect_functions is not None:
                for inspect_function_key, inspect_function in inspect_functions.items():
                    inspect_function_result = inspect_function(model, X_batch, transform_label(y_batch), **kwargs)
                    if inspect_function_key not in all_dict:
                        all_dict[inspect_function_key] = [inspect_function_result]
                    else:
                        all_dict[inspect_function_key].append(inspect_function_result)
            if max_validation_iter is not None and j >= max_validation_iter:
                break
        all_dict_summary = {}
        for key, item in all_dict.items():
            all_dict_summary[key + "_val"] = np.mean(all_dict[key])
    return all_dict_summary
def get_inspect_items_train(inspect_items):
    """Return the base names of inspect items ending in "_tr" (training-set metrics).

    E.g. ["loss_tr", "acc"] -> ["loss"]. Returns [] when inspect_items is None.
    """
    if inspect_items is None:
        return []
    return ["_".join(item.split("_")[:-1]) for item in inspect_items if item.endswith("_tr")]
def update_key_train(info_dict_train, inspect_items_train):
    """Keep only the entries named in inspect_items_train, renaming each key with a "_tr" suffix.

    Returns a deep copy so the caller cannot alias the original values.
    """
    renamed = {key + "_tr": value
               for key, value in info_dict_train.items()
               if key in inspect_items_train}
    return deepcopy(renamed)
# ## Simplifying functionality:
# In[ ]:
def simplify(model, X=None, y=None, mode="full", isplot=False, target_name=None, validation_data=None, **kwargs):
    """Simplify a neural network model in various ways. "model" can be a single model or a ordered list of models.

    Supported `mode` values (a single string or a list; "full" expands to
    ["collapse_layers", "snap"]):
        "collapse_layers": merge runs of consecutive linear layers of the same
            layer type into a single affine layer.
        "local" / "snap": greedily snap individual parameters to simpler values
            (e.g. integers/rationals), retraining and reverting via a
            Performance_Monitor when the simplify criterion degrades.
        "pair_snap": snap pairs of parameters in Symbolic_Layers.
        "to_symbolic" / "to_symbolic_separable": convert Simple_Layer /
            Sneuron_Layer layers into Symbolic_Layers.
        "symbolic_simplification": collapse an all-Symbolic_Layer model into a
            single symbolic-expression layer (optionally truncating polynomial
            degree).
        "activation_snap": per-neuron replacement of one activation function by
            another (e.g. sigmoid -> heaviside) in Symbolic_Layers.
        "ramping-L1": sweep L1 regularization amplitudes, then load the layer
            state corresponding to a chosen final amplitude.

    Args:
        model: a model or ordered list/tuple of models (unwrapped again on return).
        X, y: training data; if X is given and y is None, the model's own
            prediction is used as the target (self-distillation).
        mode: simplification mode(s), see above.
        isplot: if True, the "ramping-L1" mode plots loss/parameter curves.
        target_name: optional label used only in printed headers.
        validation_data: optional (X_valid, y_valid); defaults to (X, y).
        **kwargs: tuning knobs, e.g. "simplify_criteria", "record_keys",
            "loss_type", "max_passes", "snap_mode", "force_simplification".

    Returns:
        (model, loss_dict): the simplified model (same container shape as the
        input) and a per-mode dict of recorded losses, DL values and events.
    """
    verbose = kwargs["verbose"] if "verbose" in kwargs else 1
    # Validation data defaults to the training data:
    if validation_data is None:
        X_valid, y_valid = X, y
    else:
        X_valid, y_valid = validation_data
    simplify_criteria = kwargs["simplify_criteria"] if "simplify_criteria" in kwargs else ("DLs", 0.05, 3, "relative") # the first argument choose from "DL", "loss"
    simplify_epsilon = simplify_criteria[1]
    simplify_patience = simplify_criteria[2]
    simplify_compare_mode = simplify_criteria[3]
    # Monitors the simplify criterion and keeps the best ("pivot") model_dict:
    performance_monitor = Performance_Monitor(patience = simplify_patience, epsilon = simplify_epsilon, compare_mode = simplify_compare_mode)
    record_keys = kwargs["record_keys"] if "record_keys" in kwargs else ["mse"]
    loss_precision_floor = kwargs["loss_precision_floor"] if "loss_precision_floor" in kwargs else PrecisionFloorLoss
    if X is not None:
        if y is None:
            # Self-distillation target: snapshot the model's own prediction.
            y = Variable(forward(model, X, **kwargs).data, requires_grad = False)
    # Normalize `model` to a list; remember whether to unwrap on return:
    if not (isinstance(model, list) or isinstance(model, tuple)):
        model = [model]
        is_list = False
    else:
        is_list = True
    if mode == "full":
        mode = ["collapse_layers", "snap"]
    if not isinstance(mode, list):
        mode = [mode]
    # Obtain the original loss and setup criterion:
    loss_type = kwargs["loss_type"] if "loss_type" in kwargs else "mse"
    criterion = get_criterion(loss_type, loss_precision_floor = loss_precision_floor)
    DL_criterion = Loss_Fun(core = "DLs", loss_precision_floor = loss_precision_floor, DL_sum = True)
    loss_dict = OrderedDict()
    for mode_ele in mode:
        if verbose >= 1:
            print("\n" + "=" * 48 + "\nSimplifying mode: {}".format(mode_ele), end = "")
            if mode_ele == "snap":
                snap_mode = kwargs["snap_mode"] if "snap_mode" in kwargs else "integer"
                print(" {}".format(snap_mode), end = "")
            if target_name is not None:
                print(" for {}".format(target_name))
            else:
                print()
            print("=" * 48)
        # Record the loss before simplification:
        if X is not None:
            pred_valid = forward(model, X_valid, **kwargs)
            loss_original = to_np_array(criterion(pred_valid, y_valid))
            loss_list = [loss_original]
            if verbose >= 1:
                print("original_loss: {}".format(loss_original))
            mse_record_whole = [to_np_array(nn.MSELoss()(pred_valid, y_valid))]
            data_DL_whole = [to_np_array(DL_criterion(pred_valid, y_valid))]
            model_DL_whole = [get_model_DL(model)]
        event_list = ["before simplification"]
        iter_end_whole = [1]
        is_accept_whole = []
        if "param" in record_keys:
            param_record_whole = [model[0].get_weights_bias(W_source = "core", b_source = "core")]
        if "param_grad" in record_keys:
            param_grad_record_whole = [model[0].get_weights_bias(W_source = "core", b_source = "core", is_grad = True)]
        # Begin simplification:
        if mode_ele == "collapse_layers":
            all_collapse_dict = {}
            for model_id, model_ele in enumerate(model):
                # Obtain activations for each layer:
                activation_list = []
                for k in range(len(model_ele.struct_param)):
                    if "activation" in model_ele.struct_param[k][2]:
                        activation_list.append(model_ele.struct_param[k][2]["activation"])
                    elif "activation" in model_ele.settings:
                        activation_list.append(model_ele.settings["activation"])
                    else:
                        activation_list.append("default")
                # Build the collapse_list that stipulates which layers to collapse:
                collapse_dict = {}
                current_start = None
                current_layer_type = None
                for k, activation in enumerate(activation_list):
                    # The last layer is never a collapse candidate (k != len - 1):
                    if activation == "linear" and k != len(activation_list) - 1:
                        if k not in collapse_dict and current_start is None:
                            # Create a new bunch:
                            if model_ele.struct_param[k + 1][1] == model_ele.struct_param[k][1]: # The current layer must have the same layer_type as the next layer
                                current_start = k
                                collapse_dict[current_start] = [k]
                                current_layer_type = model_ele.struct_param[k][1]
                        else:
                            # Adding to current bunch:
                            if model_ele.struct_param[k + 1][1] == model_ele.struct_param[k][1] == current_layer_type:
                                collapse_dict[current_start].append(k)
                            else:
                                # Layer type changes: close the bunch including this layer.
                                collapse_dict[current_start].append(k)
                                current_start = None
                    else:
                        # Non-linear layer terminates an open bunch (it is absorbed as the last member):
                        if current_start is not None:
                            collapse_dict[current_start].append(k)
                            current_start = None
                # Build new layer:
                new_layer_info = {}
                for current_start, layer_ids in collapse_dict.items():
                    for i, layer_id in enumerate(layer_ids):
                        layer = getattr(model_ele, "layer_{}".format(layer_id))
                        if i == 0:
                            W_accum = layer.W_core
                            b_accum = layer.b_core
                        else:
                            # Compose consecutive affine maps: W <- W1·W2, b <- b1·W2 + b2.
                            W_accum = torch.matmul(W_accum, layer.W_core)
                            b_accum = torch.matmul(b_accum, layer.W_core) + layer.b_core
                    if model_ele.is_cuda:
                        W_accum = W_accum.cpu()
                        b_accum = b_accum.cpu()
                    last_layer_id = collapse_dict[current_start][-1]
                    new_layer_info[current_start] = {"W_init": W_accum.data.numpy(), "b_init": b_accum.data.numpy(),
                                                     "layer_struct_param": [b_accum.size(0), model_ele.struct_param[last_layer_id][1], deepcopy(model_ele.struct_param[last_layer_id][2])],
                                                     }
                    # Stale snap info from the old layers must not carry over:
                    new_layer_info[current_start].pop("snap_dict", None)
                if verbose >= 1:
                    print("model_id {}, layers collapsed: {}".format(model_id, collapse_dict))
                # Rebuild the Net:
                if len(collapse_dict) > 0:
                    all_collapse_dict[model_id] = {"collapse_dict": collapse_dict,
                                                   "new_layer_info": new_layer_info,
                                                   "collapse_layer_ids": [idx for item in collapse_dict.values() for idx in item],
                                                   }
            # Rebuild the list of models:
            if len(all_collapse_dict) > 0:
                model_new = []
                for model_id, model_ele in enumerate(model):
                    if model_id in all_collapse_dict:
                        W_list, b_list = model_ele.get_weights_bias(W_source = "core", b_source = "core")
                        W_init_list = []
                        b_init_list = []
                        struct_param = []
                        for k in range(len(model_ele.struct_param)):
                            if k not in all_collapse_dict[model_id]["collapse_layer_ids"]:
                                # Untouched layer: keep as-is.
                                struct_param.append(model_ele.struct_param[k])
                                W_init_list.append(W_list[k])
                                b_init_list.append(b_list[k])
                            else:
                                # Only the first layer of each bunch is emitted (as the merged layer):
                                if k in all_collapse_dict[model_id]["collapse_dict"].keys():
                                    struct_param.append(all_collapse_dict[model_id]["new_layer_info"][k]["layer_struct_param"])
                                    W_init_list.append(all_collapse_dict[model_id]["new_layer_info"][k]["W_init"])
                                    b_init_list.append(all_collapse_dict[model_id]["new_layer_info"][k]["b_init"])
                        model_ele_new = MLP(input_size = model_ele.input_size,
                                            struct_param = struct_param,
                                            W_init_list = W_init_list,
                                            b_init_list = b_init_list,
                                            settings = model_ele.settings,
                                            is_cuda = model_ele.is_cuda,
                                            )
                    else:
                        model_ele_new = model_ele
                    model_new.append(model_ele_new)
                model = model_new
                # Calculate the loss again:
                pred_valid = forward(model, X_valid, **kwargs)
                loss_new = to_np_array(criterion(pred_valid, y_valid))
                if verbose >= 1:
                    print("after collapsing linear layers in all models, new loss {}".format(loss_new))
                loss_list.append(loss_new)
                mse_record_whole.append(to_np_array(nn.MSELoss()(pred_valid, y_valid)))
                data_DL_whole.append(to_np_array(DL_criterion(pred_valid, y_valid)))
                model_DL_whole.append(get_model_DL(model))
                if "param" in record_keys:
                    param_record_whole.append(model[0].get_weights_bias(W_source = "core", b_source = "core"))
                if "param_grad" in record_keys:
                    param_grad_record_whole.append(model[0].get_weights_bias(W_source = "core", b_source = "core", is_grad = True))
                iter_end_whole.append(1)
                event_list.append({mode_ele: all_collapse_dict})
        elif mode_ele in ["local", "snap"]:
            # 'local': greedily try reducing the input dimension by removing input dimension from the beginning;
            # 'snap': greedily snap each float parameter into an integer or rational number. Set argument 'snap_mode' == 'integer' or 'rational'.
            if mode_ele == "snap":
                target_params = [[(model_id, layer_id), "snap"] for model_id, model_ele in enumerate(model) for layer_id in range(len(model_ele.struct_param))]
            elif mode_ele == "local":
                for model_id, model_ele in enumerate(model):
                    if len(model_ele.struct_param) > 0:
                        first_model_id = model_id
                        break
                first_layer = getattr(model[first_model_id], "layer_0")
                # One snap-target list per input dimension, zeroing that whole input row:
                target_params = [[(first_model_id, 0), [[(("weight", (i, j)), 0.) for j in range(first_layer.output_size)] for i in range(first_layer.input_size)]]]
            else:
                raise
            excluded_idx_dict = {item[0]: [] for item in target_params}
            target_layer_ids_exclude = []
            for (model_id, layer_id), target_list in target_params:
                layer = getattr(model[model_id], "layer_{}".format(layer_id))
                if isinstance(target_list, list):
                    max_passes = len(target_list)
                elif target_list == "snap":
                    # At most one pass per scalar parameter (weights + biases):
                    max_passes = (layer.input_size + 1) * layer.output_size
                    if "max_passes" in kwargs:
                        max_passes = min(max_passes, kwargs["max_passes"])
                else:
                    raise Exception("target_list {} not recognizable!".format(target_list))
                if verbose >= 2:
                    print("\n****starting model:****")
                    model[model_id].get_weights_bias(W_source = "core", b_source = "core", verbose = True)
                    print("********\n" )
                performance_monitor.reset()
                criteria_value, criteria_result = get_criteria_value(model, X, y, criteria_type = simplify_criteria[0], criterion = criterion, **kwargs)
                to_stop, pivot_dict, log, _, pivot_id = performance_monitor.monitor(criteria_value, model_dict = model[model_id].model_dict, criteria_result = criteria_result)
                for i in range(max_passes):
                    # Perform tentative simplification
                    if isinstance(target_list, list):
                        info = layer.simplify(mode = "snap", excluded_idx = excluded_idx_dict[(model_id, layer_id)], snap_targets = target_list[i], **kwargs)
                    else:
                        info = layer.simplify(mode = "snap", excluded_idx = excluded_idx_dict[(model_id, layer_id)], **kwargs)
                    if len(info) == 0:
                        target_layer_ids_exclude.append((model_id, layer_id))
                        print("Pass {0}, (model {1}, layer {2}) has no parameters to snap. Revert to pivot model. Go to next layer".format(i, model_id, layer_id))
                        break
                    excluded_idx_dict[(model_id, layer_id)] = excluded_idx_dict[(model_id, layer_id)] + info
                    # Retrain the remaining free parameters after the snap:
                    _, loss_new, data_record = train_simple(model, X, y, optim_type = "adam", validation_data = validation_data, **kwargs)
                    if verbose >= 2:
                        print("=" * 8)
                        model[model_id].get_weights_bias(W_source = "core", b_source = "core", verbose = True)
                    criteria_value, criteria_result = get_criteria_value(model, X, y, criteria_type = simplify_criteria[0], criterion = criterion, **kwargs)
                    to_stop, pivot_dict, log, is_accept, pivot_id = performance_monitor.monitor(criteria_value, model_dict = model[model_id].model_dict, criteria_result = criteria_result)
                    is_accept_whole.append(is_accept)
                    if is_accept:
                        print('[Accepted] as pivot model!')
                        print()
                    # Check if the criterion after simplification and refit is worse. If it is worse than the simplify_epsilon, revert:
                    if to_stop:
                        target_layer_ids_exclude.append((model_id, layer_id))
                        if verbose >= 1:
                            print("Pass {0}, loss: {1}\tDL: {2}. New snap {3} is do not improve by {4} = {5} for {6} steps. Revert the simplification to pivot model. Go to next layer.".format(
                                i, view_item(log, ("criteria_result", "loss")), view_item(log, ("criteria_result", "DL")), info, simplify_criteria[0], simplify_epsilon, simplify_patience))
                        break
                    mse_record_whole += data_record["mse"]
                    data_DL_whole += data_record["data_DL"]
                    model_DL_whole += data_record["model_DL"]
                    if "param" in record_keys:
                        param_record_whole += data_record["param"]
                    if "param_grad" in record_keys:
                        param_grad_record_whole += data_record["param_grad"]
                    iter_end_whole.append(len(data_record["mse"]))
                    model[model_id].reset_layer(layer_id, layer)
                    loss_list.append(loss_new)
                    event_list.append({mode_ele: ((model_id, layer_id), info)})
                    if verbose >= 1:
                        print("Pass {0}, snap (model {1}, layer {2}), snap {3}. \tloss: {4}\tDL: {5}".format(
                            i, model_id, layer_id, info, view_item(log, ("criteria_result", "loss")), view_item(log, ("criteria_result", "DL"))))
                # Update the whole model's struct_param and snap_dict:
                model[model_id].load_model_dict(pivot_dict["model_dict"])
                model[model_id].synchronize_settings()
                if verbose >= 2:
                    print("\n****pivot model at {}th transformation:****".format(pivot_id))
                    model[model_id].get_weights_bias(W_source = "core", b_source = "core", verbose = True)
                    print("********\n" )
        elif mode_ele == "pair_snap":
            model_new = []
            for model_id, model_ele in enumerate(model):
                for layer_id, layer_struct_param in enumerate(model_ele.struct_param):
                    if layer_struct_param[1] == "Symbolic_Layer":
                        layer = getattr(model_ele, "layer_{}".format(layer_id))
                        max_passes = len(layer.get_param_dict()) - 1
                        if "max_passes" in kwargs:
                            max_passes = min(max_passes, kwargs["max_passes"])
                        if verbose > 1:
                            print("original:")
                            print("symbolic_expression: ", layer.symbolic_expression)
                            print("numerical_expression: ", layer.numerical_expression)
                            print()
                        performance_monitor.reset()
                        criteria_value, criteria_result = get_criteria_value(model, X, y, criteria_type = simplify_criteria[0], criterion = criterion, **kwargs)
                        to_stop, pivot_dict, log, _, pivot_id = performance_monitor.monitor(criteria_value, model_dict = model[model_id].model_dict, criteria_result = criteria_result)
                        for i in range(max_passes):
                            info = layer.simplify(mode = "pair_snap", **kwargs)
                            if len(info) == 0:
                                # NOTE(review): target_layer_ids_exclude is only defined in the
                                # "local"/"snap" branch; this append may NameError if "pair_snap"
                                # is run without a preceding snap pass — confirm intended.
                                target_layer_ids_exclude.append((model_id, layer_id))
                                print("Pass {0}, (model {1}, layer {2}) has no parameters to pair_snap. Revert to pivot model. Go to next layer".format(i, model_id, layer_id))
                                break
                            _, loss, data_record = train_simple(model, X, y, optim_type = "adam", epochs = 1000, validation_data = validation_data, **kwargs)
                            criteria_value, criteria_result = get_criteria_value(model, X, y, criteria_type = simplify_criteria[0], criterion = criterion, **kwargs)
                            to_stop, pivot_dict, log, is_accept, pivot_id = performance_monitor.monitor(criteria_value, model_dict = model[model_id].model_dict, criteria_result = criteria_result)
                            is_accept_whole.append(is_accept)
                            if to_stop:
                                if verbose >= 1:
                                    print("\nPass {0}, loss: {1}\tDL: {2}. New snap {3} is do not improve by {4} = {5} for {6} steps. Revert the simplification to pivot model. Go to next layer.".format(
                                        i, view_item(log, ("criteria_result", "loss")), view_item(log, ("criteria_result", "DL")), info, simplify_criteria[0], simplify_epsilon, simplify_patience))
                                break
                            mse_record_whole += data_record["mse"]
                            data_DL_whole += data_record["data_DL"]
                            model_DL_whole += data_record["model_DL"]
                            if "param" in record_keys:
                                param_record_whole += data_record["param"]
                            if "param_grad" in record_keys:
                                param_grad_record_whole += data_record["param_grad"]
                            iter_end_whole.append(len(data_record["mse"]))
                            model[model_id].reset_layer(layer_id, layer)
                            loss_list.append(loss)
                            event_list.append({mode_ele: ((model_id, layer_id), info)})
                            if verbose >= 1:
                                print("\nPass {0}, snap (model {1}, layer {2}), snap {3}. \tloss: {4}\tDL: {5}".format(
                                    i, model_id, layer_id, info, view_item(log, ("criteria_result", "loss")), view_item(log, ("criteria_result", "DL"))))
                                print("symbolic_expression: ", layer.symbolic_expression)
                                print("numerical_expression: ", layer.numerical_expression)
                                print()
                        # Restore the best model seen during this layer's passes:
                        model[model_id].load_model_dict(pivot_dict["model_dict"])
                        print("final: \nsymbolic_expression: ", getattr(model[model_id], "layer_{0}".format(layer_id)).symbolic_expression)
                        print("numerical_expression: ", getattr(model[model_id], "layer_{0}".format(layer_id)).numerical_expression)
                        print()
        elif mode_ele[:11] == "to_symbolic":
            from sympy import Symbol
            force_simplification = kwargs["force_simplification"] if "force_simplification" in kwargs else False
            is_multi_model = True if len(model) > 1 else False
            for model_id, model_ele in enumerate(model):
                for layer_id, layer_struct_param in enumerate(model_ele.struct_param):
                    # Per-layer symbol prefix to keep symbol names unique across layers:
                    prefix = "L{}_".format(layer_id)
                    if layer_struct_param[1] == "Simple_Layer":
                        # Obtain loss before simplification:
                        layer = getattr(model_ele, "layer_{}".format(layer_id))
                        if X is not None:
                            criteria_prev, criteria_result_prev = get_criteria_value(model, X, y, criteria_type = simplify_criteria[0], criterion = criterion, **kwargs)
                        if mode_ele.split("_")[-1] == "separable":
                            new_layer = Simple_2_Symbolic(layer, settings = model_ele.settings, mode = "separable", prefix = prefix)
                        else:
                            new_layer = Simple_2_Symbolic(layer, settings = model_ele.settings, prefix = prefix)
                        model[model_id].reset_layer(layer_id, new_layer)
                        # Re-apply previously snapped parameter values as exact substitutions:
                        if "snap_dict" in model_ele.settings and layer_id in model_ele.settings["snap_dict"]:
                            subs_targets = []
                            for (pos, true_idx), item in model_ele.settings["snap_dict"][layer_id].items():
                                if pos == "weight":
                                    subs_targets.append((Symbol("W{0}{1}".format(true_idx[0], true_idx[1])), item["new_value"]))
                                elif pos == "bias":
                                    subs_targets.append((Symbol("b{}".format(true_idx)), item["new_value"]))
                                else:
                                    raise Exception("pos {} not recognized!".format(pos))
                            new_expression = [expression.subs(subs_targets) for expression in new_layer.symbolic_expression]
                            new_layer.set_symbolic_expression(new_expression)
                            model_ele.settings["snap_dict"].pop(layer_id)
                        model_ele.struct_param[layer_id][2].update(new_layer.struct_param[2])
                        # Calculate the loss again:
                        if X is not None:
                            criteria_new, criteria_result_new = get_criteria_value(model, X, y, criteria_type = simplify_criteria[0], criterion = criterion, **kwargs)
                            if verbose >= 1:
                                print("Prev_loss: {0}, new loss: {1}\tprev_DL: {2:.9f}, new DL: {3:.9f}".format(
                                    criteria_result_prev["loss"], criteria_result_new["loss"], criteria_result_prev["DL"], criteria_result_new["DL"]))
                                print()
                            if criteria_new > criteria_prev * (1 + 0.05):
                                print("to_symbolic DL increase more than 5%! ", end = "")
                                if not force_simplification:
                                    print("Reset layer.")
                                    model[model_id].reset_layer(layer_id, layer)
                                else:
                                    print("Nevertheless, force simplification.")
                            loss_list.append(criteria_result_new["loss"])
                            print("{0} succeed. Prev_loss: {1}\tnew_loss: {2}\tprev_DL: {3:.9f}, new_DL: {4:.9f}".format(
                                mode_ele, criteria_result_prev["loss"], criteria_result_new["loss"],
                                criteria_result_prev["DL"], criteria_result_new["DL"]))
                        else:
                            print("{0} succeed.".format(mode_ele))
                        event_list.append({mode_ele: (model_id, layer_id)})
                    elif layer_struct_param[1] == "Sneuron_Layer":
                        # Obtain loss before simplification:
                        layer = getattr(model_ele, "layer_{0}".format(layer_id))
                        criteria_prev, criteria_result_prev = get_criteria_value(model, X, y, criteria_type = simplify_criteria[0], criterion = criterion, **kwargs)
                        new_layer = Sneuron_2_Symbolic(layer, prefix = prefix)
                        model[model_id].reset_layer(layer_id, new_layer)
                        # Calculate the loss again:
                        criteria_new, criteria_result_new = get_criteria_value(model, X, y, criteria_type = simplify_criteria[0], criterion = criterion, **kwargs)
                        if verbose >= 1:
                            print("Prev_loss: {0}, new loss: {1}\tprev_DL: {2:.9f}, new DL: {3:.9f}".format(
                                criteria_result_prev["loss"], criteria_result_new["loss"], criteria_result_prev["DL"], criteria_result_new["DL"]))
                            print()
                        if criteria_new > criteria_prev * (1 + 0.05):
                            print("to_symbolic DL increase more than 5%! ", end = "")
                            if not force_simplification:
                                print("Reset layer.")
                                model[model_id].reset_layer(layer_id, layer)
                            else:
                                print("Nevertheless, force simplification.")
                        loss_list.append(criteria_result_new["loss"])
                        event_list.append({mode_ele: (model_id, layer_id)})
                        print("{0} succeed. Prev_loss: {1}\tnew_loss: {2}\tprev_DL: {3:.9f}, new_DL: {4:.9f}".format(
                            mode_ele, criteria_result_prev["loss"], criteria_result_new["loss"],
                            criteria_result_prev["DL"], criteria_result_new["DL"]))
            if X is not None:
                # NOTE(review): pred_valid here is the prediction recorded BEFORE this
                # mode's conversions (not recomputed) — confirm that is intended.
                mse_record_whole.append(to_np_array(nn.MSELoss()(pred_valid, y_valid)))
                data_DL_whole.append(to_np_array(DL_criterion(pred_valid, y_valid)))
                model_DL_whole.append(get_model_DL(model))
                if "param" in record_keys:
                    param_record_whole.append(model[0].get_weights_bias(W_source = "core", b_source = "core"))
                if "param_grad" in record_keys:
                    param_grad_record_whole.append(model[0].get_weights_bias(W_source = "core", b_source = "core", is_grad = True))
                iter_end_whole.append(1)
        elif mode_ele == "symbolic_simplification":
            """Collapse multi-layer symbolic expression"""
            from sympy import Symbol, Poly, expand, prod
            force_simplification = kwargs["force_simplification"] if "force_simplification" in kwargs else False
            numerical_threshold = kwargs["numerical_threshold"] if "numerical_threshold" in kwargs else None
            is_numerical = kwargs["is_numerical"] if "is_numerical" in kwargs else False
            max_poly_degree = kwargs["max_poly_degree"] if "max_poly_degree" in kwargs else None
            show_before_truncate = kwargs["show_before_truncate"] if "show_before_truncate" in kwargs else False
            for model_id, model_ele in enumerate(model):
                # Only models composed entirely of Symbolic_Layers can be collapsed:
                is_all_symbolic = True
                for layer_id, layer_struct_param in enumerate(model_ele.struct_param):
                    if layer_struct_param[1] != "Symbolic_Layer":
                        is_all_symbolic = False
                if is_all_symbolic:
                    criteria_prev, criteria_result_prev = get_criteria_value(model, X, y, criteria_type = simplify_criteria[0], criterion = criterion, **kwargs)
                    variables = OrderedDict()
                    for i in range(model[0].layer_0.input_size):
                        variables["x{0}".format(i)] = Symbol("x{0}".format(i))
                    expression = list(variables.values())
                    param_dict_all = {}
                    # Collapse multiple layers:
                    for layer_id, layer_struct_param in enumerate(model_ele.struct_param):
                        layer = getattr(model_ele, "layer_{0}".format(layer_id))
                        layer_expression = deepcopy(layer.numerical_expression)
                        layer_expression_new = []
                        for expr in layer_expression:
                            new_expr = expr.subs({"x{0}".format(i): "t{0}".format(i) for i in range(len(expression))}) # Use a temporary variable to prevent overriding
                            new_expr = new_expr.subs({"t{0}".format(i): expression[i] for i in range(len(expression))})
                            layer_expression_new.append(expand(new_expr))
                        expression = layer_expression_new
                    # Show full expression before performing truncation:
                    if show_before_truncate:
                        for i, expr in enumerate(expression):
                            print("Full expression {0}:".format(i))
                            pp.pprint(Poly(expr, *list(variables.values())))
                            print()
                    # Candidate single-layer replacement; expression is set below:
                    model_ele_candidate = MLP(input_size = model[0].layer_0.input_size,
                                              struct_param = [[layer.output_size, "Symbolic_Layer", {"symbolic_expression": "x0"}]],
                                              settings = {},
                                              is_cuda = model_ele.is_cuda,
                                              )
                    # Setting maximul degree for polynomial:
                    if max_poly_degree is not None:
                        new_expression = []
                        for expr in expression:
                            expr = Poly(expr, *list(variables.values()))
                            degree_list = []
                            coeff_list = []
                            for degree, coeff in expr.terms():
                                # Only use monomials with degree not larger than max_poly_degree:
                                if sum(degree) <= max_poly_degree:
                                    degree_list.append(degree)
                                    coeff_list.append(coeff)
                            new_expr = 0
                            for degree, coeff in zip(degree_list, coeff_list):
                                new_expr += prod([variables["x{0}".format(i)] ** degree[i] for i in range(len(degree))]) * coeff
                            new_expression.append(new_expr)
                        expression = new_expression
                    # Update symbolic expression for model_ele_candidate:
                    if not is_numerical:
                        # Replace numeric literals with trainable parameters:
                        param_dict_all = {}
                        expression_new_all = []
                        for expr in expression:
                            expression_new, param_dict = numerical_2_parameter(expr, idx = len(param_dict_all), threshold = numerical_threshold)
                            expression_new_all.append(expression_new)
                            param_dict_all.update(param_dict)
                        model_ele_candidate.layer_0.set_symbolic_expression(expression_new_all, p_init = param_dict_all)
                    else:
                        model_ele_candidate.layer_0.set_symbolic_expression(expression)
                        model_ele_candidate.layer_0.set_numerical(True)
                    criteria_new, criteria_result_new = get_criteria_value(model_ele_candidate, X, y, criteria_type = simplify_criteria[0], criterion = criterion, **kwargs)
                    if criteria_new > criteria_prev * (1 + 0.05):
                        print("to_symbolic DL increase more than 5%! ", end = "")
                        if force_simplification:
                            print("Nevertheless, force simplification.")
                            model[model_id] = model_ele_candidate
                        else:
                            print("Revert.")
                    else:
                        model[model_id] = model_ele_candidate
        elif mode_ele == "activation_snap":
            from sympy import Function
            def get_sign_snap_candidate(layer, activation_source, excluded_neurons = None):
                # Pick the not-yet-excluded neuron (output index) whose expression uses
                # activation_source and has the largest mean |W| coefficient.
                coeff_dict = {}
                for i in range(len(layer.symbolic_expression)):
                    current_expression = [layer.symbolic_expression[i]]
                    func_names = layer.get_function_name_list(current_expression)
                    if activation_source in func_names:
                        coeff = [element for element in layer.get_param_name_list(current_expression) if element[0] == "W"]
                        coeff_dict[i] = np.mean([np.abs(value) for key, value in layer.get_param_dict().items() if key in coeff])
                best_idx = None
                best_value = 0
                for key, value in coeff_dict.items():
                    if value > best_value and key not in excluded_neurons:
                        best_value = value
                        best_idx = key
                return best_idx, best_value
            activation_source = kwargs["activation_source"] if "activation_source" in kwargs else "sigmoid"
            activation_target = kwargs["activation_target"] if "activation_target" in kwargs else "heaviside"
            activation_fun_source = Function(activation_source)
            activation_fun_target = Function(activation_target)
            for model_id, model_ele in enumerate(model):
                for layer_id, layer_struct_param in enumerate(model_ele.struct_param):
                    if layer_struct_param[1] == "Symbolic_Layer":
                        layer = getattr(model_ele, "layer_{0}".format(layer_id))
                        excluded_neurons = []
                        if activation_source not in layer.get_function_name_list():
                            continue
                        performance_monitor.reset()
                        criteria_value, criteria_result = get_criteria_value(model, X, y, criteria_type = simplify_criteria[0], criterion = criterion, **kwargs)
                        to_stop, pivot_dict, log, _, pivot_id = performance_monitor.monitor(criteria_value, model_dict = model[model_id].model_dict, criteria_result = criteria_result)
                        # One pass per neuron in this layer:
                        for i in range(layer_struct_param[0]):
                            # Obtain loss before simplification:
                            layer = getattr(model_ele, "layer_{0}".format(layer_id))
                            best_idx, _ = get_sign_snap_candidate(layer, activation_source, excluded_neurons = excluded_neurons)
                            excluded_neurons.append(best_idx)
                            new_expression = [expression.subs(activation_fun_source, activation_fun_target) if j == best_idx else expression for j, expression in enumerate(layer.symbolic_expression)]
                            print("Pass {0}, candidate new expression: {1}".format(i, new_expression))
                            layer.set_symbolic_expression(new_expression)
                            # Train:
                            _, loss_new, data_record = train_simple(model, X, y, validation_data = validation_data, **kwargs)
                            criteria_value, criteria_result = get_criteria_value(model, X, y, criteria_type = simplify_criteria[0], criterion = criterion, **kwargs)
                            to_stop, pivot_dict, log, is_accept, pivot_id = performance_monitor.monitor(criteria_value, model_dict = model[model_id].model_dict, criteria_result = criteria_result)
                            is_accept_whole.append(is_accept)
                            # Check if the criterion after simplification and refit is worse. If it is worse than the simplify_epsilon, revert:
                            if to_stop:
                                model[model_id].load_model_dict(pivot_dict["model_dict"])
                                if verbose >= 1:
                                    # NOTE(review): `info` is not assigned in this branch —
                                    # likely carried over from another mode; confirm.
                                    print("Pass {0}, loss: {1}\tDL: {2}. New snap {3} is do not improve by {4} = {5} for {6} steps. Revert the simplification to pivot model. Continue".format(
                                        i, view_item(log, ("criteria_result", "loss")), view_item(log, ("criteria_result", "DL")), info, simplify_criteria[0], simplify_epsilon, simplify_patience))
                                continue
                            mse_record_whole += data_record["mse"]
                            data_DL_whole += data_record["data_DL"]
                            model_DL_whole += data_record["model_DL"]
                            if "param" in record_keys:
                                param_record_whole += data_record["param"]
                            if "param_grad" in record_keys:
                                param_grad_record_whole += data_record["param_grad"]
                            iter_end_whole.append(len(data_record["mse"]))
                            loss_list.append(loss_new)
                            event_list.append({mode_ele: (model_id, layer_id)})
                            if verbose >= 1:
                                print("{0} succeed at (model {1}, layer {2}). loss: {3}\tDL: {4}".format(
                                    mode_ele, model_id, layer_id, view_item(log, ("criteria_result", "loss")), view_item(log, ("criteria_result", "DL"))))
                                print("symbolic_expression: ", layer.symbolic_expression)
                                print("numerical_expression: ", layer.numerical_expression)
                                print()
                        model[model_id].load_model_dict(pivot_dict["model_dict"])
        elif mode_ele == "ramping-L1":
            loss_list_specific = []
            ramping_L1_list = kwargs["ramping_L1_list"] if "ramping_L1_list" in kwargs else np.logspace(-7, -1, 30)
            ramping_mse_threshold = kwargs["ramping_mse_threshold"] if "ramping_mse_threshold" in kwargs else 1e-5
            ramping_final_multiplier = kwargs["ramping_final_multiplier"] if "ramping_final_multiplier" in kwargs else 1e-2
            layer_dict_dict = {}
            for i, L1_amp in enumerate(ramping_L1_list):
                reg_dict = {"weight": L1_amp, "bias": L1_amp, "param": L1_amp}
                _, loss_end, data_record = train_simple(model, X, y, reg_dict = reg_dict, patience = None, validation_data = validation_data, **kwargs)
                layer_dict_dict[i] = model[0].layer_0.layer_dict
                weight, bias = model[0].layer_0.get_weights_bias()
                print("L1-amp: {0}\tloss: {1}\tweight: {2}\tbias: {3}".format(L1_amp, loss_end, weight, bias))
                loss_list_specific.append(loss_end)
                if "param" in record_keys:
                    param_record_whole.append((weight, bias))
                if loss_end > ramping_mse_threshold:
                    if len(loss_list_specific) == 1:
                        print("\nThe MSE after the first L1-amp={0} is already larger than the ramping_mse_threshold. Stop and use current L1-amp. The figures will look empty.".format(ramping_mse_threshold))
                    else:
                        print("\nThe MSE {0} is larger than the ramping_mse_threshold {1}, stop ramping-L1 simplification".format(loss_end, ramping_mse_threshold))
                    break
                mse_record_whole.append(data_record["mse"][-1])
                data_DL_whole.append(data_record["data_DL"][-1])
                model_DL_whole.append(data_record["model_DL"][-1])
                iter_end_whole.append(1)
            # Choose the final amplitude a bit below the last (too-strong) one:
            final_L1_amp = L1_amp * ramping_final_multiplier
            final_L1_idx = np.argmin(np.abs(np.array(ramping_L1_list) - final_L1_amp))
            layer_dict_final = layer_dict_dict[final_L1_idx]
            print("Final L1_amp used: {0}".format(ramping_L1_list[final_L1_idx]))
            if "param" in record_keys:
                print("Final param value:\nweights: {0}\nbias{1}".format(param_record_whole[final_L1_idx][0], param_record_whole[final_L1_idx][1]))
            model[0].layer_0.load_layer_dict(layer_dict_final)
            # Truncate records to the retained portion of the sweep:
            mse_record_whole = mse_record_whole[: final_L1_idx + 2]
            data_DL_whole = data_DL_whole[: final_L1_idx + 2]
            model_DL_whole = model_DL_whole[: final_L1_idx + 2]
            iter_end_whole = iter_end_whole[: final_L1_idx + 2]
            if isplot:
                def dict_to_list(Dict):
                    return np.array([value for value in Dict.values()])
                weights_list = []
                bias_list = []
                for element in param_record_whole:
                    if isinstance(element[0], dict):
                        element_core = dict_to_list(element[0])
                        weights_list.append(element_core)
                    else:
                        element_core = to_np_array(element[0]).squeeze(1)
                        weights_list.append(element_core)
                    bias_list.append(to_np_array(element[1]))
                weights_list = np.array(weights_list)
                bias_list = np.array(bias_list).squeeze(1)
                import matplotlib.pylab as plt
                plt.figure(figsize = (7,5))
                plt.loglog(ramping_L1_list[: len(loss_list_specific)], loss_list_specific)
                plt.xlabel("L1 amp", fontsize = 16)
                plt.ylabel("mse", fontsize = 16)
                plt.show()
                plt.figure(figsize = (7,5))
                plt.semilogx(ramping_L1_list[: len(loss_list_specific)], loss_list_specific)
                plt.xlabel("L1 amp", fontsize = 16)
                plt.ylabel("mse", fontsize = 16)
                plt.show()
                plt.figure(figsize = (7,5))
                for i in range(weights_list.shape[1]):
                    plt.semilogx(ramping_L1_list[: len(loss_list_specific)], weights_list[:,i], label = "weight_{0}".format(i))
                if len(bias_list) > 0:
                    plt.semilogx(ramping_L1_list[: len(loss_list_specific)], bias_list, label = "bias")
                plt.xlabel("L1 amp", fontsize = 16)
                plt.ylabel("parameter_values", fontsize = 16)
                plt.legend()
                plt.show()
                plt.clf()
                plt.close()
        else:
            raise Exception("mode {0} not recognized!".format(mode_ele))
        # Collect this mode's records into the returned loss_dict:
        loss_dict[mode_ele] = {}
        if X is not None:
            loss_dict[mode_ele]["mse_record_whole"] = mse_record_whole
            loss_dict[mode_ele]["data_DL_whole"] = data_DL_whole
            loss_dict[mode_ele]["{0}_test".format(loss_type)] = loss_list
            loss_dict[mode_ele]["model_DL_whole"] = model_DL_whole
            if "param" in record_keys:
                loss_dict[mode_ele]["param_record_whole"] = param_record_whole
            if "param_grad" in record_keys:
                loss_dict[mode_ele]["param_grad_record_whole"] = param_grad_record_whole
            loss_dict[mode_ele]["iter_end_whole"] = iter_end_whole
            loss_dict[mode_ele]["event_list"] = event_list
            loss_dict[mode_ele]["is_accept_whole"] = is_accept_whole
            if mode_ele == "ramping-L1":
                loss_dict[mode_ele]["ramping_L1_list"] = ramping_L1_list
                loss_dict[mode_ele]["loss_list_specific"] = loss_list_specific
    # Unwrap single-model input back to a bare model:
    if not is_list:
        model = model[0]
    return model, loss_dict
# ## The following are different model architectures:
# ## MLP:
# In[3]:
class MLP(nn.Module):
    def __init__(
        self,
        input_size,
        struct_param = None,
        W_init_list = None, # initialization for weights
        b_init_list = None, # initialization for bias
        settings = {}, # Default settings for each layer, if the settings for the layer is not provided in struct_param
        is_cuda = False,
    ):
        """Multi-layer perceptron built from a declarative struct_param list.

        Args:
            input_size: number of input features.
            struct_param: list of [num_neurons, layer_type, layer_settings]
                triples, one per layer; if None, an empty net (num_layers == 0)
                is created and no layers are instantiated.
            W_init_list, b_init_list: optional per-layer initial weights/biases.
            settings: defaults shared by all layers; per-layer settings in
                struct_param override these. NOTE(review): mutable default
                argument — harmless only because it is deepcopied below.
            is_cuda: whether layers should live on GPU.
        """
        super(MLP, self).__init__()
        self.input_size = input_size
        self.is_cuda = is_cuda
        # Deepcopy so the caller's settings dict is never mutated:
        self.settings = deepcopy(settings)
        if struct_param is not None:
            self.num_layers = len(struct_param)
            self.W_init_list = W_init_list
            self.b_init_list = b_init_list
            self.info_dict = {}
            self.init_layers(deepcopy(struct_param))
        else:
            self.num_layers = 0
@property
def struct_param(self):
return [getattr(self, "layer_{0}".format(i)).struct_param for i in range(self.num_layers)]
@property
def output_size(self):
return self.get_layer(-1).output_size
@property
def structure(self):
structure = OrderedDict()
structure["input_size"] = self.input_size
structure["output_size"] = self.output_size
structure["struct_param"] = self.struct_param if hasattr(self, "struct_param") else None
return structure
def init_layers(self, struct_param):
res_forward = self.settings["res_forward"] if "res_forward" in self.settings else False
for k, layer_struct_param in enumerate(struct_param):
if res_forward:
num_neurons_prev = struct_param[k - 1][0] + self.input_size if k > 0 else self.input_size
else:
num_neurons_prev = struct_param[k - 1][0] if k > 0 else self.input_size
num_neurons = layer_struct_param[0]
W_init = self.W_init_list[k] if self.W_init_list is not None else None
b_init = self.b_init_list[k] if self.b_init_list is not None else None
# Get settings for the current layer:
layer_settings = deepcopy(self.settings) if bool(self.settings) else {}
layer_settings.update(layer_struct_param[2])
# Construct layer:
layer = get_Layer(layer_type = layer_struct_param[1],
input_size = num_neurons_prev,
output_size = num_neurons,
W_init = W_init,
b_init = b_init,
settings = layer_settings,
is_cuda = self.is_cuda,
)
setattr(self, "layer_{}".format(k), layer)
def forward(self, *input, p_dict=None, **kwargs):
kwargs = filter_kwargs(kwargs, ["res_forward", "is_res_block", "act_noise_scale"]) # only allow certain kwargs to be passed
if isinstance(input, tuple):
input = torch.cat(input, -1)
output = input
res_forward = self.settings["res_forward"] if "res_forward" in self.settings else False
is_res_block = self.settings["is_res_block"] if "is_res_block" in self.settings else False
for k in range(len(self.struct_param)):
p_dict_ele = p_dict[k] if p_dict is not None else None
if res_forward and k > 0:
output = getattr(self, "layer_{}".format(k))(torch.cat([output, input], -1), p_dict=p_dict_ele, **kwargs)
else:
output = getattr(self, "layer_{}".format(k))(output, p_dict=p_dict_ele, **kwargs)
if is_res_block:
output = output + input
return output
def copy(self):
return deepcopy(self)
def simplify(self, X=None, y=None, mode="full", isplot=False, target_name=None, validation_data = None, **kwargs):
new_model, _ = simplify(self, X, y, mode=mode, isplot=isplot, target_name=target_name, validation_data=validation_data, **kwargs)
self.__dict__.update(new_model.__dict__)
def snap(self, snap_mode="integer", top=5, **kwargs):
"""Generate a set of new models whose parameters are snapped, each model with a different number of snapped parameters."""
if not hasattr(self, "num_layers") or self.num_layers != 1:
return False, [self]
else:
model_list = []
top = top if snap_mode != "unsnap" else 1
for top_ele in range(1, top + 1):
new_model = self.copy()
layer = new_model.layer_0
info_list = layer.simplify(mode="snap", top=top_ele, snap_mode=snap_mode)
if len(info_list) > 0:
new_model.reset_layer(0, layer)
model_list.append(new_model)
is_succeed = len(model_list) > 0
return is_succeed, model_list
def get_regularization(self, source = ["weight", "bias"], mode = "L1", **kwargs):
reg = to_Variable([0], is_cuda=self.is_cuda)
for k in range(len(self.struct_param)):
layer = getattr(self, "layer_{}".format(k))
reg = reg + layer.get_regularization(mode = mode, source = source)
return reg
def get_layer(self, layer_id):
if layer_id < 0:
layer_id += self.num_layers
return getattr(self, "layer_{}".format(layer_id))
def reset_layer(self, layer_id, layer):
setattr(self, "layer_{}".format(layer_id), layer)
def insert_layer(self, layer_id, layer):
if layer_id < 0:
layer_id += self.num_layers
if layer_id < self.num_layers - 1:
next_layer = getattr(self, "layer_{}".format(layer_id + 1))
if next_layer.struct_param[1] == "Simple_Layer":
assert next_layer.input_size == layer.output_size, "The inserted layer's output_size {0} must be compatible with next layer_{1}'s input_size {2}!" .format(layer.output_size, layer_id + 1, next_layer.input_size)
for i in range(self.num_layers - 1, layer_id - 1, -1):
setattr(self, "layer_{}".format(i + 1), getattr(self, "layer_{}".format(i)))
setattr(self, "layer_{}".format(layer_id), layer)
self.num_layers += 1
def remove_layer(self, layer_id):
if layer_id < 0:
layer_id += self.num_layers
if layer_id < self.num_layers - 1:
num_neurons_prev = self.struct_param[layer_id - 1][0] if layer_id > 0 else self.input_size
replaced_layer = getattr(self, "layer_{}".format(layer_id + 1))
if replaced_layer.struct_param[1] == "Simple_Layer":
assert replaced_layer.input_size == num_neurons_prev, "After deleting layer_{0}, the replaced layer's input_size {1} must be compatible with previous layer's output neurons {2}!" .format(layer_id, replaced_layer.input_size, num_neurons_prev)
for i in range(layer_id, self.num_layers - 1):
setattr(self, "layer_{}".format(i), getattr(self, "layer_{}".format(i + 1)))
self.num_layers -= 1
def prune_neurons(self, layer_id, neuron_ids):
if layer_id == "input":
layer = self.get_layer(0)
layer.prune_input_neurons(neuron_ids)
self.input_size = layer.input_size
else:
if layer_id < 0:
layer_id = self.num_layers + layer_id
layer = getattr(self, "layer_{}".format(layer_id))
layer.prune_output_neurons(neuron_ids)
self.reset_layer(layer_id, layer)
if layer_id < self.num_layers - 1:
next_layer = getattr(self, "layer_{}".format(layer_id + 1))
next_layer.prune_input_neurons(neuron_ids)
self.reset_layer(layer_id + 1, next_layer)
def add_neurons(self, layer_id, num_neurons, mode = ("imitation", "zeros")):
if not isinstance(mode, list) and not isinstance(mode, tuple):
mode = (mode, mode)
if layer_id < 0:
layer_id = self.num_layers + layer_id
layer = getattr(self, "layer_{}".format(layer_id))
layer.add_output_neurons(num_neurons, mode = mode[0])
self.reset_layer(layer_id, layer)
if layer_id < self.num_layers - 1:
next_layer = getattr(self, "layer_{}".format(layer_id + 1))
next_layer.add_input_neurons(num_neurons, mode = mode[1])
self.reset_layer(layer_id + 1, next_layer)
if layer_id == 0:
self.input_size = self.get_layer(0).input_size
def inspect_operation(self, input, operation_between, p_dict = None, **kwargs):
output = input
res_forward = self.settings["res_forward"] if "res_forward" in self.settings else False
is_res_block = self.settings["is_res_block"] if "is_res_block" in self.settings else False
for k in range(*operation_between):
p_dict_ele = p_dict[k] if p_dict is not None else None
if res_forward and k > 0:
output = getattr(self, "layer_{}".format(k))(torch.cat([output, input], -1), p_dict = p_dict_ele)
else:
output = getattr(self, "layer_{}".format(k))(output, p_dict = p_dict_ele)
if is_res_block:
output = output + input
return output
def get_weights_bias(self, W_source = "core", b_source = "core", layer_ids = None, is_grad = False, isplot = False, verbose = False, raise_error = True):
if not hasattr(self, "struct_param"):
return None, None
layer_ids = range(len(self.struct_param)) if layer_ids is None else layer_ids
W_list = []
b_list = []
if W_source is not None:
for k in range(len(self.struct_param)):
if k in layer_ids:
if W_source == "core":
try:
W, _ = getattr(self, "layer_{}".format(k)).get_weights_bias(is_grad = is_grad)
except Exception as e:
if raise_error:
raise
else:
print(e)
W = np.array([np.NaN])
else:
raise Exception("W_source '{}' not recognized!".format(W_source))
W_list.append(W)
if b_source is not None:
for k in range(len(self.struct_param)):
if k in layer_ids:
if b_source == "core":
try:
_, b = getattr(self, "layer_{}".format(k)).get_weights_bias(is_grad = is_grad)
except Exception as e:
if raise_error:
raise
else:
print(e)
b = np.array([np.NaN])
else:
raise Exception("b_source '{}' not recognized!".format(b_source))
b_list.append(b)
if verbose:
import pprint as pp
if W_source is not None:
print("weight:")
pp.pprint(W_list)
if b_source is not None:
print("bias:")
pp.pprint(b_list)
if isplot:
if W_source is not None:
print("weight {}:".format(W_source))
plot_matrices(W_list)
if b_source is not None:
print("bias {}:".format(b_source))
plot_matrices(b_list)
return W_list, b_list
def split_to_model_ensemble(self, mode = "standardize"):
num_models = self.struct_param[-1][0]
model_core = deepcopy(self)
if mode == "standardize":
last_layer = getattr(model_core, "layer_{}".format(model_core.num_layers - 1))
last_layer.standardize(mode = "b_mean_zero")
else:
raise Exception("mode {} not recognized!".format(mode))
model_list = [deepcopy(model_core) for i in range(num_models)]
for i, model in enumerate(model_list):
to_prune = list(range(num_models))
to_prune.pop(i)
model.prune_neurons(-1, to_prune)
return construct_model_ensemble_from_nets(model_list)
@property
def model_dict(self):
model_dict = {"type": self.__class__.__name__}
model_dict["input_size"] = self.input_size
model_dict["struct_param"] = get_full_struct_param(self.struct_param, self.settings)
model_dict["weights"], model_dict["bias"] = self.get_weights_bias(W_source = "core", b_source = "core")
model_dict["settings"] = deepcopy(self.settings)
model_dict["net_type"] = self.__class__.__name__
return model_dict
@property
def DL(self):
return np.sum([getattr(self, "layer_{}".format(i)).DL for i in range(self.num_layers)])
def load_model_dict(self, model_dict):
new_net = load_model_dict_net(model_dict, is_cuda = self.is_cuda)
self.__dict__.update(new_net.__dict__)
def load(self, filename):
mode = "json" if filename.endswith(".json") else "pickle"
model_dict = load_model(filename, mode=mode)
self.load_model_dict(model_dict)
def save(self, filename):
mode = "json" if filename.endswith(".json") else "pickle"
save_model(self.model_dict, filename, mode=mode)
def get_loss(self, input, target, criterion, **kwargs):
y_pred = self(input, **kwargs)
return criterion(y_pred, target)
def prepare_inspection(self, X, y, **kwargs):
return {}
def set_cuda(self, is_cuda):
for k in range(self.num_layers):
getattr(self, "layer_{}".format(k)).set_cuda(is_cuda)
self.is_cuda = is_cuda
def set_trainable(self, is_trainable):
for k in range(self.num_layers):
getattr(self, "layer_{}".format(k)).set_trainable(is_trainable)
def get_snap_dict(self):
snap_dict = {}
for k in range(len(self.struct_param)):
layer = getattr(self, "layer_{}".format(k))
if hasattr(layer, "snap_dict"):
recorded_layer_snap_dict = {}
for key, item in layer.snap_dict.items():
recorded_layer_snap_dict[key] = {"new_value": item["new_value"]}
if len(recorded_layer_snap_dict) > 0:
snap_dict[k] = recorded_layer_snap_dict
return snap_dict
def synchronize_settings(self):
snap_dict = self.get_snap_dict()
if len(snap_dict) > 0:
self.settings["snap_dict"] = snap_dict
return self.settings
def get_sympy_expression(self, verbose = True):
expressions = {i: {} for i in range(self.num_layers)}
for i in range(self.num_layers):
layer = getattr(self, "layer_{}".format(i))
if layer.struct_param[1] == "Symbolic_Layer":
if verbose:
print("Layer {}, symbolic_expression: {}".format(i, layer.symbolic_expression))
print(" numerical_expression: {}".format(layer.numerical_expression))
expressions[i]["symbolic_expression"] = layer.symbolic_expression
expressions[i]["numerical_expression"] = layer.numerical_expression
expressions[i]["param_dict"] = layer.get_param_dict()
expressions[i]["DL"] = layer.DL
else:
if verbose:
print("Layer {} is not a symbolic layer.".format(i))
expressions[i] = None
return expressions
# ## Labelmix_MLP:
# In[ ]:
class Labelmix_MLP(nn.Module):
    """MLP whose hidden activations are modulated by a subset of "label" inputs.

    The input columns in ``idx_label`` are split off and, at every layer, used to
    compute per-neuron multiplicative (A_mul) and additive (A_add) modulations of
    the main pathway's activations (FiLM-style conditioning).
    """
    def __init__(
        self,
        input_size,
        struct_param,
        idx_label=None,
        is_cuda=False,
    ):
        super(Labelmix_MLP, self).__init__()
        self.input_size = input_size
        self.struct_param = struct_param
        self.num_layers = len(struct_param)
        # If every column is a label there is nothing to modulate; treat as no labels:
        if idx_label is not None and len(idx_label) == input_size:
            idx_label = None
        if idx_label is not None:
            self.idx_label = torch.LongTensor(idx_label)
            idx_main = list(set(range(input_size)) - set(to_np_array(idx_label).astype(int).tolist()))
            self.idx_main = torch.LongTensor(idx_main)
        else:
            self.idx_label = None
            self.idx_main = torch.LongTensor(list(range(input_size)))
        num_neurons_prev = len(self.idx_main)
        for i, layer_struct_param in enumerate(struct_param):
            num_neurons = layer_struct_param[0]
            # Main pathway parameters for layer i:
            setattr(self, "W_{}_main".format(i), nn.Parameter(torch.randn(num_neurons_prev, num_neurons)))
            setattr(self, "b_{}_main".format(i), nn.Parameter(torch.zeros(num_neurons)))
            init_weight(getattr(self, "W_{}_main".format(i)), init=None)
            num_neurons_prev = num_neurons
            if self.idx_label is not None:
                # Label-conditioned multiplicative/additive modulation parameters:
                setattr(self, "W_{}_mul".format(i), nn.Parameter(torch.randn(len(self.idx_label), num_neurons)))
                setattr(self, "W_{}_add".format(i), nn.Parameter(torch.randn(len(self.idx_label), num_neurons)))
                init_weight(getattr(self, "W_{}_mul".format(i)), init=None)
                init_weight(getattr(self, "W_{}_add".format(i)), init=None)
                setattr(self, "b_{}_mul".format(i), nn.Parameter(torch.zeros(num_neurons)))
                setattr(self, "b_{}_add".format(i), nn.Parameter(torch.zeros(num_neurons)))
        self.set_cuda(is_cuda)

    def forward(self, input):
        """Forward pass; input is split into main columns and label columns."""
        output = input[:, self.idx_main]
        if self.idx_label is not None:
            labels = input[:, self.idx_label]
        for i, layer_struct_param in enumerate(self.struct_param):
            output = torch.matmul(output, getattr(self, "W_{}_main".format(i))) + getattr(self, "b_{}_main".format(i))
            if "activation" in layer_struct_param[2]:
                output = get_activation(layer_struct_param[2]["activation"])(output)
            if self.idx_label is not None:
                A_mul = torch.matmul(labels, getattr(self, "W_{}_mul".format(i))) + getattr(self, "b_{}_mul".format(i))
                A_add = torch.matmul(labels, getattr(self, "W_{}_add".format(i))) + getattr(self, "b_{}_add".format(i))
                output = output * A_mul + A_add
        return output

    def get_loss(self, X, y, criterion, **kwargs):
        """Forward X and evaluate criterion(prediction, y)."""
        y_pred = self(X)
        return criterion(y_pred, y)

    def set_cuda(self, is_cuda):
        """Move to the given device; is_cuda may be a bool or a device string."""
        if isinstance(is_cuda, str):
            self.cuda(is_cuda)
        else:
            if is_cuda:
                self.cuda()
            else:
                self.cpu()
        self.is_cuda = is_cuda

    def get_regularization(self, source = ["weight", "bias"], mode = "L1", **kwargs):
        # NOTE(review): always returns zero — no regularization is implemented
        # for this architecture; confirm that is intended.
        reg = to_Variable([0], is_cuda=self.is_cuda)
        return reg

    @property
    def model_dict(self):
        """Serializable description (architecture + state_dict moved to CPU)."""
        model_dict = {"type": "Labelmix_MLP"}
        model_dict["input_size"] = self.input_size
        model_dict["struct_param"] = self.struct_param
        if self.idx_label is not None:
            model_dict["idx_label"] = to_np_array(self.idx_label).astype(int)
        model_dict["state_dict"] = to_cpu_recur(self.state_dict())
        return model_dict
# ## Multi_MLP (MLPs in series):
# In[ ]:
class Multi_MLP(nn.Module):
    """Several MLP blocks applied in series; each block_{i} is a full MLP.

    ``struct_param`` is a list of per-block struct_params; block i's input size
    is the previous block's last-layer output size.
    """
    def __init__(
        self,
        input_size,
        struct_param,
        W_init_list = None, # initialization for weights
        b_init_list = None, # initialization for bias
        settings = None, # Default settings for each layer, if the settings for the layer is not provided in struct_param
        is_cuda = False,
    ):
        super(Multi_MLP, self).__init__()
        self.input_size = input_size
        self.num_layers = len(struct_param)
        self.W_init_list = W_init_list
        self.b_init_list = b_init_list
        self.settings = deepcopy(settings)
        self.num_blocks = len(struct_param)
        self.is_cuda = is_cuda
        for i, struct_param_ele in enumerate(struct_param):
            # Chain the blocks: block i consumes block (i-1)'s output size:
            input_size_block = input_size if i == 0 else struct_param[i - 1][-1][0]
            setattr(self, "block_{0}".format(i), MLP(input_size = input_size_block,
                                                     struct_param = struct_param_ele,
                                                     W_init_list = W_init_list[i] if W_init_list is not None else None,
                                                     b_init_list = b_init_list[i] if b_init_list is not None else None,
                                                     settings = self.settings[i] if self.settings is not None else {},
                                                     is_cuda = self.is_cuda,
                                                     ))

    def forward(self, input):
        """Apply all blocks in sequence."""
        output = input
        for i in range(self.num_blocks):
            output = getattr(self, "block_{0}".format(i))(output)
        return output

    def get_loss(self, input, target, criterion, **kwargs):
        """Forward the input and evaluate criterion(prediction, target)."""
        y_pred = self(input, **kwargs)
        return criterion(y_pred, target)

    def get_regularization(self, source = ["weight", "bias"], mode = "L1", **kwargs):
        """Sum the blocks' regularization terms."""
        reg = Variable(torch.FloatTensor([0]), requires_grad = False)
        if self.is_cuda:
            reg = reg.cuda()
        for i in range(self.num_blocks):
            reg = reg + getattr(self, "block_{0}".format(i)).get_regularization(mode = mode, source = source)
        return reg

    @property
    def struct_param(self):
        """Per-block struct_params, re-read from the live blocks."""
        return [getattr(self, "block_{0}".format(i)).struct_param for i in range(self.num_blocks)]

    @property
    def model_dict(self):
        """Serializable description sufficient to reconstruct the net."""
        model_dict = {"type": self.__class__.__name__}
        model_dict["input_size"] = self.input_size
        model_dict["struct_param"] = self.struct_param
        model_dict["weights"], model_dict["bias"] = self.get_weights_bias(W_source = "core", b_source = "core")
        model_dict["settings"] = deepcopy(self.settings)
        model_dict["net_type"] = self.__class__.__name__
        return model_dict

    def load_model_dict(self, model_dict):
        """Rebuild from a model_dict in place."""
        new_net = load_model_dict_Multi_MLP(model_dict, is_cuda = self.is_cuda)
        self.__dict__.update(new_net.__dict__)

    def load(self, filename):
        """Load a saved model_dict (json or pickle, by file extension)."""
        mode = "json" if filename.endswith(".json") else "pickle"
        model_dict = load_model(filename, mode=mode)
        self.load_model_dict(model_dict)

    def save(self, filename):
        """Save the model_dict (json or pickle, by file extension)."""
        mode = "json" if filename.endswith(".json") else "pickle"
        save_model(self.model_dict, filename, mode=mode)

    def get_weights_bias(self, W_source = "core", b_source = "core"):
        """Collect each block's (weights, biases); returns (W_list, b_list) deep-copied."""
        W_list = []
        b_list = []
        for i in range(self.num_blocks):
            W, b = getattr(self, "block_{0}".format(i)).get_weights_bias(W_source = W_source, b_source = b_source)
            W_list.append(W)
            b_list.append(b)
        return deepcopy(W_list), deepcopy(b_list)

    def prepare_inspection(self, X, y, **kwargs):
        """Hook for trainers; this implementation records nothing."""
        return {}

    def set_cuda(self, is_cuda):
        """Move all blocks to/from CUDA and record the flag."""
        for i in range(self.num_blocks):
            getattr(self, "block_{0}".format(i)).set_cuda(is_cuda)
        self.is_cuda = is_cuda

    def set_trainable(self, is_trainable):
        """Enable/disable gradient training for all blocks."""
        for i in range(self.num_blocks):
            getattr(self, "block_{0}".format(i)).set_trainable(is_trainable)
# ## Branching_Net:
# In[ ]:
class Branching_Net(nn.Module):
    """An MLP that consists of a base network, and net_1 and net_2 that branches off from the output of the base network."""
    def __init__(
        self,
        net_base_model_dict,
        net_1_model_dict,
        net_2_model_dict,
        is_cuda = False,
    ):
        super(Branching_Net, self).__init__()
        # All three sub-nets are reconstructed from serialized model_dicts:
        self.net_base = load_model_dict(net_base_model_dict, is_cuda = is_cuda)
        self.net_1 = load_model_dict(net_1_model_dict, is_cuda = is_cuda)
        self.net_2 = load_model_dict(net_2_model_dict, is_cuda = is_cuda)
        self.info_dict = {}

    def forward(self, X, **kwargs):
        """Encode X, max-pool over dim 0, then feed the shared code to both heads."""
        shared = self.net_base(X)
        # Max over the first dimension (keepdim), so both heads see one pooled vector:
        shared = shared.max(0, keepdim = True)[0]
        # NOTE(review): the trailing [0] drops the leading dim of each head's
        # output — presumably returning unbatched vectors; confirm against callers.
        return self.net_1(shared)[0], self.net_2(shared)[0]

    def get_regularization(self, source = ["weights", "bias"], mode = "L1"):
        # NOTE(review): default source uses "weights" here while sibling classes
        # use "weight" — confirm which spelling the sub-nets expect.
        reg = self.net_base.get_regularization(source = source, mode = mode) + self.net_1.get_regularization(source = source, mode = mode) + self.net_2.get_regularization(source = source, mode = mode)
        return reg

    def set_trainable(self, is_trainable):
        """Enable/disable gradient training for all three sub-nets."""
        self.net_base.set_trainable(is_trainable)
        self.net_1.set_trainable(is_trainable)
        self.net_2.set_trainable(is_trainable)

    def prepare_inspection(self, X, y, **kwargs):
        """Return a copy of the accumulated info_dict."""
        return deepcopy(self.info_dict)

    @property
    def model_dict(self):
        """Serializable description built from the sub-nets' model_dicts."""
        model_dict = {"type": "Branching_Net"}
        model_dict["net_base_model_dict"] = self.net_base.model_dict
        model_dict["net_1_model_dict"] = self.net_1.model_dict
        model_dict["net_2_model_dict"] = self.net_2.model_dict
        return model_dict

    def load(self, filename):
        # NOTE(review): this calls self.load_model_dict, which is not defined on
        # this class — confirm it is provided elsewhere, otherwise load() raises
        # AttributeError.
        mode = "json" if filename.endswith(".json") else "pickle"
        model_dict = load_model(filename, mode=mode)
        self.load_model_dict(model_dict)

    def save(self, filename):
        """Save the model_dict (json or pickle, by file extension)."""
        mode = "json" if filename.endswith(".json") else "pickle"
        save_model(self.model_dict, filename, mode=mode)
class Fan_in_MLP(nn.Module):
    """Two input branches whose (broadcast) outputs are concatenated and fed to a joint net."""
    def __init__(
        self,
        model_dict_branch1,
        model_dict_branch2,
        model_dict_joint,
        is_cuda=False,
    ):
        super(Fan_in_MLP, self).__init__()
        # Either branch may be None, in which case its input passes through unchanged:
        if model_dict_branch1 is not None:
            self.net_branch1 = load_model_dict(model_dict_branch1, is_cuda=is_cuda)
        else:
            self.net_branch1 = None
        if model_dict_branch2 is not None:
            self.net_branch2 = load_model_dict(model_dict_branch2, is_cuda=is_cuda)
        else:
            self.net_branch2 = None
        self.net_joint = load_model_dict(model_dict_joint, is_cuda=is_cuda)
        self.is_cuda = is_cuda
        self.info_dict = {}

    def forward(self, X1, X2, is_outer=False):
        """Process both branches, broadcast them together, and run the joint net.

        With is_outer=True, X2 gains a new axis so every X2 row pairs with every X1 row.
        """
        if is_outer:
            X2 = X2[...,None,:]
        if self.net_branch1 is not None:
            X1 = self.net_branch1(X1)
        if self.net_branch2 is not None:
            X2 = self.net_branch2(X2)
        X1, X2 = broadcast_all(X1, X2)
        out = torch.cat([X1, X2], -1)
        # if is_outer=True, then output dimension: [..., X2dim, X1dim, out_dim]:
        return self.net_joint(out).squeeze(-1)

    def get_loss(self, input, target, criterion, **kwargs):
        """input is the pair (X1, X2); evaluates criterion(prediction, target)."""
        X1, X2 = input
        y_pred = self(X1, X2)
        return criterion(y_pred, target)

    def get_regularization(self, source = ["weight", "bias"], mode = "L1", **kwargs):
        # NOTE(review): only the two branch nets are regularized; net_joint is
        # omitted — confirm whether that is intentional.
        reg = Variable(torch.FloatTensor([0]), requires_grad = False)
        if self.is_cuda:
            reg = reg.cuda()
        if self.net_branch1 is not None:
            reg = reg + self.net_branch1.get_regularization(source=source, mode=mode)
        if self.net_branch2 is not None:
            reg = reg + self.net_branch2.get_regularization(source=source, mode=mode)
        return reg

    def prepare_inspection(self, X, y, **kwargs):
        """Return a copy of the accumulated info_dict."""
        return deepcopy(self.info_dict)

    @property
    def model_dict(self):
        """Serializable description built from the sub-nets' model_dicts."""
        model_dict = {'type': self.__class__.__name__}
        model_dict["model_dict_branch1"] = self.net_branch1.model_dict if self.net_branch1 is not None else None
        model_dict["model_dict_branch2"] = self.net_branch2.model_dict if self.net_branch2 is not None else None
        model_dict["model_dict_joint"] = self.net_joint.model_dict
        return model_dict

    def load(self, filename):
        # NOTE(review): relies on self.load_model_dict, which is not defined on
        # this class — confirm it is provided elsewhere before using load().
        mode = "json" if filename.endswith(".json") else "pickle"
        model_dict = load_model(filename, mode=mode)
        self.load_model_dict(model_dict)

    def save(self, filename):
        """Save the model_dict (json or pickle, by file extension)."""
        mode = "json" if filename.endswith(".json") else "pickle"
        save_model(self.model_dict, filename, mode=mode)
# ## Mixture_Model:
# In[ ]:
class Mixture_Model(nn.Module):
    """A mixture of component models plus a gating network producing mixture logits.

    Components are stored as attributes ``model_{i}``; ``forward`` returns the
    stacked component outputs together with the gating network's logits.
    """
    def __init__(
        self,
        model_dict_list,
        weight_logits_model_dict,
        num_components,
        is_cuda=False,
    ):
        super(Mixture_Model, self).__init__()
        self.num_components = num_components
        # model_dict_list is either a list (one dict per component) or a single
        # dict shared by every component:
        for idx in range(self.num_components):
            if isinstance(model_dict_list, list):
                component_dict = model_dict_list[idx]
            else:
                assert isinstance(model_dict_list, dict)
                component_dict = model_dict_list
            setattr(self, "model_{}".format(idx), load_model_dict(component_dict, is_cuda=is_cuda))
        self.weight_logits_model = load_model_dict(weight_logits_model_dict, is_cuda=is_cuda)
        self.is_cuda = is_cuda

    def forward(self, input):
        """Return (component outputs stacked on the last dim, gating logits)."""
        component_outputs = [
            getattr(self, "model_{}".format(idx))(input)
            for idx in range(self.num_components)
        ]
        stacked = torch.stack(component_outputs, -1)
        return stacked, self.weight_logits_model(input)

    @property
    def model_dict(self):
        """Serializable description of the mixture and its gating network."""
        return {
            "type": "Mixture_Model",
            "model_dict_list": [getattr(self, "model_{}".format(idx)).model_dict
                                for idx in range(self.num_components)],
            "weight_logits_model_dict": self.weight_logits_model.model_dict,
            "num_components": self.num_components,
        }
# ## Model_Ensemble:
# In[ ]:
class Model_Ensemble(nn.Module):
"""Model_Ensemble is a collection of models with the same architecture
but independent parameters"""
def __init__(
self,
num_models,
input_size,
struct_param,
W_init_list = None,
b_init_list = None,
settings = None,
net_type = "MLP",
is_cuda = False,
):
super(Model_Ensemble, self).__init__()
self.num_models = num_models
self.input_size = input_size
self.net_type = net_type
self.is_cuda = is_cuda
for i in range(self.num_models):
if settings is None:
settings_model = {}
elif isinstance(settings, list) or isinstance(settings, tuple):
settings_model = settings[i]
else:
settings_model = settings
if isinstance(struct_param, tuple):
struct_param_model = struct_param[i]
else:
struct_param_model = struct_param
if net_type == "MLP":
net = MLP(input_size = self.input_size,
struct_param = deepcopy(struct_param_model),
W_init_list = deepcopy(W_init_list[i]) if W_init_list is not None else None,
b_init_list = deepcopy(b_init_list[i]) if b_init_list is not None else None,
settings = deepcopy(settings_model),
is_cuda = is_cuda,
)
elif net_type == "ConvNet":
net = ConvNet(input_channels = self.input_size,
struct_param = deepcopy(struct_param_model),
settings = deepcopy(settings_model),
is_cuda = is_cuda,
)
else:
raise Exception("Net_type {0} not recognized!".format(net_type))
setattr(self, "model_{0}".format(i), net)
@property
def struct_param(self):
return tuple(getattr(self, "model_{0}".format(i)).struct_param for i in range(self.num_models))
@property
def settings(self):
return [getattr(self, "model_{0}".format(i)).settings for i in range(self.num_models)]
def get_all_models(self):
return [getattr(self, "model_{0}".format(i)) for i in range(self.num_models)]
def init_bias_with_input(self, input, mode = "std_sqrt", neglect_last_layer = True):
for i in range(self.num_models):
model = getattr(self, "model_{0}".format(i))
model.init_bias_with_input(input, mode = mode, neglect_last_layer = neglect_last_layer)
def initialize_param_freeze(self, update_values = True):
for i in range(self.num_models):
model = getattr(self, "model_{0}".format(i))
model.initialize_param_freeze(update_values = update_values)
def apply_model(self, input, model_id):
return fetch_model(self, model_id)(input)
def fetch_model(self, model_id):
return getattr(self, "model_{0}".format(model_id))
def set_trainable(self, is_trainable):
for i in range(self.num_models):
getattr(self, "model_{0}".format(i)).set_trainable(is_trainable)
def forward(self, input):
output_list = []
for i in range(self.num_models):
if self.net_type == "MLP":
output = getattr(self, "model_{0}".format(i))(input)
elif self.net_type == "ConvNet":
output = getattr(self, "model_{0}".format(i))(input)[0]
else:
raise Exception("Net_type {0} not recognized!".format(self.net_type))
output_list.append(output)
return torch.stack(output_list, 1)
def get_regularization(self, source = ["weight", "bias"], mode = "L1", **kwargs):
if not isinstance(source, list):
source = [source]
reg = Variable(torch.FloatTensor([0]), requires_grad = False)
if self.is_cuda:
reg = reg.cuda()
model0 = self.model_0
# Elastic_weight_reg:
if "elastic_weight" in source or "elastic_bias" in source:
# Setting up excluded layer:
excluded_layer = kwargs["excluded_layer"] if "excluded_layer" in kwargs else [-1]
if not isinstance(excluded_layer, list):
excluded_layer = [excluded_layer]
excluded_layer = [element + model0.num_layers if element < 0 else element for element in excluded_layer]
elastic_mode = kwargs["elastic_mode"] if "elastic_mode" in kwargs else "var"
# Compute the elastic_weight_reg:
for k in range(model0.num_layers):
if k in excluded_layer:
continue
W_accum_k = []
b_accum_k = []
num_neurons_prev = model0.struct_param[k - 1][0] if k > 0 else self.input_size
num_neurons = model0.struct_param[k][0]
for i in range(self.num_models):
model = getattr(self, "model_{0}".format(i))
assert model0.num_layers == model.num_layers
assert num_neurons_prev == model.struct_param[k - 1][0] if k > 0 else model.input_size, "all models' input/output size at each layer must be identical!"
assert num_neurons == model.struct_param[k][0], "all models' input/output size at each layer must be identical!"
layer_k = getattr(model, "layer_{0}".format(k))
if "elastic_weight" in source:
W_accum_k.append(layer_k.W_core)
if "elastic_bias" in source:
b_accum_k.append(layer_k.b_core)
if "elastic_weight" in source:
if elastic_mode == "var":
reg = reg + torch.stack(W_accum_k, -1).var(-1).sum()
elif elastic_mode == "std":
reg = reg + torch.stack(W_accum_k, -1).std(-1).sum()
else:
raise
if "elastic_bias" in source:
if elastic_mode == "var":
reg = reg + torch.stack(b_accum_k, -1).var(-1).sum()
elif elastic_mode == "std":
reg = reg + torch.stack(b_accum_k, -1).std(-1).sum()
else:
raise
source_core = deepcopy(source)
if "elastic_weight" in source_core:
source_core.remove("elastic_weight")
if "elastic_bias" in source_core:
source_core.remove("elastic_bias")
else:
source_core = source
# Other regularizations:
for k in range(self.num_models):
reg = reg + getattr(self, "model_{0}".format(k)).get_regularization(source = source_core, mode = mode, **kwargs)
return reg
def get_weights_bias(self, W_source = None, b_source = None, verbose = False, isplot = False):
W_list_dict = {}
b_list_dict = {}
for i in range(self.num_models):
if verbose:
print("\nmodel {0}:".format(i))
W_list_dict[i], b_list_dict[i] = getattr(self, "model_{0}".format(i)).get_weights_bias(
W_source = W_source, b_source = b_source, verbose = verbose, isplot = isplot)
return W_list_dict, b_list_dict
def combine_to_net(self, mode = "mean", last_layer_mode = "concatenate"):
model0 = self.model_0
if mode == "mean":
struct_param = deepcopy(model0.struct_param)
settings = deepcopy(model0.settings)
W_init_list = []
b_init_list = []
for k in range(model0.num_layers):
num_neurons_prev = model0.struct_param[k - 1][0] if k > 0 else self.input_size
num_neurons = model0.struct_param[k][0]
W_accum_k = []
b_accum_k = []
for i in range(self.num_models):
model = getattr(self, "model_{0}".format(i))
assert model0.num_layers == model.num_layers
assert num_neurons_prev == model.struct_param[k - 1][0] if k > 0 else model.input_size, "If mode == 'mean', all models' input/output size at each layer must be identical!"
assert num_neurons == model.struct_param[k][0], "If mode == 'mean', all models' input/output size at each layer must be identical!"
layer_k = getattr(model, "layer_{0}".format(k))
W_accum_k.append(layer_k.W_core)
b_accum_k.append(layer_k.b_core)
if k == model0.num_layers - 1:
current_mode = last_layer_mode
else:
current_mode = mode
if current_mode == "mean":
W_accum_k = torch.stack(W_accum_k, -1).mean(-1)
b_accum_k = torch.stack(b_accum_k, -1).mean(-1)
elif current_mode == "concatenate":
W_accum_k = torch.cat(W_accum_k, -1)
b_accum_k = torch.cat(b_accum_k, -1)
struct_param[-1][0] = sum([self.struct_param[i][-1][0] for i in range(self.num_models)])
else:
raise Exception("mode {0} not recognized!".format(last_layer_mode))
W_init_list.append(W_accum_k.data.numpy())
b_init_list.append(b_accum_k.data.numpy())
# Build the net:
net = MLP(input_size = self.input_size,
struct_param = struct_param,
W_init_list = W_init_list,
b_init_list = b_init_list,
settings = settings,
)
else:
raise Exception("mode {0} not recognized!".format(mode))
return net
def remove_models(self, model_ids):
if not isinstance(model_ids, list):
model_ids = [model_ids]
model_list = []
k = 0
for i in range(self.num_models):
if i not in model_ids:
if k != i:
setattr(self, "model_{0}".format(k), getattr(self, "model_{0}".format(i)))
k += 1
num_models_new = k
for i in range(num_models_new, self.num_models):
delattr(self, "model_{0}".format(i))
self.num_models = num_models_new
def add_models(self, models):
if not isinstance(models, list):
models = [models]
for i, model in enumerate(models):
setattr(self, "model_{0}".format(i + self.num_models), model)
self.num_models += len(models)
def simplify(self, X, y, idx, mode = "full", validation_data = None, isplot = False, **kwargs):
def process_idx(idx):
idx = idx.byte()
if len(idx.size()) == 1:
idx = idx.unqueeze(1)
if idx.size(1) == 1:
idx = idx.repeat(1, self.num_models)
return idx
idx = process_idx(idx)
if validation_data is not None:
X_valid, y_valid, idx_valid = validation_data
idx_valid = process_idx(idx_valid)
loss_dict = {}
for i in range(self.num_models):
model = getattr(self, "model_{0}".format(i))
X_chosen = torch.masked_select(X, idx[:, i:i+1]).view(-1, X.size(1))
y_chosen = torch.masked_select(y, idx[:, i:i+1]).view(-1, y.size(1))
if validation_data is not None:
X_valid_chosen = torch.masked_select(X_valid, idx_valid[:, i:i+1]).view(-1, X_valid.size(1))
y_valid_chosen = torch.masked_select(y_valid, idx_valid[:, i:i+1]).view(-1, y_valid.size(1))
if len(X_valid_chosen) == 0:
validation_data_chosen = None
else:
validation_data_chosen = (X_valid_chosen, y_valid_chosen)
else:
validation_data_chosen = None
if len(X_chosen) == 0:
print("The {0}'th model has no corresponding data to simplify with, skip.".format(i))
else:
new_model, loss_dict["model_{0}".format(i)] = simplify(model, X_chosen, y_chosen, mode = mode, validation_data = validation_data_chosen, isplot = isplot, target_name = "model_{0}".format(i), **kwargs)
setattr(self, "model_{0}".format(i), new_model)
return loss_dict
def get_sympy_expression(self):
    """Collect the sympy expression of every sub-model, keyed by its name."""
    expressions = {}
    for idx in range(self.num_models):
        name = "model_{0}".format(idx)
        print("\nmodel {0}:".format(idx))
        expressions[name] = getattr(self, name).get_sympy_expression()
    return expressions
@property
def DL(self):
    """Total description length: the sum of every sub-model's DL."""
    lengths = [getattr(self, "model_{0}".format(k)).DL for k in range(self.num_models)]
    return np.sum(lengths)
def get_weights_bias(self, W_source = None, b_source = None, verbose = False, isplot = False):
    """Gather (weights, biases) from every sub-model, keyed by model index."""
    weights, biases = {}, {}
    for k in range(self.num_models):
        if verbose:
            print("\nmodel {0}:".format(k))
        sub_model = getattr(self, "model_{0}".format(k))
        weights[k], biases[k] = sub_model.get_weights_bias(W_source = W_source, b_source = b_source, verbose = verbose, isplot = isplot)
    return weights, biases
@property
def model_dict(self):
    """Serializable description of the whole ensemble."""
    spec = {"type": "Model_Ensemble"}
    for k in range(self.num_models):
        name = "model_{0}".format(k)
        spec[name] = getattr(self, name).model_dict
    spec["input_size"] = self.input_size
    spec["struct_param"] = self.struct_param
    spec["num_models"] = self.num_models
    spec["net_type"] = self.net_type
    return spec
def load_model_dict(self, model_dict):
    """Rebuild the ensemble from model_dict (via the module-level loader)
    and adopt the rebuilt state in place."""
    rebuilt = load_model_dict(model_dict, is_cuda = self.is_cuda)
    self.__dict__.update(rebuilt.__dict__)
def load(self, filename):
    """Load the model from filename (.json extension -> json mode, else pickle)."""
    if filename.endswith(".json"):
        mode = "json"
    else:
        mode = "pickle"
    self.load_model_dict(load_model(filename, mode=mode))
def save(self, filename):
    """Save self.model_dict to filename (.json extension -> json mode, else pickle)."""
    if filename.endswith(".json"):
        mode = "json"
    else:
        mode = "pickle"
    save_model(self.model_dict, filename, mode=mode)
def load_model_dict_model_ensemble(model_dict, is_cuda = False):
    """Reconstruct a Model_Ensemble from its serialized model_dict."""
    num_models = sum(1 for key in model_dict if key[:6] == "model_")
    # Deep-copy each sub-model's spec so the ensemble does not alias model_dict:
    subs = [deepcopy(model_dict["model_{0}".format(i)]) for i in range(num_models)]
    return Model_Ensemble(
        num_models = num_models,
        input_size = model_dict["input_size"],
        struct_param = tuple(sub["struct_param"] for sub in subs),
        W_init_list = [sub["weights"] for sub in subs],
        b_init_list = [sub["bias"] for sub in subs],
        settings = [sub["settings"] for sub in subs],
        net_type = model_dict["net_type"] if "net_type" in model_dict else "MLP",
        is_cuda = is_cuda,
    )
def combine_model_ensembles(model_ensembles, input_size):
    """Merge every ensemble whose input_size matches into a single ensemble.

    A deep copy of the first matching ensemble becomes the base, and the
    sub-models of every subsequent matching ensemble are appended to it.

    Args:
        model_ensembles: list of Model_Ensemble-like objects.
        input_size: only ensembles with this input_size are merged.

    Returns:
        The combined ensemble, or None if no ensemble matches input_size.
    """
    model_ensembles = deepcopy(model_ensembles)
    model_ensemble_combined = None
    model_id = 0
    for model_ensemble in model_ensembles:
        if model_ensemble.input_size != input_size:
            continue
        if model_ensemble_combined is None:
            model_ensemble_combined = model_ensemble
        # Bug fix: the original did `else: continue` here, which skipped every
        # matching ensemble after the first, so nothing was ever combined.
        for i in range(model_ensemble.num_models):
            setattr(model_ensemble_combined, "model_{0}".format(model_id),
                    getattr(model_ensemble, "model_{0}".format(i)))
            model_id += 1
    # Robustness: previously this crashed with AttributeError when no
    # ensemble matched; now it returns None.
    if model_ensemble_combined is not None:
        model_ensemble_combined.num_models = model_id
    return model_ensemble_combined
def construct_model_ensemble_from_nets(nets):
    """Bundle a list of nets (all sharing one input_size) into a Model_Ensemble.

    Args:
        nets: list of net objects exposing input_size, struct_param, is_cuda.

    Returns:
        A Model_Ensemble whose model_i attributes are the given nets, cuda-enabled
        if any net is; None when nets is empty.

    Raises:
        Exception: if the nets disagree on input_size.
    """
    num_models = len(nets)
    # Bug fix: the original tested `num_models is None`, but len() never
    # returns None, so an empty list crashed at nets[0] below.
    if num_models == 0:
        return None
    input_size = nets[0].input_size
    struct_param = tuple(net.struct_param for net in nets)
    is_cuda = False
    for net in nets:
        if net.input_size != input_size:
            raise Exception("The input_size for all nets must be the same!")
        if net.is_cuda:
            is_cuda = True
    model_ensemble = Model_Ensemble(num_models = num_models, input_size = input_size, struct_param = struct_param, is_cuda = is_cuda)
    for i, net in enumerate(nets):
        setattr(model_ensemble, "model_{0}".format(i), net)
    return model_ensemble
# In[ ]:
class Model_with_uncertainty(nn.Module):
    """Pairs a prediction network with a log-std network, so the model outputs
    both a prediction and a per-example uncertainty estimate."""

    def __init__(
        self,
        model_pred,
        model_logstd,
    ):
        super(Model_with_uncertainty, self).__init__()
        self.model_pred = model_pred
        self.model_logstd = model_logstd

    def forward(self, input, noise_amp = None, **kwargs):
        """Return (prediction, log_std); noise_amp is forwarded to the predictor only."""
        prediction = self.model_pred(input, noise_amp = noise_amp, **kwargs)
        log_std = self.model_logstd(input, **kwargs)
        return prediction, log_std

    def get_loss(self, input, target, criterion, noise_amp = None, **kwargs):
        """Evaluate criterion(pred=..., target=..., log_std=...) on the two outputs."""
        prediction, log_std = self(input, noise_amp = noise_amp, **kwargs)
        return criterion(pred = prediction, target = target, log_std = log_std)

    def get_regularization(self, source = ["weight", "bias"], mode = "L1", **kwargs):
        """Sum of both sub-networks' regularization terms."""
        reg_pred = self.model_pred.get_regularization(source = source, mode = mode, **kwargs)
        reg_logstd = self.model_logstd.get_regularization(source = source, mode = mode, **kwargs)
        return reg_pred + reg_logstd

    @property
    def model_dict(self):
        """Serializable description of both sub-networks."""
        return {
            "type": "Model_with_Uncertainty",
            "model_pred": self.model_pred.model_dict,
            "model_logstd": self.model_logstd.model_dict,
        }

    def load(self, filename):
        """Load from file; format inferred from the extension (.json vs pickle)."""
        is_json = filename.endswith(".json")
        model_dict = load_model(filename, mode="json" if is_json else "pickle")
        self.load_model_dict(model_dict)

    def save(self, filename):
        """Save self.model_dict; format inferred from the extension."""
        is_json = filename.endswith(".json")
        save_model(self.model_dict, filename, mode="json" if is_json else "pickle")

    def set_cuda(self, is_cuda):
        for net in (self.model_pred, self.model_logstd):
            net.set_cuda(is_cuda)

    def set_trainable(self, is_trainable):
        for net in (self.model_pred, self.model_logstd):
            net.set_trainable(is_trainable)
# ## RNN:
# In[ ]:
class RNNCellBase(nn.Module):
    """Shared plumbing for RNN cells: repr formatting plus input/hidden shape checks."""

    def extra_repr(self):
        # Only surface bias / nonlinearity when they differ from the defaults.
        parts = ['{input_size}, {hidden_size}']
        if 'bias' in self.__dict__ and self.bias is not True:
            parts.append('bias={bias}')
        if 'nonlinearity' in self.__dict__ and self.nonlinearity != "tanh":
            parts.append('nonlinearity={nonlinearity}')
        return ', '.join(parts).format(**self.__dict__)

    def check_forward_input(self, input):
        """Raise RuntimeError unless input's feature dimension matches input_size."""
        if input.size(1) != self.input_size:
            raise RuntimeError(
                "input has inconsistent input_size: got {}, expected {}".format(
                    input.size(1), self.input_size))

    def check_forward_hidden(self, input, hx, hidden_label=''):
        """Raise RuntimeError unless hx matches input's batch size and hidden_size."""
        if input.size(0) != hx.size(0):
            raise RuntimeError(
                "Input batch size {} doesn't match hidden{} batch size {}".format(
                    input.size(0), hidden_label, hx.size(0)))
        if hx.size(1) != self.hidden_size:
            raise RuntimeError(
                "hidden{} has inconsistent hidden_size: got {}, expected {}".format(
                    hidden_label, hx.size(1), self.hidden_size))
# ### LSTM:
# In[ ]:
class LSTM(RNNCellBase):
    """LSTM built from explicit gate weights, whose final hidden state is fed
    through an MLP head (self.output_net) to produce the output."""
    def __init__(
        self,
        input_size,
        hidden_size,
        output_struct_param,
        output_settings = {},
        bias = True,
        is_cuda = False,
    ):
        """
        Args:
            input_size: number of input features per time step.
            hidden_size: size of the hidden state h and cell state c.
            output_struct_param: struct_param for the MLP head applied to the
                final hidden state.
            output_settings: settings dict for the MLP head.
                NOTE(review): mutable default dict is shared across calls.
            bias: whether the gates use bias terms b_ih, b_hh.
            is_cuda: False/True, or a device string such as "cuda:0".
        """
        super(LSTM, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.bias = bias
        # Gate weights for the input/forget/cell/output gates stacked along
        # the first dimension, hence the factor of 4:
        self.W_ih = nn.Parameter(torch.Tensor(4 * hidden_size, input_size))
        self.W_hh = nn.Parameter(torch.Tensor(4 * hidden_size, hidden_size))
        self.output_net = MLP(input_size = self.hidden_size, struct_param = output_struct_param, settings = output_settings, is_cuda = is_cuda)
        if bias:
            self.b_ih = nn.Parameter(torch.Tensor(4 * hidden_size))
            self.b_hh = nn.Parameter(torch.Tensor(4 * hidden_size))
        else:
            self.register_parameter('b_ih', None)
            self.register_parameter('b_hh', None)
        self.reset_parameters()
        self.is_cuda = is_cuda
        self.device = torch.device(self.is_cuda if isinstance(self.is_cuda, str) else "cuda" if self.is_cuda else "cpu")
        self.to(self.device)
    def reset_parameters(self):
        """Uniform init in [-1/sqrt(hidden_size), 1/sqrt(hidden_size)] for all parameters."""
        stdv = 1.0 / np.sqrt(self.hidden_size)
        for weight in self.parameters():
            weight.data.uniform_(-stdv, stdv)
    def forward_one_step(self, input, hx):
        """Advance one time step; hx is the (h, c) pair.

        NOTE(review): relies on the legacy `self._backend.LSTMCell`, which was
        removed from modern PyTorch — verify against the installed torch version.
        """
        self.check_forward_input(input)
        self.check_forward_hidden(input, hx[0], '[0]')
        self.check_forward_hidden(input, hx[1], '[1]')
        return self._backend.LSTMCell(
            input, hx,
            self.W_ih, self.W_hh,
            self.b_ih, self.b_hh,
        )
    def forward(self, input, hx = None):
        """Run the LSTM over a whole sequence and apply the MLP head.

        Args:
            input: tensor indexed as input[:, i] per step — assumes shape
                (batch, time, input_size); TODO confirm with callers.
            hx: optional (h, c) initial pair. If None, the initial state is
                drawn from a standard normal (note: random, not zeros).

        Returns:
            Output of self.output_net applied to the final hidden state.
        """
        if hx is None:
            hx = [torch.randn(input.size(0), self.hidden_size).to(self.device),
                  torch.randn(input.size(0), self.hidden_size).to(self.device),
                 ]
        hhx, ccx = hx
        for i in range(input.size(1)):
            hhx, ccx = self.forward_one_step(input[:, i], (hhx, ccx))
        output = self.output_net(hhx)
        return output
    def get_regularization(self, source, mode = "L1", **kwargs):
        """L1/L2 regularization over the requested sources ("weight" and/or
        "bias"), including the MLP head's own regularization."""
        if not isinstance(source, list):
            source = [source]
        reg = self.output_net.get_regularization(source = source, mode = mode)
        for source_ele in source:
            if source_ele == "weight":
                if mode == "L1":
                    reg = reg + self.W_ih.abs().sum() + self.W_hh.abs().sum()
                elif mode == "L2":
                    reg = reg + (self.W_ih ** 2).sum() + (self.W_hh ** 2).sum()
                else:
                    raise Exception("mode {0} not recognized!".format(mode))
            elif source_ele == "bias":
                # Bias terms only contribute when the cell was built with bias=True:
                if self.bias:
                    if mode == "L1":
                        reg = reg + self.b_ih.abs().sum() + self.b_hh.abs().sum()
                    elif mode == "L2":
                        reg = reg + (self.b_ih ** 2).sum() + (self.b_hh ** 2).sum()
                    else:
                        raise Exception("mode {0} not recognized!".format(mode))
            else:
                raise Exception("source {0} not recognized!".format(source_ele))
        return reg
    def get_weights_bias(self, W_source = None, b_source = None, verbose = False, isplot = False):
        """Collect gate and head weights/biases as numpy arrays when the
        corresponding source is "core"; optionally plot them."""
        W_dict = OrderedDict()
        b_dict = OrderedDict()
        W_o, b_o = self.output_net.get_weights_bias(W_source = W_source, b_source = b_source)
        if W_source == "core":
            W_dict["W_ih"] = self.W_ih.cpu().detach().numpy()
            W_dict["W_hh"] = self.W_hh.cpu().detach().numpy()
            W_dict["W_o"] = W_o
            if isplot:
                print("W_ih, W_hh:")
                plot_matrices([W_dict["W_ih"], W_dict["W_hh"]])
                print("W_o:")
                plot_matrices(W_o)
        if self.bias and b_source == "core":
            b_dict["b_ih"] = self.b_ih.cpu().detach().numpy()
            b_dict["b_hh"] = self.b_hh.cpu().detach().numpy()
            b_dict["b_o"] = b_o
            if isplot:
                print("b_ih, b_hh:")
                plot_matrices([b_dict["b_ih"], b_dict["b_hh"]])
                print("b_o:")
                plot_matrices(b_o)
        return W_dict, b_dict
    def get_loss(self, input, target, criterion, hx = None, **kwargs):
        """Apply criterion to the model prediction for `input` against `target`."""
        y_pred = self(input, hx = hx)
        return criterion(y_pred, target)
    def prepare_inspection(self, X, y, **kwargs):
        # No inspection metrics are recorded for the LSTM.
        return {}
    def load(self, filename):
        """Load from file; format inferred from the extension (.json vs pickle).

        NOTE(review): relies on self.load_model_dict, which is not defined on
        this class — confirm it is provided elsewhere.
        """
        mode = "json" if filename.endswith(".json") else "pickle"
        model_dict = load_model(filename, mode=mode)
        self.load_model_dict(model_dict)
    def save(self, filename):
        """Save self.model_dict; format inferred from the extension.

        NOTE(review): self.model_dict is not defined on this class — confirm
        it is provided elsewhere.
        """
        mode = "json" if filename.endswith(".json") else "pickle"
        save_model(self.model_dict, filename, mode=mode)
# ## Wide ResNet:
# In[ ]:
def conv3x3(in_planes, out_planes, stride=1):
    """3x3 convolution with padding 1 and bias (spatial size preserved at stride 1)."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=True,
    )
def conv_init(m):
    """Initializer for use with Module.apply(): xavier-uniform weights + zero
    bias for conv layers, unit weight + zero bias for batch-norm layers."""
    name = type(m).__name__
    if 'Conv' in name:
        init.xavier_uniform_(m.weight, gain=np.sqrt(2))
        init.constant_(m.bias, 0)
    elif 'BatchNorm' in name:
        init.constant_(m.weight, 1)
        init.constant_(m.bias, 0)
class wide_basic(nn.Module):
    """Pre-activation residual block for Wide ResNet: (BN -> ReLU -> conv) twice,
    with optional dropout in between, plus an identity or 1x1-projection shortcut."""

    def __init__(self, in_planes, planes, dropout_rate=None, stride=1):
        super(wide_basic, self).__init__()
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, padding=1, bias=True)
        # Dropout is optional; skipped entirely when no rate is given.
        self.dropout = nn.Dropout(p=dropout_rate) if dropout_rate is not None else None
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=True)
        # Project the shortcut with a strided 1x1 conv whenever the output
        # shape differs from the input; otherwise it is the identity.
        if stride != 1 or in_planes != planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride, bias=True),
            )
        else:
            self.shortcut = nn.Sequential()

    def forward(self, x):
        h = self.conv1(F.relu(self.bn1(x)))
        if self.dropout is not None:
            h = self.dropout(h)
        h = self.conv2(F.relu(self.bn2(h)))
        return h + self.shortcut(x)
class Wide_ResNet(nn.Module):
    """Wide ResNet classifier (depth = 6n + 4, channel multiplier k).

    Adapted from https://github.com/meliketoy/wide-resnet.pytorch/blob/master/networks/wide_resnet.py
    """
    def __init__(
        self,
        depth,
        widen_factor,
        input_channels,
        output_size,
        dropout_rate=None,
        is_cuda=False,
    ):
        """
        Args:
            depth: total network depth; must satisfy depth = 6n + 4.
            widen_factor: channel multiplier k.
            input_channels: channels of the input image.
            output_size: number of output units (classes).
            dropout_rate: optional dropout rate inside each wide_basic block.
            is_cuda: False/True, or a device string.
        """
        super(Wide_ResNet, self).__init__()
        self.depth = depth
        self.widen_factor = widen_factor
        self.input_channels = input_channels
        self.dropout_rate = dropout_rate
        self.output_size = output_size
        assert ((depth-4)%6 ==0), 'Wide-resnet depth should be 6n+4'
        n = (depth-4)//6
        k = widen_factor
        # NOTE(review): the stem width is 16*k here, whereas the referenced
        # upstream implementation uses 16 — confirm this deviation is intentional.
        nStages = [16*k, 16*k, 32*k, 64*k]
        self.in_planes = nStages[0]
        self.conv1 = conv3x3(self.input_channels,nStages[0])
        self.layer1 = self._wide_layer(wide_basic, nStages[1], n, dropout_rate, stride=1)
        self.layer2 = self._wide_layer(wide_basic, nStages[2], n, dropout_rate, stride=2)
        self.layer3 = self._wide_layer(wide_basic, nStages[3], n, dropout_rate, stride=2)
        self.bn1 = nn.BatchNorm2d(nStages[3], momentum=0.9)
        self.linear = nn.Linear(nStages[3], output_size)
        self.set_cuda(is_cuda)
    def _wide_layer(self, block, planes, num_blocks, dropout_rate, stride):
        # Only the first block of a stage downsamples; the rest use stride 1.
        # self.in_planes is threaded through successive calls, so call order matters.
        strides = [stride] + [1]*(int(num_blocks)-1)
        layers = []
        for stride in strides:
            layers.append(block(self.in_planes, planes, dropout_rate, stride))
            self.in_planes = planes
        return nn.Sequential(*layers)
    def forward(self, x):
        out = self.conv1(x)
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = F.relu(self.bn1(out))
        out = out.mean((-1,-2)) # replacing the out= F.avg_pool2d(out, 8) which is sensitive to the input shape.
        out = out.view(out.size(0), -1)
        out = self.linear(out)
        return out
    def set_cuda(self, is_cuda):
        """Move the model to the requested device; is_cuda may be a device string."""
        if isinstance(is_cuda, str):
            self.cuda(is_cuda)
        else:
            if is_cuda:
                self.cuda()
            else:
                self.cpu()
        self.is_cuda = is_cuda
    @property
    def model_dict(self):
        """Serializable description including a CPU copy of the state_dict."""
        model_dict = {"type": "Wide_ResNet"}
        model_dict["state_dict"] = to_cpu_recur(self.state_dict())
        model_dict["depth"] = self.depth
        model_dict["widen_factor"] = self.widen_factor
        model_dict["input_channels"] = self.input_channels
        model_dict["output_size"] = self.output_size
        model_dict["dropout_rate"] = self.dropout_rate
        return model_dict
    def load(self, filename):
        """Load from file; format inferred from the extension (.json vs pickle).

        NOTE(review): relies on self.load_model_dict, which is not defined on
        this class — confirm it is provided elsewhere.
        """
        mode = "json" if filename.endswith(".json") else "pickle"
        model_dict = load_model(filename, mode=mode)
        self.load_model_dict(model_dict)
    def save(self, filename):
        """Save self.model_dict; format inferred from the extension."""
        mode = "json" if filename.endswith(".json") else "pickle"
        save_model(self.model_dict, filename, mode=mode)
    def get_regularization(self, *args, **kwargs):
        # No regularization is applied; return a zero tensor on the right device.
        return to_Variable([0], is_cuda = self.is_cuda)
    def prepare_inspection(self, *args, **kwargs):
        # No inspection metrics are recorded.
        return {}
# ## CNN:
# In[ ]:
class ConvNet(nn.Module):
    """Configurable CNN built from a declarative `struct_param` list.

    Each entry of struct_param is (num_channels, layer_type, layer_settings),
    where layer_type is one of Conv2d / ConvTranspose2d / Simple_Layer /
    MaxPool2d / MaxUnpool2d / Upsample / BatchNorm2d / Dropout2d / Flatten.
    Layers are stored as attributes layer_0 .. layer_{n-1}.
    """
    def __init__(
        self,
        input_channels,
        struct_param=None,
        W_init_list=None,
        b_init_list=None,
        settings={},
        return_indices=False,
        is_cuda=False,
    ):
        """
        Args:
            input_channels: channels of the input image.
            struct_param: list of (num_channels, layer_type, layer_settings)
                triples; num_channels may be None for channel-preserving layers.
            W_init_list, b_init_list: optional per-layer initial weights/biases.
            settings: global settings (e.g. a default "activation").
            return_indices: if True, forward also returns the max-pool indices
                list (needed to drive MaxUnpool2d layers).
            is_cuda: False/True, or a device string.
        """
        super(ConvNet, self).__init__()
        self.input_channels = input_channels
        if struct_param is not None:
            self.struct_param = struct_param
            self.W_init_list = W_init_list
            self.b_init_list = b_init_list
            self.settings = settings
            self.num_layers = len(struct_param)
            self.info_dict = {}
            # Layer types that carry trainable parameters:
            self.param_available = ["Conv2d", "ConvTranspose2d", "BatchNorm2d", "Simple_Layer"]
            self.return_indices = return_indices
            for i in range(len(self.struct_param)):
                # Infer layer i's input channels from the most recent layer
                # with a declared channel count (entries may use None):
                if i > 0:
                    k = 1
                    while self.struct_param[i - k][0] is None:
                        k += 1
                    num_channels_prev = self.struct_param[i - k][0]
                else:
                    num_channels_prev = input_channels
                    k = 0
                # NOTE(review): for i == 0 this inspects struct_param[0] itself;
                # presumably a leading Simple_Layer declares a 3-tuple size — confirm.
                if self.struct_param[i - k][1] == "Simple_Layer" and isinstance(num_channels_prev, tuple) and len(num_channels_prev) == 3:
                    num_channels_prev = num_channels_prev[0]
                num_channels = self.struct_param[i][0]
                layer_type = self.struct_param[i][1]
                layer_settings = self.struct_param[i][2]
                # An explicit layer_input_size overrides the inferred channel count:
                if "layer_input_size" in layer_settings and isinstance(layer_settings["layer_input_size"], tuple):
                    num_channels_prev = layer_settings["layer_input_size"][0]
                if layer_type == "Conv2d":
                    layer = nn.Conv2d(num_channels_prev,
                                      num_channels,
                                      kernel_size = layer_settings["kernel_size"],
                                      stride = layer_settings["stride"] if "stride" in layer_settings else 1,
                                      padding = layer_settings["padding"] if "padding" in layer_settings else 0,
                                      dilation = layer_settings["dilation"] if "dilation" in layer_settings else 1,
                                      )
                elif layer_type == "ConvTranspose2d":
                    layer = nn.ConvTranspose2d(num_channels_prev,
                                               num_channels,
                                               kernel_size = layer_settings["kernel_size"],
                                               stride = layer_settings["stride"] if "stride" in layer_settings else 1,
                                               padding = layer_settings["padding"] if "padding" in layer_settings else 0,
                                               output_padding = layer_settings["output_padding"] if "output_padding" in layer_settings else 0,
                                               dilation = layer_settings["dilation"] if "dilation" in layer_settings else 1,
                                               )
                elif layer_type == "Simple_Layer":
                    layer = get_Layer(layer_type = layer_type,
                                      input_size = layer_settings["layer_input_size"],
                                      output_size = num_channels,
                                      W_init = W_init_list[i] if self.W_init_list is not None and self.W_init_list[i] is not None else None,
                                      b_init = b_init_list[i] if self.b_init_list is not None and self.b_init_list[i] is not None else None,
                                      settings = layer_settings,
                                      is_cuda = is_cuda,
                                      )
                elif layer_type == "MaxPool2d":
                    layer = nn.MaxPool2d(kernel_size = layer_settings["kernel_size"],
                                         stride = layer_settings["stride"] if "stride" in layer_settings else None,
                                         padding = layer_settings["padding"] if "padding" in layer_settings else 0,
                                         return_indices = layer_settings["return_indices"] if "return_indices" in layer_settings else False,
                                         )
                elif layer_type == "MaxUnpool2d":
                    layer = nn.MaxUnpool2d(kernel_size = layer_settings["kernel_size"],
                                           stride = layer_settings["stride"] if "stride" in layer_settings else None,
                                           padding = layer_settings["padding"] if "padding" in layer_settings else 0,
                                           )
                elif layer_type == "Upsample":
                    layer = nn.Upsample(scale_factor = layer_settings["scale_factor"],
                                        mode = layer_settings["mode"] if "mode" in layer_settings else "nearest",
                                        )
                elif layer_type == "BatchNorm2d":
                    layer = nn.BatchNorm2d(num_features = num_channels)
                elif layer_type == "Dropout2d":
                    layer = nn.Dropout2d(p = 0.5)
                elif layer_type == "Flatten":
                    layer = Flatten()
                else:
                    raise Exception("layer_type {0} not recognized!".format(layer_type))
                # Initialize using provided initial values (Simple_Layer handles
                # its own initialization inside get_Layer):
                if self.W_init_list is not None and self.W_init_list[i] is not None and layer_type not in ["Simple_Layer"]:
                    layer.weight.data = torch.FloatTensor(self.W_init_list[i])
                    layer.bias.data = torch.FloatTensor(self.b_init_list[i])
                setattr(self, "layer_{0}".format(i), layer)
        self.set_cuda(is_cuda)
    def forward(self, input, indices_list = None, **kwargs):
        """Run all layers; returns (output, indices_list) when return_indices is True."""
        return self.inspect_operation(input, operation_between = (0, self.num_layers), indices_list = indices_list)
    def inspect_operation(self, input, operation_between, indices_list = None):
        """Run only the layers in [start_layer, end_layer); negative end counts from the end."""
        output = input
        if indices_list is None:
            indices_list = []
        start_layer, end_layer = operation_between
        if end_layer < 0:
            end_layer += self.num_layers
        for i in range(start_layer, end_layer):
            # Reshape to the layer's declared input size, preserving batch size:
            if "layer_input_size" in self.struct_param[i][2]:
                output_size_last = output.shape[0]
                layer_input_size = self.struct_param[i][2]["layer_input_size"]
                if not isinstance(layer_input_size, tuple):
                    layer_input_size = (layer_input_size,)
                output = output.view(-1, *layer_input_size)
                assert output.shape[0] == output_size_last, "output_size reshaped to different length. Check shape!"
            # Unpool layers consume the most recent pooling indices:
            if "Unpool" in self.struct_param[i][1]:
                output_tentative = getattr(self, "layer_{0}".format(i))(output, indices_list.pop(-1))
            else:
                output_tentative = getattr(self, "layer_{0}".format(i))(output)
            if isinstance(output_tentative, tuple):
                output, indices = output_tentative
                indices_list.append(indices)
            else:
                output = output_tentative
            # Per-layer activation overrides the global default; pool/upsample
            # layers are always linear:
            if "activation" in self.struct_param[i][2]:
                activation = self.struct_param[i][2]["activation"]
            else:
                if "activation" in self.settings:
                    activation = self.settings["activation"]
                else:
                    activation = "linear"
            if "Pool" in self.struct_param[i][1] or "Unpool" in self.struct_param[i][1] or "Upsample" in self.struct_param[i][1]:
                activation = "linear"
            output = get_activation(activation)(output)
        if self.return_indices:
            return output, indices_list
        else:
            return output
    def get_loss(self, input, target, criterion, **kwargs):
        """Apply criterion to the prediction (dropping indices if returned)."""
        y_pred = self(input, **kwargs)
        if self.return_indices:
            y_pred = y_pred[0]
        return criterion(y_pred, target)
    def get_regularization(self, source = ["weight", "bias"], mode = "L1", **kwargs):
        """L1/L2 regularization over parameterized layers for the requested sources."""
        if not isinstance(source, list):
            source = [source]
        reg = Variable(torch.FloatTensor([0]), requires_grad = False)
        if self.is_cuda:
            reg = reg.cuda()
        for k in range(self.num_layers):
            if self.struct_param[k][1] not in self.param_available:
                continue
            layer = getattr(self, "layer_{0}".format(k))
            for source_ele in source:
                if source_ele == "weight":
                    if self.struct_param[k][1] not in ["Simple_Layer"]:
                        item = layer.weight
                    else:
                        item = layer.W_core
                elif source_ele == "bias":
                    if self.struct_param[k][1] not in ["Simple_Layer"]:
                        item = layer.bias
                    else:
                        item = layer.b_core
                if mode == "L1":
                    reg = reg + item.abs().sum()
                elif mode == "L2":
                    reg = reg + (item ** 2).sum()
                else:
                    raise Exception("mode {0} not recognized!".format(mode))
        return reg
    def get_weights_bias(self, W_source = "core", b_source = "core"):
        """Per-layer weights/biases as numpy arrays (None for parameterless layers)."""
        W_list = []
        b_list = []
        for k in range(self.num_layers):
            if self.struct_param[k][1] == "Simple_Layer":
                layer = getattr(self, "layer_{0}".format(k))
                if W_source == "core":
                    W_list.append(to_np_array(layer.W_core))
                if b_source == "core":
                    b_list.append(to_np_array(layer.b_core))
            elif self.struct_param[k][1] in self.param_available:
                layer = getattr(self, "layer_{0}".format(k))
                if W_source == "core":
                    W_list.append(to_np_array(layer.weight))
                if b_source == "core":
                    b_list.append(to_np_array(layer.bias, full_reduce = False))
            else:
                if W_source == "core":
                    W_list.append(None)
                if b_source == "core":
                    b_list.append(None)
        return W_list, b_list
    @property
    def model_dict(self):
        """Serializable description of the network."""
        model_dict = {"type": self.__class__.__name__}
        model_dict["net_type"] = self.__class__.__name__
        model_dict["input_channels"] = self.input_channels
        model_dict["struct_param"] = self.struct_param
        model_dict["settings"] = self.settings
        model_dict["weights"], model_dict["bias"] = self.get_weights_bias(W_source = "core", b_source = "core")
        model_dict["return_indices"] = self.return_indices
        return model_dict
    @property
    def output_size(self):
        # Channel count declared by the last layer (may be None):
        return self.struct_param[-1][0]
    @property
    def structure(self):
        """Compact structural summary of the network."""
        structure = OrderedDict()
        structure["input_channels"] = self.input_channels
        structure["output_size"] = self.output_size
        structure["struct_param"] = self.struct_param if hasattr(self, "struct_param") else None
        return structure
    def get_sympy_expression(self, verbose=True):
        # Symbolic expressions are not supported for conv nets; placeholders only.
        expressions = {i: None for i in range(self.num_layers)}
        return expressions
    def load(self, filename):
        """Load from file; format inferred from the extension (.json vs pickle)."""
        mode = "json" if filename.endswith(".json") else "pickle"
        model_dict = load_model(filename, mode=mode)
        self.load_model_dict(model_dict)
    def save(self, filename):
        """Save self.model_dict; format inferred from the extension."""
        mode = "json" if filename.endswith(".json") else "pickle"
        save_model(self.model_dict, filename, mode=mode)
    @property
    def DL(self):
        # Bug fix: DL was a plain method here, but every in-file consumer
        # (Model_Ensemble.DL property, Conv_Autoencoder.DL) reads `.DL` as an
        # attribute, which yielded a bound method instead of a number.
        DL = 0
        for k in range(self.num_layers):
            layer_type = self.struct_param[k][1]
            if layer_type in self.param_available:
                layer = getattr(self, "layer_{0}".format(k))
                if layer_type == "Simple_Layer":
                    DL += layer.DL
                else:
                    DL += get_list_DL(to_np_array(layer.weight), "non-snapped")
                    DL += get_list_DL(to_np_array(layer.bias), "non-snapped")
        return DL
    def load_model_dict(self, model_dict):
        """Rebuild from model_dict and adopt the rebuilt state in place."""
        new_net = load_model_dict_net(model_dict, is_cuda = self.is_cuda)
        self.__dict__.update(new_net.__dict__)
    def prepare_inspection(self, X, y, **kwargs):
        pred_prob = self(X)
        if self.return_indices:
            pred_prob = pred_prob[0]
        pred = pred_prob.max(1)[1]
        # self.info_dict["accuracy"] = get_accuracy(pred, y)
        return deepcopy(self.info_dict)
    def set_cuda(self, is_cuda):
        """Move the model to the requested device; is_cuda may be a device string."""
        if isinstance(is_cuda, str):
            self.cuda(is_cuda)
        else:
            if is_cuda:
                self.cuda()
            else:
                self.cpu()
        self.is_cuda = is_cuda
    def set_trainable(self, is_trainable):
        """Freeze or unfreeze all parameterized layers."""
        for k in range(self.num_layers):
            layer = getattr(self, "layer_{0}".format(k))
            if self.struct_param[k][1] == "Simple_Layer":
                layer.set_trainable(is_trainable)
            elif self.struct_param[k][1] in self.param_available:
                for param in layer.parameters():
                    param.requires_grad = is_trainable
class Conv_Model(nn.Module):
    """Encoder -> core -> decoder pipeline; in generative mode the encoder is
    omitted and the core is driven directly by a latent vector."""
    def __init__(
        self,
        encoder_model_dict,
        core_model_dict,
        decoder_model_dict,
        latent_size = 2,
        is_generative = True,
        is_res_block = True,
        is_cuda = False,
    ):
        """Conv_Model consists of an encoder, a core and a decoder.

        Args:
            encoder_model_dict: spec of the encoder (only built when
                is_generative is False).
            core_model_dict: spec of the core network.
            decoder_model_dict: spec of the decoder.
            latent_size: width of one latent chunk (used by forward_multistep).
            is_generative: if True, forward consumes a latent instead of encoding X.
            is_res_block: if True, the decoder output is applied as a residual
                on X through a sigmoid, clamped to [0, 1].
            is_cuda: device flag passed to load_model_dict.
        """
        super(Conv_Model, self).__init__()
        self.latent_size = latent_size
        self.is_generative = is_generative
        if not is_generative:
            self.encoder = load_model_dict(encoder_model_dict, is_cuda = is_cuda)
        self.core = load_model_dict(core_model_dict, is_cuda = is_cuda)
        self.decoder = load_model_dict(decoder_model_dict, is_cuda = is_cuda)
        self.is_res_block = is_res_block
        self.is_cuda = is_cuda
        self.info_dict = {}
    @property
    def num_layers(self):
        # Generative mode treats the core as a single layer.
        if self.is_generative:
            return 1
        else:
            return len(self.core.model_dict["struct_param"])
    def forward(
        self,
        X,
        latent = None,
        **kwargs
    ):
        """Map X (and optionally a latent) to the output image.

        In generative mode a 1-D latent is broadcast over the batch. In
        non-generative mode the latent is injected into the core's first
        layer via p_dict.
        NOTE(review): p_dict semantics depend on the core's implementation,
        which is not visible here — confirm.
        """
        if self.is_generative:
            if len(latent.shape) == 1:
                latent = latent.repeat(len(X), 1)
            latent = self.core(latent)
        else:
            p_dict = {k: latent if k == 0 else None for k in range(self.num_layers)}
            latent = self.encoder(X)
            latent = self.core(latent, p_dict = p_dict)
        output = self.decoder(latent)
        if self.is_res_block:
            # Residual update squashed by a sigmoid and clamped to valid pixel range:
            output = (X + nn.Sigmoid()(output)).clamp(0, 1)
        return output
    def forward_multistep(self, X, latents, isplot = False, num_images = 1):
        """Apply the model repeatedly, feeding each output back in as input.

        latents is a flat 1-D vector; each step consumes an overlapping window
        of two latent chunks (2 * latent_size values).
        """
        assert len(latents.shape) == 1
        length = int(len(latents) / 2)
        output = X
        for i in range(length - 1):
            latent = latents[i * self.latent_size: (i + 2) * self.latent_size]
            output = self(output, latent = latent)
            if isplot:
                plot_matrices(output[:num_images,0])
        return output
    def get_loss(self, X, y, criterion, **kwargs):
        # X is a pair (input_images, latent).
        return criterion(self(X = X[0], latent = X[1]), y)
    def plot(self, X, y, num_images = 1):
        """Plot (input, target, prediction) rows for a few random examples."""
        y_pred = self(X[0], latent = X[1])
        idx_list = np.random.choice(len(X[0]), num_images)
        for idx in idx_list:
            matrix = torch.cat([X[0][idx], y[idx], y_pred[idx]])
            plot_matrices(matrix, images_per_row = 8)
    def get_regularization(self, source = ["weights", "bias"], mode = "L1"):
        # NOTE(review): the default source uses "weights" here while sibling
        # classes use "weight" — confirm which spelling the sub-models expect.
        if self.is_generative:
            return self.core.get_regularization(source = source, mode = mode) + self.decoder.get_regularization(source = source, mode = mode)
        else:
            return self.encoder.get_regularization(source = source, mode = mode) + self.core.get_regularization(source = source, mode = mode) + self.decoder.get_regularization(source = source, mode = mode)
    def prepare_inspection(self, X, y, **kwargs):
        return deepcopy(self.info_dict)
    def set_trainable(self, is_trainable):
        """Freeze or unfreeze all sub-networks (encoder only when present)."""
        if not self.is_generative:
            self.encoder.set_trainable(is_trainable)
        self.core.set_trainable(is_trainable)
        self.decoder.set_trainable(is_trainable)
    @property
    def model_dict(self):
        """Serializable description of all sub-networks."""
        model_dict = {"type": "Conv_Model"}
        if not self.is_generative:
            model_dict["encoder_model_dict"] = self.encoder.model_dict
        model_dict["latent_size"] = self.latent_size
        model_dict["core_model_dict"] = self.core.model_dict
        model_dict["decoder_model_dict"] = self.decoder.model_dict
        model_dict["is_generative"] = self.is_generative
        model_dict["is_res_block"] = self.is_res_block
        return model_dict
    def load(self, filename):
        """Load from file; format inferred from the extension (.json vs pickle).

        NOTE(review): relies on self.load_model_dict, which is not defined on
        this class — confirm it is provided elsewhere.
        """
        mode = "json" if filename.endswith(".json") else "pickle"
        model_dict = load_model(filename, mode=mode)
        self.load_model_dict(model_dict)
    def save(self, filename):
        """Save self.model_dict; format inferred from the extension."""
        mode = "json" if filename.endswith(".json") else "pickle"
        save_model(self.model_dict, filename, mode=mode)
class Conv_Autoencoder(nn.Module):
    """Convolutional autoencoder built from two ConvNets.

    When share_model_among_steps is True, the same encoder/decoder is applied
    independently to each channel ("step") of the input, and the per-step
    latents/outputs are concatenated.
    """
    def __init__(
        self,
        input_channels_encoder,
        input_channels_decoder,
        struct_param_encoder,
        struct_param_decoder,
        latent_size = (1,2),
        share_model_among_steps = False,
        settings = {},
        is_cuda = False,
    ):
        """Conv_Autoencoder consists of an encoder and a decoder.

        Args:
            input_channels_encoder / input_channels_decoder: channel counts
                for the two ConvNets.
            struct_param_encoder / struct_param_decoder: their struct_params.
            latent_size: declared latent shape (stored in struct_param, not here).
            share_model_among_steps: see class docstring.
            settings: global ConvNet settings.
            is_cuda: False/True, or a device string.
        """
        super(Conv_Autoencoder, self).__init__()
        self.input_channels_encoder = input_channels_encoder
        self.input_channels_decoder = input_channels_decoder
        self.struct_param_encoder = struct_param_encoder
        self.struct_param_decoder = struct_param_decoder
        self.share_model_among_steps = share_model_among_steps
        self.settings = settings
        self.encoder = ConvNet(input_channels = input_channels_encoder, struct_param = struct_param_encoder, settings = settings, is_cuda = is_cuda)
        self.decoder = ConvNet(input_channels = input_channels_decoder, struct_param = struct_param_decoder, settings = settings, is_cuda = is_cuda)
        self.is_cuda = is_cuda
    def encode(self, input):
        """Encode the input; per-step latents are concatenated when sharing."""
        if self.share_model_among_steps:
            latent = []
            for i in range(input.shape[1]):
                latent_step = self.encoder(input[:, i:i+1])
                latent.append(latent_step)
            return torch.cat(latent, 1)
        else:
            return self.encoder(input)
    def decode(self, latent):
        """Decode the latent; when sharing, the flat latent is split into
        per-step chunks of the encoder's declared output size."""
        if self.share_model_among_steps:
            latent_size = self.struct_param_encoder[-1][0]
            latent = latent.view(latent.size(0), -1, latent_size)
            output = []
            for i in range(latent.shape[1]):
                output_step = self.decoder(latent[:, i].contiguous())
                output.append(output_step)
            return torch.cat(output, 1)
        else:
            return self.decoder(latent)
    def set_trainable(self, is_trainable):
        """Freeze or unfreeze both sub-networks."""
        self.encoder.set_trainable(is_trainable)
        self.decoder.set_trainable(is_trainable)
    def forward(self, input):
        return self.decode(self.encode(input))
    def get_loss(self, input, target, criterion, **kwargs):
        return criterion(self(input), target)
    def get_regularization(self, source = ["weight", "bias"], mode = "L1"):
        """Sum of both sub-networks' regularization terms."""
        return self.encoder.get_regularization(source = source, mode = mode) + self.decoder.get_regularization(source = source, mode = mode)
    @property
    def model_dict(self):
        """Serializable description of the autoencoder."""
        model_dict = {"type": "Conv_Autoencoder"}
        model_dict["net_type"] = "Conv_Autoencoder"
        model_dict["input_channels_encoder"] = self.input_channels_encoder
        model_dict["input_channels_decoder"] = self.input_channels_decoder
        model_dict["struct_param_encoder"] = self.struct_param_encoder
        model_dict["struct_param_decoder"] = self.struct_param_decoder
        model_dict["share_model_among_steps"] = self.share_model_among_steps
        model_dict["settings"] = self.settings
        model_dict["encoder"] = self.encoder.model_dict
        model_dict["decoder"] = self.decoder.model_dict
        return model_dict
    def load_model_dict(self, model_dict):
        """Rebuild from model_dict (module-level loader) and adopt the state in place."""
        model = load_model_dict(model_dict, is_cuda = self.is_cuda)
        self.__dict__.update(model.__dict__)
    def load(self, filename):
        """Load from file; format inferred from the extension (.json vs pickle)."""
        mode = "json" if filename.endswith(".json") else "pickle"
        model_dict = load_model(filename, mode=mode)
        self.load_model_dict(model_dict)
    def save(self, filename):
        """Save self.model_dict; format inferred from the extension."""
        mode = "json" if filename.endswith(".json") else "pickle"
        save_model(self.model_dict, filename, mode=mode)
    @property
    def DL(self):
        # Bug fix: DL was a plain method, inconsistent with Model_Ensemble.DL
        # (a property) and with this body itself, which reads encoder.DL /
        # decoder.DL as attributes.
        return self.encoder.DL + self.decoder.DL
class Flatten(nn.Module):
    """Collapses every dimension after the batch dimension into one."""

    def __init__(self):
        super(Flatten, self).__init__()

    def forward(self, x):
        batch = x.size(0)
        return x.view(batch, -1)
# ## VAE:
# In[ ]:
class VAE(nn.Module):
    """Standard variational autoencoder. The encoder outputs [mu, logvar]
    concatenated along the last dimension; the loss is the negative ELBO
    (reconstruction BCE + KL divergence), averaged over the batch."""
    def __init__(
        self,
        encoder_model_dict,
        decoder_model_dict,
        is_cuda = False,
    ):
        """
        Args:
            encoder_model_dict / decoder_model_dict: serialized specs of the
                two sub-networks, built via the module-level load_model_dict.
            is_cuda: device flag passed through to the loader.
        """
        super(VAE, self).__init__()
        self.encoder = load_model_dict(encoder_model_dict, is_cuda = is_cuda)
        self.decoder = load_model_dict(decoder_model_dict, is_cuda = is_cuda)
        self.is_cuda = is_cuda
        self.info_dict = {}
    def encode(self, X):
        """Split the encoder output into (mu, logvar), each half the width."""
        Z = self.encoder(X)
        latent_size = int(Z.shape[-1] / 2)
        mu = Z[..., :latent_size]
        logvar = Z[..., latent_size:]
        return mu, logvar
    def reparameterize(self, mu, logvar):
        """Sample z = mu + sigma * eps with eps ~ N(0, I) (reparameterization trick)."""
        std = torch.exp(0.5*logvar)
        eps = torch.randn_like(std)
        return eps.mul(std).add_(mu)
    def decode(self, Z):
        return self.decoder(Z)
    def forward(self, X):
        """Return (reconstruction, mu, logvar)."""
        mu, logvar = self.encode(X)
        Z = self.reparameterize(mu, logvar)
        return self.decode(Z), mu, logvar
    def get_loss(self, X, y = None, **kwargs):
        """Negative ELBO per example; also records KLD and BCE in info_dict."""
        recon_X, mu, logvar = self(X)
        BCE = F.binary_cross_entropy(recon_X.view(recon_X.shape[0], -1), X.view(X.shape[0], -1), reduction='sum')
        # see Appendix B from VAE paper:
        # Kingma and Welling. Auto-Encoding Variational Bayes. ICLR, 2014
        # https://arxiv.org/abs/1312.6114
        # 0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)
        KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
        loss = (BCE + KLD) / len(X)
        self.info_dict["KLD"] = KLD.item() / len(X)
        self.info_dict["BCE"] = BCE.item() / len(X)
        return loss
    @property
    def model_dict(self):
        # Bug fix: this was a plain method, but save() below reads
        # self.model_dict as an attribute (and every sibling class declares
        # model_dict as a property), so save() would have serialized a bound
        # method instead of the dict.
        model_dict = {"type": "VAE"}
        model_dict["encoder_model_dict"] = self.encoder.model_dict
        model_dict["decoder_model_dict"] = self.decoder.model_dict
        return model_dict
    def load(self, filename):
        """Load from file; format inferred from the extension (.json vs pickle)."""
        mode = "json" if filename.endswith(".json") else "pickle"
        model_dict = load_model(filename, mode=mode)
        self.load_model_dict(model_dict)
    def save(self, filename):
        """Save self.model_dict; format inferred from the extension."""
        mode = "json" if filename.endswith(".json") else "pickle"
        save_model(self.model_dict, filename, mode=mode)
    def get_regularization(self, source = ["weight", "bias"], mode = "L1"):
        """Sum of both sub-networks' regularization terms."""
        return self.encoder.get_regularization(source = source, mode = mode) + self.decoder.get_regularization(source = source, mode = mode)
    def prepare_inspection(self, X, y, **kwargs):
        return deepcopy(self.info_dict)
# ## Reparameterization toolkit:
# In[ ]:
class Net_reparam(nn.Module):
    """Module that uses reparameterization to take in two inputs and produce a scalar
    log-probability."""
    def __init__(
        self,
        model_dict,
        reparam_mode,
        is_cuda=False,
    ):
        """
        Args:
            model_dict: serialized spec of the wrapped network.
            reparam_mode: reparameterization mode ("diag", "diagg", or "full").
            is_cuda: device flag passed to load_model_dict.
        """
        super(Net_reparam, self).__init__()
        self.model = load_model_dict(model_dict, is_cuda=is_cuda)
        self.reparam_mode = reparam_mode
    def forward(self, X, Z, is_outer=False):
        """
        Obtaining single value using reparameterization.

        Args:
            X shape: [Bx, ...]
            Z shape: [S, Bz, Z]
            is_outer: whether to use outer product to get a tensor with shape [S, Bz, Bx].

        Returns:
            If is_outer==True, return log_prob of shape [S, Bz, Bx]
            If is_outer==False, return log_prob of shape [S, Bz] (where Bz=Bx)
        """
        dist, _ = reparameterize(self.model, X, mode=self.reparam_mode)
        if is_outer:
            log_prob = dist.log_prob(Z[...,None,:])
        else:
            log_prob = dist.log_prob(Z)
        if self.reparam_mode == 'diag':
            # Diagonal Gaussians factorize, so sum the per-dimension log-probs:
            log_prob = log_prob.sum(-1)
        return log_prob
    def get_regularization(self, source = ["weight", "bias"], mode = "L1", **kwargs):
        """Delegate regularization to the wrapped model."""
        # Bug fix: the mode was previously passed as `model = mode`, a keyword
        # the wrapped model's get_regularization does not accept.
        return self.model.get_regularization(source=source, mode=mode, **kwargs)
    def prepare_inspection(self, X, y, **kwargs):
        return {}
    @property
    def model_dict(self):
        """Serializable description of the wrapped model and reparam mode."""
        model_dict = {"type": "Net_reparam"}
        model_dict["model"] = self.model.model_dict
        model_dict["reparam_mode"] = self.reparam_mode
        return model_dict
    def load(self, filename):
        """Load from file; format inferred from the extension (.json vs pickle)."""
        mode = "json" if filename.endswith(".json") else "pickle"
        model_dict = load_model(filename, mode=mode)
        self.load_model_dict(model_dict)
    def save(self, filename):
        """Save self.model_dict; format inferred from the extension."""
        mode = "json" if filename.endswith(".json") else "pickle"
        save_model(self.model_dict, filename, mode=mode)
def reparameterize(model, input, mode="full", size=None):
    """Dispatch to the matching reparameterization routine.

    "diag*" modes use the diagonal routine (or the mixture-diagonal routine
    when ``model`` is a Mixture_Model); "full" builds a full-covariance
    Gaussian. Any other mode raises.
    """
    if mode == "full":
        return reparameterize_full(model, input, size=size)
    if mode.startswith("diag"):
        use_mixture = model is not None and model.__class__.__name__ == "Mixture_Model"
        routine = reparameterize_mixture_diagonal if use_mixture else reparameterize_diagonal
        return routine(model, input, mode=mode)
    raise Exception("Mode {} is not valid!".format(mode))
def reparameterize_diagonal(model, input, mode):
    """Build a diagonal Gaussian from ``input`` (optionally passed through ``model``).

    "diagg" mode: the (possibly tuple-wrapped) output is the mean and the std is
    fixed to ones. "diag" mode: the last dimension is split in half into the
    mean and a softplus-transformed std (floored at 1e-10).
    Returns ``(Normal_dist, (mean, std))``.
    """
    mean_logit = input if model is None else model(input)
    # Some models return (output, aux); only the first element carries the parameters.
    if isinstance(mean_logit, tuple):
        mean_logit = mean_logit[0]
    if mode.startswith("diagg"):
        std = torch.ones(mean_logit.shape, device=mean_logit.device)
        return Normal(mean_logit, std), (mean_logit, std)
    if mode.startswith("diag"):
        half = mean_logit.size(-1) // 2
        mean = mean_logit[:, :half]
        std = F.softplus(mean_logit[:, half:], beta=1) + 1e-10
        return Normal(mean, std), (mean, std)
    raise Exception("mode {} is not valid!".format(mode))
def reparameterize_mixture_diagonal(model, input, mode):
    """Build a reparameterized mixture-of-diagonal-Gaussians from the model output.

    ``model(input)`` must return ``(mean_logit, weight_logits)``. In "diagg"
    mode the component scales are fixed to 1; otherwise ``mean_logit`` is split
    in half into component means and softplus-transformed scales.
    Returns ``(Mixture_Gaussian_reparam, (mean_list, scale_list))``.
    """
    mean_logit, weight_logits = model(input)
    if mode.startswith("diagg"):
        mean_list = mean_logit
        scale_list = torch.ones(mean_list.shape).to(mean_list.device)
    else:
        # Split point read from dim -2; the slice below indexes dim 1, which is
        # the same axis for the expected 3-D [B, Z, k] layout.
        size = int(mean_logit.size(-2) / 2)
        mean_list = mean_logit[:, :size]
        scale_list = F.softplus(mean_logit[:, size:], beta=1) + 0.01  # Avoid the std to go to 0
    dist = Mixture_Gaussian_reparam(mean_list=mean_list,
                                    scale_list=scale_list,
                                    weight_logits=weight_logits,
                                    )
    return dist, (mean_list, scale_list)
def reparameterize_full(model, input, size=None):
    """Build a full-covariance MultivariateNormal from the model output.

    The flat output is split into ``size`` means followed by
    ``size * (size + 1) / 2`` entries that fill a lower-triangular scale matrix
    whose diagonal is made positive via softplus. When ``size`` is None it is
    solved from ``dim = size + size * (size + 1) / 2``.
    Returns ``(MultivariateNormal, (mean, scale_tril))``.
    """
    if model is not None:
        mean_logit = model(input)
    else:
        mean_logit = input
    if isinstance(mean_logit, tuple):
        mean_logit = mean_logit[0]
    if size is None:
        dim = mean_logit.size(-1)
        # Positive root of size^2 + 3*size - 2*dim = 0.
        size = int((np.sqrt(9 + 8 * dim) - 3) / 2)
    mean = mean_logit[:, :size]
    # fill_triangular / matrix_diag_transform are project helpers defined elsewhere.
    scale_tril = fill_triangular(mean_logit[:, size:], size)
    scale_tril = matrix_diag_transform(scale_tril, F.softplus)
    dist = MultivariateNormal(mean, scale_tril = scale_tril)
    return dist, (mean, scale_tril)
def sample(dist, n=None):
    """Draw reparameterized samples from ``dist``.

    With ``n`` None a single (unstacked) sample is returned; otherwise ``n``
    samples are stacked along a new leading axis.
    """
    return dist.rsample() if n is None else dist.rsample((n,))
# ## Probability models:
# ### Mixture of Gaussian:
# In[ ]:
class Mixture_Gaussian(nn.Module):
    """Trainable mixture-of-Gaussians density model.

    Parameters are ``weight_logits`` [k], ``loc_list`` [k, dim] and
    ``scale_list`` [k, size], where size == dim for param_mode "diag" and
    dim * (dim + 1) / 2 for "full". Only "diag" is implemented for density
    evaluation.
    """
    def __init__(
        self,
        num_components,
        dim,
        param_mode = "full",
        is_cuda = False,
    ):
        super(Mixture_Gaussian, self).__init__()
        self.num_components = num_components
        self.dim = dim
        self.param_mode = param_mode
        self.is_cuda = is_cuda
        # `is_cuda` may also be an explicit device string (e.g. "cuda:1").
        self.device = torch.device(self.is_cuda if isinstance(self.is_cuda, str) else "cuda" if self.is_cuda else "cpu")
        self.info_dict = {}
    def initialize(self, model_dict = None, input = None, num_samples = 100, verbose = False):
        """Initialize the mixture parameters.

        Three modes:
          * ``input`` given: random-restart initialization — draw ``num_samples``
            candidate parameter sets from the data and keep the one with the
            lowest negative log-likelihood.
          * ``model_dict`` given: restore previously saved parameters.
          * neither: random initialization.
        """
        if input is not None:
            neg_log_prob_min = np.inf
            loc_init_min = None
            scale_init_min = None
            for i in range(num_samples):
                neg_log_prob, loc_init_list, scale_init_list = self.initialize_ele(input)
                if verbose:
                    print("{0}: neg_log_prob: {1:.4f}".format(i, neg_log_prob))
                if neg_log_prob < neg_log_prob_min:
                    neg_log_prob_min = neg_log_prob
                    loc_init_min = self.loc_list.detach()
                    scale_init_min = self.scale_list.detach()
            self.loc_list = nn.Parameter(loc_init_min.to(self.device))
            self.scale_list = nn.Parameter(scale_init_min.to(self.device))
            print("min neg_log_prob: {0:.6f}".format(to_np_array(neg_log_prob_min)))
        else:
            if model_dict is None:
                self.weight_logits = nn.Parameter((torch.randn(self.num_components) * np.sqrt(2 / (1 + self.dim))).to(self.device))
            else:
                self.weight_logits = nn.Parameter((torch.FloatTensor(model_dict["weight_logits"])).to(self.device))
            if self.param_mode == "full":
                size = self.dim * (self.dim + 1) // 2
            elif self.param_mode == "diag":
                size = self.dim
            else:
                # BUGFIX: was a bare `raise` outside any except block, which
                # raises RuntimeError("No active exception to re-raise").
                raise ValueError("param_mode '{}' is not valid!".format(self.param_mode))
            if model_dict is None:
                self.loc_list = nn.Parameter(torch.randn(self.num_components, self.dim).to(self.device))
                self.scale_list = nn.Parameter((torch.randn(self.num_components, size) / self.dim).to(self.device))
            else:
                self.loc_list = nn.Parameter(torch.FloatTensor(model_dict["loc_list"]).to(self.device))
                self.scale_list = nn.Parameter(torch.FloatTensor(model_dict["scale_list"]).to(self.device))
    def initialize_ele(self, input):
        """Draw one candidate parameter set from ``input`` and score it.

        Locations are sampled uniformly from the data rows; scales are random,
        proportional to the data's std.
        """
        if self.param_mode == "full":
            size = self.dim * (self.dim + 1) // 2
        elif self.param_mode == "diag":
            size = self.dim
        else:
            # BUGFIX: bare `raise` outside an except block (see initialize()).
            raise ValueError("param_mode '{}' is not valid!".format(self.param_mode))
        length = len(input)
        self.weight_logits = nn.Parameter(torch.zeros(self.num_components).to(self.device))
        self.loc_list = nn.Parameter(input[torch.multinomial(torch.ones(length) / length, self.num_components)].detach())
        self.scale_list = nn.Parameter((torch.randn(self.num_components, size).to(self.device) * input.std() / 5).to(self.device))
        neg_log_prob = self.get_loss(input)
        # BUGFIX: `initialize` unpacks three values from this call, but only the
        # loss was returned, which raised at unpack time.
        return neg_log_prob, self.loc_list, self.scale_list
    def prob(self, input):
        """Density of ``input`` under the mixture ("diag" mode only).

        1-D input is treated as a column of scalars. Returns the mixture
        density with the component axis marginalized out.
        """
        if len(input.shape) == 1:
            input = input.unsqueeze(1)
        assert len(input.shape) in [0, 2, 3]
        # Insert a component axis so [.., dim] broadcasts against loc_list [k, dim].
        input = input.unsqueeze(-2)
        if self.param_mode == "diag":
            scale_list = F.softplus(self.scale_list)
            # Per-component diagonal-Gaussian log-density, summed over dimensions.
            logits = (- (input - self.loc_list) ** 2 / 2 / scale_list ** 2 - torch.log(scale_list * np.sqrt(2 * np.pi))).sum(-1)
        else:
            # BUGFIX: was a bare `raise` outside an except block.
            raise NotImplementedError("prob() is only implemented for param_mode 'diag'.")
        # Mixture density: weighted sum of the component densities.
        prob = torch.matmul(torch.exp(logits), nn.Softmax(dim = 0)(self.weight_logits))
        return prob
    def log_prob(self, input):
        """Log-density; 1e-45 guards against log(0)."""
        return torch.log(self.prob(input) + 1e-45)
    def get_loss(self, X, y = None, **kwargs):
        """Optimize negative log-likelihood"""
        # Mean NLL in bits (divided by log 2); `y` is accepted for API compatibility.
        neg_log_prob = - self.log_prob(X).mean() / np.log(2)
        self.info_dict["loss"] = to_np_array(neg_log_prob)
        return neg_log_prob
    def prepare_inspection(self, X, y, criterion=None, **kwargs):
        """Return a deep copy of recorded metrics (inputs are unused).

        BUGFIX: `self` was missing from the signature, so the body's
        `self.info_dict` raised NameError (and positional calls bound the
        instance to `X`); `criterion` is kept, now optional, for call-site
        compatibility.
        """
        return deepcopy(self.info_dict)
    @property
    def model_dict(self):
        """Serializable description of the mixture."""
        model_dict = {"type": "Mixture_Gaussian"}
        model_dict["num_components"] = self.num_components
        model_dict["dim"] = self.dim
        model_dict["param_mode"] = self.param_mode
        model_dict["weight_logits"] = to_np_array(self.weight_logits)
        model_dict["loc_list"] = to_np_array(self.loc_list)
        model_dict["scale_list"] = to_np_array(self.scale_list)
        return model_dict
    def load(self, filename):
        """Load parameters from ``filename`` (JSON if it ends in .json, else pickle)."""
        mode = "json" if filename.endswith(".json") else "pickle"
        model_dict = load_model(filename, mode=mode)
        self.load_model_dict(model_dict)
    def save(self, filename):
        """Serialize ``self.model_dict`` to ``filename`` (JSON or pickle by extension)."""
        mode = "json" if filename.endswith(".json") else "pickle"
        save_model(self.model_dict, filename, mode=mode)
    def get_param(self):
        """Print and return ``(weights, locations, raw scales)`` as numpy arrays."""
        weights = to_np_array(nn.Softmax(dim = 0)(self.weight_logits))
        loc_list = to_np_array(self.loc_list)
        scale_list = to_np_array(self.scale_list)
        print("weights: {0}".format(weights))
        print("loc:")
        pp.pprint(loc_list)
        print("scale:")
        pp.pprint(scale_list)
        return weights, loc_list, scale_list
    def visualize(self, input):
        """Plot each weighted component density and their sum over the data range.

        NOTE(review): plots raw ``scale_list`` values while ``prob`` applies
        softplus to them — confirm which is intended.
        """
        import scipy
        import matplotlib.pylab as plt
        std = to_np_array(input.std())
        X = np.arange(to_np_array(input.min()) - 0.2 * std, to_np_array(input.max()) + 0.2 * std, 0.1)
        Y_dict = {}
        weights = nn.Softmax(dim = 0)(self.weight_logits)
        plt.figure(figsize=(10, 4), dpi=100).set_facecolor('white')
        for i in range(self.num_components):
            # BUGFIX: every component was scaled by weights[0]; use its own weight.
            Y_dict[i] = weights[i].item() * scipy.stats.norm.pdf((X - self.loc_list[i].item()) / self.scale_list[i].item())
            plt.plot(X, Y_dict[i])
        Y = np.sum([item for item in Y_dict.values()], 0)
        plt.plot(X, Y, 'k--')
        plt.plot(input.data.numpy(), np.zeros(len(input)), 'k*')
        plt.title('Density of {0}-component mixture model'.format(self.num_components))
        plt.ylabel('probability density');
    def get_regularization(self, source = ["weights", "bias"], mode = "L1", **kwargs):
        """No regularization for this model; returns a zero tensor on the model device."""
        reg = to_Variable([0], requires_grad = False).to(self.device)
        return reg
# ### Mixture_Gaussian for reparameterization:
# In[ ]:
class Mixture_Gaussian_reparam(nn.Module):
    """Mixture of diagonal Gaussians usable either as a reparameterized posterior
    (parameters supplied per-batch) or as a learned prior (parameters are
    ``nn.Parameter``s). Component layout: means/scales [B, Z, k], weights [B, k].
    """
    def __init__(
        self,
        # Use as reparamerization:
        mean_list=None,
        scale_list=None,
        weight_logits=None,
        # Use as prior:
        Z_size=None,
        n_components=None,
        mean_scale=0.1,
        scale_scale=0.1,
        # Mode:
        is_reparam=True,
        reparam_mode="diag",
        device= torch.device("cpu"),
    ):
        super(Mixture_Gaussian_reparam, self).__init__()
        self.is_reparam = is_reparam
        self.reparam_mode = reparam_mode
        # self.is_cuda = is_cuda
        self.device = device
        if self.is_reparam:
            # Reparameterization mode: parameters come from an upstream network.
            self.mean_list = mean_list # size: [B, Z, k]
            self.scale_list = scale_list # size: [B, Z, k]
            self.weight_logits = weight_logits # size: [B, k]
            self.n_components = self.weight_logits.shape[-1]
            self.Z_size = self.mean_list.shape[-2]
        else:
            # Prior mode: parameters are trainable; raw scales are stored as
            # softplus-inverses so F.softplus recovers the intended magnitudes.
            self.n_components = n_components
            self.Z_size = Z_size
            self.mean_list = nn.Parameter((torch.rand(1, Z_size, n_components) - 0.5) * mean_scale)
            self.scale_list = nn.Parameter(torch.log(torch.exp((torch.rand(1, Z_size, n_components) * 0.2 + 0.9) * scale_scale) - 1))
            self.weight_logits = nn.Parameter(torch.zeros(1, n_components))
            if mean_list is not None:
                # Restore previously saved parameter values.
                self.mean_list.data = to_Variable(mean_list)
                self.scale_list.data = to_Variable(scale_list)
                self.weight_logits.data = to_Variable(weight_logits)
        self.to(self.device)
    def log_prob(self, input):
        """Obtain the log_prob of the input."""
        input = input.unsqueeze(-1)  # [S, B, Z, 1]
        if self.reparam_mode == "diag":
            if self.is_reparam:
                # logits: [S, B, Z, k]
                logits = - (input - self.mean_list) ** 2 / 2 / self.scale_list ** 2 - torch.log(self.scale_list * np.sqrt(2 * np.pi))
            else:
                # Prior mode stores raw scales; make them positive first.
                scale_list = F.softplus(self.scale_list, beta=1)
                logits = - (input - self.mean_list) ** 2 / 2 / scale_list ** 2 - torch.log(scale_list * np.sqrt(2 * np.pi))
        else:
            # NOTE(review): bare `raise` outside an except block raises RuntimeError.
            raise
        # log_softmax(weight_logits): [B, k]
        # logits: [S, B, Z, k]
        # log_prob: [S, B, Z]
        log_prob = torch.logsumexp(logits + F.log_softmax(self.weight_logits, -1).unsqueeze(-2), axis=-1)  # F(...).unsqueeze(-2): [B, 1, k]
        return log_prob
    def prob(self, Z):
        """Density of ``Z``: exp of the mixture log-density."""
        return torch.exp(self.log_prob(Z))
    def sample(self, n=None):
        """Ancestral sampling: pick a component per batch element, then draw from it.

        With ``n`` None a single unstacked sample [B, Z] is returned; otherwise
        ``n`` must be a tuple and samples are stacked on a leading axis.
        """
        if n is None:
            n_core = 1
        else:
            assert isinstance(n, tuple)
            n_core = n[0]
        weight_probs = F.softmax(self.weight_logits, -1)  # size: [B, m]
        # Expand the chosen component index across the Z axis for gather.
        idx = torch.multinomial(weight_probs, n_core, replacement=True).unsqueeze(-2).expand(-1, self.mean_list.shape[-2], -1)  # multinomial result: [B, S]; result: [B, Z, S]
        mean_list = torch.gather(self.mean_list, dim=-1, index=idx)  # [B, Z, S]
        if self.is_reparam:
            scale_list = torch.gather(self.scale_list, dim=-1, index=idx)  # [B, Z, S]
        else:
            scale_list = F.softplus(torch.gather(self.scale_list, dim=-1, index=idx), beta=1)  # [B, Z, S]
        Z = torch.normal(mean_list, scale_list).permute(2, 0, 1)
        if n is None:
            Z = Z.squeeze(0)
        return Z
    def rsample(self, n=None):
        """Alias for ``sample``; exposes the Distribution-style rsample API."""
        return self.sample(n=n)
    def __repr__(self):
        return "Mixture_Gaussian_reparam({}, Z_size={})".format(self.n_components, self.Z_size)
    @property
    def model_dict(self):
        """Serializable description of the mixture."""
        model_dict = {"type": "Mixture_Gaussian_reparam"}
        model_dict["is_reparam"] = self.is_reparam
        model_dict["reparam_mode"] = self.reparam_mode
        model_dict["Z_size"] = self.Z_size
        model_dict["n_components"] = self.n_components
        model_dict["mean_list"] = to_np_array(self.mean_list)
        model_dict["scale_list"] = to_np_array(self.scale_list)
        model_dict["weight_logits"] = to_np_array(self.weight_logits)
        return model_dict
# ### Triangular distribution:
# In[ ]:
class Triangular_dist(Distribution):
    """Probability distribution with a Triangular shape.

    The density is supported on ``[loc - a, loc + b]`` with its peak at ``loc``
    (peak height 2 / (a + b)), rising linearly on the left leg and falling
    linearly on the right leg.
    """
    def __init__(self, loc, a, b, validate_args=None):
        self.loc, self.a, self.b = broadcast_all(loc, a, b)
        batch_shape = torch.Size() if isinstance(loc, Number) else self.loc.size()
        super(Triangular_dist, self).__init__(batch_shape, validate_args=validate_args)
    @property
    def mean(self):
        # E[X] = (A + B + C) / 3 with A = loc - a, B = loc + b, C = loc.
        return self.loc + (self.b - self.a) / 3
    @property
    def variance(self):
        # Var = (A^2 + B^2 + C^2 - AB - AC - BC) / 18, which simplifies to this.
        return (self.a ** 2 + self.b ** 2 + self.a * self.b) / 18
    @property
    def stddev(self):
        return torch.sqrt(self.variance)
    def expand(self, batch_shape, _instance=None):
        """Return a new distribution with parameters expanded to ``batch_shape``.

        BUGFIX: previously requested an instance of ``PieceWise``, an undefined
        name, so ``expand`` always raised NameError.
        """
        new = self._get_checked_instance(Triangular_dist, _instance)
        batch_shape = torch.Size(batch_shape)
        new.loc = self.loc.expand(batch_shape)
        new.a = self.a.expand(batch_shape)
        new.b = self.b.expand(batch_shape)
        super(Triangular_dist, new).__init__(batch_shape, validate_args=False)
        new._validate_args = self._validate_args
        return new
    @constraints.dependent_property
    def support(self):
        return constraints.interval(self.loc - self.a, self.loc + self.b)
    def sample(self, sample_shape=torch.Size()):
        """Draw samples via inverse-CDF of uniform noise, without gradients."""
        shape = self._extended_shape(sample_shape)
        rand = torch.rand(shape, dtype=self.loc.dtype, device=self.loc.device)
        with torch.no_grad():
            return self.icdf(rand)
    def rsample(self, sample_shape=torch.Size()):
        """Sample with reparameterization."""
        shape = self._extended_shape(sample_shape)
        rand = torch.rand(shape, dtype=self.loc.dtype, device=self.loc.device)
        return self.icdf(rand)
    def icdf(self, value):
        """Inverse cdf."""
        if self._validate_args:
            self._validate_sample(value)
        assert value.min() >= 0 and value.max() <= 1
        value, loc, a, b = broadcast_all(value, self.loc, self.a, self.b)
        a_plus_b = a + b
        # The CDF at the peak is a / (a + b): left branch below, right branch above.
        idx = value < a / a_plus_b
        iidx = ~idx
        out = torch.ones_like(value)
        out[idx] = loc[idx] - a[idx] + torch.sqrt(a[idx] * a_plus_b[idx] * value[idx])
        out[iidx] = loc[iidx] + b[iidx] - torch.sqrt(b[iidx] * a_plus_b[iidx] * (1 - value[iidx]) )
        return out
    def prob(self, value):
        """Get probability."""
        if self._validate_args:
            self._validate_sample(value)
        value, loc, a, b = broadcast_all(value, self.loc, self.a, self.b)
        # Piecewise-linear density on the two legs; zero outside the support.
        idx1 = (loc - a <= value) & (value <= loc)
        idx2 = (loc < value) & (value <= loc + b)
        a_plus_b = a + b
        out = torch.zeros_like(value)
        out[idx1] = 2 * (value[idx1] - loc[idx1] + a[idx1]) / a[idx1] / a_plus_b[idx1]
        out[idx2] = -2 * (value[idx2] - loc[idx2] - b[idx2]) / b[idx2] / a_plus_b[idx2]
        return out
    def log_prob(self, value):
        """Get log probability."""
        return torch.log(self.prob(value))
    @property
    def model_dict(self):
        """Serializable description of the distribution."""
        model_dict = {"type": "Triangular_dist"}
        model_dict["loc"] = to_np_array(self.loc)
        model_dict["a"] = to_np_array(self.a)
        model_dict["b"] = to_np_array(self.b)
        return model_dict
    def load(self, filename):
        """Load parameters from ``filename`` (JSON if it ends in .json, else pickle)."""
        mode = "json" if filename.endswith(".json") else "pickle"
        model_dict = load_model(filename, mode=mode)
        self.load_model_dict(model_dict)
    def save(self, filename):
        """Serialize ``self.model_dict`` to ``filename`` (JSON or pickle by extension)."""
        mode = "json" if filename.endswith(".json") else "pickle"
        save_model(self.model_dict, filename, mode=mode)
# In[ ]:
def load_model_dict_distribution(model_dict, is_cuda = False):
    """Reconstruct a serialized distribution object from its ``model_dict``.

    Dispatches on ``model_dict["type"]``; raises for unknown types.
    ``is_cuda`` may be a bool or a device string (same convention as
    ``Mixture_Gaussian.__init__``).
    """
    if model_dict["type"] == "Mixture_Gaussian":
        model = Mixture_Gaussian(
            num_components=model_dict["num_components"],
            dim=model_dict["dim"],
            param_mode=model_dict["param_mode"],
            is_cuda=is_cuda,
        )
        model.initialize(model_dict = model_dict)
    elif model_dict["type"] == "Mixture_Gaussian_reparam":
        # BUGFIX: Mixture_Gaussian_reparam.__init__ takes `device`, not `is_cuda`;
        # passing is_cuda raised TypeError. Translate to a torch.device here.
        model = Mixture_Gaussian_reparam(
            is_reparam=model_dict["is_reparam"],
            reparam_mode=model_dict["reparam_mode"],
            mean_list=model_dict["mean_list"],
            scale_list=model_dict["scale_list"],
            weight_logits=model_dict["weight_logits"],
            Z_size=model_dict["Z_size"],
            n_components=model_dict["n_components"],
            device=torch.device(is_cuda if isinstance(is_cuda, str) else "cuda" if is_cuda else "cpu"),
        )
    elif model_dict["type"] == "Triangular_dist":
        # NOTE(review): serialized loc/a/b are numpy arrays — confirm broadcast_all
        # in Triangular_dist.__init__ accepts them on this torch version.
        model = Triangular_dist(
            loc=model_dict["loc"],
            a=model_dict["a"],
            b=model_dict["b"],
        )
    else:
        raise Exception("Type {} is not valid!".format(model_dict["type"]))
    return model
| 208,307 | 46.428962 | 300 | py |
theedhum-nandrum | theedhum-nandrum-master/src/tn/multiclassrnnclassifier.py | """
@author sanjeethr, oligoglot
Thanks to Susan Li for this step by step guide: https://towardsdatascience.com/multi-class-text-classification-with-lstm-1590bee1bd17
"""
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import sys, os
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras import Sequential
from keras.layers import Embedding, SpatialDropout1D, LSTM, Dense
from keras.callbacks import EarlyStopping
from keras.optimizers import Adam
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer
from sklearn.metrics import classification_report
from libindic.soundex import Soundex
from lib.feature_utils import load_docs, get_emojis_from_text, get_doc_len_range
sys.path.append(os.path.join(os.path.dirname(sys.path[0]),'extern', 'indic_nlp_library'))
from indicnlp.normalize.indic_normalize import BaseNormalizer
try:
from indictrans import Transliterator
except ImportError:
print('Please install indic-trans from git: https://github.com/libindic/indic-trans')
ta_trans = Transliterator(source='eng', target='tam', build_lookup=True)
ml_trans = Transliterator(source='eng', target='mal', build_lookup=True)
# The maximum number of words to be used. (most frequent)
MAX_NB_WORDS = 50000
# Max number of words in each review.
MAX_SEQUENCE_LENGTH = 150
# This is fixed.
EMBEDDING_DIM = 100
# Shared module-level state: the tokenizer is (re)fitted inside load_data and the
# soundexer is used by append_soundex.
tokenizer = Tokenizer(num_words=MAX_NB_WORDS, filters='!"#$%&()*+,-./:;<=>?@[\]^_`{|}~', lower=True)
soundexer = Soundex()
def load_language_maps(mapfile):
    """Parse a TSV of ``text<TAB>lang<TAB>confidence`` rows.

    Returns a dict mapping each text to a ``(language, confidence)`` tuple;
    later rows with a duplicate text overwrite earlier ones.
    """
    with open(mapfile, 'r') as handle:
        rows = (line.rstrip().split('\t') for line in handle)
        return {text: (lang_code, float(conf)) for text, lang_code, conf in rows}
def get_language_tag(text):
    """Look up the pre-detected ``(language, confidence)`` pair for ``text``.

    Falls back to ``('unknown', 0.0)`` when the text was never tagged. Relies
    on the module-level ``lmap`` built by ``load_language_maps``.
    """
    try:
        return lmap[text]
    except KeyError:
        return ('unknown', 0.0)
def append_language_tag(text):
    """Append language-agreement features to ``text``: the detected language,
    the target language (module-level ``lang``) and an agreement score in
    {0, 0.5, 1} derived from the detector's confidence.
    """
    p_lang, conf = get_language_tag(text)
    if p_lang == lang or p_lang == (lang + 'en'):
        # google agrees with some confidence
        agreement = 1
    elif conf < 0.5:
        # google says not-tamil, but weakly
        agreement = 0.5
    else:
        # google clearly says not-tamil
        agreement = 0
    return ' '.join((' ', text, p_lang, lang, str(agreement), ' '))
def append_emoji_sentiment(text):
    """Append the emojis found in ``text`` and their sentiment label as extra tokens."""
    emojis, sentiment = get_emojis_from_text(text)
    return ' '.join((' ', text, str(emojis), sentiment, ' '))
def append_soundex(text):
    """Transliterate romanized text into the target script (module-level ``lang``,
    'ta' or 'ml') and append a soundex code for every word.
    """
    if lang == 'ta':
        text = ta_trans.transform(text)
    if lang == 'ml':
        text = ml_trans.transform(text)
    soundexes = [soundexer.soundex(word) for word in text.split()]
    return ' ' + text + ' ' + ' '.join(soundexes) + ' '
def append_doc_len_range(text):
    """Return the document-length bucket for ``text``, padded with spaces.

    NOTE(review): unlike the other ``append_*`` helpers this does not include
    ``text`` in its return value, yet it is applied as a replacement over the
    text column in ``load_data`` — confirm ``get_doc_len_range`` preserves the
    text, otherwise the previously appended features are discarded.
    """
    return ' ' + get_doc_len_range(text) + ' '
def load_data(df, mode, lb = None):
    """Featurize a dataframe and turn it into padded token sequences.

    Returns ``(X, Y, lb)`` where Y holds binarized labels in 'train'/'test'
    mode (fitting ``lb`` on first use) or the row ids in 'pred' mode.
    Side effect: refits the module-level tokenizer on every call.
    """
    df.info()
    df = df.reset_index(drop=True)
    # Append the engineered features as extra text tokens.
    df['text'] = df['text'].apply(append_emoji_sentiment)
    df['text'] = df['text'].apply(append_language_tag)
    df['text'] = df['text'].apply(append_soundex)
    df['text'] = df['text'].apply(append_doc_len_range)
    tokenizer.fit_on_texts([normalizer.normalize (text) for text in df.text.values])
    word_index = tokenizer.word_index
    print('Found %s unique tokens.' % len(word_index))
    X = tokenizer.texts_to_sequences(df.text.values)
    X = pad_sequences(X, maxlen=MAX_SEQUENCE_LENGTH)
    print('Shape of data tensor:', X.shape)
    if mode == 'pred':
        # Prediction mode: no labels; return the ids so output rows can be matched.
        Y = df.id.values
    else:
        print(df.category.value_counts())
        if lb is None:
            lb = LabelBinarizer()
            Y = lb.fit_transform(df.category.values.reshape(-1, 1))
        else:
            Y = lb.transform(df.category.values.reshape(-1, 1))
        print('Shape of label tensor:', Y.shape)
    return (X, Y, lb)
# CLI: <lang> <train_tsv> <dev_tsv> <predict_tsv> <output_file>
lang, train_file, test_file, predict_file, outfile = sys.argv[1:6]
normalizer = BaseNormalizer(lang)
# Pre-computed language detections used by append_language_tag.
lmap = load_language_maps('../../resources/data/alltextslang.txt')
#train_file = '../../resources/data/tamil_train.tsv'
train_df = pd.read_csv(train_file, sep='\t')
X_train, Y_train, lb = load_data(train_df, 'train')
#test_file = '../../resources/data/tamil_dev.tsv'
test_df = pd.read_csv(test_file, sep='\t')
X_test, Y_test, lb = load_data(test_df, 'test', lb)
# X_train, X_test, Y_train, Y_test = train_test_split(X,Y, test_size = 0.10, random_state = 42)
print(X_train.shape,Y_train.shape)
print(X_test.shape,Y_test.shape)
# Per-language LSTM hyperparameters (dropout rates and epoch counts were tuned separately).
if lang == 'ta':
    model = Sequential()
    model.add(Embedding(MAX_NB_WORDS, EMBEDDING_DIM, input_length=X_train.shape[1]))
    model.add(SpatialDropout1D(0.8))
    model.add(LSTM(100, dropout=0.7, recurrent_dropout=0.5))
    model.add(Dense(5, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer=Adam(learning_rate=0.0001), metrics=['accuracy'])
    epochs = 15
    batch_size = 64
if lang == 'ml':
    model = Sequential()
    model.add(Embedding(MAX_NB_WORDS, EMBEDDING_DIM, input_length=X_train.shape[1]))
    model.add(SpatialDropout1D(0.5))
    #model.add(LSTM(100, dropout=0.3, recurrent_dropout=0.3, return_sequences=True))
    model.add(LSTM(100, dropout=0.3, recurrent_dropout=0.3))
    model.add(Dense(5, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer=Adam(learning_rate=0.0001), metrics=['accuracy'])
    epochs = 10
    batch_size = 64
history = model.fit(X_train, Y_train, epochs=epochs, batch_size=batch_size,validation_split=0.1,callbacks=[EarlyStopping(monitor='val_loss', patience=3, min_delta=0.0001)])
# accr = model.evaluate(X_test,Y_test)
# print('Test set\n Loss: {:0.3f}\n Accuracy: {:0.3f}'.format(accr[0],accr[1]))
Y_test_idx = np.argmax(Y_test, axis=1) # Convert one-hot to index
# NOTE(review): predict_classes was removed in TF 2.6 — may need np.argmax(model.predict(...)).
Y_pred = model.predict_classes(X_test)
print(classification_report(Y_test_idx, Y_pred))
# Sanity-check prediction on a single hand-written review.
new_review = ['Thalaiva superstar Rajinikanth number one mass Hero']
seq = tokenizer.texts_to_sequences(new_review)
padded = pad_sequences(seq, maxlen=MAX_SEQUENCE_LENGTH)
pred = model.predict(padded)
print(pred, lb.inverse_transform(pred))
# Write TSV predictions for the unlabeled file.
with open(outfile, 'w') as outf:
    test_df = pd.read_csv(predict_file, sep='\t')
    X_pred, ID_pred, lb = load_data(test_df, 'pred', lb)
    Y_pred = lb.inverse_transform(model.predict(X_pred)).flatten()
    outf.write('id\ttext\tlabel\n')
    for idx, text, pred_category in zip(ID_pred, test_df.text.values, Y_pred):
        #print(idx, text, pred_category)
outf.write('\t'.join((idx, text, pred_category)) + '\n') | 6,528 | 38.331325 | 172 | py |
deephyper | deephyper-master/setup.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Note: To use the 'upload' functionality of this file, you must:
# $ pip install twine
import os
import platform
import sys
from shutil import rmtree
from setuptools import Command, setup
# path of the directory where this file is located
here = os.path.abspath(os.path.dirname(__file__))
# query platform informations, e.g. 'macOS-12.0.1-arm64-arm-64bit'
platform_infos = platform.platform()
# What packages are required for this module to be executed?
REQUIRED = [
"ConfigSpace>=0.4.20",
"dm-tree",
"Jinja2<3.1",
"numpy", # ==1.19.4", # working with 1.20.1
"pandas>=0.24.2",
"packaging",
"parse",
"scikit-learn>=0.23.1",
"scipy>=1.7",
"tqdm>=4.64.0",
"pyyaml",
]
# !Requirements for Neural Architecture Search (NAS)
REQUIRED_NAS = ["networkx", "pydot"]
REQUIRED_NAS_PLATFORM = {
"default": ["tensorflow>=2.0.0", "tensorflow_probability"],
"macOS-arm64": ["tensorflow_probability~=0.14"],
}
# if "macOS" in platform_infos and "arm64" in platform_infos:
# REQUIRED_NAS = REQUIRED_NAS + REQUIRED_NAS_PLATFORM["macOS-arm64"]
# else: # x86_64
REQUIRED_NAS = REQUIRED_NAS + REQUIRED_NAS_PLATFORM["default"]
# !Requirements for Pipeline Optimization for ML (popt)
REQUIRED_POPT = ["xgboost"]
# !Requirements for Automated Deep Ensemble with Uncertainty Quantification (AutoDEUQ)
REQUIRED_AUTODEUQ = REQUIRED_NAS + ["ray[default]>=1.3.0"]
# !Transfer Learning for Bayesian Optimization with SVD
REQUIRED_TL_SDV = ["sdv>=0.17.1"]
# What packages are optional?
EXTRAS = {
"autodeuq": REQUIRED_AUTODEUQ, # automated deep ensemble with uncertainty quantification
"automl": ["xgboost"], # for automl with scikit-learn
"jax-cpu": ["jax[cpu]>=0.3.25", "numpyro[cpu]"],
"jax-cuda": ["jax[cuda]>=0.3.25", "numpyro[cuda]"],
"hps": [], # hyperparameter search (already the base requirements)
"nas": REQUIRED_NAS, # neural architecture search
"hps-tl": REQUIRED_TL_SDV, # transfer learning for bayesian optimization,
"mpi": ["mpi4py>=3.1.3"],
"ray": ["ray[default]>=1.3.0"],
"redis": ["redis[hiredis]"],
"dev": [
# Test
"codecov",
"pytest",
"pytest-cov",
# Packaging
"twine",
# Formatter and Linter
"black==22.6.0",
"flake8==5.0.4",
"pre-commit",
"rstcheck",
# Documentation
"GitPython",
"ipython",
"nbsphinx",
"Sphinx~=3.5.4",
"sphinx-book-theme==0.3.2",
"sphinx-copybutton",
"sphinx-gallery",
"sphinx_lfs_content",
"sphinx-togglebutton",
],
"analytics": [
"altair",
"jupyter",
"jupyter_contrib_nbextensions>=0.5.1",
"nbconvert<6",
"streamlit",
"streamlit-aggrid",
"tinydb",
],
"hvd": ["horovod>=0.21.3", "mpi4py>=3.0.0"],
}
# Default dependencies for DeepHyper
DEFAULT_DEPENDENCIES = REQUIRED[:]
DEFAULT_DEPENDENCIES += EXTRAS["nas"]
DEFAULT_DEPENDENCIES += EXTRAS["autodeuq"]
DEFAULT_DEPENDENCIES += EXTRAS["hps-tl"]
DEFAULT_DEPENDENCIES += EXTRAS["jax-cpu"]
EXTRAS["default"] = DEFAULT_DEPENDENCIES
# Useful commands to build/upload the wheel to PyPI
class UploadCommand(Command):
    """Support setup.py upload."""
    description = "Build and publish the package."
    # No command-line options for this command.
    user_options = []
    @staticmethod
    def status(s):
        """Prints things in bold."""
        print("\033[1m{0}\033[0m".format(s))
    def initialize_options(self):
        pass
    def finalize_options(self):
        pass
    def run(self):
        """Clean dist/, build sdist + universal wheel, upload to PyPI, then exit."""
        try:
            self.status("Removing previous builds…")
            rmtree(os.path.join(here, "dist"))
        # dist/ may not exist on a fresh checkout — that's fine.
        except OSError:
            pass
        self.status("Building Source and Wheel (universal) distribution…")
        os.system("{0} setup.py sdist bdist_wheel --universal".format(sys.executable))
        self.status("Uploading the package to PyPI via Twine…")
        os.system("twine upload dist/*")
        sys.exit()
class TestUploadCommand(Command):
    """Support setup.py testupload."""
    description = "Build and publish the package to test.pypi."
    # No command-line options for this command.
    user_options = []
    @staticmethod
    def status(s):
        """Prints things in bold."""
        print("\033[1m{0}\033[0m".format(s))
    def initialize_options(self):
        pass
    def finalize_options(self):
        pass
    def run(self):
        """Clean dist/, build sdist + universal wheel, upload to Test PyPI, then exit."""
        try:
            self.status("Removing previous builds…")
            rmtree(os.path.join(here, "dist"))
        # dist/ may not exist on a fresh checkout — that's fine.
        except OSError:
            pass
        self.status("Building Source and Wheel (universal) distribution…")
        os.system("{0} setup.py sdist bdist_wheel --universal".format(sys.executable))
        self.status("Uploading the package to PyPI via Twine…")
        os.system("twine upload --repository-url https://test.pypi.org/legacy/ dist/*")
        sys.exit()
class TestInstallCommand(Command):
    """Support setup.py testinstall"""
    description = "Install deephyper from TestPyPI."
    # No command-line options for this command.
    user_options = []
    @staticmethod
    def status(s):
        """Prints things in bold."""
        print("\033[1m{0}\033[0m".format(s))
    def initialize_options(self):
        pass
    def finalize_options(self):
        pass
    def run(self):
        """pip-install the package from the Test PyPI index, then exit."""
        self.status("Downloading the package from Test PyPI and installing it")
        os.system("pip install --index-url https://test.pypi.org/simple/ deephyper")
        sys.exit()
# Where the magic happens:
# Dependencies and custom commands are declared here; the remaining package
# metadata presumably lives in setup.cfg / pyproject.toml — verify.
setup(
    install_requires=REQUIRED,
    extras_require=EXTRAS,
    cmdclass={
        "upload": UploadCommand,
        "testupload": TestUploadCommand,
        "testinstall": TestInstallCommand,
    },
)
| 5,761 | 25.552995 | 93 | py |
deephyper | deephyper-master/tests/deephyper/nas/test_node.py | import unittest
import pytest
@pytest.mark.fast
@pytest.mark.nas
class NodeTest(unittest.TestCase):
    """Unit tests for search-space node types (MirrorNode / MimeNode)."""
    def test_mirror_node(self):
        """Selecting an op on a VariableNode must be reflected by its MirrorNode."""
        import tensorflow as tf
        from deephyper.nas.node import MirrorNode, VariableNode
        from deephyper.nas.operation import operation
        Dense = operation(tf.keras.layers.Dense)
        vnode = VariableNode()
        vop = Dense(10)
        vnode.add_op(vop)
        vnode.add_op(Dense(20))
        mnode = MirrorNode(vnode)
        # Choose op 0 on the variable node: the mirror must expose the same op.
        vnode.set_op(0)
        assert vnode.op == vop
        assert mnode.op == vop
    def test_mime_node(self):
        """A MimeNode follows the mimed node's choice index but applies its own ops."""
        import tensorflow as tf
        from deephyper.nas.node import MimeNode, VariableNode
        from deephyper.nas.operation import operation
        Dense = operation(tf.keras.layers.Dense)
        vnode = VariableNode()
        vop = Dense(10)
        vnode.add_op(vop)
        vnode.add_op(Dense(20))
        mnode = MimeNode(vnode)
        mop = Dense(30)
        mnode.add_op(mop)
        mnode.add_op(Dense(40))
        # Choosing index 0 on vnode selects index 0 of the mime node's own ops.
        vnode.set_op(0)
        assert vnode.op == vop
        assert mnode.op == mop
| 1,118 | 21.836735 | 63 | py |
deephyper | deephyper-master/tests/deephyper/nas/test_new_api.py | import pytest
@pytest.mark.fast
@pytest.mark.nas
def test_basic_space(verbose=0):
    """Build a minimal KSearchSpace (one variable Dense node) and sample two models."""
    import tensorflow as tf
    from deephyper.nas import KSearchSpace
    from deephyper.nas.node import VariableNode, ConstantNode
    from deephyper.nas.operation import operation, Identity
    Dense = operation(tf.keras.layers.Dense)
    class BasicSpace(KSearchSpace):
        def __init__(self, input_shape, output_shape, batch_size=None, *args, **kwargs):
            super().__init__(
                input_shape, output_shape, batch_size=batch_size, *args, **kwargs
            )
        def build(self):
            # input -> variable Dense (Identity or Dense(1..999)) -> output Dense.
            input_node = self.input[0]
            dense = VariableNode()
            dense.add_op(Identity())
            for i in range(1, 1000):
                dense.add_op(Dense(i))
            self.connect(input_node, dense)
            output_node = ConstantNode(Dense(self.output_shape[0]))
            self.connect(dense, output_node)
    space = BasicSpace(input_shape=(1,), output_shape=(1,))
    space.build()
    # Sample once with an explicit choice and once at random.
    model_1 = space.sample([1])
    if verbose:
        model_1.summary()
    model_2 = space.sample()
    if verbose:
        model_2.summary()
| 1,177 | 24.608696 | 88 | py |
deephyper | deephyper-master/tests/deephyper/nas/test_trainer_keras_regressor.py | import unittest
import pytest
@pytest.mark.slow
@pytest.mark.nas
class TrainerKerasRegressorTest(unittest.TestCase):
    def test_trainer_regressor_train_valid_with_one_input(self):
        """Train a randomly sampled architecture on the single-input linear
        regression problem for 2 epochs and check that training produced a
        metrics dict (not the failure sentinel sys.float_info.max).
        """
        import sys
        from random import random
        import deephyper.core.utils
        import numpy as np
        from deephyper.nas.trainer import BaseTrainer
        from deephyper.test.nas.linearReg.problem import Problem
        config = Problem.space
        config["hyperparameters"]["num_epochs"] = 2
        # load functions
        load_data = deephyper.core.utils.load_attr(config["load_data"]["func"])
        config["load_data"]["func"] = load_data
        config["search_space"]["class"] = deephyper.core.utils.load_attr(
            config["search_space"]["class"]
        )
        # Loading data
        kwargs = config["load_data"].get("kwargs")
        (tX, ty), (vX, vy) = load_data() if kwargs is None else load_data(**kwargs)
        # Set data shape
        input_shape = np.shape(tX)[1:] # interested in shape of data not in length
        output_shape = np.shape(ty)[1:]
        config["data"] = {"train_X": tX, "train_Y": ty, "valid_X": vX, "valid_Y": vy}
        search_space = config["search_space"]["class"](
            input_shape, output_shape, **config["search_space"]["kwargs"]
        ).build()
        # Pick a random architecture from the space.
        arch_seq = [random() for i in range(search_space.num_nodes)]
        search_space.set_ops(arch_seq)
        search_space.plot("trainer_keras_regressor_test.dot")
        if config.get("preprocessing") is not None:
            preprocessing = deephyper.core.utils.load_attr(
                config["preprocessing"]["func"]
            )
            config["preprocessing"]["func"] = preprocessing
        else:
            config["preprocessing"] = None
        model = search_space.create_model()
        trainer = BaseTrainer(config=config, model=model)
        res = trainer.train()
        assert res != sys.float_info.max and type(res) is dict
    def test_trainer_regressor_train_valid_with_multiple_ndarray_inputs(self):
        """Same as the single-input test but on the multi-input linear regression
        problem: ``tX`` is a list of arrays, so one input shape per input tensor.
        """
        import sys
        from random import random
        import deephyper.core.utils
        import numpy as np
        from deephyper.nas.trainer import BaseTrainer
        from deephyper.test.nas.linearRegMultiInputs.problem import Problem
        config = Problem.space
        config["hyperparameters"]["num_epochs"] = 2
        # load functions
        load_data = deephyper.core.utils.load_attr(config["load_data"]["func"])
        config["load_data"]["func"] = load_data
        config["search_space"]["class"] = deephyper.core.utils.load_attr(
            config["search_space"]["class"]
        )
        # Loading data
        kwargs = config["load_data"].get("kwargs")
        (tX, ty), (vX, vy) = load_data() if kwargs is None else load_data(**kwargs)
        # Set data shape
        # interested in shape of data not in length
        input_shape = [np.shape(itX)[1:] for itX in tX]
        output_shape = np.shape(ty)[1:]
        config["data"] = {"train_X": tX, "train_Y": ty, "valid_X": vX, "valid_Y": vy}
        search_space = config["search_space"]["class"](
            input_shape, output_shape, **config["search_space"]["kwargs"]
        ).build()
        # Pick a random architecture from the space.
        arch_seq = [random() for i in range(search_space.num_nodes)]
        search_space.set_ops(arch_seq)
        search_space.plot("trainer_keras_regressor_test.dot")
        if config.get("preprocessing") is not None:
            preprocessing = deephyper.core.utils.load_attr(
                config["preprocessing"]["func"]
            )
            config["preprocessing"]["func"] = preprocessing
        else:
            config["preprocessing"] = None
        model = search_space.create_model()
        trainer = BaseTrainer(config=config, model=model)
        res = trainer.train()
        assert res != sys.float_info.max and type(res) is dict
def test_trainer_regressor_train_valid_with_multiple_generator_inputs(self):
import sys
from deephyper.nas.run._util import get_search_space, load_config, setup_data
from deephyper.nas.trainer import BaseTrainer
from deephyper.test.nas.linearReg.problem import Problem
from deephyper.test.nas.linearRegMultiInputsGen import Problem
config = Problem.space
load_config(config)
input_shape, output_shape = setup_data(config)
search_space = get_search_space(config, input_shape, output_shape, 42)
config["hyperparameters"]["num_epochs"] = 2
model = search_space.sample()
trainer = BaseTrainer(config=config, model=model)
res = trainer.train()
assert res != sys.float_info.max and type(res) is dict
# Allow running one test directly (outside pytest) for quick debugging.
if __name__ == "__main__":
    test = TrainerKerasRegressorTest()
    test.test_trainer_regressor_train_valid_with_multiple_ndarray_inputs()
| 4,879 | 33.125874 | 85 | py |
deephyper | deephyper-master/tests/deephyper/nas/test_keras_search_space.py | import unittest
import pytest
@pytest.mark.nas
class TestKSearchSpace(unittest.TestCase):
    """Tests building and sampling minimal ``KSearchSpace`` definitions.

    Each test defines a small search space inline, builds it, and samples a
    Keras model from it to check that construction succeeds end to end.
    The redundant ``__init__`` overrides of the inline spaces (which only
    forwarded the same arguments to ``super().__init__``) have been removed.
    """

    def test_create(self):
        """A single VariableNode connected to the input."""
        import tensorflow as tf

        from deephyper.nas import KSearchSpace
        from deephyper.nas.node import VariableNode
        from deephyper.nas.operation import operation

        Dense = operation(tf.keras.layers.Dense)

        class TestSpace(KSearchSpace):
            def build(self):
                vnode = VariableNode()
                self.connect(self.input_nodes[0], vnode)
                vnode.add_op(Dense(1))
                return self

        space = TestSpace((5,), (1,)).build()
        model = space.sample()

    def test_create_more_nodes(self):
        """Two chained VariableNodes."""
        import tensorflow as tf

        from deephyper.nas import KSearchSpace
        from deephyper.nas.node import VariableNode
        from deephyper.nas.operation import operation

        Dense = operation(tf.keras.layers.Dense)

        class TestSpace(KSearchSpace):
            def build(self):
                vnode1 = VariableNode()
                self.connect(self.input_nodes[0], vnode1)
                vnode1.add_op(Dense(10))

                vnode2 = VariableNode()
                vnode2.add_op(Dense(1))
                self.connect(vnode1, vnode2)
                return self

        space = TestSpace((5,), (1,)).build()
        model = space.sample()

    def test_create_multiple_inputs_with_one_vnode(self):
        """Two inputs concatenated before a single VariableNode."""
        import tensorflow as tf

        from deephyper.nas import KSearchSpace
        from deephyper.nas.node import ConstantNode, VariableNode
        from deephyper.nas.operation import operation, Concatenate

        Dense = operation(tf.keras.layers.Dense)

        class TestSpace(KSearchSpace):
            def build(self):
                merge = ConstantNode()
                merge.set_op(Concatenate(self, self.input_nodes))

                vnode1 = VariableNode()
                self.connect(merge, vnode1)
                vnode1.add_op(Dense(1))
                return self

        space = TestSpace([(5,), (5,)], (1,)).build()
        model = space.sample()
| 2,481 | 28.2 | 66 | py |
deephyper | deephyper-master/tests/deephyper/keras/layers/padding_test.py | import pytest
@pytest.mark.fast
@pytest.mark.nas
def test_padding_layer():
    """The Padding layer pads the last axis with 1 element on each side: (3, 1) -> (3, 3)."""
    import tensorflow as tf
    import numpy as np
    from deephyper.keras.layers import Padding
    model = tf.keras.Sequential()
    # [[1, 1]]: pad 1 element before and 1 after along the (single) feature axis.
    model.add(Padding([[1, 1]]))
    data = np.random.random((3, 1))
    # Sanity-check the input shape before prediction.
    shape_data = np.shape(data)
    assert shape_data == (3, 1)
    res = model.predict(data, batch_size=1)
    res_shape = np.shape(res)
    assert res_shape == (3, 3)
| 450 | 20.47619 | 46 | py |
deephyper | deephyper-master/docs/conf.py | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
import git
import sphinx_book_theme
sys.path.insert(0, os.path.abspath(".."))
# -- Project information -----------------------------------------------------
project = "DeepHyper"
copyright = "2018-2022, Argonne"
author = "Argonne"
# The short X.Y version
# Execute the package's __version__.py to read version info without importing deephyper.
about = {}
with open(f"../deephyper/__version__.py") as f:
    exec(f.read(), about)
version = about["__version__"]
# The full version, including alpha/beta/rc tags
# NOTE(review): this condition looks inverted -- when __version__ is the empty
# string the release is just "v"; presumably the intent was to test
# __version_suffix__ for emptiness instead. Confirm before changing.
if about["__version__"] == "":
    release = f'v{about["__version__"]}'
else:
    release = f'v{about["__version__"]}-{about["__version_suffix__"]}'
# PULL Tutorials
branch_name_map = {"master": "main", "latest": "main", "develop": "develop"}
if os.environ.get("READTHEDOCS"):
doc_version = os.environ["READTHEDOCS_VERSION"]
else:
github_repo = git.Repo(search_parent_directories=True)
doc_version = github_repo.active_branch.name
tutorial_branch = branch_name_map.get(doc_version, "develop")
tutorials_github_link = "https://github.com/deephyper/tutorials.git"
tutorials_dest_dir = "tutorials"
def pull_tutorials(github_link, dest_dir, tutorial_branch):
    """Fetch a shallow copy of the tutorials repository into *dest_dir*.

    The destination is wiped first, and the cloned repository's ``.git``
    folder is removed afterwards so the tutorials are vendored as plain files.
    """
    clone_cmd = (
        f"git clone --depth=1 --branch={tutorial_branch} {github_link} {dest_dir}"
    )
    for command in (f"rm -rf {dest_dir}/", clone_cmd, f"rm -rf {dest_dir}/.git"):
        os.system(command)
pull_tutorials(tutorials_github_link, tutorials_dest_dir, tutorial_branch)
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"nbsphinx",
"sphinx_book_theme",
"sphinx_copybutton",
"sphinx_gallery.gen_gallery",
"sphinx_lfs_content",
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"sphinx.ext.coverage",
"sphinx.ext.doctest",
"sphinx.ext.githubpages",
"sphinx.ext.ifconfig",
"sphinx.ext.intersphinx",
"sphinx.ext.mathjax",
"sphinx.ext.napoleon",
"sphinx.ext.todo",
"sphinx.ext.viewcode",
]
autosummary_generate = True
autosummary_imported_members = True
# Add any paths that contain templates here, relative to this directory.
templates_path = [
"_templates",
os.path.join(sphinx_book_theme.get_html_theme_path(), "components"),
]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
source_suffix = {".rst": "restructuredtext"}
# The master toctree document.
master_doc = "index"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = [
"_build",
"_templates",
"Thumbs.db",
".DS_Store",
"examples/*.ipynb",
]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_book_theme"
html_theme_path = [sphinx_book_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_logo = "_static/logo/medium.png"
html_theme_options = {
# header settings
"repository_url": "https://github.com/deephyper/deephyper",
"use_repository_button": True,
"use_issues_button": True,
"use_edit_page_button": True,
"repository_branch": "develop",
"path_to_docs": "docs",
"use_download_button": True,
# sidebar settings
"show_navbar_depth": 1,
"logo_only": True,
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "deephyperdoc"
# CopyButton Settings
copybutton_prompt_text = r">>> |\.\.\. |\$ |In \[\d*\]: | {2,5}\.\.\.: | {5,8}: "
copybutton_prompt_is_regexp = True
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, "deephyper.tex", "deephyper Documentation", "ArgonneMCS", "manual")
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "deephyper", "deephyper Documentation", [author], 1)]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"deephyper",
"deephyper Documentation",
author,
"Automated Machine Learning Software for HPC",
"Miscellaneous",
)
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ["search.html"]
# -- Extension configuration -------------------------------------------------
# -- Options for intersphinx extension ---------------------------------------
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {"python": ("https://docs.python.org/3", None)}
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# makes sphinx do a mock import of mpi4py so it’s not broken when you try to do auto-docs and import mpi4py
autodoc_mock_imports = [
"horovod",
"joblib",
"matplotlib",
"mpi4py",
"nbformat",
"networkx",
"ray",
"redis",
"sklearn",
"skopt",
"tensorflow_probability",
"tensorflow",
"tqdm",
"xgboost",
]
autosummary_mock_imports = autodoc_mock_imports
# Remove <BLANKLINE>
trim_doctest_flags = True
# Sphinx Gallery
sphinx_gallery_conf = {
"examples_dirs": "../examples", # path to your example scripts
"gallery_dirs": "examples", # path to where to save gallery generated output
"filename_pattern": "/plot_",
"ignore_pattern": r"_util\.py",
}
def setup(app):
    """Sphinx extension entry point: attach custom CSS/JS to every generated page."""
    app.add_css_file("custom.css")
    app.add_js_file("custom.js")
| 8,809 | 27.79085 | 107 | py |
deephyper | deephyper-master/deephyper/stopper/_lcmodel_stopper.py | import sys
from functools import partial
import jax
import jax.numpy as jnp
import numpy as np
import numpyro
import numpyro.distributions as dist
from numpyro.infer import MCMC, NUTS
from scipy.optimize import least_squares
from sklearn.base import BaseEstimator, RegressorMixin
from sklearn.utils import check_random_state
from sklearn.utils.validation import check_array, check_is_fitted, check_X_y
from deephyper.stopper._stopper import Stopper
# Budget allocation models
def b_lin2(z, nu=(1, 1)):
    """Linear budget-allocation model: ``b(z) = nu[1] * (z - 1) + nu[0]``.

    The default is a tuple (was a mutable list) to avoid the shared
    mutable-default-argument pitfall; any indexable pair is accepted.
    """
    return nu[1] * (z - 1) + nu[0]
def b_exp2(z, nu=(1, 2)):
    """Exponential budget-allocation model: ``b(z) = nu[0] * nu[1]**(z - 1)``.

    The default is a tuple (was a mutable list) to avoid the shared
    mutable-default-argument pitfall; any indexable pair is accepted.
    """
    return nu[0] * jnp.power(nu[1], z - 1)
# Learning curves models
def f_lin2(z, b, rho):
    """Linear learning-curve model: ``rho[0] + rho[1] * b(z)``."""
    intercept, slope = rho[0], rho[1]
    return slope * b(z) + intercept
def f_loglin2(z, b, rho):
    """Linear-in-log-budget learning-curve model, negated for maximization."""
    log_budget = jnp.log(b(z))
    log_pred = rho[0] + rho[1] * log_budget
    return -jnp.exp(log_pred)
def f_loglin3(z, b, rho):
    """Quadratic-in-log-budget learning-curve model, negated for maximization."""
    log_budget = jnp.log(b(z))
    log_pred = rho[2] * jnp.power(log_budget, 2) + rho[1] * log_budget + rho[0]
    return -jnp.exp(log_pred)
def f_loglin4(z, b, rho):
    """Cubic-in-log-budget learning-curve model, negated for maximization."""
    log_budget = jnp.log(b(z))
    log_pred = (
        rho[3] * jnp.power(log_budget, 3)
        + rho[2] * jnp.power(log_budget, 2)
        + rho[1] * log_budget
        + rho[0]
    )
    return -jnp.exp(log_pred)
def f_pow3(z, b, rho):
    """Three-parameter power-law learning-curve model."""
    budget = b(z)
    return rho[0] - rho[1] * budget ** -rho[2]
def f_mmf4(z, b, rho):
    """Four-parameter Morgan-Mercer-Flodin (MMF) learning-curve model."""
    budget_pow = jnp.power(b(z), rho[3])
    return (rho[0] * rho[1] + rho[2] * budget_pow) / (rho[1] + budget_pow)
def f_vapor3(z, b, rho):
    """Three-parameter vapor-pressure learning-curve model.

    Fixed: the log term used ``np.log`` while every sibling model uses
    ``jax.numpy``; these models are vmapped/jitted by the regressor, and
    NumPy ops break under JAX tracing, so use ``jnp.log``.
    """
    return rho[0] + rho[1] / b(z) + rho[2] * jnp.log(b(z))
def f_logloglin2(z, b, rho):
    """Log-log linear learning-curve model: ``log(rho[0] * log(b(z)) + rho[1])``."""
    inner = rho[0] * jnp.log(b(z)) + rho[1]
    return jnp.log(inner)
def f_hill3(z, b, rho):
    # Three-parameter Hill-equation learning-curve model.
    # NOTE(review): the denominator uses ``kappa * eta``; the standard Hill
    # form is ``kappa ** eta + b(z) ** eta``. Confirm whether this is an
    # intentional reparameterization or a typo before changing.
    ymax, eta, kappa = rho
    return ymax * (b(z) ** eta) / (kappa * eta + b(z) ** eta)
def f_logpow3(z, b, rho):
    """Three-parameter log-power learning-curve model."""
    scaled = b(z) / jnp.exp(rho[1])
    return rho[0] / (1 + scaled ** rho[2])
def f_pow4(z, b, rho):
    """Four-parameter power-law learning-curve model."""
    shifted = rho[0] * b(z) + rho[1]
    return rho[2] - shifted ** (-rho[3])
def f_exp4(z, b, rho):
    """Four-parameter exponential learning-curve model."""
    exponent = -rho[0] * (b(z) ** rho[3]) + rho[1]
    return rho[2] - jnp.exp(exponent)
def f_janoschek4(z, b, rho):
    """Four-parameter Janoschek growth learning-curve model."""
    decay = jnp.exp(-rho[2] * (b(z) ** rho[3]))
    return rho[0] - (rho[0] - rho[1]) * decay
def f_weibull4(z, b, rho):
    """Four-parameter Weibull learning-curve model."""
    decay = jnp.exp(-((rho[2] * b(z)) ** rho[3]))
    return rho[0] - (rho[0] - rho[1]) * decay
def f_ilog2(z, b, rho):
    """Two-parameter inverse-log learning-curve model."""
    penalty = rho[0] / jnp.log(b(z) + 1)
    return rho[1] - penalty
# Utility to estimate parameters of learning curve model
# The combination of "partial" and "static_argnums" is necessary
# with the "f" lambda function passed as argument
@partial(jax.jit, static_argnums=(1,))
def residual_least_square(rho, f, z, y):
    """Residual for least squares: ``f(z, rho) - y``."""
    # `f` is marked static so jit specializes per model function instead of
    # trying (and failing) to trace the callable as a value.
    return f(z, rho) - y
def prob_model(z=None, y=None, f=None, rho_mu_prior=None, num_obs=None):
    """NumPyro probabilistic model: ``y ~ Normal(f(z, rho), sigma)``.

    ``rho`` gets a Normal prior centered on the least-squares estimate
    (``rho_mu_prior``) and ``sigma`` an Exponential(1) prior. Only the first
    ``num_obs`` entries of the fixed-size buffers ``z``/``y`` are observed.
    """
    rho_mu_prior = jnp.array(rho_mu_prior)
    rho_sigma_prior = 1.0
    rho = numpyro.sample("rho", dist.Normal(rho_mu_prior, rho_sigma_prior))
    sigma = numpyro.sample("sigma", dist.Exponential(1.0))  # introducing noise
    # sigma = 0.1
    mu = f(z[:num_obs], rho)
    numpyro.sample("obs", dist.Normal(mu, sigma), obs=y[:num_obs])
@partial(jax.jit, static_argnums=(0,))
def predict_moments_from_posterior(f, X, posterior_samples):
    """Posterior mean and std of ``f(X, rho)`` over MCMC samples of ``rho``."""
    batched_f = jax.vmap(f, in_axes=(None, 0))
    predictions = batched_f(X, posterior_samples)
    return jnp.mean(predictions, axis=0), jnp.std(predictions, axis=0)
class BayesianLearningCurveRegressor(BaseEstimator, RegressorMixin):
    """Bayesian estimator of a parametric learning-curve model.

    A least-squares fit seeds the Normal prior on the curve parameters
    ``rho``; NUTS/MCMC (NumPyro) then samples the posterior, from which
    predictions and exceedance probabilities are computed.
    """

    def __init__(
        self,
        f_model=f_loglin3,
        f_model_num_params=3,
        b_model=b_lin2,
        max_trials_ls_fit=10,
        mcmc_num_warmup=200,
        mcmc_num_samples=1_000,
        n_jobs=-1,
        random_state=None,
        verbose=0,
        batch_size=100,
    ):
        self.b_model = b_model
        # Bind the budget model so f only needs (z, rho) downstream.
        self.f_model = lambda z, rho: f_model(z, self.b_model, rho)
        self.f_nparams = f_model_num_params
        self.mcmc_num_warmup = mcmc_num_warmup
        self.mcmc_num_samples = mcmc_num_samples
        self.max_trials_ls_fit = max_trials_ls_fit
        self.n_jobs = n_jobs  # NOTE(review): stored but unused in this class
        self.random_state = check_random_state(random_state)
        self.verbose = verbose
        # Mean of the Normal prior on rho, refreshed by the least-squares fit.
        self.rho_mu_prior_ = np.zeros((self.f_nparams,))
        self.batch_size = batch_size
        # Fixed-size buffers so the jitted numpyro model sees one input shape.
        self.X_ = np.zeros((self.batch_size,))
        self.y_ = np.zeros((self.batch_size,))
    def fit(self, X, y, update_prior=True):
        """Update the posterior over curve parameters from observations (X, y)."""
        check_X_y(X, y, ensure_2d=False)
        # !Trick for performance to avoid performing JIT again and again
        # !This will fix the shape of inputs of the model for numpyro
        # !see https://github.com/pyro-ppl/numpyro/issues/441
        num_samples = len(X)
        assert num_samples <= self.batch_size
        self.X_[:num_samples] = X[:]
        self.y_[:num_samples] = y[:]
        if update_prior:
            # Center the prior on a least-squares (maximum likelihood) estimate.
            self.rho_mu_prior_[:] = self._fit_learning_curve_model_least_square(X, y)[:]
        if not (hasattr(self, "kernel_")):
            # NOTE(review): `num_samples` is captured by this closure on the
            # FIRST call only (the kernel is cached); later fits with more
            # observations keep the original num_obs -- confirm intended.
            self.kernel_ = NUTS(
                model=lambda z, y, rho_mu_prior: prob_model(
                    z, y, self.f_model, rho_mu_prior, num_obs=num_samples
                ),
            )
            self.mcmc_ = MCMC(
                self.kernel_,
                num_warmup=self.mcmc_num_warmup,
                num_samples=self.mcmc_num_samples,
                progress_bar=self.verbose,
                jit_model_args=True,
            )
        seed = self.random_state.randint(low=0, high=2**32)
        rng_key = jax.random.PRNGKey(seed)
        self.mcmc_.run(rng_key, z=self.X_, y=self.y_, rho_mu_prior=self.rho_mu_prior_)
        if self.verbose:
            self.mcmc_.print_summary()
        return self
    def predict(self, X, return_std=True):
        """Posterior mean (and optionally std) of the curve model at X."""
        posterior_samples = self.predict_posterior_samples(X)
        mean_mu = jnp.mean(posterior_samples, axis=0)
        if return_std:
            std_mu = jnp.std(posterior_samples, axis=0)
            return mean_mu, std_mu
        return mean_mu
    def predict_posterior_samples(self, X):
        """Evaluate the curve model at X for every posterior sample of rho."""
        # Check if fit has been called
        check_is_fitted(self)
        # Input validation
        X = check_array(X, ensure_2d=False)
        posterior_samples = self.mcmc_.get_samples()
        # vmap over the sample axis of rho, broadcasting X to every sample.
        vf_model = jax.vmap(self.f_model, in_axes=(None, 0))
        posterior_mu = vf_model(X, posterior_samples["rho"])
        return posterior_mu
    def prob(self, X, condition):
        """Compute the approximate probability of P(cond(m(X_i), y_i))
        where m is the current fitted model and cond a condition.

        Args:
            X (np.array): An array of inputs.
            condition (callable): A function defining the condition to test.

        Returns:
            array: an array of shape X.
        """
        # Check if fit has been called
        check_is_fitted(self)
        # Input validation
        X = check_array(X, ensure_2d=False)
        posterior_samples = self.mcmc_.get_samples()
        vf_model = jax.vmap(self.f_model, in_axes=(None, 0))
        posterior_mu = vf_model(X, posterior_samples["rho"])
        # Monte-Carlo estimate: fraction of posterior curves satisfying `condition`.
        prob = jnp.mean(condition(posterior_mu), axis=0)
        return prob
    def _fit_learning_curve_model_least_square(
        self,
        z_train,
        y_train,
    ):
        """The learning curve model is assumed to be modeled by 'f' with
        interface f(z, rho). Returns the best rho found over
        `max_trials_ls_fit` random restarts of Levenberg-Marquardt.
        """
        seed = self.random_state.randint(low=0, high=2**32)
        random_state = check_random_state(seed)
        z_train = np.asarray(z_train)
        y_train = np.asarray(y_train)
        # compute the jacobian
        # using the true jacobian is important to avoid problems
        # with numerical errors and approximations! indeed the scale matters
        # a lot when approximating with finite differences
        def fun_wrapper(rho, f, z, y):
            return np.array(residual_least_square(rho, f, z, y))
        # Cache the jitted forward-mode jacobian across calls.
        if not (hasattr(self, "jac_residual_ls_")):
            self.jac_residual_ls_ = partial(jax.jit, static_argnums=(1,))(
                jax.jacfwd(residual_least_square, argnums=0)
            )
        def jac_wrapper(rho, f, z, y):
            return np.array(self.jac_residual_ls_(rho, f, z, y))
        results = []
        mse_hist = []
        for _ in range(self.max_trials_ls_fit):
            rho_init = random_state.randn(self.f_nparams)
            try:
                res_lsq = least_squares(
                    fun_wrapper,
                    rho_init,
                    args=(self.f_model, z_train, y_train),
                    method="lm",
                    jac=jac_wrapper,
                )
            except ValueError:
                # A restart can fail for a bad rho_init; just try another one.
                continue
            mse_res_lsq = np.mean(res_lsq.fun**2)
            mse_hist.append(mse_res_lsq)
            results.append(res_lsq.x)
        # nanargmin: restarts can produce NaN MSE; pick the best finite one.
        i_best = np.nanargmin(mse_hist)
        res = results[i_best]
        return res
def area_learning_curve(z, f, z_max) -> float:
    """Area under a piecewise-constant learning curve up to ``z_max``.

    Each score ``f[i-1]`` is held constant over ``[z[i-1], z[i]]`` (a left
    Riemann sum; ``z`` is monotonically increasing, ``f`` need not be), and
    the last observed score is extended to ``z_max``.
    """
    assert len(z) == len(f)
    assert z[-1] <= z_max
    total = 0
    for z_prev, z_next, score in zip(z[:-1], z[1:], f[:-1]):
        total += (z_next - z_prev) * score
    # Extend the final score to the maximum budget if it was not reached.
    if z[-1] < z_max:
        total += (z_max - z[-1]) * f[-1]
    return total
class LCModelStopper(Stopper):
    """Stopper based on learning curve extrapolation (LCE) to evaluate if the iterations of the learning algorithm
    should be stopped. The LCE is based on a parametric learning curve model (LCM) which is modeling the score as a function of the number of training steps. Training steps can correspond to the number of training epochs, the number of training batches, the number of observed samples or any other quantity that is iterated through during the training process. The LCE is based on the following steps:

    1. An early stopping condition is always checked first. If the early stopping condition is met, the LCE is not applied.
    2. Then, some safeguard conditions are checked to ensure that the LCE can be applied (number of observed steps must be greater or equal to the number of parameters of the LCM).
    3. If the LCM cannot be fitted (number of observed steps is less than number of parameters of the model), then the last observed step is compared to hitorical performance of others at the same step to check if it is a low-performing outlier (outlier in the direction of performing worse!) using the IQR criterion.
    4. If the LCM can be fitted, a least square fit is performed to estimate the parameters of the LCM.
    5. The probability of the current LC to perform worse than the best observed score at the maximum iteration is computed using Monte-Carlo Markov Chain (MCMC).

    To use this stopper, you need to install the following dependencies:

    .. code-block:: bash

        $ jax>=0.3.25
        $ numpyro

    Args:
        max_steps (int): The maximum number of training steps which can be performed.
        min_steps (int, optional): The minimum number of training steps which can be performed. Defaults to ``1``.
        lc_model (str, optional): The parameteric learning model to use. It should be a string in the following list: ``["lin2", "loglin2", "loglin3", "loglin4", "pow3","mmf4", "vapor3", "logloglin2", "hill3", "logpow3", "pow4", "exp4", "janoschek4", "weibull4", "ilog2"]``. The number in the name corresponds to the number of parameters of the parametric model. Defaults to ``"mmf4"``.
        min_done_for_outlier_detection (int, optional): The minimum number of observed scores at the same step to check for if it is a lower-bound outlier. Defaults to ``10``.
        iqr_factor_for_outlier_detection (float, optional): The IQR factor for outlier detection. The higher it is the more inclusive the condition will be (i.e. if set very large it is likely not going to detect any outliers). Defaults to ``1.5``.
        prob_promotion (float, optional): The threshold probabily to stop the iterations. If the current learning curve has a probability greater than ``prob_promotion`` to be worse that the best observed score accross all evaluations then the current iterations are stopped. Defaults to ``0.9`` (i.e. probability of 0.9 of being worse).
        early_stopping_patience (float, optional): The patience of the early stopping condition. If it is an ``int`` it is directly corresponding to a number of iterations. If it is a ``float`` then it is corresponding to a proportion between [0,1] w.r.t. ``max_steps``. Defaults to ``0.25`` (i.e. 25% of ``max_steps``).
        objective_returned (str, optional): The returned objective. It can be a value in ``["last", "max", "alc"]`` where ``"last"`` corresponds to the last observed score, ``"max"`` corresponds to the maximum observed score and ``"alc"`` corresponds to the area under the learning curve. Defaults to "last".
        random_state (int or np.RandomState, optional): The random state of estimation process. Defaults to ``None``.

    Raises:
        ValueError: parameters are not valid.
    """

    def __init__(
        self,
        max_steps: int,
        min_steps: int = 1,
        lc_model="mmf4",
        min_done_for_outlier_detection=10,
        iqr_factor_for_outlier_detection=1.5,
        prob_promotion=0.9,
        early_stopping_patience=0.25,
        objective_returned="last",
        random_state=None,
    ) -> None:
        super().__init__(max_steps=max_steps)
        self.min_steps = min_steps
        # Model names encode their parameter count in the trailing digit,
        # and map to the module-level ``f_*`` functions.
        lc_model = "f_" + lc_model
        lc_model_num_params = int(lc_model[-1])
        lc_model = getattr(sys.modules[__name__], lc_model)
        self.min_obs_to_fit = lc_model_num_params
        self.min_done_for_outlier_detection = min_done_for_outlier_detection
        self.iqr_factor_for_outlier_detection = iqr_factor_for_outlier_detection
        self.prob_promotion = prob_promotion
        # int patience is absolute; float patience is a fraction of max_steps.
        if type(early_stopping_patience) is int:
            self.early_stopping_patience = early_stopping_patience
        elif type(early_stopping_patience) is float:
            self.early_stopping_patience = int(early_stopping_patience * self.max_steps)
        else:
            raise ValueError("early_stopping_patience must be int or float")
        self.objective_returned = objective_returned
        self._rung = 0
        # compute the step at which to stop based on steps allocation policy
        max_rung = np.floor(
            np.log(self.max_steps / self.min_steps) / np.log(self.min_obs_to_fit)
        )
        self.max_steps_ = int(self.min_steps * self.min_obs_to_fit**max_rung)
        self.lc_model = BayesianLearningCurveRegressor(
            f_model=lc_model,
            f_model_num_params=lc_model_num_params,
            random_state=random_state,
            batch_size=self.max_steps_,
        )
        self._lc_objectives = []

    def _compute_halting_step(self):
        # Geometric rung schedule: min_steps * min_obs_to_fit**rung.
        return self.min_steps * self.min_obs_to_fit**self._rung

    def _retrieve_best_objective(self) -> float:
        """Best numeric objective across all jobs of the current search."""
        search_id, _ = self.job.id.split(".")
        objectives = []
        for obj in self.job.storage.load_out_from_all_jobs(search_id):
            try:
                objectives.append(float(obj))
            except ValueError:
                # Non-numeric outputs (e.g. failure markers) are skipped.
                pass
        if len(objectives) > 0:
            return np.max(objectives)
        else:
            return np.max(self.observations[1])

    def _get_competiting_objectives(self, rung) -> list:
        """Objectives recorded by other jobs that completed the given rung."""
        search_id, _ = self.job.id.split(".")
        values = self.job.storage.load_metadata_from_all_jobs(
            search_id, f"_completed_rung_{rung}"
        )
        values = [float(v) for v in values]
        return values

    def observe(self, budget: float, objective: float):
        """Record one (budget, objective) observation and publish rung metadata."""
        super().observe(budget, objective)
        self._budget = self.observed_budgets[-1]
        self._lc_objectives.append(self.objective)
        self._objective = self._lc_objectives[-1]

        # For Early-Stopping based on Patience
        if (
            not (hasattr(self, "_local_best_objective"))
            or self._objective > self._local_best_objective
        ):
            self._local_best_objective = self._objective
            self._local_best_step = self.step

        halting_step = self._compute_halting_step()
        if self._budget >= halting_step:
            # Share this job's rung completion so other jobs can compare.
            self.job.storage.store_job_metadata(
                self.job.id, f"_completed_rung_{self._rung}", str(self._objective)
            )

    def stop(self) -> bool:
        """Decide whether the current evaluation should be stopped."""
        # Enforce Pre-conditions Before Learning-Curve based Early Discarding
        if super().stop():
            print("Stopped after reaching the maximum number of steps.")
            self.infos_stopped = "max steps reached"
            return True

        if self.step - self._local_best_step >= self.early_stopping_patience:
            print(
                f"Stopped after reaching {self.early_stopping_patience} steps without improvement."
            )
            self.infos_stopped = "early stopping"
            return True

        # This condition will enforce the stopper to stop the evaluation at the first step
        # for the first evaluation (The FABOLAS method does the same, bias the first samples with
        # small budgets)
        self.best_objective = self._retrieve_best_objective()

        halting_step = self._compute_halting_step()

        # Too few observations to fit the LCM: fall back to IQR-based
        # low-performance outlier detection against peers at the same rung.
        if self.step < max(self.min_steps, self.min_obs_to_fit):
            if self.step >= halting_step:
                competing_objectives = self._get_competiting_objectives(self._rung)
                if len(competing_objectives) > self.min_done_for_outlier_detection:
                    q1 = np.quantile(
                        competing_objectives,
                        q=0.25,
                    )
                    q3 = np.quantile(
                        competing_objectives,
                        q=0.75,
                    )
                    iqr = q3 - q1
                    # lower than the minimum of a box plot
                    if (
                        self._objective
                        < q1 - self.iqr_factor_for_outlier_detection * iqr
                    ):
                        print(
                            f"Stopped early because of abnormally low objective: {self._objective}"
                        )
                        self.infos_stopped = "outlier"
                        return True
                self._rung += 1
            return False

        # Check if the halting budget condition is met
        if self.step < halting_step:
            return False

        # Check if the evaluation should be stopped based on LC-Model

        # Fit and predict the performance of the learning curve model
        z_train = self.observed_budgets
        y_train = self._lc_objectives
        z_train, y_train = np.asarray(z_train), np.asarray(y_train)
        self.lc_model.fit(z_train, y_train, update_prior=True)

        # Check if the configuration is promotable based on its predicted objective value
        p = self.lc_model.prob(
            X=[self.max_steps], condition=lambda y_hat: y_hat <= self.best_objective
        )[0]

        # Return whether the configuration should be stopped
        if p <= self.prob_promotion:
            self._rung += 1
            # Implicitly returns None (falsy): the evaluation continues.
        else:
            print(
                f"Stopped because the probability of performing worse is {p} > {self.prob_promotion}"
            )
            self.infos_stopped = f"prob={p:.3f}"
            return True

    @property
    def objective(self):
        """The objective value according to ``objective_returned``."""
        if self.objective_returned == "last":
            return self.observations[-1][-1]
        elif self.objective_returned == "max":
            return max(self.observations[-1])
        elif self.objective_returned == "alc":
            z, y = self.observations
            return area_learning_curve(z, y, z_max=self.max_steps)
        else:
            # Fixed: the message previously said 'best' but the accepted
            # value (checked above) is 'max'.
            raise ValueError("objective_returned must be one of 'last', 'max', 'alc'")
| 19,786 | 36.263653 | 401 | py |
deephyper | deephyper-master/deephyper/test/nas/linearRegHybrid/problem.py | from deephyper.nas.spacelib.tabular import OneLayerSpace
from deephyper.problem import NaProblem
from deephyper.test.nas.linearReg.load_data import load_data
# Hybrid NAS + HPS problem on a linear-regression task: batch size,
# learning rate and optimizer are tunable; the number of epochs is fixed.
Problem = NaProblem()
Problem.load_data(load_data)
Problem.search_space(OneLayerSpace)
Problem.hyperparameters(
    batch_size=Problem.add_hyperparameter((1, 100), "batch_size"),
    learning_rate=Problem.add_hyperparameter(
        (1e-4, 1e-1, "log-uniform"), "learning_rate"
    ),
    optimizer=Problem.add_hyperparameter(["adam", "nadam", "rmsprop"], "optimizer"),
    num_epochs=1,
)
Problem.loss("mse")
Problem.metrics(["r2"])
Problem.objective("val_r2")
# Just to print your problem, to test its definition and imports in the current python environment.
if __name__ == "__main__":
    print(Problem)
    model = Problem.get_keras_model([1])
| 817 | 24.5625 | 99 | py |
deephyper | deephyper-master/deephyper/test/nas/linearReg/problem.py | from deephyper.nas.spacelib.tabular import OneLayerSpace
from deephyper.problem import NaProblem
from deephyper.test.nas.linearReg.load_data import load_data
# Minimal NAS problem on a linear-regression task, with all training
# hyperparameters fixed (no joint hyperparameter search).
Problem = NaProblem()
Problem.load_data(load_data)
Problem.search_space(OneLayerSpace)
Problem.hyperparameters(
    batch_size=100, learning_rate=0.1, optimizer="adam", num_epochs=1
)
Problem.loss("mse")
Problem.metrics(["r2"])
Problem.objective("val_r2")
# Just to print your problem, to test its definition and imports in the current python environment.
if __name__ == "__main__":
    print(Problem)
    model = Problem.get_keras_model([1])
| 611 | 21.666667 | 99 | py |
deephyper | deephyper-master/deephyper/nas/_nx_search_space.py | import abc
import traceback
from collections.abc import Iterable
import networkx as nx
from deephyper.core.exceptions.nas.space import (
NodeAlreadyAdded,
StructureHasACycle,
WrongSequenceToSetOperations,
)
from deephyper.nas.node import MimeNode, Node, VariableNode
class NxSearchSpace(abc.ABC):
"""A NxSearchSpace is an search_space based on a networkx graph."""
    def __init__(self, seed=None, **kwargs):
        # Directed acyclic graph holding the nodes of the search space.
        self.graph = nx.DiGraph()
        self.seed = seed
        # Set later by subclasses when the space is built.
        self.output_node = None
    def plot(self, path):
        """Dump the search-space graph to a Graphviz ``.dot`` file at *path*.

        Failures (e.g. a missing pygraphviz backend) are reported on stdout
        instead of raised, keeping plotting best-effort.
        """
        with open(path, "w") as f:
            try:
                nx.nx_agraph.write_dot(self.graph, f)
            except Exception:
                print("Error: can't create graphviz file...")
                traceback.print_exc()
    def __len__(self):
        """Number of nodes in the current search_space.

        NOTE(review): the original docstring said "VariableNodes", but
        ``self.nodes`` contains every node of the graph (see ``num_nodes``
        for the VariableNode-only count) -- confirm which is intended.

        Returns:
            int: number of nodes in the current search_space.
        """
        return len(self.nodes)
    @property
    def nodes(self):
        """All nodes of the underlying graph (not only VariableNodes).

        Returns:
            list: nodes of the current search-space graph.
        """
        return list(self.graph.nodes)
def add_node(self, node):
"""Add a new node to the search_space.
Args:
node (Node): node to add to the search_space.
Raises:
TypeError: if 'node' is not an instance of Node.
NodeAlreadyAdded: if 'node' has already been added to the search_space.
"""
if not isinstance(node, Node):
raise TypeError("'node' argument should be an instance of Node!")
if node in self.nodes:
raise NodeAlreadyAdded(node)
self.graph.add_node(node)
def connect(self, node1, node2):
"""Create a new connection in the KSearchSpace graph.
The edge created corresponds to : node1 -> node2.
Args:
node1 (Node)
node2 (Node)
Raise:
StructureHasACycle: if the new edge is creating a cycle.
"""
assert isinstance(node1, Node)
assert isinstance(node2, Node)
self.graph.add_edge(node1, node2)
if not (nx.is_directed_acyclic_graph(self.graph)):
raise StructureHasACycle(
f"the connection between {node1} -> {node2} is creating a cycle in the search_space's graph."
)
@property
def size(self):
"""Size of the search space define by the search_space"""
s = 0
for n in filter(lambda n: isinstance(n, VariableNode), self.nodes):
if n.num_ops != 0:
if s == 0:
s = n.num_ops
else:
s *= n.num_ops
return s
@property
def max_num_ops(self):
"""Returns the maximum number of operations accross all VariableNodes of the struct.
Returns:
int: maximum number of Operations for a VariableNode in the current Structure.
"""
return max(map(lambda n: n.num_ops, self.variable_nodes))
@property
def num_nodes(self):
"""Returns the number of VariableNodes in the current Structure.
Returns:
int: number of VariableNodes in the current Structure.
"""
return len(list(self.variable_nodes))
@property
def variable_nodes(self):
"""Iterator of VariableNodes of the search_space.
Returns:
(Iterator(VariableNode)): generator of VariablesNodes of the search_space.
"""
return filter(lambda n: isinstance(n, VariableNode), self.nodes)
@property
def mime_nodes(self):
"""Iterator of MimeNodes of the search_space.
Returns:
(Iterator(MimeNode)): iterator of MimeNodes of the search_space.
"""
return filter(lambda n: isinstance(n, MimeNode), self.nodes)
def denormalize(self, indexes):
"""Denormalize a sequence of normalized indexes to get a sequence of absolute indexes. Useful when you want to compare the number of different search_spaces.
Args:
indexes (Iterable): a sequence of normalized indexes.
Returns:
list: A list of absolute indexes corresponding to operations choosen with relative indexes of `indexes`.
"""
assert isinstance(
indexes, Iterable
), 'Wrong argument, "indexes" should be of Iterable.'
if len(indexes) != self.num_nodes:
raise WrongSequenceToSetOperations(indexes, list(self.variable_nodes))
return [
vnode.denormalize(op_i) for op_i, vnode in zip(indexes, self.variable_nodes)
]
def set_output_node(self):
"""Set the output node of the search_space.
:meta private:
"""
if self.output_node is None:
nodes = list(self.graph.nodes())
self.output_node = []
for n in nodes:
if len(list(self.graph.successors(n))) == 0:
self.output_node.append(n)
if len(self.output_node) == 1:
self.output_node = self.output_node[0]
def create_tensor_aux(self, g, n, train=None):
"""Recursive function to create the tensors from the graph.
:meta private:
Args:
g (nx.DiGraph): a graph
n (nx.Node): a node
train (bool): True if the network is built for training, False if the network is built for validation/testing (for example False will deactivate Dropout).
Return:
the tensor represented by n.
"""
try:
if n._tensor is not None:
output_tensor = n._tensor
else:
pred = list(g.predecessors(n))
if len(pred) == 0:
output_tensor = n.create_tensor(train=train, seed=self.seed)
else:
tensor_list = list()
for s_i in pred:
tmp = self.create_tensor_aux(g, s_i, train=train)
if type(tmp) is list:
tensor_list.extend(tmp)
else:
tensor_list.append(tmp)
output_tensor = n.create_tensor(
tensor_list, train=train, seed=self.seed
)
return output_tensor
except TypeError:
raise RuntimeError(f"Failed to build tensors from :{n}")
@abc.abstractmethod
def choices(self):
"""Gives the possible choices for each decision variable of the search space.
Returns:
list: A list of tuple where each element corresponds to a discrete variable represented by ``(low, high)``.
"""
@abc.abstractmethod
def sample(self, choice=None):
"""Sample a ``tf.keras.Model`` from the search space.
Args:
choice (list, optional): A list of decision for the operations of this search space. Defaults to None, will generate a random sample.
Returns:
tf.keras.Model: A Tensorflow Keras model.
"""
@abc.abstractmethod
def build(self):
"""Build the current graph search space."""
| 7,301 | 30.747826 | 166 | py |
deephyper | deephyper-master/deephyper/nas/losses.py | """This module provides different loss functions. A loss can be defined by a keyword (str) or a callable following the ``tensorflow.keras`` interface. If it is a keyword it has to be available in ``tensorflow.keras`` or in ``deephyper.losses``. The loss functions availble in ``deephyper.losses`` are:
* Negative Log Likelihood (compatible with Tensorflow Probability): ``tfp_negloglik`` or ``tfp_nll``
"""
from collections import OrderedDict
import tensorflow as tf
from deephyper.core.utils import load_attr
def tfp_negloglik(y, rv_y):
    """Negative log-likelihood of ``y`` under the predicted distribution ``rv_y``."""
    log_likelihood = rv_y.log_prob(y)
    return -log_likelihood
# Registry mapping loss keywords (and their aliases) to plain loss functions.
losses_func = OrderedDict()
losses_func["tfp_negloglik"] = losses_func["tfp_nll"] = tfp_negloglik
# Registry of factories returning loss *objects*; currently empty.
losses_obj = OrderedDict()
def selectLoss(name: str):
    """Resolve ``name`` into a loss usable by Keras.
    Args:
        name (str): a keyword registered in DeepHyper, a keyword known to
            keras, or a dotted attribute path to import.
    Returns:
        str or callable: either a keras-known keyword or a callable taking
        ``(y_true, y_pred)`` and returning a tensor.
    """
    if callable(name):
        return name
    # Known DeepHyper keywords first: plain functions, then object factories.
    if name in losses_func:
        return losses_func[name]
    if name in losses_obj:
        return losses_obj[name]()
    # Unknown keyword: try importing it as an attribute path, falling back to
    # the keras registry.
    try:
        return load_attr(name)
    except Exception:
        return tf.keras.losses.get(
            name
        )  # supposing it is referenced in keras losses
| 1,605 | 34.688889 | 301 | py |
deephyper | deephyper-master/deephyper/nas/node.py | """This module provides the available node types to build a ``KSearchSpace``.
"""
import tensorflow as tf
import deephyper.core.exceptions
from deephyper.nas.operation import Operation
class Node:
    """A single vertex of a ``KSearchSpace`` graph.
    Args:
        name (str): node name.
    """
    # Running count of every ``Node`` ever instantiated; gives each node a
    # unique id.
    num = 0
    def __init__(self, name="", *args, **kwargs):
        self.name = name
        self._tensor = None
        Node.num += 1
        self._num = Node.num
    def __str__(self):
        return f"{self.name}[id={self._num}]"
    @property
    def id(self):
        return self._num
    @property
    def op(self):
        raise NotImplementedError
    def create_tensor(self, *args, **kwargs):
        raise NotImplementedError
    @staticmethod
    def verify_operation(op):
        # Accept an ``Operation`` untouched, transparently wrap raw Keras
        # layers, and reject anything else.
        if isinstance(op, Operation):
            return op
        if isinstance(op, tf.keras.layers.Layer):
            return Operation(op)
        raise RuntimeError(
            f"Can't add this operation '{op.__name__}'. An operation should be either of type Operation or tf.keras.layers.Layer when is of type: {type(op)}"
        )
class OperationNode(Node):
    """Base class for nodes that evaluate an operation to produce a tensor."""
    def __init__(self, name="", *args, **kwargs):
        super().__init__(name=name, *args, **kwargs)
    def create_tensor(self, inputs=None, train=True, seed=None, **kwargs):
        """Build (and memoize) the tensor of this node's operation.
        Args:
            inputs (list, optional): predecessor tensors; ``None`` for source
                nodes (e.g. inputs) whose operation takes no tensor.
            train (bool): whether the network is built for training.
            seed: random seed forwarded to the operation.
        Returns:
            The tensor produced by this node's operation.
        """
        if self._tensor is None:
            if inputs is None:
                try:
                    # Bug fix: forward the caller's ``seed`` instead of the
                    # previous hard-coded ``seed=None``, which silently dropped
                    # the seed passed by ``NxSearchSpace.create_tensor_aux``.
                    self._tensor = self.op(train=train, seed=seed)
                except TypeError:
                    raise RuntimeError(
                        f'Verify if node: "{self}" has incoming connexions!'
                    )
            else:
                self._tensor = self.op(inputs, train=train)
        return self._tensor
class VariableNode(OperationNode):
    """This class represents a node of a graph where you have a set of possible operations. It means the agent will have to act to choose one of these operations.
    >>> import tensorflow as tf
    >>> from deephyper.nas.space.node import VariableNode
    >>> vnode = VariableNode("VNode1")
    >>> from deephyper.nas.space.op.op1d import Dense
    >>> vnode.add_op(Dense(
    ... units=10,
    ... activation=tf.nn.relu))
    >>> vnode.num_ops
    1
    >>> vnode.add_op(Dense(
    ... units=1000,
    ... activation=tf.nn.tanh))
    >>> vnode.num_ops
    2
    >>> vnode.set_op(0)
    >>> vnode.op.units
    10
    Args:
        name (str): node name.
    """
    def __init__(self, name=""):
        super().__init__(name=name)
        # Candidate operations and the index of the chosen one; ``_index``
        # stays ``None`` until a choice is made via ``set_op``/``get_op``.
        self._ops = list()
        self._index = None
    def __str__(self):
        if self._index is not None:
            return f"{super().__str__()}(Variable[{str(self.op)}])"
        else:
            return f"{super().__str__()}(Variable[?])"
    def add_op(self, op):
        self._ops.append(self.verify_operation(op))
    @property
    def num_ops(self):
        return len(self._ops)
    def set_op(self, index):
        self.get_op(index).init(self)
    def get_op(self, index):
        # ``index`` may be an absolute int or a normalized float in [0, 1].
        # The string-based type test also matches numpy scalar type names
        # (e.g. "numpy.float32"), which ``isinstance`` against the builtins
        # would not.
        assert "float" in str(type(index)) or "int" in str(
            type(index)
        ), f"found type is : {type(index)}"
        if "float" in str(type(index)):
            self._index = self.denormalize(index)
        else:
            assert 0 <= index and index < len(
                self._ops
            ), f"Number of possible operations is: {len(self._ops)}, but index given is: {index} (index starts from 0)!"
            self._index = index
        return self.op
    def denormalize(self, index):
        """Denormalize a normalized index to get an absolute indexes. Useful when you want to compare the number of different search_spaces.
        Args:
            indexes (float|int): a normalized index.
        Returns:
            int: An absolute indexes corresponding to the operation choosen with the relative index of `index`.
        """
        if type(index) is int:
            return index
        else:
            assert 0.0 <= index and index <= 1.0
            # Map [0, 1] onto {0, ..., num_ops - 1}; the upper bound 1.0 is
            # clamped down to the last operation.
            res = int(index * len(self._ops))
            if index == 1.0:
                res -= 1
            return res
    @property
    def op(self):
        if len(self._ops) == 0:
            raise RuntimeError("This VariableNode doesn't have any operation yet.")
        elif self._index is None:
            raise RuntimeError(
                'This VariableNode doesn\'t have any set operation, please use "set_op(index)" if you want to set one'
            )
        else:
            return self._ops[self._index]
    @property
    def ops(self):
        return self._ops
class ConstantNode(OperationNode):
    """A node whose operation is fixed: the agent never makes a decision for
    it. Typical use case: inserting a fixed tensor/layer into the graph.
    >>> import tensorflow as tf
    >>> from deephyper.nas.space.node import ConstantNode
    >>> from deephyper.nas.space.op.op1d import Dense
    >>> cnode = ConstantNode(op=Dense(units=100, activation=tf.nn.relu), name='CNode1')
    >>> cnode.op
    Dense_100_relu
    Args:
        op (Operation, optional): operation to fix for this node. Defaults to None.
        name (str, optional): node name. Defaults to ``''``.
    """
    def __init__(self, op=None, name="", *args, **kwargs):
        super().__init__(name=name)
        self._op = None
        if op is not None:
            # Delegate verification + initialization to ``set_op``.
            self.set_op(op)
    def set_op(self, op):
        checked_op = self.verify_operation(op)
        checked_op.init(self)
        self._op = checked_op
    def __str__(self):
        return f"{super().__str__()}(Constant[{str(self.op)}])"
    @property
    def op(self):
        return self._op
class MirrorNode(OperationNode):
    """A node that reuses the operation of another node, which enables the
    sharing of tf.keras layers. It adds no new decision to the search space.
    Args:
        node (Node): The targeted node to mirror.
    >>> from deephyper.nas.space.node import VariableNode, MirrorNode
    >>> from deephyper.nas.space.op.op1d import Dense
    >>> vnode = VariableNode()
    >>> vnode.add_op(Dense(10))
    >>> vnode.add_op(Dense(20))
    >>> mnode = MirrorNode(vnode)
    >>> vnode.set_op(0)
    >>> vnode.op
    Dense_10
    >>> mnode.op
    Dense_10
    """
    def __init__(self, node):
        mirror_name = "Mirror[" + str(node) + "]"
        super().__init__(name=mirror_name)
        self._node = node
    @property
    def op(self):
        # Always defer to the mirrored node's current operation.
        return self._node.op
class MimeNode(OperationNode):
    """A MimeNode is a node which reuse an the choice made for an VariableNode, it enable the definition of a Cell based search_space. This node reuse the operation from the mimed VariableNode but only the choice made.
    Args:
        node (VariableNode): the VariableNode to mime.
    >>> from deephyper.nas.space.node import VariableNode, MimeNode
    >>> from deephyper.nas.space.op.op1d import Dense
    >>> vnode = VariableNode()
    >>> vnode.add_op(Dense(10))
    >>> vnode.add_op(Dense(20))
    >>> mnode = MimeNode(vnode)
    >>> mnode.add_op(Dense(30))
    >>> mnode.add_op(Dense(40))
    >>> vnode.set_op(0)
    >>> vnode.op
    Dense_10
    >>> mnode.op
    Dense_30
    """
    def __init__(self, node, name=""):
        super().__init__(name=f"Mime[{name}][src={str(node)}]")
        # The source VariableNode whose *chosen index* is replicated here; the
        # operations themselves belong to this node (``self._ops``).
        self.node = node
        self._ops = list()
    def add_op(self, op):
        self._ops.append(self.verify_operation(op))
    @property
    def num_ops(self):
        return len(self._ops)
    def set_op(self):
        # The mimed node must have been set first: its chosen index selects
        # the operation of this node.
        if self.node._index is None:
            raise deephyper.core.exceptions.DeephyperRuntimeError(
                f"{str(self)} cannot be initialized because its source {str(self.node)} is not initialized!"
            )
        self._ops[self.node._index].init(self)
    @property
    def op(self):
        # Both nodes must expose the same number of operations for the shared
        # index to be meaningful.
        if self.num_ops != self.node.num_ops:
            raise deephyper.core.exceptions.DeephyperRuntimeError(
                f"{str(self)} and {str(self.node)} should have the same number of opertions, when {str(self)} has {self.num_ops} and {str(self.node)} has {self.node.num_ops}!"
            )
        else:
            return self._ops[self.node._index]
    @property
    def ops(self):
        return self._ops
| 8,363 | 29.086331 | 218 | py |
deephyper | deephyper-master/deephyper/nas/_keras_search_space.py | import copy
import logging
import warnings
import networkx as nx
import numpy as np
import tensorflow as tf
from deephyper.core.exceptions.nas.space import (
InputShapeOfWrongType,
WrongSequenceToSetOperations,
)
from deephyper.nas._nx_search_space import NxSearchSpace
from deephyper.nas.node import ConstantNode
from deephyper.nas.operation import Tensor
from tensorflow import keras
from tensorflow.python.keras.utils.vis_utils import model_to_dot
logger = logging.getLogger(__name__)
class KSearchSpace(NxSearchSpace):
    """A KSearchSpace represents a search space of neural networks.
    >>> import tensorflow as tf
    >>> from deephyper.nas import KSearchSpace
    >>> from deephyper.nas.node import ConstantNode, VariableNode
    >>> from deephyper.nas.operation import operation, Identity
    >>> Dense = operation(tf.keras.layers.Dense)
    >>> Dropout = operation(tf.keras.layers.Dropout)
    >>> class ExampleSpace(KSearchSpace):
    ...     def build(self):
    ...         # input nodes are automatically built based on `input_shape`
    ...         input_node = self.input_nodes[0]
    ...         # we want 4 layers maximum (Identity corresponds to not adding a layer)
    ...         for i in range(4):
    ...             node = VariableNode()
    ...             self.connect(input_node, node)
    ...             # we add 3 possible operations for each node
    ...             node.add_op(Identity())
    ...             node.add_op(Dense(100, "relu"))
    ...             node.add_op(Dropout(0.2))
    ...             input_node = node
    ...         output = ConstantNode(op=Dense(self.output_shape[0]))
    ...         self.connect(input_node, output)
    ...         return self
    ...
    >>>
    >>> space = ExampleSpace(input_shape=(1,), output_shape=(1,)).build()
    >>> space.sample().summary()
    Args:
        input_shape (list(tuple(int))): list of shapes of all inputs.
        output_shape (tuple(int)): shape of output.
        batch_size (list(tuple(int))): batch size of the input layer. If ``input_shape`` is defining a list of inputs, ``batch_size`` should also define a list of inputs.
    Raises:
        InputShapeOfWrongType: [description]
    """
    def __init__(
        self, input_shape, output_shape, batch_size=None, seed=None, *args, **kwargs
    ):
        super().__init__()
        self._random = np.random.RandomState(seed)
        self.input_shape = input_shape
        if type(input_shape) is tuple:
            # we have only one input tensor here
            op = Tensor(
                keras.layers.Input(input_shape, name="input_0", batch_size=batch_size)
            )
            self.input_nodes = [ConstantNode(op=op, name="Input_0")]
        elif type(input_shape) is list and all(
            map(lambda x: type(x) is tuple, input_shape)
        ):
            # we have a list of input tensors here
            self.input_nodes = list()
            for i in range(len(input_shape)):
                batch_size = batch_size[i] if type(batch_size) is list else None
                op = Tensor(
                    keras.layers.Input(
                        input_shape[i], name=f"input_{i}", batch_size=batch_size
                    )
                )
                inode = ConstantNode(op=op, name=f"Input_{i}")
                self.input_nodes.append(inode)
        else:
            raise InputShapeOfWrongType(input_shape)
        # Input nodes are part of the search-space graph from the start.
        for node in self.input_nodes:
            self.graph.add_node(node)
        self.output_shape = output_shape
        self.output_node = None
        self._model = None
    @property
    def input(self):
        return self.input_nodes
    @property
    def output(self):
        return self.output_node
    @property
    def depth(self):
        # Depth is only defined once a concrete Keras model has been created.
        if self._model is None:
            raise RuntimeError("Can't compute depth of model without creating a model.")
        return len(self.longest_path)
    @property
    def longest_path(self):
        if self._model is None:
            raise RuntimeError(
                "Can't compute longest path of model without creating a model."
            )
        # Convert the Keras model to a graph and take the longest DAG path.
        nx_graph = nx.drawing.nx_pydot.from_pydot(model_to_dot(self._model))
        return nx.algorithms.dag.dag_longest_path(nx_graph)
    def set_ops(self, indexes):
        """Set the operations for each node of each cell of the search_space.
        :meta private:
        Args:
            indexes (list): element of list can be float in [0, 1] or int.
        Raises:
            WrongSequenceToSetOperations: raised when 'indexes' is of a wrong length.
        """
        if len(indexes) != len(list(self.variable_nodes)):
            raise WrongSequenceToSetOperations(indexes, list(self.variable_nodes))
        for op_i, node in zip(indexes, self.variable_nodes):
            node.set_op(op_i)
        # MimeNodes can only be set once their source VariableNode is set.
        for node in self.mime_nodes:
            node.set_op()
        self.set_output_node()
    def create_model(self):
        """Create the tensors corresponding to the search_space.
        :meta private:
        Returns:
            A keras.Model for the current search_space with the corresponding set of operations.
        """
        # !the output layer does not have to be of the same shape as the data
        # !this depends on the loss
        if type(self.output_node) is list:
            output_tensors = [
                self.create_tensor_aux(self.graph, out) for out in self.output_node
            ]
            for out_T in output_tensors:
                # Recover the output index from the tensor name (suffix after
                # the last "_") to look up the expected shape.
                output_n = int(out_T.name.split("/")[0].split("_")[-1])
                out_S = self.output_shape[output_n]
                if tf.keras.backend.is_keras_tensor(out_T):
                    out_T_shape = out_T.type_spec.shape
                    # Shape mismatches only warn (see note above about loss).
                    if out_T_shape[1:] != out_S:
                        warnings.warn(
                            f"The output tensor of shape {out_T_shape} doesn't match the expected shape {out_S}!",
                            RuntimeWarning,
                        )
            input_tensors = [inode._tensor for inode in self.input_nodes]
            self._model = keras.Model(inputs=input_tensors, outputs=output_tensors)
        else:
            output_tensors = self.create_tensor_aux(self.graph, self.output_node)
            if tf.keras.backend.is_keras_tensor(output_tensors):
                output_tensors_shape = output_tensors.type_spec.shape
                if output_tensors_shape[1:] != self.output_shape:
                    warnings.warn(
                        f"The output tensor of shape {output_tensors_shape} doesn't match the expected shape {self.output_shape}!",
                        RuntimeWarning,
                    )
            input_tensors = [inode._tensor for inode in self.input_nodes]
            self._model = keras.Model(inputs=input_tensors, outputs=[output_tensors])
        return self._model
    def choices(self):
        """Gives the possible choices for each decision variable of the search space.
        Returns:
            list: A list of tuple where each element corresponds to a discrete variable represented by ``(low, high)``.
        """
        return [(0, vnode.num_ops - 1) for vnode in self.variable_nodes]
    def sample(self, choice=None):
        """Sample a ``tf.keras.Model`` from the search space.
        Args:
            choice (list, optional): A list of decision for the operations of this search space. Defaults to None, will generate a random sample.
        Returns:
            tf.keras.Model: A Tensorflow Keras model.
        """
        if choice is None:
            choice = [self._random.randint(c[0], c[1] + 1) for c in self.choices()]
        # Deep-copy so sampling never mutates this (reusable) search space.
        self_copy = copy.deepcopy(self)
        self_copy.set_ops(choice)
        model = self_copy.create_model()
        return model
| 7,837 | 34.789954 | 170 | py |
deephyper | deephyper-master/deephyper/nas/metrics.py | """This module provides different metric functions. A metric can be defined by a keyword (str) or a callable. If it is a keyword it has to be available in ``tensorflow.keras`` or in ``deephyper.netrics``. The loss functions availble in ``deephyper.metrics`` are:
* Sparse Perplexity: ``sparse_perplexity``
* R2: ``r2``
* AUC ROC: ``auroc``
* AUC Precision-Recall: ``aucpr``
"""
import functools
from collections import OrderedDict
import tensorflow as tf
from deephyper.core.utils import load_attr
def r2(y_true, y_pred):
    """Coefficient of determination (R^2), averaged over outputs."""
    residual_ss = tf.math.reduce_sum(tf.math.square(y_true - y_pred), axis=0)
    mean_true = tf.math.reduce_mean(y_true, axis=0)
    total_ss = tf.math.reduce_sum(tf.math.square(y_true - mean_true), axis=0)
    # Epsilon guards against division by zero for constant targets.
    per_output = 1 - residual_ss / (total_ss + tf.keras.backend.epsilon())
    return tf.math.reduce_mean(per_output)
def mae(y_true, y_pred):
    """Mean absolute error."""
    return tf.keras.metrics.mean_absolute_error(y_true, y_pred)
def mse(y_true, y_pred):
    """Mean squared error."""
    return tf.keras.metrics.mean_squared_error(y_true, y_pred)
def rmse(y_true, y_pred):
    """Root mean squared error."""
    return tf.math.sqrt(tf.math.reduce_mean(tf.math.square(y_pred - y_true)))
def acc(y_true, y_pred):
    """Categorical accuracy."""
    return tf.keras.metrics.categorical_accuracy(y_true, y_pred)
def sparse_perplexity(y_true, y_pred):
    """Perplexity: 2 raised to the sparse categorical cross-entropy."""
    cross_entropy = tf.keras.losses.sparse_categorical_crossentropy(y_true, y_pred)
    perplexity = tf.pow(2.0, cross_entropy)
    return perplexity
def to_tfp(metric_func):
    """Adapt a regular tensorflow-keras metric so it accepts a TensorFlow
    Probability distribution as the model output.
    Args:
        metric_func (func): A regular tensorflow-keras metric function.
    """
    @functools.wraps(metric_func)
    def tfp_metric(y_true, y_pred):
        # Distributions expose ``mean()``; evaluate the metric on it.
        return metric_func(y_true, y_pred.mean())
    tfp_metric.__name__ = "tfp_" + metric_func.__name__
    return tfp_metric
# convert some metrics for Tensorflow Probability where the output of the model is
# a distribution
tfp_r2 = to_tfp(r2)
tfp_mae = to_tfp(mae)
tfp_mse = to_tfp(mse)
tfp_rmse = to_tfp(rmse)
# Registry of metric *functions*: maps every accepted keyword (including
# aliases) to a callable ``f(y_true, y_pred)``.
metrics_func = OrderedDict()
metrics_func["mean_absolute_error"] = metrics_func["mae"] = mae
metrics_func["r2"] = r2
metrics_func["mean_squared_error"] = metrics_func["mse"] = mse
metrics_func["root_mean_squared_error"] = metrics_func["rmse"] = rmse
metrics_func["accuracy"] = metrics_func["acc"] = acc
metrics_func["sparse_perplexity"] = sparse_perplexity
metrics_func["tfp_r2"] = tfp_r2
metrics_func["tfp_mse"] = tfp_mse
metrics_func["tfp_mae"] = tfp_mae
metrics_func["tfp_rmse"] = tfp_rmse
# Registry of metric *objects*: factories creating fresh stateful Keras
# metric instances (one per call, so state is never shared).
metrics_obj = OrderedDict()
metrics_obj["auroc"] = lambda: tf.keras.metrics.AUC(name="auroc", curve="ROC")
metrics_obj["aucpr"] = lambda: tf.keras.metrics.AUC(name="aucpr", curve="PR")
def selectMetric(name: str):
    """Resolve ``name`` into a metric usable by Keras.
    Args:
        name (str): a keyword registered in DeepHyper, a keyword known to
            keras, or a dotted attribute path to import.
    Returns:
        str or callable: either a keras-known keyword or a callable taking
        ``(y_true, y_pred)`` and returning a tensor.
    """
    if callable(name):
        return name
    # Known DeepHyper keywords first: plain functions, then object factories.
    if name in metrics_func:
        return metrics_func[name]
    if name in metrics_obj:
        return metrics_obj[name]()
    # Unknown keyword: try importing it as an attribute path; otherwise hand
    # the raw keyword back to Keras.
    try:
        return load_attr(name)
    except Exception:
        return name  # supposing it is referenced in keras metrics
| 3,474 | 31.175926 | 262 | py |
deephyper | deephyper-master/deephyper/nas/__init__.py | from ._nx_search_space import NxSearchSpace
from ._keras_search_space import KSearchSpace
__all__ = ["NxSearchSpace", "KSearchSpace"]
| 135 | 26.2 | 45 | py |
deephyper | deephyper-master/deephyper/nas/trainer/_utils.py | from collections import OrderedDict
import tensorflow as tf
# Mapping from optimizer keyword to the corresponding Keras optimizer class
# (classes, not instances — instantiation is left to the caller).
optimizers_keras = OrderedDict()
optimizers_keras["sgd"] = tf.keras.optimizers.SGD
optimizers_keras["rmsprop"] = tf.keras.optimizers.RMSprop
optimizers_keras["adagrad"] = tf.keras.optimizers.Adagrad
optimizers_keras["adam"] = tf.keras.optimizers.Adam
optimizers_keras["adadelta"] = tf.keras.optimizers.Adadelta
optimizers_keras["adamax"] = tf.keras.optimizers.Adamax
optimizers_keras["nadam"] = tf.keras.optimizers.Nadam
def selectOptimizer_keras(name):
    """Return the Keras optimizer class registered under ``name``.
    Raises:
        RuntimeError: if ``name`` is not a known optimizer keyword.
    """
    optimizer = optimizers_keras.get(name)
    if optimizer is None:
        raise RuntimeError(f'"{name}" is not a defined optimizer for keras.')
    return optimizer
def check_data_config(data_dict):
    """Identify the layout of a data configuration dictionary.
    Args:
        data_dict (dict): the trainer's data configuration.
    Returns:
        str: ``"gen"`` when all generator keys are present, ``"ndarray"``
        when all array keys are present (generator keys win if both apply).
    Raises:
        RuntimeError: if neither complete key set is present.
    """
    gen_keys = ("train_gen", "train_size", "valid_gen", "valid_size", "types", "shapes")
    ndarray_keys = ("train_X", "train_Y", "valid_X", "valid_Y")
    # Generator-style configs are checked first, so a dict containing both
    # key sets is classified as "gen".
    if all(k in data_dict for k in gen_keys):
        return "gen"
    if all(k in data_dict for k in ndarray_keys):
        return "ndarray"
    raise RuntimeError("Wrong data config...")
| 1,156 | 35.15625 | 88 | py |
deephyper | deephyper-master/deephyper/nas/trainer/_horovod.py | import logging
import time
from inspect import signature
import deephyper.nas.trainer._arch as a
import deephyper.nas.trainer._utils as U
import horovod.tensorflow.keras as hvd
import numpy as np
import tensorflow as tf
from deephyper.core.exceptions import DeephyperRuntimeError
from deephyper.nas.losses import selectLoss
from deephyper.nas.metrics import selectMetric
logger = logging.getLogger(__name__)
AUTOTUNE = tf.data.experimental.AUTOTUNE
class HorovodTrainer:
    def __init__(self, config, model):
        """Distributed (Horovod) trainer around a Keras model.
        Args:
            config (dict): trainer configuration; keys follow the constants of
                ``deephyper.nas.trainer._arch`` (imported as ``a``).
            model (tf.keras.Model): the model to train.
        """
        self.cname = self.__class__.__name__
        self.config = config
        self.model = model
        self.callbacks = []
        self.data = self.config[a.data]
        # hyperparameters
        self.config_hp = self.config[a.hyperparameters]
        self.optimizer_name = self.config_hp.get(a.optimizer, "adam")
        self.optimizer_eps = self.config_hp.get("epsilon", None)
        self.batch_size = self.config_hp.get(a.batch_size, 32)
        self.clipvalue = self.config_hp.get("clipvalue", None)
        self.learning_rate = self.config_hp.get(a.learning_rate, 1e-3)
        # augmentation strategy
        if not self.config.get("augment", None) is None:
            if not self.config["augment"].get("kwargs", None) is None:
                # Bind the optional kwargs into the augmentation callable.
                self.augment_func = lambda inputs, outputs: self.config["augment"][
                    "func"
                ](inputs, outputs, **self.config["augment"]["kwargs"])
            else:
                self.augment_func = self.config["augment"]["func"]
        self.num_epochs = self.config_hp[a.num_epochs]
        # Only Horovod rank 0 is verbose, so workers don't duplicate logs.
        self.verbose = (
            self.config_hp.get("verbose", 1)
            if self.config_hp.get("verbose", 1) and hvd.rank() == 0
            else 0
        )
        self.setup_losses_and_metrics()
        # DATA loading
        self.data_config_type = None
        self.train_size = None
        self.valid_size = None
        self.train_steps_per_epoch = None
        self.valid_steps_per_epoch = None
        self.load_data()
        # DATA preprocessing
        self.preprocessing_func = None
        if self.config.get("preprocessing"):
            self.preprocessing_func = self.config["preprocessing"]["func"]
        self.preprocessor = None
        self.preprocess_data()
        # Dataset
        self.dataset_train = None
        self.set_dataset_train()
        self.dataset_valid = None
        self.set_dataset_valid()
        self.model_compile()
        self.train_history = None
        self.init_history()
        # Test on validation after each epoch
        if self.verbose == 1:
            logger.info("KerasTrainer instantiated")
            model.summary(print_fn=logger.info)
def init_history(self):
self.train_history = dict()
self.train_history["n_parameters"] = self.model.count_params()
    def setup_losses_and_metrics(self):
        """Resolve the configured loss(es) and metric(s) names into callables."""
        def selectL(loss):
            # Multi-output models may map output names to individual losses.
            if type(loss) is dict:
                loss = {k: selectLoss(v) for k, v in loss.items()}
            else:
                loss = selectLoss(loss)
            return loss
        self.loss_metrics = selectL(self.config[a.loss_metric])
        self.loss_weights = self.config.get("loss_weights")
        self.class_weights = self.config.get("class_weights")
        # Default: weight every output loss equally.
        if self.loss_weights is None and type(self.loss_metrics) is dict:
            self.loss_weights = [1.0 for _ in range(len(self.loss_metrics))]
        if type(self.config[a.metrics]) is list:
            self.metrics_name = [selectMetric(m) for m in self.config[a.metrics]]
        else:
            def selectM(metric):
                # A single output may carry one metric or a list of metrics.
                if type(metric) is list:
                    return [selectMetric(m_i) for m_i in metric]
                else:
                    return selectMetric(metric)
            self.metrics_name = {
                n: selectM(m) for n, m in self.config[a.metrics].items()
            }
    def load_data(self):
        """Load the data according to its config type and derive step counts."""
        logger.debug("load_data")
        self.data_config_type = U.check_data_config(self.data)
        logger.debug(f"data config type: {self.data_config_type}")
        if self.data_config_type == "gen":
            self.load_data_generator()
        elif self.data_config_type == "ndarray":
            self.load_data_ndarray()
        else:
            raise DeephyperRuntimeError(
                f"Data config is not supported by this Trainer: '{self.data_config_type}'!"
            )
        # prepare number of steps for training and validation
        # (ceiling division: a final partial batch still counts as one step)
        self.train_steps_per_epoch = self.train_size // self.batch_size
        if self.train_steps_per_epoch * self.batch_size < self.train_size:
            self.train_steps_per_epoch += 1
        self.valid_steps_per_epoch = self.valid_size // self.batch_size
        if self.valid_steps_per_epoch * self.batch_size < self.valid_size:
            self.valid_steps_per_epoch += 1
        # The steps are split evenly across the Horovod workers.
        self.train_steps_per_epoch //= hvd.size()
        self.valid_steps_per_epoch //= hvd.size()
def load_data_generator(self):
self.train_gen = self.data["train_gen"]
self.valid_gen = self.data["valid_gen"]
self.data_types = self.data["types"]
self.data_shapes = self.data["shapes"]
self.train_size = self.data["train_size"]
self.valid_size = self.data["valid_size"]
def load_data_ndarray(self):
def f(x):
return type(x) is np.ndarray
# check data type
# Output data
if (
type(self.config[a.data][a.train_Y]) is np.ndarray
and type(self.config[a.data][a.valid_Y]) is np.ndarray
):
self.train_Y = self.config[a.data][a.train_Y]
self.valid_Y = self.config[a.data][a.valid_Y]
elif (
type(self.config[a.data][a.train_Y]) is list
and type(self.config[a.data][a.valid_Y]) is list
):
if not all(map(f, self.config[a.data][a.train_Y])) or not all(
map(f, self.config[a.data][a.valid_Y])
):
raise DeephyperRuntimeError(
"all outputs data should be of type np.ndarray !"
)
if (
len(self.config[a.data][a.train_Y]) > 1
and len(self.config[a.data][a.valid_Y]) > 1
):
self.train_Y = self.config[a.data][a.train_Y]
self.valid_Y = self.config[a.data][a.valid_Y]
else:
self.train_Y = self.config[a.data][a.train_Y][0]
self.valid_Y = self.config[a.data][a.valid_Y][0]
else:
raise DeephyperRuntimeError(
f"Data are of an unsupported type and should be of same type: type(self.config['data']['train_Y'])=={type(self.config[a.data][a.train_Y])} and type(self.config['data']['valid_Y'])=={type(self.config[a.valid_Y][a.valid_X])} !"
)
# Input data
if (
type(self.config[a.data][a.train_X]) is np.ndarray
and type(self.config[a.data][a.valid_X]) is np.ndarray
):
self.train_X = [self.config[a.data][a.train_X]]
self.valid_X = [self.config[a.data][a.valid_X]]
elif (
type(self.config[a.data][a.train_X]) is list
and type(self.config[a.data][a.valid_X]) is list
):
if not all(map(f, self.config[a.data][a.train_X])) or not all(
map(f, self.config[a.data][a.valid_X])
):
raise DeephyperRuntimeError(
"all inputs data should be of type np.ndarray !"
)
if (
len(self.config[a.data][a.train_X]) > 1
and len(self.config[a.data][a.valid_X]) > 1
):
self.train_X = self.config[a.data][a.train_X]
self.valid_X = self.config[a.data][a.valid_X]
else:
self.train_X = self.config[a.data][a.train_X][0]
self.valid_X = self.config[a.data][a.valid_X][0]
else:
raise DeephyperRuntimeError(
f"Data are of an unsupported type and should be of same type: type(self.config['data']['train_X'])=={type(self.config[a.data][a.train_X])} and type(self.config['data']['valid_X'])=={type(self.config[a.data][a.valid_X])} !"
)
# check data length
self.train_size = np.shape(self.train_X[0])[0]
if not all(map(lambda x: np.shape(x)[0] == self.train_size, self.train_X)):
raise DeephyperRuntimeError(
"All training inputs data should have same length!"
)
self.valid_size = np.shape(self.valid_X[0])[0]
if not all(map(lambda x: np.shape(x)[0] == self.valid_size, self.valid_X)):
raise DeephyperRuntimeError(
"All validation inputs data should have same length!"
)
    def preprocess_data(self):
        """Fit the configured preprocessor on the concatenated train+valid
        data and transform both splits in place.
        Only applies to ndarray data whose targets are 2-D; generator data is
        returned untouched.
        """
        if self.data_config_type == "gen":
            return
        if self.preprocessor is not None:
            raise DeephyperRuntimeError("You can only preprocess data one time.")
        if self.preprocessing_func:
            logger.debug(f"preprocess_data with: {str(self.preprocessing_func)}")
            if len(np.shape(self.train_Y)) == 2:
                # Stack inputs and outputs column-wise, then train and valid
                # row-wise, so a single preprocessor is fit on everything.
                data_train = np.concatenate((*self.train_X, self.train_Y), axis=1)
                data_valid = np.concatenate((*self.valid_X, self.valid_Y), axis=1)
                data = np.concatenate((data_train, data_valid), axis=0)
                self.preprocessor = self.preprocessing_func()
                dt_shp = np.shape(data_train)
                tX_shp = [np.shape(x) for x in self.train_X]
                preproc_data = self.preprocessor.fit_transform(data)
                # Split the transformed block back: rows [:n_train] are
                # training data; columns are re-cut per original input widths,
                # the remaining columns are the targets.
                acc, self.train_X = 0, list()
                for shp in tX_shp:
                    self.train_X.append(preproc_data[: dt_shp[0], acc : acc + shp[1]])
                    acc += shp[1]
                self.train_Y = preproc_data[: dt_shp[0], acc:]
                acc, self.valid_X = 0, list()
                for shp in tX_shp:
                    self.valid_X.append(preproc_data[dt_shp[0] :, acc : acc + shp[1]])
                    acc += shp[1]
                self.valid_Y = preproc_data[dt_shp[0] :, acc:]
        else:
            logger.info("no preprocessing function")
    def set_dataset_train(self):
        """Build the training ``tf.data`` pipeline, sharded across Horovod ranks."""
        if self.data_config_type == "ndarray":
            # Map multi-output targets to the "output_i" layer names.
            if type(self.train_Y) is list:
                output_mapping = {
                    f"output_{i}": tY for i, tY in enumerate(self.train_Y)
                }
            else:
                output_mapping = self.train_Y
            self.dataset_train = tf.data.Dataset.from_tensor_slices(
                (
                    {f"input_{i}": tX for i, tX in enumerate(self.train_X)},
                    output_mapping,
                )
            )
        else:  # self.data_config_type == "gen"
            self.dataset_train = tf.data.Dataset.from_generator(
                self.train_gen,
                output_types=self.data_types,
                output_shapes=(
                    {
                        f"input_{i}": tf.TensorShape(
                            [*self.data_shapes[0][f"input_{i}"]]
                        )
                        for i in range(len(self.data_shapes[0]))
                    },
                    tf.TensorShape([*self.data_shapes[1]]),
                ),
            )
        # Each Horovod rank only consumes its own shard of the data.
        self.dataset_train = self.dataset_train.shard(
            num_shards=hvd.size(), index=hvd.rank()
        )
        self.dataset_train = self.dataset_train.shuffle(
            self.train_size // hvd.size(), reshuffle_each_iteration=True
        )
        self.dataset_train = self.dataset_train.repeat(self.num_epochs)
        # Optional user-provided augmentation, applied per sample.
        if hasattr(self, "augment_func"):
            logger.info("Data augmentation set.")
            self.dataset_train = self.dataset_train.map(
                self.augment_func, num_parallel_calls=AUTOTUNE
            )
        self.dataset_train = self.dataset_train.batch(self.batch_size)
        self.dataset_train = self.dataset_train.prefetch(AUTOTUNE)
        # self.dataset_train = self.dataset_train.repeat()
    def set_dataset_valid(self):
        """Build the validation ``tf.data`` pipeline (batched and repeated;
        no sharding or shuffling is applied here)."""
        if self.data_config_type == "ndarray":
            # Map multi-output targets to the "output_i" layer names.
            if type(self.valid_Y) is list:
                output_mapping = {
                    f"output_{i}": vY for i, vY in enumerate(self.valid_Y)
                }
            else:
                output_mapping = self.valid_Y
            self.dataset_valid = tf.data.Dataset.from_tensor_slices(
                (
                    {f"input_{i}": vX for i, vX in enumerate(self.valid_X)},
                    output_mapping,
                )
            )
        else:
            self.dataset_valid = tf.data.Dataset.from_generator(
                self.valid_gen,
                output_types=self.data_types,
                output_shapes=(
                    {
                        f"input_{i}": tf.TensorShape(
                            [*self.data_shapes[0][f"input_{i}"]]
                        )
                        for i in range(len(self.data_shapes[0]))
                    },
                    tf.TensorShape([*self.data_shapes[1]]),
                ),
            )
        self.dataset_valid = self.dataset_valid.batch(self.batch_size).repeat()
    def model_compile(self):
        """Compile the model, wrapping the optimizer with Horovod's
        ``DistributedOptimizer`` so gradients are averaged across ranks."""
        optimizer_fn = U.selectOptimizer_keras(self.optimizer_name)
        opti_parameters = signature(optimizer_fn).parameters
        params = {}
        # "lr" and "learning_rate" is checked depending if Keras or Tensorflow optimizer is used
        if "lr" in opti_parameters:
            params["lr"] = self.learning_rate
        elif "learning_rate" in opti_parameters:
            params["learning_rate"] = self.learning_rate
        else:
            raise DeephyperRuntimeError(
                f"The learning_rate parameter is not found amoung optimiser arguments: {opti_parameters}"
            )
        # Only forward optional hyperparameters the optimizer actually accepts.
        if "epsilon" in opti_parameters:
            params["epsilon"] = self.optimizer_eps
        if self.clipvalue is not None:
            params["clipvalue"] = self.clipvalue
        self.optimizer = hvd.DistributedOptimizer(optimizer_fn(**params))
        # A dict of losses implies multiple outputs, which also need loss_weights.
        if type(self.loss_metrics) is dict:
            self.model.compile(
                optimizer=self.optimizer,
                loss=self.loss_metrics,
                loss_weights=self.loss_weights,
                metrics=self.metrics_name,
            )
        else:
            self.model.compile(
                optimizer=self.optimizer,
                loss=self.loss_metrics,
                metrics=self.metrics_name,
            )
    def predict(self, dataset: str = "valid", keep_normalize: bool = False) -> tuple:
        """Run the model on a dataset and return true and predicted outputs.

        Args:
            dataset (str, optional): 'valid' or 'train'. Defaults to 'valid'.
            keep_normalize (bool, optional): if False then the preprocessing will be reversed after prediction. if True nothing will be reversed. Defaults to False.

        Raises:
            DeephyperRuntimeError: if ``dataset`` is neither 'valid' nor 'train'.

        Returns:
            tuple: (y_true, y_pred)
        """
        if not (dataset == "valid" or dataset == "train"):
            raise DeephyperRuntimeError(
                "dataset parameter should be equal to: 'valid' or 'train'"
            )
        if dataset == "valid":
            # ceil(valid_size / batch_size) so the last partial batch is included
            valid_steps = self.valid_size // self.batch_size
            if valid_steps * self.batch_size < self.valid_size:
                valid_steps += 1
            y_pred = self.model.predict(self.dataset_valid, steps=valid_steps)
        else:  # dataset == 'train'
            y_pred = self.model.predict(
                self.dataset_train, steps=self.train_steps_per_epoch
            )
        if (
            self.preprocessing_func
            and not keep_normalize
            and not self.data_config_type == "gen"
        ):
            if dataset == "valid":
                data_X, data_Y = self.valid_X, self.valid_Y
            else:  # dataset == 'train'
                data_X, data_Y = self.train_X, self.train_Y
            # Undo normalization: stack X with predictions (resp. true Y),
            # inverse-transform the full matrix, then slice the Y columns out.
            val_pred = np.concatenate((*data_X, y_pred), axis=1)
            val_orig = np.concatenate((*data_X, data_Y), axis=1)
            val_pred_trans = self.preprocessor.inverse_transform(val_pred)
            val_orig_trans = self.preprocessor.inverse_transform(val_orig)
            y_orig = val_orig_trans[:, -np.shape(data_Y)[1] :]
            y_pred = val_pred_trans[:, -np.shape(data_Y)[1] :]
        else:
            if self.data_config_type == "ndarray":
                y_orig = self.valid_Y if dataset == "valid" else self.train_Y
            else:
                # Exhaust a fresh generator to collect the true targets.
                gen = self.valid_gen() if dataset == "valid" else self.train_gen()
                y_orig = np.array([e[-1] for e in gen])
        return y_orig, y_pred
    def evaluate(self, dataset="train"):
        """Evaluate the performance of your model for the same configuration.

        Args:
            dataset (str, optional): must be "train" or "valid". If "train" then metrics will be evaluated on the training dataset. If "valid" then metrics will be evaluated on the "validation" dataset. Defaults to 'train'.

        Returns:
            list: a list of scalar values corresponding to config loss & metrics.
        """
        if dataset == "train":
            return self.model.evaluate(
                self.dataset_train, steps=self.train_steps_per_epoch
            )
        else:
            return self.model.evaluate(
                self.dataset_valid, steps=self.valid_steps_per_epoch
            )
    def train(
        self, num_epochs: int = None, with_pred: bool = False, last_only: bool = False
    ):
        """Train the model.

        Args:
            num_epochs (int, optional): override the num_epochs passed to init the Trainer. Defaults to None, will use the num_epochs passed to init the Trainer.
            with_pred (bool, optional): will compute a prediction after the training and will add ('y_true', 'y_pred') to the output history. Defaults to False, will skip it (use it to save compute time).
            last_only (bool, optional): will compute metrics after the last epoch only. Defaults to False, will compute metrics after each training epoch (use it to save compute time).

        Raises:
            DeephyperRuntimeError: raised when the ``num_epochs < 0``.

        Returns:
            dict: a dictionary corresponding to the training.
        """
        num_epochs = self.num_epochs if num_epochs is None else num_epochs
        self.init_history()
        if num_epochs > 0:
            time_start_training = time.time()  # TIMING
            if not last_only:
                logger.info(
                    "Trainer is computing metrics on validation after each training epoch."
                )
                history = self.model.fit(
                    self.dataset_train,
                    verbose=self.verbose,
                    epochs=num_epochs,
                    steps_per_epoch=self.train_steps_per_epoch,
                    callbacks=self.callbacks,
                    validation_data=self.dataset_valid,
                    validation_steps=self.valid_steps_per_epoch,
                )
            else:
                logger.info(
                    "Trainer is computing metrics on validation after the last training epoch."
                )
                # Run all but the last epoch without validation, then one final
                # epoch with validation so metrics are computed only once.
                if num_epochs > 1:
                    self.model.fit(
                        self.dataset_train,
                        verbose=self.verbose,
                        epochs=num_epochs - 1,
                        steps_per_epoch=self.train_steps_per_epoch,
                        callbacks=self.callbacks,
                    )
                history = self.model.fit(
                    self.dataset_train,
                    epochs=1,
                    verbose=self.verbose,
                    steps_per_epoch=self.train_steps_per_epoch,
                    callbacks=self.callbacks,
                    validation_data=self.dataset_valid,
                    validation_steps=self.valid_steps_per_epoch,
                )
            time_end_training = time.time()  # TIMING
            self.train_history["training_time"] = (
                time_end_training - time_start_training
            )
            self.train_history.update(history.history)
        elif num_epochs < 0:
            raise DeephyperRuntimeError(
                f"Trainer: number of epochs should be >= 0: {num_epochs}"
            )
        if with_pred:
            time_start_predict = time.time()
            y_true, y_pred = self.predict(dataset="valid")
            time_end_predict = time.time()
            self.train_history["val_predict_time"] = (
                time_end_predict - time_start_predict
            )
            self.train_history["y_true"] = y_true
            self.train_history["y_pred"] = y_pred
        return self.train_history
| 21,070 | 37.733456 | 241 | py |
deephyper | deephyper-master/deephyper/nas/trainer/_base.py | import inspect
import logging
import time
from inspect import signature
import deephyper.nas.trainer._arch as a
import deephyper.nas.trainer._utils as U
import numpy as np
import tensorflow as tf
from deephyper.core.exceptions import DeephyperRuntimeError
from deephyper.nas.losses import selectLoss
from deephyper.nas.metrics import selectMetric
logger = logging.getLogger(__name__)
class BaseTrainer:
    """Keras-based trainer: builds ``tf.data`` pipelines from an evaluation
    config, compiles the given model and runs training/evaluation/prediction."""

    def __init__(self, config, model):
        """Initialize the trainer.

        Args:
            config (dict): evaluation configuration holding the data,
                hyperparameters, loss/metrics and optional preprocessing.
            model: the Keras model to train.
        """
        self.cname = self.__class__.__name__
        self.config = config
        self.model = model
        self.callbacks = []
        self.data = self.config[a.data]
        # Hyperparameters, with defaults when absent from the config.
        self.config_hp = self.config[a.hyperparameters]
        self.optimizer_name = self.config_hp.get(a.optimizer, "adam")
        self.optimizer_eps = self.config_hp.get("epsilon", None)
        self.batch_size = self.config_hp.get(a.batch_size, 32)
        self.learning_rate = self.config_hp.get(a.learning_rate, 1e-3)
        self.num_epochs = self.config_hp.get(a.num_epochs, 1)
        self.shuffle_data = self.config_hp.get(a.shuffle_data, True)
        self.cache_data = self.config_hp.get(a.cache_data, True)
        self.batch = self.config_hp.get("batch", True)
        self.momentum = self.config_hp.get("momentum", 0.0)
        self.nesterov = self.config_hp.get("nesterov", False)
        self.label_smoothing = self.config_hp.get("label_smoothing", 0.0)
        self.verbose = self.config_hp.get("verbose", 1)
        # self.balanced = self.config_hp.get("balanced", False)
        self.setup_losses_and_metrics()
        # DATA loading
        self.data_config_type = None
        self.train_size = None
        self.valid_size = None
        self.train_steps_per_epoch = None
        self.valid_steps_per_epoch = None
        self.load_data()
        # DATA preprocessing
        self.preprocessing_func = None
        if self.config.get("preprocessing"):
            self.preprocessing_func = self.config["preprocessing"]["func"]
        self.preprocessor = None
        self.preprocess_data()
        # Dataset
        self.dataset_train = None
        self.set_dataset_train()
        self.dataset_valid = None
        self.set_dataset_valid()
        self.model_compile()
        self.train_history = None
        self.init_history()
        # Test on validation after each epoch
        if self.verbose == 1:
            logger.info("KerasTrainer instantiated")
            model.summary(print_fn=logger.info)
def init_history(self):
self.train_history = dict()
self.train_history["n_parameters"] = self.model.count_params()
    def _select_loss(self, loss):
        """Resolve a loss spec (name or dict of names) into Keras loss objects.

        If the resolved loss is a class, it is instantiated, forwarding
        ``label_smoothing`` when the class accepts it.
        """
        if type(loss) is dict:
            # Multi-output case: one loss per output name.
            loss = {k: selectLoss(v) for k, v in loss.items()}
        else:
            loss = selectLoss(loss)
        if inspect.isclass(loss):
            loss_parameters = signature(loss).parameters
            params = {}
            if "label_smoothing" in loss_parameters:
                params["label_smoothing"] = self.label_smoothing
            loss = loss(**params)
        return loss
    def setup_losses_and_metrics(self):
        """Resolve losses, loss weights, class weights and metrics from the config."""
        self.loss_metrics = self._select_loss(self.config[a.loss_metric])
        self.loss_weights = self.config.get("loss_weights")
        self.class_weights = self.config.get("class_weights")
        # Default to uniform weights for a multi-output (dict) loss.
        if self.loss_weights is None and type(self.loss_metrics) is dict:
            self.loss_weights = [1.0 for _ in range(len(self.loss_metrics))]
        if type(self.config[a.metrics]) is list:
            self.metrics_name = [selectMetric(m) for m in self.config[a.metrics]]
        else:
            # Dict form: each output maps to one metric or a list of metrics.
            def selectM(metric):
                if type(metric) is list:
                    return [selectMetric(m_i) for m_i in metric]
                else:
                    return selectMetric(metric)
            self.metrics_name = {
                n: selectM(m) for n, m in self.config[a.metrics].items()
            }
    def load_data(self):
        """Load data according to its config type and compute steps per epoch.

        Raises:
            DeephyperRuntimeError: if the data config type is unsupported.
        """
        logger.debug("load_data")
        self.data_config_type = U.check_data_config(self.data)
        logger.debug(f"data config type: {self.data_config_type}")
        if self.data_config_type == "gen":
            self.load_data_generator()
        elif self.data_config_type == "ndarray":
            self.load_data_ndarray()
        else:
            raise DeephyperRuntimeError(
                f"Data config is not supported by this Trainer: '{self.data_config_type}'!"
            )
        # prepare number of steps for training and validation
        # (integer ceil of size / batch_size so partial batches are counted)
        self.train_steps_per_epoch = self.train_size // self.batch_size
        if self.train_steps_per_epoch * self.batch_size < self.train_size:
            self.train_steps_per_epoch += 1
        self.valid_steps_per_epoch = self.valid_size // self.batch_size
        if self.valid_steps_per_epoch * self.batch_size < self.valid_size:
            self.valid_steps_per_epoch += 1
def load_data_generator(self):
self.train_gen = self.data["train_gen"]
self.valid_gen = self.data["valid_gen"]
self.data_types = self.data["types"]
self.data_shapes = self.data["shapes"]
self.train_size = self.data["train_size"]
self.valid_size = self.data["valid_size"]
def load_data_ndarray(self):
def f(x):
return type(x) is np.ndarray
# check data type
# Output data
if (
type(self.config[a.data][a.train_Y]) is np.ndarray
and type(self.config[a.data][a.valid_Y]) is np.ndarray
):
self.train_Y = self.config[a.data][a.train_Y]
self.valid_Y = self.config[a.data][a.valid_Y]
elif (
type(self.config[a.data][a.train_Y]) is list
and type(self.config[a.data][a.valid_Y]) is list
):
if not all(map(f, self.config[a.data][a.train_Y])) or not all(
map(f, self.config[a.data][a.valid_Y])
):
raise DeephyperRuntimeError(
"all outputs data should be of type np.ndarray !"
)
if (
len(self.config[a.data][a.train_Y]) > 1
and len(self.config[a.data][a.valid_Y]) > 1
):
self.train_Y = self.config[a.data][a.train_Y]
self.valid_Y = self.config[a.data][a.valid_Y]
else:
self.train_Y = self.config[a.data][a.train_Y][0]
self.valid_Y = self.config[a.data][a.valid_Y][0]
else:
raise DeephyperRuntimeError(
f"Data are of an unsupported type and should be of same type: type(self.config['data']['train_Y'])=={type(self.config[a.data][a.train_Y])} and type(self.config['data']['valid_Y'])=={type(self.config[a.valid_Y][a.valid_X])} !"
)
# Input data
if (
type(self.config[a.data][a.train_X]) is np.ndarray
and type(self.config[a.data][a.valid_X]) is np.ndarray
):
self.train_X = [self.config[a.data][a.train_X]]
self.valid_X = [self.config[a.data][a.valid_X]]
elif (
type(self.config[a.data][a.train_X]) is list
and type(self.config[a.data][a.valid_X]) is list
):
if not all(map(f, self.config[a.data][a.train_X])) or not all(
map(f, self.config[a.data][a.valid_X])
):
raise DeephyperRuntimeError(
"all inputs data should be of type np.ndarray !"
)
if (
len(self.config[a.data][a.train_X]) > 1
and len(self.config[a.data][a.valid_X]) > 1
):
self.train_X = self.config[a.data][a.train_X]
self.valid_X = self.config[a.data][a.valid_X]
else:
self.train_X = self.config[a.data][a.train_X][0]
self.valid_X = self.config[a.data][a.valid_X][0]
else:
raise DeephyperRuntimeError(
f"Data are of an unsupported type and should be of same type: type(self.config['data']['train_X'])=={type(self.config[a.data][a.train_X])} and type(self.config['data']['valid_X'])=={type(self.config[a.data][a.valid_X])} !"
)
logger.debug(f"{self.cname}: {len(self.train_X)} inputs")
# check data length
self.train_size = np.shape(self.train_X[0])[0]
if not all(map(lambda x: np.shape(x)[0] == self.train_size, self.train_X)):
raise DeephyperRuntimeError(
"All training inputs data should have same length!"
)
self.valid_size = np.shape(self.valid_X[0])[0]
if not all(map(lambda x: np.shape(x)[0] == self.valid_size, self.valid_X)):
raise DeephyperRuntimeError(
"All validation inputs data should have same length!"
)
    def preprocess_data(self):
        """Fit the configured preprocessor on train data and transform both splits.

        No-op for generator data or when no preprocessing function is set;
        inputs and targets must share the same rank to be handled.

        Raises:
            DeephyperRuntimeError: if the data were already preprocessed once.
        """
        logger.debug("Starting preprocess of data")
        if self.data_config_type == "gen":
            logger.warn("Cannot preprocess data with generator!")
            return
        if self.preprocessor is not None:
            raise DeephyperRuntimeError("You can only preprocess data one time.")
        if self.preprocessing_func:
            logger.debug(f"preprocess_data with: {str(self.preprocessing_func)}")
            if all(
                [
                    len(np.shape(tX)) == len(np.shape(self.train_Y))
                    for tX in self.train_X
                ]
            ):
                # Stack inputs and target along the last axis so one
                # preprocessor covers the full feature+target matrix.
                data_train = np.concatenate((*self.train_X, self.train_Y), axis=-1)
                data_valid = np.concatenate((*self.valid_X, self.valid_Y), axis=-1)
                self.preprocessor = self.preprocessing_func()
                tX_shp = [np.shape(x) for x in self.train_X]
                # Fit only on train rows; valid rows are transformed with the
                # fitted statistics (no leakage).
                preproc_data_train = self.preprocessor.fit_transform(data_train)
                preproc_data_valid = self.preprocessor.transform(data_valid)
                # Split the transformed matrices back into per-input blocks and Y.
                acc, self.train_X = 0, list()
                for shp in tX_shp:
                    self.train_X.append(preproc_data_train[..., acc : acc + shp[1]])
                    acc += shp[1]
                self.train_Y = preproc_data_train[..., acc:]
                acc, self.valid_X = 0, list()
                for shp in tX_shp:
                    self.valid_X.append(preproc_data_valid[..., acc : acc + shp[1]])
                    acc += shp[1]
                self.valid_Y = preproc_data_valid[..., acc:]
            else:
                logger.warn(
                    f"Skipped preprocess because shape {np.shape(self.train_Y)} is not handled!"
                )
        else:
            logger.info("Skipped preprocess of data because no function is defined!")
    def set_dataset_train(self):
        """Build the training ``tf.data`` pipeline (cache/shuffle/batch/prefetch,
        each step controlled by the corresponding hyperparameter)."""
        if self.data_config_type == "ndarray":
            # Map multi-output targets to the "output_i" layer names.
            if type(self.train_Y) is list:
                output_mapping = {
                    f"output_{i}": tY for i, tY in enumerate(self.train_Y)
                }
            else:
                output_mapping = self.train_Y
            self.dataset_train = tf.data.Dataset.from_tensor_slices(
                (
                    {f"input_{i}": tX for i, tX in enumerate(self.train_X)},
                    output_mapping,
                )
            )
        else:  # self.data_config_type == "gen"
            self.dataset_train = tf.data.Dataset.from_generator(
                self.train_gen,
                output_signature=self._get_output_signatures(),
            )
        if self.cache_data:
            self.dataset_train = self.dataset_train.cache()
        if self.shuffle_data:
            self.dataset_train = self.dataset_train.shuffle(
                self.train_size, reshuffle_each_iteration=True
            )
        if self.batch:
            self.dataset_train = self.dataset_train.batch(self.batch_size)
        self.dataset_train = self.dataset_train.prefetch(tf.data.AUTOTUNE).repeat(
            self.num_epochs
        )
    def set_dataset_valid(self):
        """Build the validation ``tf.data`` pipeline (always cached and batched,
        never shuffled)."""
        if self.data_config_type == "ndarray":
            # Map multi-output targets to the "output_i" layer names.
            if type(self.valid_Y) is list:
                output_mapping = {
                    f"output_{i}": vY for i, vY in enumerate(self.valid_Y)
                }
            else:
                output_mapping = self.valid_Y
            self.dataset_valid = tf.data.Dataset.from_tensor_slices(
                (
                    {f"input_{i}": vX for i, vX in enumerate(self.valid_X)},
                    output_mapping,
                )
            )
        else:
            self.dataset_valid = tf.data.Dataset.from_generator(
                self.valid_gen,
                output_signature=self._get_output_signatures(valid=True),
            )
        self.dataset_valid = self.dataset_valid.cache()
        self.dataset_valid = self.dataset_valid.batch(self.batch_size)
        self.dataset_valid = self.dataset_valid.prefetch(tf.data.AUTOTUNE).repeat(
            self.num_epochs
        )
    def _get_output_signatures(self, valid=False):
        """Return the ``(inputs_spec, output_spec)`` for ``Dataset.from_generator``.

        When ``self.batch`` is set (or for the validation set) the specs carry
        the raw shapes from ``self.data_shapes``; otherwise a leading ``None``
        batch dimension is added to each spec — presumably because the
        generator then yields pre-batched tensors (TODO confirm with callers).
        """
        if self.batch or valid:
            return (
                {
                    f"input_{i}": tf.TensorSpec(
                        shape=(*self.data_shapes[0][f"input_{i}"],),
                        dtype=self.data_types[0][f"input_{i}"],
                    )
                    for i in range(len(self.data_shapes[0]))
                },
                tf.TensorSpec(
                    shape=(*self.data_shapes[1],),
                    dtype=self.data_types[1],
                ),
            )
        else:
            return (
                {
                    f"input_{i}": tf.TensorSpec(
                        shape=(
                            None,
                            *self.data_shapes[0][f"input_{i}"],
                        ),
                        dtype=self.data_types[0][f"input_{i}"],
                    )
                    for i in range(len(self.data_shapes[0]))
                },
                tf.TensorSpec(
                    shape=(None, *self.data_shapes[1]),
                    dtype=self.data_types[1],
                ),
            )
def _setup_optimizer(self):
optimizer_fn = U.selectOptimizer_keras(self.optimizer_name)
opti_parameters = signature(optimizer_fn).parameters
params = {}
if "lr" in opti_parameters:
params["lr"] = self.learning_rate
elif "learning_rate" in opti_parameters:
params["learning_rate"] = self.learning_rate
else:
raise DeephyperRuntimeError(
f"The learning_rate parameter is not found amoung optimiser arguments: {opti_parameters}"
)
if "epsilon" in opti_parameters:
params["epsilon"] = self.optimizer_eps
if "momentum" in opti_parameters:
params["momentum"] = self.momentum
if "nesterov" in opti_parameters:
params["nesterov"] = self.nesterov
self.optimizer = optimizer_fn(**params)
def model_compile(self):
self._setup_optimizer()
if type(self.loss_metrics) is dict:
self.model.compile(
optimizer=self.optimizer,
loss=self.loss_metrics,
loss_weights=self.loss_weights,
metrics=self.metrics_name,
)
else:
self.model.compile(
optimizer=self.optimizer,
loss=self.loss_metrics,
metrics=self.metrics_name,
)
    def predict(self, dataset: str = "valid", keep_normalize: bool = False) -> tuple:
        """Run the model on a dataset and return true and predicted outputs.

        Args:
            dataset (str, optional): 'valid' or 'train'. Defaults to 'valid'.
            keep_normalize (bool, optional): if False then the preprocessing will be reversed after prediction. if True nothing will be reversed. Defaults to False.

        Raises:
            DeephyperRuntimeError: if ``dataset`` is neither 'valid' nor 'train'.

        Returns:
            tuple: (y_true, y_pred)
        """
        if not (dataset == "valid" or dataset == "train"):
            raise DeephyperRuntimeError(
                "dataset parameter should be equal to: 'valid' or 'train'"
            )
        if dataset == "valid":
            y_pred = self.model.predict(
                self.dataset_valid, steps=self.valid_steps_per_epoch
            )
        else:  # dataset == 'train'
            y_pred = self.model.predict(
                self.dataset_train, steps=self.train_steps_per_epoch
            )
        if (
            self.preprocessing_func
            and not keep_normalize
            and not self.data_config_type == "gen"
        ):
            if dataset == "valid":
                data_X, data_Y = self.valid_X, self.valid_Y
            else:  # dataset == 'train'
                data_X, data_Y = self.train_X, self.train_Y
            # Undo normalization: stack X with predictions (resp. true Y),
            # inverse-transform the full matrix, then slice the Y columns out.
            val_pred = np.concatenate((*data_X, y_pred), axis=1)
            val_orig = np.concatenate((*data_X, data_Y), axis=1)
            val_pred_trans = self.preprocessor.inverse_transform(val_pred)
            val_orig_trans = self.preprocessor.inverse_transform(val_orig)
            y_orig = val_orig_trans[:, -np.shape(data_Y)[1] :]
            y_pred = val_pred_trans[:, -np.shape(data_Y)[1] :]
        else:
            if self.data_config_type == "ndarray":
                y_orig = self.valid_Y if dataset == "valid" else self.train_Y
            else:
                # Exhaust a fresh generator to collect the true targets.
                gen = self.valid_gen() if dataset == "valid" else self.train_gen()
                y_orig = np.array([e[-1] for e in gen])
        return y_orig, y_pred
def evaluate(self, dataset="train"):
"""Evaluate the performance of your model for the same configuration.
Args:
dataset (str, optional): must be "train" or "valid". If "train" then metrics will be evaluated on the training dataset. If "valid" then metrics will be evaluated on the "validation" dataset. Defaults to 'train'.
Returns:
list: a list of scalar values corresponding do config loss & metrics.
"""
if dataset == "train":
return self.model.evaluate(
self.dataset_train, steps=self.train_steps_per_epoch
)
else:
return self.model.evaluate(
self.dataset_valid, steps=self.valid_steps_per_epoch
)
    def train(
        self, num_epochs: int = None, with_pred: bool = False, last_only: bool = False
    ):
        """Train the model.

        Args:
            num_epochs (int, optional): override the num_epochs passed to init the Trainer. Defaults to None, will use the num_epochs passed to init the Trainer.
            with_pred (bool, optional): will compute a prediction after the training and will add ('y_true', 'y_pred') to the output history. Defaults to False, will skip it (use it to save compute time).
            last_only (bool, optional): will compute metrics after the last epoch only. Defaults to False, will compute metrics after each training epoch (use it to save compute time).

        Raises:
            DeephyperRuntimeError: raised when the ``num_epochs < 0``.

        Returns:
            dict: a dictionary corresponding to the training.
        """
        num_epochs = self.num_epochs if num_epochs is None else num_epochs
        self.init_history()
        if num_epochs > 0:
            time_start_training = time.time()  # TIMING
            if not last_only:
                logger.info(
                    "Trainer is computing metrics on validation after each training epoch."
                )
                history = self.model.fit(
                    self.dataset_train,
                    verbose=self.verbose,
                    epochs=num_epochs,
                    steps_per_epoch=self.train_steps_per_epoch,
                    callbacks=self.callbacks,
                    validation_data=self.dataset_valid,
                    validation_steps=self.valid_steps_per_epoch,
                    class_weight=self.class_weights,
                )
            else:
                logger.info(
                    "Trainer is computing metrics on validation after the last training epoch."
                )
                # Run all but the last epoch without validation, then one final
                # epoch with validation so metrics are computed only once.
                if num_epochs > 1:
                    self.model.fit(
                        self.dataset_train,
                        verbose=self.verbose,
                        epochs=num_epochs - 1,
                        steps_per_epoch=self.train_steps_per_epoch,
                        callbacks=self.callbacks,
                        class_weight=self.class_weights,
                    )
                history = self.model.fit(
                    self.dataset_train,
                    epochs=1,
                    verbose=self.verbose,
                    steps_per_epoch=self.train_steps_per_epoch,
                    callbacks=self.callbacks,
                    validation_data=self.dataset_valid,
                    validation_steps=self.valid_steps_per_epoch,
                    class_weight=self.class_weights,
                )
            time_end_training = time.time()  # TIMING
            self.train_history["training_time"] = (
                time_end_training - time_start_training
            )
            self.train_history.update(history.history)
        elif num_epochs < 0:
            raise DeephyperRuntimeError(
                f"Trainer: number of epochs should be >= 0: {num_epochs}"
            )
        if with_pred:
            time_start_predict = time.time()
            y_true, y_pred = self.predict(dataset="valid")
            time_end_predict = time.time()
            self.train_history["val_predict_time"] = (
                time_end_predict - time_start_predict
            )
            self.train_history["y_true"] = y_true
            self.train_history["y_pred"] = y_pred
        return self.train_history
| 21,923 | 37.0625 | 241 | py |
deephyper | deephyper-master/deephyper/nas/spacelib/tabular/one_layer.py | import tensorflow as tf
from deephyper.nas import KSearchSpace
from deephyper.nas.node import ConstantNode, VariableNode
from deephyper.nas.operation import operation, Concatenate
Dense = operation(tf.keras.layers.Dense)
Dropout = operation(tf.keras.layers.Dropout)
class OneLayerSpace(KSearchSpace):
    """Search space with a single variable dense layer per input.

    Each input gets one :class:`VariableNode` choosing a Dense width in
    [1, 1000); multiple inputs are concatenated before the output layer.
    """

    def __init__(
        self, input_shape, output_shape, batch_size=None, seed=None, regression=True
    ):
        super().__init__(input_shape, output_shape, batch_size=batch_size, seed=seed)
        self.regression = regression

    def build(self):
        """Wire the search-space graph and return ``self``."""
        if type(self.input_shape) is list:
            # One variable layer per input, then concatenate them.
            # FIX: removed a stray debug ``print(i)`` left in the loop.
            vnodes = []
            for i in range(len(self.input_shape)):
                vn = self.gen_vnode()
                vnodes.append(vn)
                self.connect(self.input_nodes[i], vn)
            prev_node = ConstantNode(Concatenate(self, vnodes))
        else:
            prev_node = self.gen_vnode()
            self.connect(self.input_nodes[0], prev_node)
        output_node = ConstantNode(
            Dense(
                self.output_shape[0], activation=None if self.regression else "softmax"
            )
        )
        self.connect(prev_node, output_node)
        return self

    def gen_vnode(self) -> VariableNode:
        """Return a variable node offering Dense layers of widths 1..999."""
        vnode = VariableNode()
        for i in range(1, 1000):
            vnode.add_op(Dense(i, tf.nn.relu))
        return vnode
if __name__ == "__main__":
    # Manual smoke test: build the space, sample one model and plot it.
    from tensorflow.keras.utils import plot_model
    shapes = dict(input_shape=[(10,), (10,)], output_shape=(1,))
    space = OneLayerSpace(**shapes).build()
    model = space.sample()
    plot_model(model)
| 1,655 | 27.551724 | 87 | py |
deephyper | deephyper-master/deephyper/nas/spacelib/tabular/supervised_reg_auto_encoder.py | import tensorflow as tf
from deephyper.nas import KSearchSpace
from deephyper.nas.node import ConstantNode, VariableNode
from deephyper.nas.operation import Identity, operation
Dense = operation(tf.keras.layers.Dense)
class SupervisedRegAutoEncoderSpace(KSearchSpace):
    """Search space combining an auto-encoder with a supervised regressor head.

    The encoder/decoder layer widths follow ``units``, expected to be a
    "V"-shaped list (strictly decreasing then increasing) whose single
    minimum is the latent bottleneck. A regressor of ``num_layers`` variable
    dense layers is attached to the bottleneck node.
    """

    def __init__(
        self,
        input_shape,
        output_shape,
        batch_size=None,
        seed=None,
        units=None,
        num_layers=5,
    ):
        super().__init__(input_shape, output_shape, batch_size=batch_size, seed=seed)
        # None sentinel avoids a shared mutable default argument.
        self.units = [128, 64, 32, 16, 8, 16, 32, 64, 128] if units is None else units
        self.num_layers = num_layers

    def build(self):
        inp = self.input_nodes[0]
        # auto-encoder
        # FIX: use the configured widths; a hard-coded copy previously
        # shadowed the ``units`` constructor parameter.
        units = self.units
        prev_node = inp
        d = 1  # direction: 1 while encoding (shrinking), -1 after the bottleneck
        for i in range(len(units)):
            vnode = VariableNode()
            vnode.add_op(Identity())
            if d == 1 and units[i] < units[i + 1]:
                # Bottleneck reached: offer widths from 2 up to units[i].
                d = -1
                for u in range(min(2, units[i]), max(2, units[i]) + 1, 2):
                    vnode.add_op(Dense(u, tf.nn.relu))
                latente_space = vnode
            else:
                for u in range(
                    min(units[i], units[i + d]), max(units[i], units[i + d]) + 1, 2
                ):
                    vnode.add_op(Dense(u, tf.nn.relu))
            self.connect(prev_node, vnode)
            prev_node = vnode
        out2 = ConstantNode(op=Dense(self.output_shape[0][0], name="output_0"))
        self.connect(prev_node, out2)
        # regressor, rooted at the latent bottleneck node
        prev_node = latente_space
        for _ in range(self.num_layers):
            vnode = VariableNode()
            for i in range(16, 129, 16):
                vnode.add_op(Dense(i, tf.nn.relu))
            self.connect(prev_node, vnode)
            prev_node = vnode
        out1 = ConstantNode(op=Dense(self.output_shape[1][0], name="output_1"))
        self.connect(prev_node, out1)
        return self
if __name__ == "__main__":
    # Manual smoke test: build the space, sample one model and plot it.
    from tensorflow.keras.utils import plot_model
    shapes = dict(input_shape=(100,), output_shape=[(100,), (10,)])
    space = SupervisedRegAutoEncoderSpace(**shapes).build()
    model = space.sample()
    plot_model(model)
| 2,268 | 28.855263 | 85 | py |
deephyper | deephyper-master/deephyper/nas/spacelib/tabular/feed_forward.py | import tensorflow as tf
from deephyper.nas import KSearchSpace
from deephyper.nas.node import ConstantNode, VariableNode
from deephyper.nas.operation import Identity, operation
Dense = operation(tf.keras.layers.Dense)
class FeedForwardSpace(KSearchSpace):
    """Feed-forward search space without skip connections.

    Stacks up to ``num_layers`` variable dense layers (each may also resolve
    to an Identity, i.e. be skipped) and searches over the layer widths.

    Args:
        input_shape (tuple, optional): true shape of the inputs (no batch size dimension).
        output_shape (tuple, optional): true shape of the outputs (no batch size dimension).
        batch_size (int, optional): fixed batch size, if any.
        seed (int, optional): random seed.
        regression (bool, optional): if True the output layer is linear, otherwise softmax.
        num_units (tuple, optional): ``range(start, end[, step])`` arguments giving the candidate widths.
        num_layers (int, optional): maximum number of dense layers.
    """

    def __init__(
        self,
        input_shape,
        output_shape,
        batch_size=None,
        seed=None,
        regression=True,
        num_units=(1, 11),
        num_layers=10,
    ):
        super().__init__(input_shape, output_shape, batch_size=batch_size, seed=seed)
        self.regression = regression
        self.num_units = num_units
        self.num_layers = num_layers

    def build(self):
        """Wire the search-space graph and return ``self``."""
        previous = self.input_nodes[0]
        for _ in range(self.num_layers):
            layer = self._variable_dense()
            self.connect(previous, layer)
            previous = layer
        final_activation = None if self.regression else "softmax"
        head = ConstantNode(Dense(self.output_shape[0], activation=final_activation))
        self.connect(previous, head)
        return self

    def _variable_dense(self):
        """One searchable layer: Identity (skip) or a Dense of each candidate width."""
        node = VariableNode()
        node.add_op(Identity())
        for width in range(*self.num_units):
            node.add_op(Dense(width, tf.nn.relu))
        return node
if __name__ == "__main__":
    # Manual smoke test: build the space, sample one model and plot it.
    from tensorflow.keras.utils import plot_model
    shapes = dict(input_shape=(10,), output_shape=(1,))
    space = FeedForwardSpace(**shapes).build()
    model = space.sample()
    plot_model(model)
| 2,179 | 32.030303 | 150 | py |
deephyper | deephyper-master/deephyper/nas/spacelib/tabular/dense_skipco.py | import collections
import tensorflow as tf
from deephyper.nas import KSearchSpace
from deephyper.nas.node import ConstantNode, VariableNode
from deephyper.nas.operation import operation, Zero, Connect, AddByProjecting, Identity
Dense = operation(tf.keras.layers.Dense)
Dropout = operation(tf.keras.layers.Dropout)
class DenseSkipCoSpace(KSearchSpace):
    """Dense search space with optional skip connections.

    Stacks ``num_layers`` variable dense cells; each cell output is merged
    (``AddByProjecting``) with up to 3 previous anchor points through
    searchable (``Zero`` / ``Connect``) skip connections.
    """

    def __init__(
        self,
        input_shape,
        output_shape,
        batch_size=None,
        seed=None,
        regression=True,
        num_layers=10,
        dropout=0.0,
    ):
        super().__init__(input_shape, output_shape, batch_size=batch_size, seed=seed)
        self.regression = regression
        self.num_layers = num_layers
        self.dropout = dropout

    def build(self):
        """Wire the search-space graph and return ``self``."""
        source = prev_input = self.input_nodes[0]
        # look over skip connections within a range of the 3 previous nodes
        anchor_points = collections.deque([source], maxlen=3)
        for _ in range(self.num_layers):
            vnode = VariableNode()
            self.add_dense_to_(vnode)
            self.connect(prev_input, vnode)
            # * Cell output
            cell_output = vnode
            cmerge = ConstantNode()
            cmerge.set_op(AddByProjecting(self, [cell_output], activation="relu"))
            for anchor in anchor_points:
                skipco = VariableNode()
                skipco.add_op(Zero())
                skipco.add_op(Connect(self, anchor))
                self.connect(skipco, cmerge)
            prev_input = cmerge
            # ! for next iter
            anchor_points.append(prev_input)
        # FIX: only insert a Dropout node for a strictly positive rate; the
        # previous ``>= 0.0`` test always added a no-op Dropout(0.0) layer
        # for the default rate.
        if self.dropout > 0.0:
            dropout_node = ConstantNode(op=Dropout(rate=self.dropout))
            self.connect(prev_input, dropout_node)
            prev_input = dropout_node
        output_node = ConstantNode(
            Dense(
                self.output_shape[0], activation=None if self.regression else "softmax"
            )
        )
        self.connect(prev_input, output_node)
        return self

    def add_dense_to_(self, node):
        """Populate ``node`` with Identity plus Dense ops (widths 16..96, several activations)."""
        node.add_op(Identity())  # we do not want to create a layer in this case
        activations = [None, tf.nn.swish, tf.nn.relu, tf.nn.tanh, tf.nn.sigmoid]
        for units in range(16, 97, 16):
            for activation in activations:
                node.add_op(Dense(units=units, activation=activation))
if __name__ == "__main__":
    # Manual smoke test: build the space, sample one model and plot it.
    from tensorflow.keras.utils import plot_model
    shapes = dict(input_shape=(10,), output_shape=(1,))
    space = DenseSkipCoSpace(**shapes).build()
    model = space.sample()
    plot_model(model)
| 2,637 | 28.311111 | 87 | py |
deephyper | deephyper-master/deephyper/nas/run/_run_horovod.py | """The :func:`deephyper.nas.run.horovod.run` function is used to evaluate a deep neural network by enabling data-parallelism with Horovod to the :func:`deephyper.nas.run.alpha.run` function. This function will automatically apply the linear scaling rule to the learning rate and batch size given the current number of ranks (i.e., the initial learning rate and batch size are scaled by the number of ranks).
"""
import os
import traceback
import logging
import numpy as np
import tensorflow as tf
from deephyper.keras.callbacks import import_callback
import horovod.tensorflow.keras as hvd
import deephyper.nas.trainer._arch as a
from deephyper.nas.trainer import HorovodTrainer
from deephyper.nas.run._util import (
compute_objective,
load_config,
preproc_trainer,
save_history,
setup_data,
get_search_space,
)
logger = logging.getLogger(__name__)
# Default parameters for the supported Keras callbacks; user-provided
# overrides are merged into these dictionaries at runtime.
default_callbacks_config = {
    "EarlyStopping": dict(
        monitor="val_loss", min_delta=0, mode="min", verbose=0, patience=0
    ),
    "ModelCheckpoint": dict(
        monitor="val_loss",
        mode="min",
        save_best_only=True,
        verbose=1,
        filepath="model.h5",
        save_weights_only=False,
    ),
    "TensorBoard": dict(
        log_dir="",
        histogram_freq=0,
        batch_size=32,
        write_graph=False,
        write_grads=False,
        write_images=False,
        update_freq="epoch",
    ),
    "CSVLogger": dict(filename="training.csv", append=True),
    "CSVExtendedLogger": dict(filename="training.csv", append=True),
    "TimeStopping": dict(),
    "ReduceLROnPlateau": dict(patience=5, verbose=0),
}

# Callbacks reserved for the Horovod root rank (rank 0): these write files,
# so running them on every worker would cause concurrent writes.
# Fix: "Tensorboard" -> "TensorBoard" so the entry actually matches the key
# used in `default_callbacks_config` (and in user configs); previously the
# TensorBoard callback was never recognized as root-only.
hvd_root_cb = ["ModelCheckpoint", "TensorBoard", "CSVLogger", "CSVExtendedLogger"]
def run_horovod(config: dict) -> float:
    """Evaluate one architecture with Horovod data-parallel training.

    Initializes Horovod, pins each rank to a GPU, applies the linear scaling
    rule to the batch size and learning rate, builds the model described by
    ``config["arch_seq"]``, trains it and returns the configured objective.

    Fixes vs. previous revision: removed the dead no-op statement
    ``config["seed"]`` and added a NaN guard on the objective, consistent
    with ``run_distributed_base_trainer``.

    Args:
        config (dict): evaluation configuration (``arch_seq``, ``seed``,
            ``hyperparameters``, ``objective``, ...).

    Returns:
        float: the computed objective, clipped to be >= -10; ``-10`` when the
        model cannot be created or the objective is NaN.
    """
    hvd.init()

    # Threading configuration
    if os.environ.get("OMP_NUM_THREADS", None) is not None:
        logger.debug(f"OMP_NUM_THREADS is {os.environ.get('OMP_NUM_THREADS')}")
        num_intra = int(os.environ.get("OMP_NUM_THREADS"))
        tf.config.threading.set_intra_op_parallelism_threads(num_intra)
        tf.config.threading.set_inter_op_parallelism_threads(2)

    # Pin each Horovod rank to one of the visible GPUs.
    if os.environ.get("CUDA_VISIBLE_DEVICES") is not None:
        devices = os.environ.get("CUDA_VISIBLE_DEVICES").split(",")
        os.environ["CUDA_VISIBLE_DEVICES"] = devices[hvd.rank()]

    seed = config["seed"]
    if seed is not None:
        np.random.seed(seed)
        tf.random.set_seed(seed)

    load_config(config)

    # Scale batch size and learning rate according to the number of ranks
    # (linear scaling rule, see https://arxiv.org/abs/1706.02677).
    initial_lr = config[a.hyperparameters][a.learning_rate]
    batch_size = config[a.hyperparameters][a.batch_size] * hvd.size()
    learning_rate = config[a.hyperparameters][a.learning_rate] * hvd.size()
    logger.info(
        f"Scaled: 'batch_size' from {config[a.hyperparameters][a.batch_size]} to {batch_size} "
    )
    logger.info(
        f"Scaled: 'learning_rate' from {config[a.hyperparameters][a.learning_rate]} to {learning_rate} "
    )
    config[a.hyperparameters][a.batch_size] = batch_size
    config[a.hyperparameters][a.learning_rate] = learning_rate

    input_shape, output_shape = setup_data(config)

    search_space = get_search_space(config, input_shape, output_shape, seed=seed)

    model_created = False
    try:
        model = search_space.sample(config["arch_seq"])
        model_created = True
    except Exception:
        logger.info("Error: Model creation failed...")
        logger.info(traceback.format_exc())

    if model_created:
        # Setup callbacks only
        callbacks = [
            # Horovod: broadcast initial variable states from rank 0 to all other processes.
            # This is necessary to ensure consistent initialization of all workers when
            # training is started with random weights or restored from a checkpoint.
            hvd.callbacks.BroadcastGlobalVariablesCallback(0),
            # Horovod: average metrics among workers at the end of every epoch.
            #
            # Note: This callback must be in the list before the ReduceLROnPlateau,
            # TensorBoard or other metrics-based callbacks.
            hvd.callbacks.MetricAverageCallback(),
            # Horovod: using `lr = 1.0 * hvd.size()` from the very beginning leads to worse final
            # accuracy. Scale the learning rate `lr = 1.0` ---> `lr = 1.0 * hvd.size()` during
            # the first five epochs. See https://arxiv.org/abs/1706.02677 for details.
            # !initial_lr argument is not available in horovod==0.19.0
            hvd.callbacks.LearningRateWarmupCallback(
                warmup_epochs=5, verbose=0, initial_lr=initial_lr
            ),
        ]

        cb_requires_valid = False  # Callbacks requires validation data
        callbacks_config = config[a.hyperparameters].get(a.callbacks, {})
        if callbacks_config is not None:
            for cb_name, cb_conf in callbacks_config.items():
                if cb_name in default_callbacks_config:
                    # cb_name in hvd_root_cb implies hvd.rank() == 0
                    if not (cb_name in hvd_root_cb) or hvd.rank() == 0:
                        default_callbacks_config[cb_name].update(cb_conf)

                        # Import and create corresponding callback
                        Callback = import_callback(cb_name)
                        callbacks.append(Callback(**default_callbacks_config[cb_name]))

                        if cb_name in ["EarlyStopping"]:
                            cb_requires_valid = "val" in cb_conf["monitor"].split("_")
                else:
                    logger.error(f"'{cb_name}' is not an accepted callback!")

        trainer = HorovodTrainer(config=config, model=model)

        trainer.callbacks.extend(callbacks)

        last_only, with_pred = preproc_trainer(config)
        last_only = last_only and not cb_requires_valid

        history = trainer.train(with_pred=with_pred, last_only=last_only)

        # save history (root rank only, to avoid concurrent writes)
        if hvd.rank() == 0:
            save_history(config.get("log_dir", None), history, config)

        result = compute_objective(config["objective"], history)
    else:
        # penalising actions if model cannot be created
        result = -1

    # Clip the penalty; also map NaN objectives to the minimum penalty,
    # consistently with run_distributed_base_trainer.
    if result < -10 or np.isnan(result):
        result = -10

    return result
| 6,420 | 37.680723 | 407 | py |
deephyper | deephyper-master/deephyper/nas/run/_run_distributed_base_trainer.py | """The :func:`deephyper.nas.run.tf_distributed.run` function is used to deploy a data-distributed training (on a single node) with ``tensorflow.distribute.MirroredStrategy``. It follows the same training pipeline as :func:`deephyper.nas.run.alpha.run`. Two hyperparameters arguments can be used to activate or deactivate the linear scaling rule (aka ``lsr``) for the learning rate and batch size, respectively:
.. code-block:: python
Problem.hyperparameters(
...
lsr_batch_size=True,
lsr_learning_rate=True,
warmup_lr=True,
warmup_epochs=5
...
)
The hyperparameters of the form ``patience_{Callback}`` such as ``patience_EarlyStopping`` and ``patience_ReduceLROnPlateau`` are valid when the corresponding callback is declared:
.. code-block:: python
Problem.hyperparameters(
...
patience_ReduceLROnPlateau=5,
patience_EarlyStopping=10,
callbacks=dict(
ReduceLROnPlateau=dict(monitor="val_r2", mode="max", verbose=0),
EarlyStopping=dict(monitor="val_r2", min_delta=0, mode="max", verbose=0),
),
...
)
"""
import traceback
import logging
import numpy as np
import tensorflow as tf
from deephyper.keras.callbacks import import_callback
from deephyper.keras.callbacks import LearningRateWarmupCallback
from deephyper.nas.run._util import (
compute_objective,
load_config,
preproc_trainer,
save_history,
setup_data,
get_search_space,
default_callbacks_config,
)
from deephyper.nas.trainer import BaseTrainer
import deephyper.nas.trainer._arch as a
logger = logging.getLogger(__name__)
def run_distributed_base_trainer(config):
    """Train one sampled architecture under ``tf.distribute.MirroredStrategy``.

    Loads the data, builds the model described by ``config["arch_seq"]`` inside
    the strategy scope, trains it, and returns the value of the configured
    objective (``-1`` penalty when the model cannot be built, clipped at -10).

    Args:
        config (dict): evaluation configuration (``arch_seq``, ``seed``,
            ``hyperparameters``, ``objective``, ...).

    Returns:
        float: the computed objective, clipped to be >= -10.
    """
    physical_devices = tf.config.list_physical_devices("GPU")
    try:
        for i in range(len(physical_devices)):
            tf.config.experimental.set_memory_growth(physical_devices[i], True)
    except Exception:
        # Invalid device or cannot modify virtual devices once initialized.
        pass

    distributed_strategy = tf.distribute.MirroredStrategy()
    n_replicas = distributed_strategy.num_replicas_in_sync

    seed = config["seed"]
    if seed is not None:
        np.random.seed(seed)
        tf.random.set_seed(seed)

    load_config(config)

    # Scale batch size and learning rate according to the number of ranks
    # (linear scaling rule), only when the corresponding "lsr_*"
    # hyperparameters are enabled.
    initial_lr = config[a.hyperparameters][a.learning_rate]
    if config[a.hyperparameters].get("lsr_batch_size"):
        batch_size = config[a.hyperparameters][a.batch_size] * n_replicas
    else:
        batch_size = config[a.hyperparameters][a.batch_size]
    if config[a.hyperparameters].get("lsr_learning_rate"):
        learning_rate = config[a.hyperparameters][a.learning_rate] * n_replicas
    else:
        learning_rate = config[a.hyperparameters][a.learning_rate]
    logger.info(
        f"Scaled: 'batch_size' from {config[a.hyperparameters][a.batch_size]} to {batch_size} "
    )
    logger.info(
        f"Scaled: 'learning_rate' from {config[a.hyperparameters][a.learning_rate]} to {learning_rate} "
    )
    config[a.hyperparameters][a.batch_size] = batch_size
    config[a.hyperparameters][a.learning_rate] = learning_rate

    input_shape, output_shape = setup_data(config)

    search_space = get_search_space(config, input_shape, output_shape, seed=seed)

    model_created = False
    # Model and callbacks must be created inside the strategy scope so that
    # variables are mirrored across replicas.
    with distributed_strategy.scope():
        try:
            model = search_space.sample(config["arch_seq"])
            model_created = True
        except Exception:
            logger.info("Error: Model creation failed...")
            logger.info(traceback.format_exc())
        else:
            # Setup callbacks
            callbacks = []
            cb_requires_valid = False  # Callbacks requires validation data
            callbacks_config = config["hyperparameters"].get("callbacks")
            if callbacks_config is not None:
                for cb_name, cb_conf in callbacks_config.items():
                    if cb_name in default_callbacks_config:
                        default_callbacks_config[cb_name].update(cb_conf)

                        # Special dynamic parameters for callbacks
                        if cb_name == "ModelCheckpoint":
                            default_callbacks_config[cb_name][
                                "filepath"
                            ] = f'best_model_{config["id"]}.h5'

                        # replace patience hyperparameter
                        if "patience" in default_callbacks_config[cb_name]:
                            patience = config["hyperparameters"].get(
                                f"patience_{cb_name}"
                            )
                            if patience is not None:
                                default_callbacks_config[cb_name]["patience"] = patience

                        # Import and create corresponding callback
                        Callback = import_callback(cb_name)
                        callbacks.append(Callback(**default_callbacks_config[cb_name]))

                        if cb_name in ["EarlyStopping"]:
                            cb_requires_valid = "val" in cb_conf["monitor"].split("_")
                    else:
                        logger.error(f"'{cb_name}' is not an accepted callback!")

            # WarmupLR: gradually ramp the learning rate over the first epochs.
            if config[a.hyperparameters].get("warmup_lr"):
                warmup_epochs = config[a.hyperparameters].get("warmup_epochs", 5)
                callbacks.append(
                    LearningRateWarmupCallback(
                        n_replicas=n_replicas,
                        warmup_epochs=warmup_epochs,
                        verbose=0,
                        initial_lr=initial_lr,
                    )
                )

            trainer = BaseTrainer(config=config, model=model)
            trainer.callbacks.extend(callbacks)

            last_only, with_pred = preproc_trainer(config)
            last_only = last_only and not cb_requires_valid

    if model_created:
        history = trainer.train(with_pred=with_pred, last_only=last_only)

        # save history
        save_history(config.get("log_dir", None), history, config)

        result = compute_objective(config["objective"], history)
    else:
        # penalising actions if model cannot be created
        result = -1

    if result < -10 or np.isnan(result):
        result = -10

    return result
| 6,405 | 37.359281 | 410 | py |
deephyper | deephyper-master/deephyper/nas/run/_run_base_trainer.py | """The :func:`deephyper.nas.run.alpha.run` function is used to evaluate a deep neural network by loading the data, building the model, training the model and returning a scalar value corresponding to the objective defined in the used :class:`deephyper.problem.NaProblem`.
"""
import os
import traceback
import logging
import numpy as np
import tensorflow as tf
from deephyper.keras.callbacks import import_callback
from deephyper.nas.run._util import (
compute_objective,
load_config,
preproc_trainer,
setup_data,
get_search_space,
default_callbacks_config,
HistorySaver,
)
from deephyper.nas.trainer import BaseTrainer
logger = logging.getLogger(__name__)
def run_base_trainer(job):
    """Evaluate one architecture for a DeepHyper job.

    Builds the model described by ``job.parameters["arch_seq"]``, trains it
    with :class:`BaseTrainer` and returns the value of the configured
    objective (``-inf`` when the model cannot be created or the objective
    is NaN).

    Args:
        job: the DeepHyper job; ``job.parameters`` holds the evaluation
            configuration and ``job.id`` identifies the evaluation.

    Returns:
        float: the computed objective.
    """
    config = job.parameters
    config["job_id"] = job.id

    # Reset Keras state between evaluations run in the same process.
    tf.keras.backend.clear_session()
    # tf.config.optimizer.set_jit(True)

    # setup history saver
    if config.get("log_dir") is None:
        config["log_dir"] = "."
    save_dir = os.path.join(config["log_dir"], "save")
    saver = HistorySaver(config, save_dir)
    saver.write_config()
    saver.write_model(None)

    # GPU Configuration if available
    physical_devices = tf.config.list_physical_devices("GPU")
    try:
        for i in range(len(physical_devices)):
            tf.config.experimental.set_memory_growth(physical_devices[i], True)
    except Exception:
        # Invalid device or cannot modify virtual devices once initialized.
        logger.info("error memory growth for GPU device")

    # Threading configuration (CPU-only runs honour OMP_NUM_THREADS)
    if (
        len(physical_devices) == 0
        and os.environ.get("OMP_NUM_THREADS", None) is not None
    ):
        logger.info(f"OMP_NUM_THREADS is {os.environ.get('OMP_NUM_THREADS')}")
        num_intra = int(os.environ.get("OMP_NUM_THREADS"))
        try:
            tf.config.threading.set_intra_op_parallelism_threads(num_intra)
            tf.config.threading.set_inter_op_parallelism_threads(2)
        except RuntimeError:  # Session already initialized
            pass
        tf.config.set_soft_device_placement(True)

    seed = config.get("seed")
    if seed is not None:
        np.random.seed(seed)
        tf.random.set_seed(seed)

    load_config(config)

    input_shape, output_shape = setup_data(config)

    search_space = get_search_space(config, input_shape, output_shape, seed=seed)

    model_created = False
    try:
        model = search_space.sample(config["arch_seq"])
        model_created = True
    except Exception:
        logger.info("Error: Model creation failed...")
        logger.info(traceback.format_exc())

    if model_created:
        # Setup callbacks
        callbacks = []
        cb_requires_valid = False  # Callbacks requires validation data
        callbacks_config = config["hyperparameters"].get("callbacks")
        if callbacks_config is not None:
            for cb_name, cb_conf in callbacks_config.items():
                if cb_name in default_callbacks_config:
                    default_callbacks_config[cb_name].update(cb_conf)

                    # Special dynamic parameters for callbacks
                    if cb_name == "ModelCheckpoint":
                        default_callbacks_config[cb_name]["filepath"] = saver.model_path

                    # replace patience hyperparameter
                    if "patience" in default_callbacks_config[cb_name]:
                        patience = config["hyperparameters"].get(f"patience_{cb_name}")
                        if patience is not None:
                            default_callbacks_config[cb_name]["patience"] = patience

                    # Import and create corresponding callback
                    Callback = import_callback(cb_name)
                    callbacks.append(Callback(**default_callbacks_config[cb_name]))

                    if cb_name in ["EarlyStopping"]:
                        cb_requires_valid = "val" in cb_conf["monitor"].split("_")
                else:
                    logger.error(f"'{cb_name}' is not an accepted callback!")

        trainer = BaseTrainer(config=config, model=model)
        trainer.callbacks.extend(callbacks)

        last_only, with_pred = preproc_trainer(config)
        last_only = last_only and not cb_requires_valid

        history = trainer.train(with_pred=with_pred, last_only=last_only)

        # save history
        saver.write_history(history)

        result = compute_objective(config["objective"], history)
    else:
        # penalising actions if model cannot be created
        logger.info("Model could not be created returning -Inf!")
        result = -float("inf")

    if np.isnan(result):
        logger.info("Computed objective is NaN returning -Inf instead!")
        result = -float("inf")

    return result
| 4,751 | 34.2 | 271 | py |
deephyper | deephyper-master/deephyper/nas/operation/_merge.py | import deephyper as dh
import tensorflow as tf
from ._base import Operation
class Concatenate(Operation):
    """Concatenate operation: joins its incoming tensors along the last axis,
    expanding 2D inputs to 3D and zero-padding as needed so shapes agree.

    Args:
        search_space: the search space holding the graph of operations.
        stacked_nodes (list(Node)): nodes to concatenate.
        axis (int): axis to concatenate.
            NOTE(review): ``axis`` is stored but the concatenation below is
            hard-coded to ``axis=-1`` — confirm whether this is intentional.
    """

    def __init__(self, search_space, stacked_nodes=None, axis=-1):
        self.search_space = search_space
        self.node = None  # current_node of the operation
        self.stacked_nodes = stacked_nodes
        self.axis = axis

    def __str__(self):
        return "Concatenate"

    def init(self, current_node):
        """Attach this op to ``current_node`` and connect every stacked node
        to it in the search-space graph."""
        self.node = current_node
        if self.stacked_nodes is not None:
            for n in self.stacked_nodes:
                self.search_space.connect(n, self.node)

    def __call__(self, values, **kwargs):
        # case where there is no inputs
        if len(values) == 0:
            return []

        # Highest rank among the incoming tensors (batch dim included).
        len_shp = max([len(x.get_shape()) for x in values])
        if len_shp > 4:
            raise RuntimeError(
                f"This concatenation is for 2D or 3D tensors only but a {len_shp-1}D is passed!"
            )

        # zeros padding
        if len(values) > 1:

            if all(
                map(
                    lambda x: len(x.get_shape()) == len_shp
                    or len(x.get_shape()) == (len_shp - 1),
                    values,
                )
            ):  # all tensors should have same number of dimensions 2d or 3d, but we can also accept a mix of 2d en 3d tensors
                # we have a mix of 2d and 3d tensors so we are expanding 2d tensors to be 3d with last_dim==1
                for i, v in enumerate(values):
                    if len(v.get_shape()) < len_shp:
                        values[i] = tf.keras.layers.Reshape(
                            (*tuple(v.get_shape()[1:]), 1)
                        )(v)

                # for 3d tensors concatenation is applied along last dim (axis=-1), so we are applying a zero padding to make 2nd dimensions (ie. shape()[1]) equals
                if len_shp == 3:
                    max_len = max(map(lambda x: int(x.get_shape()[1]), values))
                    paddings = map(lambda x: max_len - int(x.get_shape()[1]), values)
                    for i, (p, v) in enumerate(zip(paddings, values)):
                        lp = p // 2  # split the padding evenly left/right
                        rp = p - lp
                        values[i] = tf.keras.layers.ZeroPadding1D(padding=(lp, rp))(v)
                # elif len_shp == 2 nothing to do
            else:
                raise RuntimeError(
                    f"All inputs of concatenation operation should have same shape length:\n"
                    f"number_of_inputs=={len(values)}\n"
                    f"shape_of_inputs=={[str(x.get_shape()) for x in values]}"
                )

        # concatenation
        if len(values) > 1:
            out = tf.keras.layers.Concatenate(axis=-1)(values)
        else:
            out = values[0]
        return out
class AddByPadding(Operation):
    """Add operation. If tensors are of different shapes a zero padding will be
    applied before adding them element-wise.

    Args:
        search_space (KSearchSpace): the search space holding the graph.
        activation (optional): activation function to apply after adding
            ('relu', 'tanh', 'sigmoid'...). Defaults to None.
        stacked_nodes (list(Node)): nodes to add.
        axis (int): axis to concatenate.
            NOTE(review): ``axis`` is stored but never read in ``__call__`` —
            confirm whether it is used by callers.
    """

    def __init__(self, search_space, stacked_nodes=None, activation=None, axis=-1):
        self.search_space = search_space
        self.node = None  # current_node of the operation
        self.stacked_nodes = stacked_nodes
        self.activation = activation
        self.axis = axis

    def init(self, current_node):
        # Attach to the graph: connect every stacked node to the current node.
        self.node = current_node
        if self.stacked_nodes is not None:
            for n in self.stacked_nodes:
                self.search_space.connect(n, self.node)

    def __call__(self, values, **kwargs):
        # case where there is no inputs
        if len(values) == 0:
            return []

        values = values[:]  # shallow copy: the list is mutated below
        max_len_shp = max([len(x.get_shape()) for x in values])

        # zeros padding
        if len(values) > 1:

            for i, v in enumerate(values):

                if len(v.get_shape()) < max_len_shp:
                    # Expand lower-rank tensors with trailing singleton dims.
                    values[i] = tf.keras.layers.Reshape(
                        (
                            *tuple(v.get_shape()[1:]),
                            *tuple(1 for i in range(max_len_shp - len(v.get_shape()))),
                        )
                    )(v)

            def max_dim_i(i):
                # Largest size of dimension `i` across all inputs.
                return max(map(lambda x: int(x.get_shape()[i]), values))

            max_dims = [None] + list(map(max_dim_i, range(1, max_len_shp)))

            def paddings_dim_i(i):
                # Padding required per input to reach max size on dimension `i`.
                return list(map(lambda x: max_dims[i] - int(x.get_shape()[i]), values))

            paddings_dim = list(map(paddings_dim_i, range(1, max_len_shp)))

            for i in range(len(values)):
                paddings = list()
                for j in range(len(paddings_dim)):
                    p = paddings_dim[j][i]
                    lp = p // 2  # split the padding evenly left/right
                    rp = p - lp
                    paddings.append([lp, rp])
                if sum(map(sum, paddings)) != 0:
                    values[i] = dh.layers.Padding(paddings)(values[i])

        # addition
        if len(values) > 1:
            out = tf.keras.layers.Add()(values)
            if self.activation is not None:
                out = tf.keras.layers.Activation(self.activation)(out)
        else:
            out = values[0]
        return out
class AddByProjecting(Operation):
    """Add operation. If tensors are of different shapes a dense projection is
    applied before adding them element-wise.

    Args:
        search_space (KSearchSpace): the search space holding the graph.
        activation (optional): activation function to apply after adding
            ('relu', 'tanh', 'sigmoid'...). Defaults to None.
        stacked_nodes (list(Node)): nodes to add.
        axis (int): axis along which inputs are projected to a common size.
    """

    def __init__(self, search_space, stacked_nodes=None, activation=None, axis=-1):
        self.search_space = search_space
        self.node = None  # current_node of the operation
        self.stacked_nodes = stacked_nodes
        self.activation = activation
        self.axis = axis

    def init(self, current_node):
        # Attach to the graph: connect every stacked node to the current node.
        self.node = current_node
        if self.stacked_nodes is not None:
            for n in self.stacked_nodes:
                self.search_space.connect(n, self.node)

    def __call__(self, values, seed=None, **kwargs):
        # case where there is no inputs
        if len(values) == 0:
            return []

        values = values[:]  # shallow copy: the list is mutated below
        max_len_shp = max([len(x.get_shape()) for x in values])

        # projection
        if len(values) > 1:

            for i, v in enumerate(values):

                if len(v.get_shape()) < max_len_shp:
                    # Expand lower-rank tensors with trailing singleton dims.
                    values[i] = tf.keras.layers.Reshape(
                        (
                            *tuple(v.get_shape()[1:]),
                            *tuple(1 for i in range(max_len_shp - len(v.get_shape()))),
                        )
                    )(v)

            # The first input's size along `axis` is the projection target.
            proj_size = values[0].get_shape()[self.axis]

            for i in range(len(values)):
                if values[i].get_shape()[self.axis] != proj_size:
                    values[i] = tf.keras.layers.Dense(
                        units=proj_size,
                        kernel_initializer=tf.keras.initializers.glorot_uniform(
                            seed=seed
                        ),
                    )(values[i])

        # addition
        if len(values) > 1:
            out = tf.keras.layers.Add()(values)
            if self.activation is not None:
                out = tf.keras.layers.Activation(self.activation)(out)
        else:
            out = values[0]
        return out
| 8,004 | 35.221719 | 164 | py |
deephyper | deephyper-master/deephyper/nas/operation/_base.py | import tensorflow as tf
class Operation:
    """Base interface wrapping a Keras layer as a search-space operation.

    >>> import tensorflow as tf
    >>> from deephyper.nas.space.op import Operation
    >>> Operation(layer=tf.keras.layers.Dense(10))
    Dense

    Args:
        layer (Layer): a ``tensorflow.keras.layers.Layer`` instance to wrap.
    """

    def __init__(self, layer: tf.keras.layers.Layer):
        assert isinstance(layer, tf.keras.layers.Layer)
        self.from_keras_layer = True
        self._layer = layer

    def __str__(self):
        return repr(self)

    def __repr__(self):
        # Keras-wrapped operations display the wrapped layer's class name;
        # subclasses (which skip __init__) fall back to their own __str__.
        if not hasattr(self, "from_keras_layer"):
            try:
                return str(self)
            except Exception:
                return type(self).__name__
        return type(self._layer).__name__

    def __call__(self, tensors: list, seed: int = None, **kwargs):
        """Apply the wrapped layer.

        Args:
            tensors (list): the incoming tensors.

        Returns:
            tensor: the output tensor.
        """
        # A single tensor is unwrapped before being passed to the layer.
        single = len(tensors) == 1
        return self._layer(tensors[0]) if single else self._layer(tensors)

    def init(self, current_node):
        """Preprocess the current operation."""
def operation(cls):
    """Dynamically build an ``Operation`` subclass wrapping a Keras layer class.

    The returned class stores its constructor arguments and instantiates the
    underlying layer lazily, on the first call.

    Args:
        cls (tf.keras.layers.Layer): the Keras layer class to wrap.

    Returns:
        type: an ``Operation`` subclass named after ``cls``.
    """

    def __init__(self, *args, **kwargs):
        self._args = args
        self._kwargs = kwargs
        self._layer = None  # built lazily on first __call__

    def __repr__(self):
        return cls.__name__

    def __call__(self, inputs, **kwargs):
        if self._layer is None:
            self._layer = cls(*self._args, **self._kwargs)
        return self._layer(inputs[0]) if len(inputs) == 1 else self._layer(inputs)

    members = dict(__init__=__init__, __repr__=__repr__, __call__=__call__)
    return type(cls.__name__, (Operation,), members)
class Identity(Operation):
    """Pass-through operation: forwards its single input tensor unchanged."""

    def __init__(self):
        pass

    def __call__(self, inputs, **kwargs):
        # Exactly one incoming tensor is expected.
        assert (
            len(inputs) == 1
        ), f"{type(self).__name__} as {len(inputs)} inputs when 1 is required."
        return inputs[0]
class Tensor(Operation):
    """Constant operation: always produces the fixed tensor it was built with."""

    def __init__(self, tensor, *args, **kwargs):
        self.tensor = tensor

    def __str__(self):
        return str(self.tensor)

    def __call__(self, *args, **kwargs):
        return self.tensor
class Zero(Operation):
    """Null operation: produces an empty list, i.e. no tensor at all."""

    def __init__(self):
        self.tensor = []

    def __str__(self):
        return "Zero"

    def __call__(self, *args, **kwargs):
        return self.tensor
class Connect(Operation):
    """Connection operation.

    Materializes a possible edge ``source_node -> current_node`` in the
    search-space graph.

    Args:
        search_space: the search space holding the graph.
        source_node (Node or list(Node)): origin node(s) of the connection.
    """

    def __init__(self, search_space, source_node, *args, **kwargs):
        self.search_space = search_space
        self.source_node = source_node
        self.destin_node = None  # set later by init()

    def __str__(self):
        if type(self.source_node) is list:
            if self.source_node:
                ids = ",".join(str(n.id) for n in self.source_node)
            else:
                ids = "None"
        else:
            ids = self.source_node.id
        destin = "?" if self.destin_node is None else self.destin_node.id
        return f"{type(self).__name__}_{ids}->{destin}"

    def init(self, current_node):
        """Set the connection in the search_space graph from n1 -> n2."""
        self.destin_node = current_node
        sources = (
            self.source_node
            if type(self.source_node) is list
            else [self.source_node]
        )
        for node in sources:
            self.search_space.connect(node, self.destin_node)

    def __call__(self, value, *args, **kwargs):
        # The connection itself is transparent: forward the value unchanged.
        return value
| 4,141 | 25.382166 | 130 | py |
deephyper | deephyper-master/deephyper/problem/_neuralarchitecture.py | from collections import OrderedDict
from copy import deepcopy
from inspect import signature
import ConfigSpace.hyperparameters as csh
import tensorflow as tf
from deephyper.core.exceptions.problem import (
NaProblemError,
ProblemLoadDataIsNotCallable,
ProblemPreprocessingIsNotCallable,
SearchSpaceBuilderMissingParameter,
WrongProblemObjective,
)
from deephyper.nas.run._util import get_search_space, setup_data
from deephyper.problem import HpProblem
class NaProblem:
"""A Neural Architecture Problem specification for Neural Architecture Search.
>>> from deephyper.problem import NaProblem
>>> from deephyper.nas.preprocessing import minmaxstdscaler
>>> from deepspace.tabular import OneLayerSpace
>>> Problem = NaProblem()
>>> Problem.load_data(load_data)
>>> Problem.preprocessing(minmaxstdscaler)
>>> Problem.search_space(OneLayerSpace)
>>> Problem.hyperparameters(
... batch_size=100,
... learning_rate=0.1,
... optimizer='adam',
... num_epochs=10,
... callbacks=dict(
... EarlyStopping=dict(
... monitor='val_r2',
... mode='max',
... verbose=0,
... patience=5
... )
... )
... )
>>> Problem.loss('mse')
>>> Problem.metrics(['r2'])
>>> Problem.objective('val_r2__last')
"""
    def __init__(self):
        # Ordered mapping holding every component of the problem definition
        # (load_data, search_space, hyperparameters, loss, metrics, objective...).
        self._space = OrderedDict()
        # Companion hyperparameter problem for jointly-searched hyperparameters.
        self._hp_space = HpProblem()
        self._space["metrics"] = []
        self._space["hyperparameters"] = dict(verbose=0)
    def __repr__(self):
        # Human-readable summary of the whole problem definition.
        preprocessing = (
            None
            if self._space.get("preprocessing") is None
            else module_location(self._space["preprocessing"]["func"])
        )

        hps = "".join(
            [
                f"\n * {h}: {self._space['hyperparameters'][h]}"
                for h in self._space["hyperparameters"]
            ]
        )

        # Metrics may be declared either as a list or as a dict (per output).
        if type(self._space["metrics"]) is list:
            metrics = "".join([f"\n * {m}" for m in self._space["metrics"]])
        else:
            metrics = "".join(
                [f"\n * {m[0]}: {m[1]}" for m in self._space["metrics"].items()]
            )

        objective = self._space["objective"]
        if not type(objective) is str:
            objective = module_location(objective)

        out = (
            f"Problem is:\n"
            f"    - search space   : {module_location(self._space['search_space']['class'])}\n"
            f"    - data loading   : {module_location(self._space['load_data']['func'])}\n"
            f"    - preprocessing  : {preprocessing}\n"
            f"    - hyperparameters: {hps}\n"
            f"    - loss           : {self._space['loss']}\n"
            f"    - metrics        : {metrics}\n"
            f"    - objective      : {objective}\n"
        )

        return out
    def load_data(self, func: callable, **kwargs):
        """Define the function loading the data.

        .. code-block:: python

            Problem.load_data(load_data, load_data_kwargs)

        This ``load_data`` callable can follow two different interfaces: Numpy arrays or generators.

        1. **Numpy arrays**:

        In the case of Numpy arrays, the callable passed to ``Problem.load_data(...)`` has to return the following tuple: ``(X_train, y_train), (X_valid, y_valid)``. In the most simple case where the model takes a single input, each of these elements is a Numpy array. Generally, ``X_train`` and ``y_train`` have to be of the same length (i.e., same ``array.shape[0]``) which is also the case for ``X_valid`` and ``y_valid``. Similarly, the shape of the elements of ``X_train`` and ``X_valid`` which is also the case for ``y_train`` and ``y_valid``. An example ``load_data`` function can be

        .. code-block:: python

            import numpy as np

            def load_data(N=100):
                X = np.zeros((N, 1))
                y = np.zeros((N,1))
                return (X, y), (X, y)

        It is also possible for the model to take several inputs. In fact, experimentaly it can be notices that separating some inputs with different inputs can significantly help the learning of the model. Also, sometimes different inputs may be of the "types" for example two molecular fingerprints. In this case, it can be very interesting to share the weights of the model to process these two inputs. In the case of multi-inputs models the ``load_data`` function will also return ``(X_train, y_train), (X_valid, y_valid)`` bu where ``X_train`` and ``X_valid`` are two lists of Numpy arrays. For example, the following is correct:

        .. code-block:: python

            import numpy as np

            def load_data(N=100):
                X = np.zeros((N, 1))
                y = np.zeros((N,1))
                return ([X, X], y), ([X, X], y)

        2. **Generators**:

        Returning generators with a single input:

        .. code-block:: python

            def load_data(N=100):
                tX, ty = np.zeros((N,1)), np.zeros((N,1))
                vX, vy = np.zeros((N,1)), np.zeros((N,1))

                def train_gen():
                    for x, y in zip(tX, ty):
                        yield ({"input_0": x}, y)

                def valid_gen():
                    for x, y in zip(vX, vy):
                        yield ({"input_0": x}, y)

                res = {
                    "train_gen": train_gen,
                    "train_size": N,
                    "valid_gen": valid_gen,
                    "valid_size": N,
                    "types": ({"input_0": tf.float64}, tf.float64),
                    "shapes": ({"input_0": (1, )}, (1, ))
                }
                return res

        Returning generators with multiple inputs:

        .. code-block:: python

            def load_data(N=100):
                tX0, tX1, ty = np.zeros((N,1)), np.zeros((N,1)), np.zeros((N,1)),
                vX0, vX1, vy = np.zeros((N,1)), np.zeros((N,1)), np.zeros((N,1)),

                def train_gen():
                    for x0, x1, y in zip(tX0, tX1, ty):
                        yield ({
                            "input_0": x0,
                            "input_1": x1
                            }, y)

                def valid_gen():
                    for x0, x1, y in zip(vX0, vX1, vy):
                        yield ({
                            "input_0": x0,
                            "input_1": x1
                        }, y)

                res = {
                    "train_gen": train_gen,
                    "train_size": N,
                    "valid_gen": valid_gen,
                    "valid_size": N,
                    "types": ({"input_0": tf.float64, "input_1": tf.float64}, tf.float64),
                    "shapes": ({"input_0": (5, ), "input_1": (5, )}, (1, ))
                }
                return res

        Args:
            func (callable): the load data function.
            **kwargs: keyword arguments forwarded to ``func`` when it is called.

        Raises:
            ProblemLoadDataIsNotCallable: if ``func`` is not callable.
        """
        if not callable(func):
            raise ProblemLoadDataIsNotCallable(func)

        self._space["load_data"] = {"func": func, "kwargs": kwargs}
def augment(self, func: callable, **kwargs):
"""
:meta private:
"""
if not callable(func):
raise ProblemLoadDataIsNotCallable(func)
self._space["augment"] = {"func": func, "kwargs": kwargs}
def search_space(self, space_class, **kwargs):
"""Set a search space for neural architecture search.
Args:
space_class (KSearchSpace): an object of type ``KSearchSpace`` which has to implement the ``build()`` method.
Raises:
SearchSpaceBuilderMissingParameter: raised when either of ``(input_shape, output_shape)`` are missing parameters of ``func``.
"""
sign = signature(space_class)
if "input_shape" not in sign.parameters:
raise SearchSpaceBuilderMissingParameter("input_shape")
if "output_shape" not in sign.parameters:
raise SearchSpaceBuilderMissingParameter("output_shape")
self._space["search_space"] = {"class": space_class, "kwargs": kwargs}
def add_hyperparameter(
self, value, name: str = None, default_value=None
) -> csh.Hyperparameter:
"""Add hyperparameters to search the neural architecture search problem.
>>> Problem.hyperparameters(
... batch_size=problem.add_hyperparameter((32, 256), "batch_size")
... )
Args:
value: a hyperparameter description.
name: a name of the defined hyperparameter, the same as the current key.
default_value (Optional): a default value of the hyperparameter.
Returns:
Hyperparameter: the defined hyperparameter.
"""
return self._hp_space.add_hyperparameter(value, name, default_value)
def preprocessing(self, func: callable):
"""Define how to preprocess your data.
Args:
func (callable): a function which returns a preprocessing scikit-learn ``Pipeline``.
"""
if not callable(func):
raise ProblemPreprocessingIsNotCallable(func)
self._space["preprocessing"] = {"func": func}
def hyperparameters(self, **kwargs):
"""Define hyperparameters used to evaluate generated architectures.
Hyperparameters can be defined such as:
.. code-block:: python
Problem.hyperparameters(
batch_size=256,
learning_rate=0.01,
optimizer="adam",
num_epochs=20,
verbose=0,
callbacks=dict(...),
)
"""
if self._space.get("hyperparameters") is None:
self._space["hyperparameters"] = dict()
self._space["hyperparameters"].update(kwargs)
    def loss(self, loss, loss_weights=None, class_weights=None):
        """Define the loss used to train generated architectures.

        It can be a ``str`` corresponding to a Keras loss function:

        .. code-block:: python

            problem.loss("categorical_crossentropy")

        A custom loss function can also be defined:

        .. code-block:: python

            def NLL(y, rv_y):
                return -rv_y.log_prob(y)

            problem.loss(NLL)

        The loss can be automatically searched:

        .. code-block:: python

            problem.loss(
                problem.add_hyperparameter(
                    ["mae", "mse", "huber_loss", "log_cosh", "mape", "msle"], "loss"
                )
            )

        It is possible to define a different loss for each output:

        .. code-block:: python

            problem.loss(
                loss={"output_0": "mse", "output_1": "mse"},
                loss_weights={"output_0": 0.0, "output_1": 1.0},
            )

        Args:
            loss (str or callable or list): a string indicating a specific loss function.
            loss_weights (list): Optional.
            class_weights (dict): Optional.

        Raises:
            RuntimeError: if ``loss`` has an unsupported type, or if a dict of
                losses is given together with a ``loss_weights`` of a
                different length.
        """
        # A categorical hyperparameter means the loss itself is being
        # searched; the usual type checks are skipped in that case.
        if not (type(loss) is csh.CategoricalHyperparameter):
            if not type(loss) is str and not callable(loss) and not type(loss) is dict:
                raise RuntimeError(
                    f"The loss should be either a str, dict or a callable when it's of type {type(loss)}"
                )
            if (
                type(loss) is dict
                and loss_weights is not None
                and len(loss) != len(loss_weights)
            ):
                raise RuntimeError(
                    f"The losses list (len={len(loss)}) and the weights list (len={len(loss_weights)}) should be of same length!"
                )

        self._space["loss"] = loss
        if loss_weights is not None:
            self._space["loss_weights"] = loss_weights
        if class_weights is not None:
            self._space["class_weights"] = class_weights
def metrics(self, metrics=None):
"""Define a list of metrics for the training of generated architectures.
A list of metrics can be defined to be monitored or used as an objective. It can be a keyword or a callable. For example, if it is a keyword:
.. code-block:: python
problem.metrics(["acc"])
In case you need multiple metrics:
.. code-block:: python
problem.metrics["mae", "mse"]
In case you want to use a custom metric:
.. code-block:: python
def sparse_perplexity(y_true, y_pred):
cross_entropy = tf.keras.losses.sparse_categorical_crossentropy(y_true, y_pred)
perplexity = tf.pow(2.0, cross_entropy)
return perplexity
problem.metrics([sparse_perplexity])
Args:
metrics (list(str or callable) or dict): If ``str`` the metric should be defined in Keras or in DeepHyper. If ``callable`` it should take 2 arguments ``(y_pred, y_true)`` which are a prediction and a true value respectively.
"""
if metrics is None:
metrics = []
self._space["metrics"] = metrics
def check_objective(self, objective):
"""
:meta private:
"""
if not type(objective) is str and not callable(objective):
raise WrongProblemObjective(objective)
# elif type(objective) is str:
# list_suffix = ["__min", "__max", "__last"]
# for suffix in list_suffix:
# if suffix in objective:
# objective = objective.replace(suffix, "")
# break # only one suffix autorized
# objective = objective.replace("val_", "")
# possible_names = list()
# if type(self._space["metrics"]) is dict:
# metrics = list(self._space["metrics"].values())
# for k in self._space["metrics"].keys():
# objective = objective.replace(f"{k}_", "")
# else: # assuming it s a list
# metrics = self._space["metrics"]
# for val in ["loss"] + metrics:
# if callable(val):
# possible_names.append(val.__name__)
# else:
# possible_names.append(val)
# if not (objective in possible_names):
# raise WrongProblemObjective(objective, possible_names)
def objective(self, objective):
"""Define the objective you want to maximize for the search.
If you want to use the validation accuracy at the last epoch:
.. code-block:: python
problem.objective("val_acc")
.. note:: Be sure to define ``acc`` in the ``problem.metrics(["acc"])``.
It can accept some prefix and suffix such as ``__min, __max, __last``:
.. code-block:: python
problem.objective("-val_acc__max")
It can be a ``callable``:
.. code-block:: python
def myobjective(history: dict) -> float:
return history["val_acc"][-1]
problem.objective(myobjective)
Args:
objective (str or callable): The objective will be maximized. If ``objective`` is ``str`` then it should be either 'loss' or a defined metric. You can use the ``'val_'`` prefix when you want to select the objective on the validation set. You can use one of ``['__min', '__max', '__last']`` which respectively means you want to select the min, max or last value among all epochs. Using '__last' will save a consequent compute time because the evaluation will not compute metrics on validation set for all epochs but the last. If ``objective`` is callable it should return a scalar value (i.e. float) and it will take a ``dict`` parameter. The ``dict`` will contain keys corresponding to loss and metrics such as ``['loss', 'val_loss', 'r2', 'val_r2]``, it will also contains ``'n_parameters'`` which corresponds to the number of trainable parameters of the current model, ``'training_time'`` which corresponds to the time required to train the model, ``'predict_time'`` which corresponds to the time required to make a prediction over the whole validation set. If this callable has a ``'__last'`` suffix then the evaluation will only compute validation loss/metrics for the last epoch. If this callable has contains 'with_pred' in its name then the ``dict`` will have two other keys ``['y_pred', 'y_true']`` where ``'y_pred`` corresponds to prediction of the model on validation set and ``'y_true'`` corresponds to real prediction.
Raise:
WrongProblemObjective: raised when the objective is of a wrong definition.
"""
if (
not self._space.get("loss") is None
and not self._space.get("metrics") is None
):
self.check_objective(objective)
else:
raise NaProblemError(
".loss and .metrics should be defined before .objective!"
)
self._space["objective"] = objective
@property
def space(self):
keys = list(self._space.keys())
keys.sort()
space = OrderedDict(**{d: self._space[d] for d in keys})
return space
def build_search_space(self, seed=None):
"""Build and return a search space object using the infered data shapes after loading data.
Returns:
KSearchSpace: A search space instance.
"""
config = self.space
input_shape, output_shape, _ = setup_data(config, add_to_config=False)
search_space = get_search_space(config, input_shape, output_shape, seed=seed)
return search_space
def get_keras_model(self, arch_seq: list) -> tf.keras.Model:
"""Get a keras model object from a set of decisions in the current search space.
Args:
arch_seq (list): a list of int of floats describing a choice of operations for the search space defined in the current problem.
"""
search_space = self.build_search_space()
return search_space.sample(arch_seq)
def gen_config(self, arch_seq: list, hp_values: list) -> dict:
"""Generate a ``dict`` configuration from the ``arch_seq`` and ``hp_values`` passed.
Args:
arch_seq (list): a valid embedding of a neural network described by the search space of the current ``NaProblem``.
hp_values (list): a valid list of hyperparameters corresponding to the defined hyperparameters of the current ``NaProblem``.
"""
config = deepcopy(self.space)
# architecture DNA
config["arch_seq"] = arch_seq
# replace hp values in the config
hp_names = self._hp_space._space.get_hyperparameter_names()
for hp_name, hp_value in zip(hp_names, hp_values):
if hp_name == "loss":
config["loss"] = hp_value
else:
config["hyperparameters"][hp_name] = hp_value
return config
def extract_hp_values(self, config):
"""Extract the value of hyperparameters present in ``config`` based on the defined hyperparameters in the current ``NaProblem``"""
hp_names = self.hyperparameter_names
hp_values = []
for hp_name in hp_names:
if hp_name == "loss":
hp_values.append(config["loss"])
else:
hp_values.append(config["hyperparameters"][hp_name])
return hp_values
@property
def hyperparameter_names(self):
"""The list of hyperparameters names."""
return self._hp_space.hyperparameter_names
@property
def default_hp_configuration(self):
"""The default configuration as a dictionnary."""
return self._hp_space.default_configuration
def module_location(attr):
    """Return the dotted ``module.name`` location of an attribute.

    :meta private:
    """
    return "{}.{}".format(attr.__module__, attr.__name__)
| 19,985 | 36.287313 | 1,442 | py |
deephyper | deephyper-master/deephyper/keras/callbacks/learning_rate_warmup.py | """
Adapted from Horovod implementation: https://github.com/horovod/horovod/blob/master/horovod/keras/callbacks.py
"""
import tensorflow as tf
class LearningRateScheduleCallback(tf.keras.callbacks.Callback):
    """Scale the optimizer learning rate by ``multiplier(epoch)`` during a range of epochs.

    Adapted from Horovod. When ``staircase`` is False the multiplier is
    evaluated at fractional epochs (one adjustment per batch); when True it is
    applied once at the start of each epoch.
    """

    def __init__(
        self,
        initial_lr,
        multiplier,
        start_epoch=0,
        end_epoch=None,
        staircase=True,
        momentum_correction=True,
        steps_per_epoch=None,
        *args
    ):
        """Initialize the schedule.

        Args:
            initial_lr: base learning rate the multiplier is applied to (required).
            multiplier: a constant or a ``callable(epoch) -> float``.
            start_epoch: first epoch (inclusive) the schedule is active.
            end_epoch: epoch (exclusive) after which the schedule stops; ``None`` means never.
            staircase: if True adjust once per epoch, else per batch at fractional epochs.
            momentum_correction: rescale SGD momentum proportionally to the lr change.
            steps_per_epoch: needed for fractional epochs; autodetected when possible.
        """
        super(LearningRateScheduleCallback, self).__init__(*args)
        self.start_epoch = start_epoch
        self.end_epoch = end_epoch
        self.staircase = staircase
        self.momentum_correction = momentum_correction
        self.initial_lr = initial_lr
        # Holds the pre-correction momentum while a corrected value is in effect.
        self.restore_momentum = None
        self.steps_per_epoch = steps_per_epoch
        self.current_epoch = None
        # A constant multiplier implies a per-epoch (staircase) schedule.
        if not callable(multiplier):
            self.staircase = True
            self.multiplier = lambda epoch: multiplier
        else:
            self.multiplier = multiplier
        if self.initial_lr is None:
            raise ValueError("Parameter `initial_lr` is required")

    def _autodetect_steps_per_epoch(self):
        """Infer steps per epoch from the fit() params; raise if not possible."""
        if self.params.get("steps"):
            # The number of steps is provided in the parameters.
            return self.params["steps"]
        elif self.params.get("samples") and self.params.get("batch_size"):
            # Compute the number of steps per epoch using # of samples and a batch size.
            return self.params["samples"] // self.params["batch_size"]
        else:
            raise ValueError(
                "Could not autodetect the number of steps per epoch. "
                "Please specify the steps_per_epoch parameter to the "
                "%s() or upgrade to the latest version of Keras."
                % self.__class__.__name__
            )

    def _adjust_learning_rate(self, epoch):
        """Set lr = initial_lr * multiplier(epoch); correct SGD momentum if enabled."""
        old_lr = tf.keras.backend.get_value(self.model.optimizer.lr)
        new_lr = self.initial_lr * self.multiplier(epoch)
        tf.keras.backend.set_value(self.model.optimizer.lr, new_lr)
        if hasattr(self.model.optimizer, "momentum") and self.momentum_correction:
            # See the paper cited above for more information about momentum correction.
            self.restore_momentum = tf.keras.backend.get_value(
                self.model.optimizer.momentum
            )
            tf.keras.backend.set_value(
                self.model.optimizer.momentum, self.restore_momentum * new_lr / old_lr
            )

    def _restore_momentum_if_needed(self):
        """Undo a previous momentum correction after the batch completes."""
        if self.restore_momentum:
            tf.keras.backend.set_value(
                self.model.optimizer.momentum, self.restore_momentum
            )
            self.restore_momentum = None

    def on_train_begin(self, logs=None):
        # Fall back to the optimizer's current lr when none was provided.
        if self.initial_lr is None:
            self.initial_lr = tf.keras.backend.get_value(self.model.optimizer.lr)
        if not self.staircase and not self.steps_per_epoch:
            self.steps_per_epoch = self._autodetect_steps_per_epoch()

    def on_epoch_begin(self, epoch, logs=None):
        self.current_epoch = epoch

    def on_batch_begin(self, batch, logs=None):
        if self.current_epoch < self.start_epoch or (
            self.end_epoch is not None and self.current_epoch >= self.end_epoch
        ):
            # Outside of the adjustment scope.
            return
        if self.staircase and batch == 0:
            # Do on first batch of every epoch.
            self._adjust_learning_rate(self.current_epoch)
        elif not self.staircase:
            # Fractional epoch so the multiplier varies smoothly within an epoch.
            epoch = self.current_epoch + float(batch) / self.steps_per_epoch
            self._adjust_learning_rate(epoch)

    def on_batch_end(self, batch, logs=None):
        self._restore_momentum_if_needed()

    def on_epoch_end(self, epoch, logs=None):
        if logs is not None:
            # Log current learning rate.
            logs["lr"] = tf.keras.backend.get_value(self.model.optimizer.lr)
class LearningRateWarmupCallback(LearningRateScheduleCallback):
    """Gradually warm the learning rate up over ``warmup_epochs`` epochs.

    The lr ramps linearly from ``initial_lr / n_replicas`` up to ``initial_lr``,
    the usual scheme for large-batch data-parallel training (Goyal et al.,
    "Accurate, Large Minibatch SGD").
    """

    def __init__(
        self,
        n_replicas,
        initial_lr,
        warmup_epochs=5,
        momentum_correction=True,
        steps_per_epoch=None,
        verbose=0,
        *args
    ):
        # NOTE: the closure reads self.steps_per_epoch lazily, so it uses the
        # value resolved/autodetected by the parent at on_train_begin time.
        def multiplier(epoch):
            # Adjust epoch to produce round numbers at the end of each epoch, so that TensorBoard
            # learning rate graphs look better.
            epoch += 1.0 / self.steps_per_epoch
            return 1.0 / n_replicas * (epoch * (n_replicas - 1) / warmup_epochs + 1)

        super(LearningRateWarmupCallback, self).__init__(
            initial_lr,
            multiplier,
            start_epoch=0,
            end_epoch=warmup_epochs,
            staircase=False,
            momentum_correction=momentum_correction,
            steps_per_epoch=steps_per_epoch,
            *args
        )
        self.verbose = verbose

    def on_epoch_end(self, epoch, logs=None):
        super(LearningRateWarmupCallback, self).on_epoch_end(epoch, logs)
        # Announce the end of warmup once, on its final epoch.
        if epoch == self.end_epoch - 1 and self.verbose > 0:
            new_lr = tf.keras.backend.get_value(self.model.optimizer.lr)
            print(
                "\nEpoch %d: finished gradual learning rate warmup to %g."
                % (epoch + 1, new_lr)
            )
| 5,317 | 35.930556 | 110 | py |
deephyper | deephyper-master/deephyper/keras/callbacks/utils.py | from typing import Type
import deephyper
import deephyper.core.exceptions
import tensorflow as tf
def import_callback(cb_name: str) -> Type[tf.keras.callbacks.Callback]:
    """Import a callback class from its name.

    Args:
        cb_name (str): class name of the callback to import fron ``tensorflow.keras.callbacks`` or ``deephyper.keras.callbacks``.

    Raises:
        DeephyperRuntimeError: raised if the class name of the callback is not registered in corresponding packages.

    Returns:
        tensorflow.keras.callbacks.Callback: the class corresponding to the given class name.
    """
    # Search the two known callback namespaces in priority order.
    for module in (tf.keras.callbacks, deephyper.keras.callbacks):
        if cb_name in dir(module):
            return getattr(module, cb_name)
    raise deephyper.core.exceptions.DeephyperRuntimeError(
        f"Callback '{cb_name}' is not registered in tensorflow.keras and deephyper.keras.callbacks."
    )
| 1,000 | 34.75 | 129 | py |
deephyper | deephyper-master/deephyper/keras/callbacks/stop_if_unfeasible.py | import time
import tensorflow as tf
class StopIfUnfeasible(tf.keras.callbacks.Callback):
    """Stop training when the projected epoch duration exceeds a time budget.

    After ``patience`` batches, the average batch time is extrapolated over the
    full epoch; if the estimate exceeds ``time_limit`` seconds, training stops.

    Args:
        time_limit (int): maximum allowed (estimated) epoch duration in seconds.
        patience (int): number of batches to observe before estimating.
    """

    def __init__(self, time_limit=600, patience=20):
        super().__init__()
        self.time_limit = time_limit
        self.timing = list()
        self.stopped = False  # boolean set to True if the model training has been stopped due to time_limit condition
        self.patience = patience

    def set_params(self, params):
        """Store fit() params and resolve the number of steps per epoch."""
        self.params = params
        if self.params["steps"] is None:
            # Derive steps from samples/batch_size...
            # (BUG FIX: a duplicated assignment of self.steps was removed here.)
            self.steps = self.params["samples"] // self.params["batch_size"]
            # ...rounding up for a final partial batch.
            if self.steps * self.params["batch_size"] < self.params["samples"]:
                self.steps += 1
        else:
            self.steps = self.params["steps"]

    def on_batch_begin(self, batch, logs=None):
        """Called at the beginning of a training batch in `fit` methods.

        Subclasses should override for any actions to run.

        Args:
            batch (int): index of batch within the current epoch.
            logs (dict): has keys `batch` and `size` representing the current
                batch number and the size of the batch.
        """
        # Record the batch start time; replaced by the duration in on_batch_end.
        self.timing.append(time.time())

    def on_batch_end(self, batch, logs=None):
        """Called at the end of a training batch in `fit` methods.

        Subclasses should override for any actions to run.

        Args:
            batch (int): index of batch within the current epoch.
            logs (dict): metric results for this batch.
        """
        self.timing[-1] = time.time() - self.timing[-1]
        self.avr_batch_time = sum(self.timing) / len(self.timing)
        # Time spent so far plus the average time for the remaining batches.
        self.estimate_training_time = sum(self.timing) + self.avr_batch_time * (
            self.steps - len(self.timing)
        )
        if (
            len(self.timing) >= self.patience
            and self.estimate_training_time > self.time_limit
        ):
            self.stopped = True
            self.model.stop_training = True
| 2,047 | 36.236364 | 118 | py |
deephyper | deephyper-master/deephyper/keras/callbacks/stop_on_timeout.py | from datetime import datetime
from tensorflow.keras.callbacks import Callback
class TerminateOnTimeOut(Callback):
    """Stop model training once a wall-clock budget (in minutes) is exhausted.

    A budget of ``-1`` minutes disables the check entirely.
    """

    def __init__(self, timeout_in_min=10):
        super(TerminateOnTimeOut, self).__init__()
        self.run_timestamp = None
        self.timeout_in_sec = timeout_in_min * 60

    def on_train_begin(self, logs={}):
        # Reference point for measuring the elapsed training time.
        self.run_timestamp = datetime.now()

    def on_batch_end(self, epoch, logs={}):
        # -1 means "no timeout".
        if self.timeout_in_sec == -1:
            return
        elapsed = (datetime.now() - self.run_timestamp).total_seconds()
        if elapsed >= self.timeout_in_sec:
            print(
                " - timeout: training time = %2.3fs/%2.3fs"
                % (elapsed, self.timeout_in_sec)
            )
            self.model.stop_training = True
| 1,364 | 40.363636 | 102 | py |
deephyper | deephyper-master/deephyper/keras/callbacks/csv_extended_logger.py | import collections
import io
import time
import csv
import numpy as np
import six
import tensorflow as tf
from tensorflow.python.lib.io import file_io
from tensorflow.python.util.compat import collections_abc
class CSVExtendedLogger(tf.keras.callbacks.Callback):
    """Callback that streams epoch results to a csv file.

    Supports all values that can be represented as a string,
    including 1D iterables such as np.ndarray. In addition to the standard
    Keras ``CSVLogger`` columns, each row records a ``timestamp`` and the
    epoch ``duration`` in seconds.

    Example:

    .. code-block:: python

        csv_logger = CSVExtendedLogger('training.log')
        model.fit(X_train, Y_train, callbacks=[csv_logger])

    Args:
        filename: filename of the csv file, e.g. 'run/log.csv'.
        separator: string used to separate elements in the csv file.
        append: True: append if file exists (useful for continuing
            training). False: overwrite existing file,
    """

    def __init__(self, filename, separator=",", append=False):
        self.sep = separator
        self.filename = filename
        self.append = append
        self.writer = None
        self.keys = None
        self.append_header = True
        # Python-2 compatibility flags kept from the original Keras CSVLogger.
        if six.PY2:
            self.file_flags = "b"
            self._open_args = {}
        else:
            self.file_flags = ""
            self._open_args = {"newline": "\n"}
        # Start-of-epoch wall-clock time; set in on_epoch_begin.
        self.timestamp = None
        super(CSVExtendedLogger, self).__init__()

    def on_train_begin(self, logs=None):
        # Only keep the existing header when appending to a non-empty file.
        if self.append:
            if file_io.file_exists(self.filename):
                with open(self.filename, "r" + self.file_flags) as f:
                    self.append_header = not bool(len(f.readline()))
            mode = "a"
        else:
            mode = "w"
        self.csv_file = io.open(
            self.filename, mode + self.file_flags, **self._open_args
        )

    def on_epoch_begin(self, epoch, logs=None):
        self.timestamp = time.time()

    def on_epoch_end(self, epoch, logs=None):
        timestamp = time.time()
        duration = timestamp - self.timestamp # duration of curent epoch
        logs = logs or {}

        def handle_value(k):
            # Render 1-D iterables as a quoted "[a, b, ...]" cell.
            is_zero_dim_ndarray = isinstance(k, np.ndarray) and k.ndim == 0
            if isinstance(k, six.string_types):
                return k
            elif isinstance(k, collections_abc.Iterable) and not is_zero_dim_ndarray:
                return '"[%s]"' % (", ".join(map(str, k)))
            else:
                return k

        if self.keys is None:
            self.keys = sorted(logs.keys())
        if self.model.stop_training:
            # We set NA so that csv parsers do not fail for this last epoch.
            # BUG FIX: previously the fallback value was the tuple (k, "NA")
            # instead of the string "NA".
            logs = {k: (logs[k] if k in logs else "NA") for k in self.keys}
        if not self.writer:

            class CustomDialect(csv.excel):
                delimiter = self.sep

            fieldnames = ["epoch", "timestamp", "duration"] + self.keys
            if six.PY2:
                fieldnames = [f"{x}" for x in fieldnames]
            self.writer = csv.DictWriter(
                self.csv_file, fieldnames=fieldnames, dialect=CustomDialect
            )
            if self.append_header:
                self.writer.writeheader()
        row_dict = collections.OrderedDict(
            {"epoch": epoch, "timestamp": timestamp, "duration": duration}
        )
        row_dict.update((key, handle_value(logs[key])) for key in self.keys)
        self.writer.writerow(row_dict)
        self.csv_file.flush()

    def on_train_end(self, logs=None):
        self.csv_file.close()
        self.writer = None
| 3,539 | 30.891892 | 85 | py |
deephyper | deephyper-master/deephyper/keras/callbacks/__init__.py | from deephyper.keras.callbacks.utils import import_callback
from deephyper.keras.callbacks.stop_if_unfeasible import StopIfUnfeasible
from deephyper.keras.callbacks.csv_extended_logger import CSVExtendedLogger
from deephyper.keras.callbacks.time_stopping import TimeStopping
from deephyper.keras.callbacks.learning_rate_warmup import (
LearningRateScheduleCallback,
LearningRateWarmupCallback,
)
__all__ = [
"import_callback",
"StopIfUnfeasible",
"CSVExtendedLogger",
"TimeStopping",
"LearningRateScheduleCallback",
"LearningRateWarmupCallback",
]
| 581 | 31.333333 | 75 | py |
deephyper | deephyper-master/deephyper/keras/callbacks/time_stopping.py | """Callback that stops training when a specified amount of time has passed.
source: https://github.com/tensorflow/addons/blob/master/tensorflow_addons/callbacks/time_stopping.py
"""
import datetime
import time
import tensorflow as tf
class TimeStopping(tf.keras.callbacks.Callback):
    """Stop training when a specified amount of time has passed.

    Args:
        seconds: maximum amount of time before stopping.
            Defaults to 86400 (1 day).
        verbose: verbosity mode. Defaults to 0.
    """

    def __init__(self, seconds: int = 86400, verbose: int = 0):
        super().__init__()
        self.seconds = seconds
        self.verbose = verbose
        # Epoch index at which training was stopped, or None if never stopped.
        self.stopped_epoch = None

    def on_train_begin(self, logs=None):
        # Absolute deadline computed once at the start of training.
        self.stopping_time = time.time() + self.seconds

    def on_epoch_end(self, epoch, logs=None):
        # BUG FIX: ``logs`` previously defaulted to a mutable ``{}``; use None
        # like every other hook (the argument is unused here anyway).
        if time.time() >= self.stopping_time:
            self.model.stop_training = True
            self.stopped_epoch = epoch

    def on_train_end(self, logs=None):
        if self.stopped_epoch is not None and self.verbose > 0:
            formatted_time = datetime.timedelta(seconds=self.seconds)
            msg = "Timed stopping at epoch {} after training for {}".format(
                self.stopped_epoch + 1, formatted_time
            )
            print(msg)

    def get_config(self):
        config = {"seconds": self.seconds, "verbose": self.verbose}
        base_config = super().get_config()
        return {**base_config, **config}
| 1,499 | 30.25 | 101 | py |
deephyper | deephyper-master/deephyper/keras/layers/_mpnn.py | import tensorflow as tf
import tensorflow.keras.backend as K
from tensorflow.keras import activations
from tensorflow.keras.layers import Dense
class SparseMPNN(tf.keras.layers.Layer):
    """Message passing cell.

    Args:
        state_dim (int): number of output channels.
        T (int): number of message passing repetition.
        attn_heads (int): number of attention heads.
        attn_method (str): type of attention methods.
        aggr_method (str): type of aggregation methods.
        activation (str): type of activation functions.
        update_method (str): type of update functions.
    """

    def __init__(
        self,
        state_dim,
        T,
        aggr_method,
        attn_method,
        update_method,
        attn_head,
        activation,
    ):
        # BUG FIX: ``self`` was previously passed to Layer.__init__, where it
        # was silently bound to the first positional parameter (``trainable``).
        super(SparseMPNN, self).__init__()
        self.state_dim = state_dim
        self.T = T
        self.activation = activations.get(activation)
        self.aggr_method = aggr_method
        self.attn_method = attn_method
        self.attn_head = attn_head
        self.update_method = update_method

    def build(self, input_shape):
        # Linear embedding of raw node features into the state dimension,
        # followed by a single (reused) message-passing layer.
        self.embed = tf.keras.layers.Dense(self.state_dim, activation=self.activation)
        self.MP = MessagePassing(
            self.state_dim,
            self.aggr_method,
            self.activation,
            self.attn_method,
            self.attn_head,
            self.update_method,
        )
        self.built = True

    def call(self, inputs, **kwargs):
        """Apply the layer on input tensors.

        Args:
            inputs (list):
                X (tensor): node feature tensor (batch size * # nodes * # node features)
                A (tensor): edge pair tensor (batch size * # edges * 2), one is source ID, one is target ID
                E (tensor): edge feature tensor (batch size * # edges * # edge features)
                mask (tensor): node mask tensor to mask out non-existent nodes (batch size * # nodes)
                degree (tensor): node degree tensor for GCN attention (batch size * # edges)

        Returns:
            X (tensor): results after several repetitions of edge network, attention, aggregation and update function (batch size * # nodes * # node features)
        """
        # The input contains a list of five tensors.
        X, A, E, mask, degree = inputs
        # Edge pairs index into the node axis, so they must be integers.
        A = tf.cast(A, tf.int32)
        # Map node features to (batch size * # nodes * state dimension).
        X = self.embed(X)
        # Run T rounds of message passing with shared weights.
        for _ in range(self.T):
            X = self.MP([X, A, E, mask, degree])
        return X
class MessagePassing(tf.keras.layers.Layer):
    """Message passing layer.

    Args:
        state_dim (int): number of output channels.
        attn_heads (int): number of attention heads.
        attn_method (str): type of attention methods.
        aggr_method (str): type of aggregation methods.
        activation (str): type of activation functions.
        update_method (str): type of update functions.
    """

    def __init__(
        self, state_dim, aggr_method, activation, attn_method, attn_head, update_method
    ):
        # BUG FIX: ``self`` was previously passed to Layer.__init__, where it
        # was silently bound to the first positional parameter (``trainable``).
        super(MessagePassing, self).__init__()
        self.state_dim = state_dim
        self.aggr_method = aggr_method
        self.activation = activation
        self.attn_method = attn_method
        self.attn_head = attn_head
        self.update_method = update_method

    def build(self, input_shape):
        self.message_passer = MessagePasserNNM(
            self.state_dim,
            self.attn_head,
            self.attn_method,
            self.aggr_method,
            self.activation,
        )
        # Select the node-state update function by name.
        if self.update_method == "gru":
            self.update_functions = UpdateFuncGRU(self.state_dim)
        elif self.update_method == "mlp":
            self.update_functions = UpdateFuncMLP(self.state_dim, self.activation)
        self.built = True

    def call(self, inputs, **kwargs):
        """Apply the layer on input tensors.

        Args:
            inputs (list):
                X (tensor): node feature tensor (batch size * # nodes * state dimension)
                A (tensor): edge pair tensor (batch size * # edges * 2), one is source ID, one is target ID
                E (tensor): edge feature tensor (batch size * # edges * # edge features)
                mask (tensor): node mask tensor to mask out non-existent nodes (batch size * # nodes)
                degree (tensor): node degree tensor for GCN attention (batch size * # edges)

        Returns:
            updated_nodes (tensor): results after edge network, attention, aggregation and update function (batch size * # nodes * state dimension)
        """
        # The input contains a list of five tensors.
        X, A, E, mask, degree = inputs
        # Aggregate neighbor messages: agg_m (batch size * # nodes * state dimension).
        agg_m = self.message_passer([X, A, E, degree])
        # Expand the mask to (batch size * # nodes * state dimension).
        mask = tf.tile(mask[..., None], [1, 1, self.state_dim])
        # Zero out messages for padded (non-existent) nodes.
        agg_m = tf.multiply(agg_m, mask)
        # Combine the old node states with the aggregated messages.
        updated_nodes = self.update_functions([X, agg_m])
        # Zero out updated states for padded nodes as well.
        updated_nodes = tf.multiply(updated_nodes, mask)
        return updated_nodes
class MessagePasserNNM(tf.keras.layers.Layer):
    """Message passing kernel (edge-network style, cf. Gilmer et al. MPNN).

    Maps edge features through an MLP to per-edge transformation matrices,
    applies them to source-node states, weights the results by an attention
    mechanism, and aggregates per target node.

    Args:
        state_dim (int): number of output channels.
        attn_heads (int): number of attention heads.
        attn_method (str): type of attention methods.
        aggr_method (str): type of aggregation methods.
        activation (str): type of activation functions.
    """

    def __init__(self, state_dim, attn_heads, attn_method, aggr_method, activation):
        super(MessagePasserNNM, self).__init__()
        self.state_dim = state_dim
        self.attn_heads = attn_heads
        self.attn_method = attn_method
        self.aggr_method = aggr_method
        self.activation = activation

    def build(self, input_shape):
        # Edge network: two hidden layers of width 32, then a projection to
        # one (state_dim x state_dim) matrix per attention head.
        self.nn1 = tf.keras.layers.Dense(units=32, activation=tf.nn.relu)
        self.nn2 = tf.keras.layers.Dense(units=32, activation=tf.nn.relu)
        self.nn3 = tf.keras.layers.Dense(
            units=self.attn_heads * self.state_dim * self.state_dim,
            activation=tf.nn.relu,
        )
        # Select the attention mechanism by name.
        if self.attn_method == "gat":
            self.attn_func = AttentionGAT(self.state_dim, self.attn_heads)
        elif self.attn_method == "sym-gat":
            self.attn_func = AttentionSymGAT(self.state_dim, self.attn_heads)
        elif self.attn_method == "cos":
            self.attn_func = AttentionCOS(self.state_dim, self.attn_heads)
        elif self.attn_method == "linear":
            self.attn_func = AttentionLinear(self.state_dim, self.attn_heads)
        elif self.attn_method == "gen-linear":
            self.attn_func = AttentionGenLinear(self.state_dim, self.attn_heads)
        elif self.attn_method == "const":
            self.attn_func = AttentionConst(self.state_dim, self.attn_heads)
        elif self.attn_method == "gcn":
            self.attn_func = AttentionGCN(self.state_dim, self.attn_heads)
        self.bias = self.add_weight(
            name="attn_bias", shape=[self.state_dim], initializer="zeros"
        )
        self.built = True

    def call(self, inputs, **kwargs):
        """Apply the layer on input tensors.

        Args:
            inputs (list):
                X (tensor): node feature tensor (batch size * # nodes * state dimension)
                A (tensor): edge pair tensor (batch size * # edges * 2), one is source ID, one is target ID
                E (tensor): edge feature tensor (batch size * # edges * # edge features)
                degree (tensor): node degree tensor for GCN attention (batch size * # edges)

        Returns:
            output (tensor): results after edge network, attention and aggregation (batch size * # nodes * state dimension)
        """
        # Edge network to transform edge information to message weight.
        # The input contains a list of four tensors.
        X, A, E, degree = inputs
        # N is the number of nodes (scalar).
        N = K.int_shape(X)[1]
        # Extract target and source IDs from the edge pair.
        # targets (batch size * # edges)
        # sources (batch size * # edges)
        targets, sources = A[..., -2], A[..., -1]
        # First edge-network layer; W (batch size * # edges * 32).
        W = self.nn1(E)
        # W (batch size * # edges * 32)
        W = self.nn2(W)
        # W (batch size * # edges * attn heads * state dimension ** 2)
        W = self.nn3(W)
        # Reshape W to (batch size * # edges * # attn heads * state dim * state dim).
        W = tf.reshape(
            W, [-1, tf.shape(E)[1], self.attn_heads, self.state_dim, self.state_dim]
        )
        # Expand node features to (batch size * # nodes * state dim * # attn heads).
        X = tf.tile(X[..., None], [1, 1, 1, self.attn_heads])
        # Transpose to (batch size * # nodes * # attn heads * state dim).
        X = tf.transpose(X, [0, 1, 3, 2])
        # Attention coefficients for each edge message.
        # attn_coef (batch size * # edges * # attn heads * state dimension)
        attn_coef = self.attn_func([X, N, targets, sources, degree])
        # Gather source node features.
        # The batch_dims argument lets you gather different items from each element of a batch.
        # Using batch_dims=1 is equivalent to having an outer loop over the first axis of params and indices:
        # Here is an example from https://www.tensorflow.org/api_docs/python/tf/gather
        # params = tf.constant([
        # [0, 0, 1, 0, 2],
        # [3, 0, 0, 0, 4],
        # [0, 5, 0, 6, 0]])
        # indices = tf.constant([
        # [2, 4],
        # [0, 4],
        # [1, 3]])
        # tf.gather(params, indices, axis=1, batch_dims=1).numpy()
        # array([[1, 2],
        # [3, 4],
        # [5, 6]], dtype=int32)
        # messages (batch size * # edges * # attn heads * state dimension)
        messages = tf.gather(X, sources, batch_dims=1, axis=1)
        # messages (batch size * # edges * # attn heads * state dimension * 1)
        messages = messages[..., None]
        # Apply the per-edge transformation matrix to the source state:
        # W (batch size * # edges * # attn heads * state dim * state dim)
        # @ messages (batch size * # edges * # attn heads * state dim * 1)
        # --> messages (batch size * # edges * # attn heads * state dim * 1)
        messages = tf.matmul(W, messages)
        # messages (batch size * # edges * # attn heads * state dimension)
        messages = messages[..., 0]
        # Element-wise attention weighting of the messages.
        # output (batch size * # edges * # attn heads * state dimension)
        output = attn_coef * messages
        # Batch size (dynamic).
        num_rows = tf.shape(targets)[0]
        # [0, ..., batch size) (batch size,)
        rows_idx = tf.range(num_rows)
        # Offset target IDs by N per batch row so segment IDs are globally unique.
        segment_ids_per_row = targets + N * tf.expand_dims(rows_idx, axis=1)
        # Aggregation to summarize neighboring node messages.
        if self.aggr_method == "max":
            output = tf.math.unsorted_segment_max(
                output, segment_ids_per_row, N * num_rows
            )
        elif self.aggr_method == "mean":
            output = tf.math.unsorted_segment_mean(
                output, segment_ids_per_row, N * num_rows
            )
        elif self.aggr_method == "sum":
            output = tf.math.unsorted_segment_sum(
                output, segment_ids_per_row, N * num_rows
            )
        # Restore the batch axis: (batch size * # nodes * # attn heads * state dim).
        output = tf.reshape(output, [-1, N, self.attn_heads, self.state_dim])
        # Average over attention heads: (batch size * # nodes * state dimension).
        output = tf.reduce_mean(output, axis=-2)
        # Add bias; output (batch size * # nodes * state dimension).
        output = K.bias_add(output, self.bias)
        return output
class UpdateFuncGRU(tf.keras.layers.Layer):
    """Gated recurrent unit update function.

    Treats (old state, aggregated message) as a length-2 sequence per node and
    lets a GRU produce the updated node state.
    Check details here https://arxiv.org/abs/1412.3555

    Args:
        state_dim (int): number of output channels.
    """

    def __init__(self, state_dim):
        super(UpdateFuncGRU, self).__init__()
        self.state_dim = state_dim

    def build(self, input_shape):
        # Concatenate along the sequence (time) axis, then run a GRU over it.
        self.concat_layer = tf.keras.layers.Concatenate(axis=1)
        self.GRU = tf.keras.layers.GRU(self.state_dim)
        self.built = True

    def call(self, inputs, **kwargs):
        """Apply the layer on input tensors.

        Args:
            inputs (list):
                old_state (tensor): node hidden feature tensor (batch size * # nodes * state dimension)
                agg_messages (tensor): node hidden feature tensor (batch size * # nodes * state dimension)

        Returns:
            activation (tensor): activated tensor from update function (batch size * # nodes * state dimension)
        """
        # old_state (batch size * # nodes * state dimension)
        # agg_messages (batch size * # nodes * state dimension)
        old_state, agg_messages = inputs
        # B: batch size, N: # nodes, F: state dimension.
        B, N, F = K.int_shape(old_state)
        # Same decomposition for the aggregated messages (F1 == F).
        B1, N1, F1 = K.int_shape(agg_messages)
        # Flatten batch and node axes so the GRU sees one node per "sample":
        # old_state (batch*nodes, 1, state dimension)
        old_state = tf.reshape(old_state, [-1, 1, F])
        # agg_messages (batch*nodes, 1, state dimension)
        agg_messages = tf.reshape(agg_messages, [-1, 1, F1])
        # concat (batch*nodes, 2, state dimension): old state then message.
        concat = self.concat_layer([old_state, agg_messages])
        # Apply GRU, then restore the (batch, nodes, state dim) layout.
        # activation (batch*nodes, state dimension)
        activation = self.GRU(concat)
        activation = tf.reshape(activation, [-1, N, F])
        return activation
class UpdateFuncMLP(tf.keras.layers.Layer):
    """Multi-layer perceptron update function.

    Concatenates the previous node state with the aggregated messages along the
    feature axis and projects the result through a single dense layer.

    Args:
        state_dim (int): number of output channels.
        activation (str): the type of activation functions.
    """

    def __init__(self, state_dim, activation):
        super(UpdateFuncMLP, self).__init__()
        self.state_dim = state_dim
        self.activation = activation

    def build(self, input_shape):
        self.concat_layer = tf.keras.layers.Concatenate(axis=-1)
        # NOTE(review): kernel_initializer="zeros" means the layer initially
        # outputs only its bias -- confirm this is intended.
        self.dense = tf.keras.layers.Dense(
            self.state_dim, activation=self.activation, kernel_initializer="zeros"
        )
        # Mark the layer as built, for consistency with the sibling
        # update/attention layers in this module.
        self.built = True

    def call(self, inputs, **kwargs):
        """Apply the layer on input tensors.

        Args:
            inputs (list):
                old_state (tensor): node hidden feature tensor
                agg_messages (tensor): aggregated incoming message tensor

        Returns:
            activation (tensor): activated tensor from update function
        """
        old_state, agg_messages = inputs
        concat = self.concat_layer([old_state, agg_messages])
        activation = self.dense(concat)
        return activation
class AttentionGAT(tf.keras.layers.Layer):
    """GAT Attention. Check details here https://arxiv.org/abs/1710.10903

    The attention coefficient between node :math:`i` and :math:`j` is calculated as:

    .. math::

        \\text{LeakyReLU}(\\textbf{a}(\\textbf{Wh}_i||\\textbf{Wh}_j))

    where :math:`\\textbf{a}` is a trainable vector, and :math:`||` represents concatenation.

    Args:
        state_dim (int): number of output channels.
        attn_heads (int): number of attention heads.
    """

    def __init__(self, state_dim, attn_heads):
        super(AttentionGAT, self).__init__()
        self.state_dim = state_dim
        self.attn_heads = attn_heads

    def build(self, input_shape):
        self.attn_kernel_self = self.add_weight(
            name="attn_kernel_self",
            shape=[self.state_dim, self.attn_heads, 1],
            initializer="glorot_uniform",
        )
        self.attn_kernel_adjc = self.add_weight(
            name="attn_kernel_adjc",
            shape=[self.state_dim, self.attn_heads, 1],
            initializer="glorot_uniform",
        )
        self.built = True

    def call(self, inputs, **kwargs):
        """Apply the layer on input tensors.

        Args:
            inputs (list):
                X (tensor): node feature tensor
                N (int): number of nodes
                targets (tensor): target node index tensor
                sources (tensor): source node index tensor
                degree (tensor): node degree sqrt tensor (for GCN attention)

        Returns:
            attn_coef (tensor): attention coefficient tensor
        """
        X, N, targets, sources, _ = inputs
        attn_kernel_self = tf.transpose(self.attn_kernel_self, (2, 1, 0))
        attn_kernel_adjc = tf.transpose(self.attn_kernel_adjc, (2, 1, 0))
        # Per-edge raw scores: a_self . h_target + a_adjc . h_source.
        attn_for_self = tf.reduce_sum(X * attn_kernel_self[None, ...], -1)
        attn_for_self = tf.gather(attn_for_self, targets, batch_dims=1)
        attn_for_adjc = tf.reduce_sum(X * attn_kernel_adjc[None, ...], -1)
        attn_for_adjc = tf.gather(attn_for_adjc, sources, batch_dims=1)
        attn_coef = attn_for_self + attn_for_adjc
        attn_coef = tf.nn.leaky_relu(attn_coef, alpha=0.2)
        # Numerically-stable segment softmax over each target node's incoming
        # edges: subtract the per-segment max before exponentiating...
        attn_coef = tf.exp(
            attn_coef
            - tf.gather(tf.math.unsorted_segment_max(attn_coef, targets, N), targets)
        )
        # ...and normalize by the per-segment *sum*. (Bug fix: the original
        # divided by the segment max, which does not yield a softmax
        # distribution -- coefficients did not sum to 1 per node.)
        attn_coef /= tf.gather(
            tf.math.unsorted_segment_sum(attn_coef, targets, N) + 1e-9, targets
        )
        # NOTE(review): dropout is applied unconditionally, i.e. also at
        # inference time -- confirm this is intended.
        attn_coef = tf.nn.dropout(attn_coef, 0.5)
        attn_coef = attn_coef[..., None]
        return attn_coef
class AttentionSymGAT(tf.keras.layers.Layer):
    """GAT Symmetry Attention.

    The attention coefficient between node :math:`i` and :math:`j` is calculated as:

    .. math::

        \\alpha_{ij} + \\alpha_{ji}

    based on GAT.

    Args:
        state_dim (int): number of output channels.
        attn_heads (int): number of attention heads.
    """

    def __init__(self, state_dim, attn_heads):
        super(AttentionSymGAT, self).__init__()
        self.state_dim = state_dim
        self.attn_heads = attn_heads

    def build(self, input_shape):
        self.attn_kernel_self = self.add_weight(
            name="attn_kernel_self",
            shape=[self.state_dim, self.attn_heads, 1],
            initializer="glorot_uniform",
        )
        self.attn_kernel_adjc = self.add_weight(
            name="attn_kernel_adjc",
            shape=[self.state_dim, self.attn_heads, 1],
            initializer="glorot_uniform",
        )
        self.built = True

    def call(self, inputs, **kwargs):
        """Apply the layer on input tensors.

        Args:
            inputs (list):
                X (tensor): node feature tensor
                N (int): number of nodes
                targets (tensor): target node index tensor
                sources (tensor): source node index tensor
                degree (tensor): node degree sqrt tensor (for GCN attention)

        Returns:
            attn_coef (tensor): attention coefficient tensor
        """
        X, N, targets, sources, _ = inputs
        attn_kernel_self = tf.transpose(self.attn_kernel_self, (2, 1, 0))
        attn_kernel_adjc = tf.transpose(self.attn_kernel_adjc, (2, 1, 0))
        # Keep the *per-node* scores so both edge directions can be gathered.
        # (Bug fix: the original overwrote these with edge-gathered tensors and
        # then gathered again with node indices, and built the reverse terms
        # from the "self" kernel only.)
        self_scores = tf.reduce_sum(X * attn_kernel_self[None, ...], -1)
        adjc_scores = tf.reduce_sum(X * attn_kernel_adjc[None, ...], -1)
        # e_ij + e_ji: forward and reversed edge scores.
        attn_coef = (
            tf.gather(self_scores, targets, batch_dims=1)
            + tf.gather(adjc_scores, sources, batch_dims=1)
            + tf.gather(self_scores, sources, batch_dims=1)
            + tf.gather(adjc_scores, targets, batch_dims=1)
        )
        attn_coef = tf.nn.leaky_relu(attn_coef, alpha=0.2)
        # Numerically-stable segment softmax per target node: shift by the
        # segment max, exponentiate, then normalize by the segment *sum*.
        # (Bug fix: the original divided by the segment max, which is not a
        # softmax.)
        attn_coef = tf.exp(
            attn_coef
            - tf.gather(tf.math.unsorted_segment_max(attn_coef, targets, N), targets)
        )
        attn_coef /= tf.gather(
            tf.math.unsorted_segment_sum(attn_coef, targets, N) + 1e-9, targets
        )
        # NOTE(review): dropout runs unconditionally (also at inference) --
        # confirm intended.
        attn_coef = tf.nn.dropout(attn_coef, 0.5)
        attn_coef = attn_coef[..., None]
        return attn_coef
class AttentionCOS(tf.keras.layers.Layer):
    """COS Attention.

    Check details here https://arxiv.org/abs/1803.07294

    The attention coefficient between node $i$ and $j$ is calculated as:

    .. math::

        \\textbf{a}(\\textbf{Wh}_i || \\textbf{Wh}_j)

    where :math:`\\textbf{a}` is a trainable vector.

    Args:
        state_dim (int): number of output channels.
        attn_heads (int): number of attention heads.
    """

    def __init__(self, state_dim, attn_heads):
        super(AttentionCOS, self).__init__()
        self.state_dim = state_dim
        self.attn_heads = attn_heads

    def build(self, input_shape):
        self.attn_kernel_self = self.add_weight(
            name="attn_kernel_self",
            shape=[self.state_dim, self.attn_heads, 1],
            initializer="glorot_uniform",
        )
        self.attn_kernel_adjc = self.add_weight(
            name="attn_kernel_adjc",
            shape=[self.state_dim, self.attn_heads, 1],
            initializer="glorot_uniform",
        )
        self.built = True

    def call(self, inputs, **kwargs):
        """Apply the layer on input tensors.

        Args:
            inputs (list):
                X (tensor): node feature tensor
                N (int): number of nodes
                targets (tensor): target node index tensor
                sources (tensor): source node index tensor
                degree (tensor): node degree sqrt tensor (for GCN attention)

        Returns:
            attn_coef (tensor): attention coefficient tensor (batch, E, H, 1)
        """
        X, N, targets, sources, _ = inputs
        attn_kernel_self = tf.transpose(self.attn_kernel_self, (2, 1, 0))
        attn_kernel_adjc = tf.transpose(self.attn_kernel_adjc, (2, 1, 0))
        attn_for_self = tf.reduce_sum(X * attn_kernel_self[None, ...], -1)
        attn_for_self = tf.gather(attn_for_self, targets, batch_dims=1)
        attn_for_adjc = tf.reduce_sum(X * attn_kernel_adjc[None, ...], -1)
        attn_for_adjc = tf.gather(attn_for_adjc, sources, batch_dims=1)
        # Multiplicative (cosine-style) interaction between the two scores.
        attn_coef = tf.multiply(attn_for_self, attn_for_adjc)
        attn_coef = tf.nn.leaky_relu(attn_coef, alpha=0.2)
        # Numerically-stable segment softmax per target node: shift by the
        # segment max, exponentiate, then normalize by the segment *sum*.
        # (Bug fix: the original divided by the segment max, which is not a
        # softmax.)
        attn_coef = tf.exp(
            attn_coef
            - tf.gather(tf.math.unsorted_segment_max(attn_coef, targets, N), targets)
        )
        attn_coef /= tf.gather(
            tf.math.unsorted_segment_sum(attn_coef, targets, N) + 1e-9, targets
        )
        # NOTE(review): dropout runs unconditionally (also at inference) --
        # confirm intended.
        attn_coef = tf.nn.dropout(attn_coef, 0.5)
        attn_coef = attn_coef[..., None]
        return attn_coef
class AttentionLinear(tf.keras.layers.Layer):
    """Linear Attention.

    The attention coefficient between node :math:`i` and :math:`j` is calculated as:

    .. math::

        \\text{tanh} (\\textbf{a}_l\\textbf{Wh}_i + \\textbf{a}_r\\textbf{Wh}_j)

    where :math:`\\textbf{a}_l` and :math:`\\textbf{a}_r` are trainable vectors.

    NOTE(review): the implementation only uses the source-side vector
    (``attn_kernel_adjc``); the docstring formula mentions two vectors --
    confirm which is intended.

    Args:
        state_dim (int): number of output channels.
        attn_heads (int): number of attention heads.
    """

    def __init__(self, state_dim, attn_heads):
        super(AttentionLinear, self).__init__()
        self.state_dim = state_dim
        self.attn_heads = attn_heads

    def build(self, input_shape):
        self.attn_kernel_adjc = self.add_weight(
            name="attn_kernel_adjc",
            shape=[self.state_dim, self.attn_heads, 1],
            initializer="glorot_uniform",
        )
        self.built = True

    def call(self, inputs, **kwargs):
        """Apply the layer on input tensors.

        Args:
            inputs (list):
                X (tensor): node feature tensor
                N (int): number of nodes
                targets (tensor): target node index tensor
                sources (tensor): source node index tensor
                degree (tensor): node degree sqrt tensor (for GCN attention)

        Returns:
            attn_coef (tensor): attention coefficient tensor
        """
        X, N, targets, sources, _ = inputs
        attn_kernel_adjc = tf.transpose(self.attn_kernel_adjc, (2, 1, 0))
        attn_for_adjc = tf.reduce_sum(X * attn_kernel_adjc[None, ...], -1)
        attn_for_adjc = tf.gather(attn_for_adjc, sources, batch_dims=1)
        attn_coef = tf.nn.tanh(attn_for_adjc)
        # Numerically-stable segment softmax per target node: shift by the
        # segment max, exponentiate, then normalize by the segment *sum*.
        # (Bug fix: the original divided by the segment max, which is not a
        # softmax.)
        attn_coef = tf.exp(
            attn_coef
            - tf.gather(tf.math.unsorted_segment_max(attn_coef, targets, N), targets)
        )
        attn_coef /= tf.gather(
            tf.math.unsorted_segment_sum(attn_coef, targets, N) + 1e-9, targets
        )
        # NOTE(review): dropout runs unconditionally (also at inference) --
        # confirm intended.
        attn_coef = tf.nn.dropout(attn_coef, 0.5)
        attn_coef = attn_coef[..., None]
        return attn_coef
class AttentionGenLinear(tf.keras.layers.Layer):
    """Generalized Linear Attention.

    Check details here https://arxiv.org/abs/1802.00910

    The attention coefficient between node :math:`i` and :math:`j` is calculated as:

    .. math::

        \\textbf{W}_G \\text{tanh} (\\textbf{Wh}_i + \\textbf{Wh}_j)

    where :math:`\\textbf{W}_G` is a trainable matrix.

    Args:
        state_dim (int): number of output channels.
        attn_heads (int): number of attention heads.
    """

    def __init__(self, state_dim, attn_heads):
        super(AttentionGenLinear, self).__init__()
        self.state_dim = state_dim
        self.attn_heads = attn_heads

    def build(self, input_shape):
        self.attn_kernel_self = self.add_weight(
            name="attn_kernel_self",
            shape=[self.state_dim, self.attn_heads, 1],
            initializer="glorot_uniform",
        )
        self.attn_kernel_adjc = self.add_weight(
            name="attn_kernel_adjc",
            shape=[self.state_dim, self.attn_heads, 1],
            initializer="glorot_uniform",
        )
        # W_G: head-mixing projection applied after the tanh non-linearity.
        self.gen_nn = tf.keras.layers.Dense(
            units=self.attn_heads, kernel_initializer="glorot_uniform", use_bias=False
        )
        self.built = True

    def call(self, inputs, **kwargs):
        """Apply the layer on input tensors.

        Args:
            inputs (list):
                X (tensor): node feature tensor
                N (int): number of nodes
                targets (tensor): target node index tensor
                sources (tensor): source node index tensor
                degree (tensor): node degree sqrt tensor (for GCN attention)

        Returns:
            attn_coef (tensor): attention coefficient tensor
        """
        X, N, targets, sources, _ = inputs
        attn_kernel_self = tf.transpose(self.attn_kernel_self, (2, 1, 0))
        attn_kernel_adjc = tf.transpose(self.attn_kernel_adjc, (2, 1, 0))
        attn_for_self = tf.reduce_sum(X * attn_kernel_self[None, ...], -1)
        attn_for_self = tf.gather(attn_for_self, targets, batch_dims=1)
        attn_for_adjc = tf.reduce_sum(X * attn_kernel_adjc[None, ...], -1)
        attn_for_adjc = tf.gather(attn_for_adjc, sources, batch_dims=1)
        attn_coef = attn_for_self + attn_for_adjc
        attn_coef = tf.nn.tanh(attn_coef)
        attn_coef = self.gen_nn(attn_coef)
        # Numerically-stable segment softmax per target node: shift by the
        # segment max, exponentiate, then normalize by the segment *sum*.
        # (Bug fix: the original divided by the segment max, which is not a
        # softmax.)
        attn_coef = tf.exp(
            attn_coef
            - tf.gather(tf.math.unsorted_segment_max(attn_coef, targets, N), targets)
        )
        attn_coef /= tf.gather(
            tf.math.unsorted_segment_sum(attn_coef, targets, N) + 1e-9, targets
        )
        # NOTE(review): dropout runs unconditionally (also at inference) --
        # confirm intended.
        attn_coef = tf.nn.dropout(attn_coef, 0.5)
        attn_coef = attn_coef[..., None]
        return attn_coef
class AttentionGCN(tf.keras.layers.Layer):
    """GCN Attention.

    The attention coefficient between node :math:`i` and :math:`j` is calculated as:

    .. math::

        \\frac{1}{\\sqrt{|\\mathcal{N}(i)||\\mathcal{N}(j)|}}

    where :math:`\\mathcal{N}(i)` is the number of neighboring nodes of node :math:`i`.

    Args:
        state_dim (int): number of output channels.
        attn_heads (int): number of attention heads.
    """

    def __init__(self, state_dim, attn_heads):
        super(AttentionGCN, self).__init__()
        self.state_dim = state_dim
        self.attn_heads = attn_heads

    def call(self, inputs, **kwargs):
        """Apply the layer on input tensors.

        Args:
            inputs (list):
                X (tensor): node feature tensor
                N (int): number of nodes
                targets (tensor): target node index tensor
                sources (tensor): source node index tensor
                degree (tensor): node degree sqrt tensor (for GCN attention)

        Returns:
            attn_coef (tensor): attention coefficient tensor
        """
        # Only the degree normalization term is used; replicate it over heads.
        degree = inputs[-1]
        expanded = degree[..., None, None]
        return tf.tile(expanded, [1, 1, self.attn_heads, 1])
class AttentionConst(tf.keras.layers.Layer):
    """Constant Attention.

    The attention coefficient between node :math:`i` and :math:`j` is calculated as:

    .. math::

        \\alpha_{ij} = 1

    Args:
        state_dim (int): number of output channels.
        attn_heads (int): number of attention heads.
    """

    def __init__(self, state_dim, attn_heads):
        super(AttentionConst, self).__init__()
        self.state_dim = state_dim
        self.attn_heads = attn_heads

    def call(self, inputs, **kwargs):
        """Apply the layer on input tensors.

        Args:
            inputs (list):
                X (tensor): node feature tensor
                N (int): number of nodes
                targets (tensor): target node index tensor
                sources (tensor): source node index tensor
                degree (tensor): node degree sqrt tensor (for GCN attention)

        Returns:
            attn_coef (tensor): attention coefficient tensor
        """
        # A uniform weight of 1 for every edge and head; only the edge count
        # (shape of ``targets``) is needed.
        _, _, targets, _, _ = inputs
        edge_shape = tf.shape(targets)
        return tf.ones((edge_shape[0], edge_shape[1], self.attn_heads, 1))
class GlobalAttentionPool(tf.keras.layers.Layer):
    """Global Attention Pool.

    A gated attention global pooling layer as presented by [Li et al. (2017)](https://arxiv.org/abs/1511.05493). Details can be seen from https://github.com/danielegrattarola/spektral

    Args:
        state_dim (int): number of output channels.
    """

    def __init__(self, state_dim, **kwargs):
        super(GlobalAttentionPool, self).__init__()
        self.state_dim = state_dim
        self.kwargs = kwargs

    def __str__(self):
        return "GlobalAttentionPool"

    def build(self, input_shape):
        # Fully-qualified ``tf.keras.layers.Dense`` for consistency with the
        # other layers in this module (the original used a bare ``Dense``).
        self.features_layer = tf.keras.layers.Dense(
            self.state_dim, name="features_layer"
        )
        self.attention_layer = tf.keras.layers.Dense(
            self.state_dim, name="attention_layer", activation="sigmoid"
        )
        self.built = True

    def call(self, inputs, **kwargs):
        """Apply the layer on input tensors.

        Args:
            inputs (tensor): the node feature tensor

        Returns:
            GlobalAttentionPool tensor (tensor)
        """
        inputs_linear = self.features_layer(inputs)
        # Sigmoid gate in [0, 1] masks the linear features before summing
        # over the node axis.
        attn = self.attention_layer(inputs)
        masked_inputs = inputs_linear * attn
        output = K.sum(masked_inputs, axis=-2, keepdims=False)
        return output
class GlobalAttentionSumPool(tf.keras.layers.Layer):
    """Global Attention Summation Pool.

    Pools a graph by learning attention coefficients to sum node features.
    Details can be seen from https://github.com/danielegrattarola/spektral
    """

    def __init__(self, **kwargs):
        super(GlobalAttentionSumPool, self).__init__()
        self.kwargs = kwargs

    def __str__(self):
        return "GlobalAttentionSumPool"

    def build(self, input_shape):
        num_features = int(input_shape[-1])
        # One scalar attention score per node.
        self.attn_kernel = self.add_weight(
            shape=(num_features, 1), initializer="glorot_uniform", name="attn_kernel"
        )
        self.built = True

    def call(self, inputs, **kwargs):
        """Apply the layer on input tensors.

        Args:
            inputs (tensor): the node feature tensor

        Returns:
            GlobalAttentionSumPool tensor (tensor)
        """
        node_features = inputs
        # Softmax over node scores, then attention-weighted sum of features.
        scores = K.squeeze(K.dot(node_features, self.attn_kernel), -1)
        weights = K.softmax(scores)
        return K.batch_dot(weights, node_features)
class GlobalAvgPool(tf.keras.layers.Layer):
    """Global Average Pool.

    Takes the average over all the nodes or features.
    Details can be seen from https://github.com/danielegrattarola/spektral

    Args:
        axis (int): the axis to take average.
    """

    def __init__(self, axis=-2, **kwargs):
        super(GlobalAvgPool, self).__init__()
        self.axis = axis
        self.kwargs = kwargs

    def __str__(self):
        return "GlobalAvgPool"

    def call(self, inputs, **kwargs):
        """Apply the layer on input tensors.

        Args:
            inputs (tensor): the node feature tensor

        Returns:
            GlobalAvgPool tensor (tensor)
        """
        # Mean-reduce along the configured axis (node axis by default).
        pooled = tf.reduce_mean(inputs, axis=self.axis)
        return pooled
class GlobalMaxPool(tf.keras.layers.Layer):
    """Global Max Pool.

    Takes the max value over all the nodes or features.
    Details can be seen from https://github.com/danielegrattarola/spektral

    Args:
        axis (int): the axis to take the max value.
    """

    def __init__(self, axis=-2, **kwargs):
        super(GlobalMaxPool, self).__init__()
        self.axis = axis
        self.kwargs = kwargs

    def __str__(self):
        return "GlobalMaxPool"

    def call(self, inputs, **kwargs):
        """Apply the layer on input tensors.

        Args:
            inputs (tensor): the node feature tensor

        Returns:
            GlobalMaxPool tensor (tensor)
        """
        # Max-reduce along the configured axis (node axis by default).
        pooled = tf.reduce_max(inputs, axis=self.axis)
        return pooled
class GlobalSumPool(tf.keras.layers.Layer):
    """Global Summation Pool.

    Takes the summation over all the nodes or features.
    Details can be seen from https://github.com/danielegrattarola/spektral

    Args:
        axis (int): the axis to take summation.
    """

    def __init__(self, axis=-2, **kwargs):
        super(GlobalSumPool, self).__init__()
        self.axis = axis
        self.kwargs = kwargs

    def __str__(self):
        return "GlobalSumPool"

    def call(self, inputs, **kwargs):
        """Apply the layer on input tensors.

        Args:
            inputs (tensor): the node feature tensor

        Returns:
            GlobalSumPool tensor (tensor)
        """
        # Sum-reduce along the configured axis (node axis by default).
        pooled = tf.reduce_sum(inputs, axis=self.axis)
        return pooled
| 36,142 | 35.471241 | 183 | py |
deephyper | deephyper-master/deephyper/keras/layers/__init__.py | from deephyper.keras.layers._mpnn import (
AttentionConst,
AttentionCOS,
AttentionGAT,
AttentionGCN,
AttentionGenLinear,
AttentionLinear,
AttentionSymGAT,
GlobalAttentionPool,
GlobalAttentionSumPool,
GlobalAvgPool,
GlobalMaxPool,
GlobalSumPool,
MessagePasserNNM,
MessagePassing,
SparseMPNN,
UpdateFuncGRU,
UpdateFuncMLP,
)
from deephyper.keras.layers._padding import Padding
# Public API of the layers package. ``Padding`` is imported above and exposed
# through ``custom_objects``; it was missing from ``__all__``.
__all__ = [
    "AttentionConst",
    "AttentionCOS",
    "AttentionGAT",
    "AttentionGCN",
    "AttentionGenLinear",
    "AttentionLinear",
    "AttentionSymGAT",
    "GlobalAttentionPool",
    "GlobalAttentionSumPool",
    "GlobalAvgPool",
    "GlobalMaxPool",
    "GlobalSumPool",
    "MessagePasserNNM",
    "MessagePassing",
    "Padding",
    "SparseMPNN",
    "UpdateFuncGRU",
    "UpdateFuncMLP",
]

# When loading models with: "model.load('file.h5', custom_objects=custom_objects)"
custom_objects = {"Padding": Padding}
| 960 | 20.840909 | 82 | py |
deephyper | deephyper-master/deephyper/keras/layers/_padding.py | import tensorflow as tf
class Padding(tf.keras.layers.Layer):
    """Multi-dimensions padding layer.

    This operation pads a tensor according to the paddings you specify. paddings is an
    integer tensor with shape [n-1, 2], where n is the rank of tensor. For each dimension
    D of input, paddings[D, 0] indicates how many values to add before the contents of
    tensor in that dimension, and paddings[D, 1] indicates how many values to add after
    the contents of tensor in that dimension. The first dimension corresponding to the
    batch size cannot be padded.

    Args:
        padding (list(list(int))): e.g. [[1, 1]]
        mode (str): 'CONSTANT', 'REFLECT' or 'SYMMETRIC'
    """

    def __init__(self, padding, mode="CONSTANT", constant_values=0, **kwargs):
        super(Padding, self).__init__(**kwargs)
        # Prepend [0, 0] so the batch dimension is never padded.
        self.padding = [[0, 0]] + padding
        self.mode = mode
        self.constant_values = constant_values

    def call(self, x, mask=None):
        return tf.pad(
            tensor=x,
            paddings=tf.constant(self.padding),
            mode=self.mode,
            constant_values=self.constant_values,
        )

    def compute_output_shape(self, input_shape):
        # Unknown (None) dimensions stay unknown; known ones grow by the total
        # padding on that axis. (Idiom fix: ``input_shape[i] is None`` instead
        # of ``not input_shape[i] is None``.)
        return tf.TensorShape(
            [
                None if input_shape[i] is None else input_shape[i] + sum(self.padding[i])
                for i in range(len(input_shape))
            ]
        )

    def get_config(self):
        # Strip the synthetic batch entry so from_config round-trips cleanly.
        config = {
            "padding": self.padding[1:],
            "mode": self.mode,
            "constant_values": self.constant_values,
        }
        base_config = super(Padding, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
| 1,788 | 32.12963 | 89 | py |
deephyper | deephyper-master/deephyper/sklearn/classifier/_autosklearn1.py | """
This module provides ``problem_autosklearn1`` and ``run_autosklearn`` for classification tasks.
"""
import warnings
from inspect import signature
import ConfigSpace as cs
from deephyper.problem import HpProblem
from sklearn.ensemble import AdaBoostClassifier, RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from sklearn.svm import SVC
from xgboost import XGBClassifier
def minmaxstdscaler() -> Pipeline:
    """MinMax preprocesssing followed by Standard normalization.

    Returns:
        Pipeline: a pipeline with two steps ``[MinMaxScaler, StandardScaler]``.
    """
    # Scale features to [0, 1], then center/standardize them.
    return Pipeline(
        [
            ("minmaxscaler", MinMaxScaler()),
            ("stdscaler", StandardScaler()),
        ]
    )
# Search space for AutoML classification: a top-level "classifier" choice plus
# hyperparameters that are only active (via ConfigSpace conditions) for the
# relevant model(s).
problem_autosklearn1 = HpProblem()

classifier = problem_autosklearn1.add_hyperparameter(
    name="classifier",
    value=[
        "RandomForest",
        "Logistic",
        "AdaBoost",
        "KNeighbors",
        "MLP",
        "SVC",
        "XGBoost",
    ],
)

# n_estimators: active for the two ensemble methods (RandomForest, AdaBoost).
n_estimators = problem_autosklearn1.add_hyperparameter(
    name="n_estimators", value=(1, 2000, "log-uniform")
)
cond_n_estimators = cs.OrConjunction(
    cs.EqualsCondition(n_estimators, classifier, "RandomForest"),
    cs.EqualsCondition(n_estimators, classifier, "AdaBoost"),
)
problem_autosklearn1.add_condition(cond_n_estimators)

# max_depth: active for RandomForest only.
max_depth = problem_autosklearn1.add_hyperparameter(
    name="max_depth", value=(2, 100, "log-uniform")
)
cond_max_depth = cs.EqualsCondition(max_depth, classifier, "RandomForest")
problem_autosklearn1.add_condition(cond_max_depth)

# n_neighbors: active for KNeighbors only.
n_neighbors = problem_autosklearn1.add_hyperparameter(
    name="n_neighbors", value=(1, 100)
)
cond_n_neighbors = cs.EqualsCondition(n_neighbors, classifier, "KNeighbors")
problem_autosklearn1.add_condition(cond_n_neighbors)

# alpha: active for MLP only.
alpha = problem_autosklearn1.add_hyperparameter(
    name="alpha", value=(1e-5, 10.0, "log-uniform")
)
cond_alpha = cs.EqualsCondition(alpha, classifier, "MLP")
problem_autosklearn1.add_condition(cond_alpha)

# C: shared by Logistic and SVC.
C = problem_autosklearn1.add_hyperparameter(name="C", value=(1e-5, 10.0, "log-uniform"))
cond_C = cs.OrConjunction(
    cs.EqualsCondition(C, classifier, "Logistic"),
    cs.EqualsCondition(C, classifier, "SVC"),
)
problem_autosklearn1.add_condition(cond_C)

# kernel: active for SVC only.
kernel = problem_autosklearn1.add_hyperparameter(
    name="kernel", value=["linear", "poly", "rbf", "sigmoid"]
)
cond_kernel = cs.EqualsCondition(kernel, classifier, "SVC")
problem_autosklearn1.add_condition(cond_kernel)

# gamma: active for the non-linear SVC kernels (rbf, poly, sigmoid).
gamma = problem_autosklearn1.add_hyperparameter(
    name="gamma", value=(1e-5, 10.0, "log-uniform")
)
cond_gamma = cs.OrConjunction(
    cs.EqualsCondition(gamma, kernel, "rbf"),
    cs.EqualsCondition(gamma, kernel, "poly"),
    cs.EqualsCondition(gamma, kernel, "sigmoid"),
)
problem_autosklearn1.add_condition(cond_gamma)

# Mapping from the "classifier" hyperparameter value to the estimator class.
CLASSIFIERS = {
    "RandomForest": RandomForestClassifier,
    "Logistic": LogisticRegression,
    "AdaBoost": AdaBoostClassifier,
    "KNeighbors": KNeighborsClassifier,
    "MLP": MLPClassifier,
    "SVC": SVC,
    "XGBoost": XGBClassifier,
}
def run_autosklearn1(config: dict, load_data: callable) -> float:
    """Run function which can be used for AutoML classification.

    It has to be used with the ``deephyper.sklearn.classifier.problem_autosklearn1`` problem definition which corresponds to:

    .. code-block::

        Configuration space object:
            Hyperparameters:
                C, Type: UniformFloat, Range: [1e-05, 10.0], Default: 0.01, on log-scale
                alpha, Type: UniformFloat, Range: [1e-05, 10.0], Default: 0.01, on log-scale
                classifier, Type: Categorical, Choices: {RandomForest, Logistic, AdaBoost, KNeighbors, MLP, SVC, XGBoost}, Default: RandomForest
                gamma, Type: UniformFloat, Range: [1e-05, 10.0], Default: 0.01, on log-scale
                kernel, Type: Categorical, Choices: {linear, poly, rbf, sigmoid}, Default: linear
                max_depth, Type: UniformInteger, Range: [2, 100], Default: 14, on log-scale
                n_estimators, Type: UniformInteger, Range: [1, 2000], Default: 45, on log-scale
                n_neighbors, Type: UniformInteger, Range: [1, 100], Default: 50
            Conditions:
                (C | classifier == 'Logistic' || C | classifier == 'SVC')
                (gamma | kernel == 'rbf' || gamma | kernel == 'poly' || gamma | kernel == 'sigmoid')
                (n_estimators | classifier == 'RandomForest' || n_estimators | classifier == 'AdaBoost')
                alpha | classifier == 'MLP'
                kernel | classifier == 'SVC'
                max_depth | classifier == 'RandomForest'
                n_neighbors | classifier == 'KNeighbors'

    Args:
        config (dict): an hyperparameter configuration ``dict`` corresponding to the ``deephyper.sklearn.classifier.problem_autosklearn1``.
        load_data (callable): a function returning data as Numpy arrays ``(X, y)``.

    Returns:
        float: returns the accuracy on the validation set.
    """
    # NOTE: mutates the caller's dict to inject defaults (kept for backward
    # compatibility with existing callers).
    config["random_state"] = config.get("random_state", 42)
    config["n_jobs"] = config.get("n_jobs", 1)

    X, y = load_data()

    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.33, random_state=config["random_state"]
    )

    preproc = minmaxstdscaler()
    X_train = preproc.fit_transform(X_train)
    X_test = preproc.transform(X_test)

    clf_class = CLASSIFIERS[config["classifier"]]

    # Keep only the hyperparameters accepted by the selected classifier and
    # drop inactive ("nan"/"NA") values coming from the conditional space.
    sig = signature(clf_class)
    clf_allowed_params = list(sig.parameters.keys())
    clf_params = {
        k: v
        for k, v in config.items()
        if k in clf_allowed_params and v not in ("nan", "NA")
    }

    try:  # good practice to manage the fail value yourself...
        clf = clf_class(**clf_params)
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            clf.fit(X_train, y_train)
        fit_is_complete = True
    except Exception:
        # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
        # are not swallowed.
        fit_is_complete = False

    if fit_is_complete:
        y_pred = clf.predict(X_test)
        acc = accuracy_score(y_test, y_pred)
    else:
        acc = -1.0

    return acc
if __name__ == "__main__":
    # Quick inspection helper: print the search space when run as a script.
    print(problem_autosklearn1)
| 6,739 | 30.495327 | 144 | py |
deephyper | deephyper-master/deephyper/sklearn/regressor/_autosklearn1.py | """
This module provides ``problem_autosklearn1`` and ``run_autosklearn`` for regression tasks.
"""
import warnings
from inspect import signature
import ConfigSpace as cs
from deephyper.problem import HpProblem
from sklearn.ensemble import AdaBoostRegressor, RandomForestRegressor
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsRegressor
from sklearn.neural_network import MLPRegressor
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from sklearn.svm import SVR
from xgboost import XGBRegressor
def minmaxstdscaler() -> Pipeline:
    """MinMax preprocesssing followed by Standard normalization.

    Returns:
        Pipeline: a pipeline with two steps ``[MinMaxScaler, StandardScaler]``.
    """
    # Scale features to [0, 1], then center/standardize them.
    return Pipeline(
        [
            ("minmaxscaler", MinMaxScaler()),
            ("stdscaler", StandardScaler()),
        ]
    )
# Mapping from the "regressor" hyperparameter value to the estimator class.
REGRESSORS = {
    "RandomForest": RandomForestRegressor,
    "Linear": LinearRegression,
    "AdaBoost": AdaBoostRegressor,
    "KNeighbors": KNeighborsRegressor,
    "MLP": MLPRegressor,
    "SVR": SVR,
    "XGBoost": XGBRegressor,
}

# Search space for AutoML regression: a top-level "regressor" choice plus
# hyperparameters that are only active (via ConfigSpace conditions) for the
# relevant model(s).
problem_autosklearn1 = HpProblem()

regressor = problem_autosklearn1.add_hyperparameter(
    name="regressor",
    value=["RandomForest", "Linear", "AdaBoost", "KNeighbors", "MLP", "SVR", "XGBoost"],
)

# n_estimators: active for the two ensemble methods (RandomForest, AdaBoost).
n_estimators = problem_autosklearn1.add_hyperparameter(
    name="n_estimators", value=(1, 2000, "log-uniform")
)
cond_n_estimators = cs.OrConjunction(
    cs.EqualsCondition(n_estimators, regressor, "RandomForest"),
    cs.EqualsCondition(n_estimators, regressor, "AdaBoost"),
)
problem_autosklearn1.add_condition(cond_n_estimators)

# max_depth: active for RandomForest only.
max_depth = problem_autosklearn1.add_hyperparameter(
    name="max_depth", value=(2, 100, "log-uniform")
)
cond_max_depth = cs.EqualsCondition(max_depth, regressor, "RandomForest")
problem_autosklearn1.add_condition(cond_max_depth)

# n_neighbors: active for KNeighbors only.
n_neighbors = problem_autosklearn1.add_hyperparameter(
    name="n_neighbors", value=(1, 100)
)
cond_n_neighbors = cs.EqualsCondition(n_neighbors, regressor, "KNeighbors")
problem_autosklearn1.add_condition(cond_n_neighbors)

# alpha: active for MLP only.
alpha = problem_autosklearn1.add_hyperparameter(
    name="alpha", value=(1e-5, 10.0, "log-uniform")
)
cond_alpha = cs.EqualsCondition(alpha, regressor, "MLP")
problem_autosklearn1.add_condition(cond_alpha)

# C: active for SVR only.
C = problem_autosklearn1.add_hyperparameter(name="C", value=(1e-5, 10.0, "log-uniform"))
cond_C = cs.EqualsCondition(C, regressor, "SVR")
problem_autosklearn1.add_condition(cond_C)

# kernel: active for SVR only.
kernel = problem_autosklearn1.add_hyperparameter(
    name="kernel", value=["linear", "poly", "rbf", "sigmoid"]
)
cond_kernel = cs.EqualsCondition(kernel, regressor, "SVR")
problem_autosklearn1.add_condition(cond_kernel)

# gamma: active for the non-linear SVR kernels (rbf, poly, sigmoid).
gamma = problem_autosklearn1.add_hyperparameter(
    name="gamma", value=(1e-5, 10.0, "log-uniform")
)
cond_gamma = cs.OrConjunction(
    cs.EqualsCondition(gamma, kernel, "rbf"),
    cs.EqualsCondition(gamma, kernel, "poly"),
    cs.EqualsCondition(gamma, kernel, "sigmoid"),
)
problem_autosklearn1.add_condition(cond_gamma)
def run_autosklearn1(config: dict, load_data: callable) -> float:
    """Run function which can be used for AutoML regression.

    It has to be used with the ``deephyper.sklearn.regressor.problem_autosklearn1`` problem definition which corresponds to:

    .. code-block::

        Configuration space object:
            Hyperparameters:
                C, Type: UniformFloat, Range: [1e-05, 10.0], Default: 0.01, on log-scale
                alpha, Type: UniformFloat, Range: [1e-05, 10.0], Default: 0.01, on log-scale
                gamma, Type: UniformFloat, Range: [1e-05, 10.0], Default: 0.01, on log-scale
                kernel, Type: Categorical, Choices: {linear, poly, rbf, sigmoid}, Default: linear
                max_depth, Type: UniformInteger, Range: [2, 100], Default: 14, on log-scale
                n_estimators, Type: UniformInteger, Range: [1, 2000], Default: 45, on log-scale
                n_neighbors, Type: UniformInteger, Range: [1, 100], Default: 50
                regressor, Type: Categorical, Choices: {RandomForest, Linear, AdaBoost, KNeighbors, MLP, SVR, XGBoost}, Default: RandomForest
            Conditions:
                (gamma | kernel == 'rbf' || gamma | kernel == 'poly' || gamma | kernel == 'sigmoid')
                (n_estimators | regressor == 'RandomForest' || n_estimators | regressor == 'AdaBoost')
                C | regressor == 'SVR'
                alpha | regressor == 'MLP'
                kernel | regressor == 'SVR'
                max_depth | regressor == 'RandomForest'
                n_neighbors | regressor == 'KNeighbors'

    Args:
        config (dict): an hyperparameter configuration ``dict`` corresponding to the ``deephyper.sklearn.regressor.problem_autosklearn1``.
        load_data (callable): a function returning data as Numpy arrays ``(X, y)``.

    Returns:
        float: returns the :math:`R^2` on the validation set.
    """
    # NOTE: mutates the caller's dict to inject defaults (kept for backward
    # compatibility with existing callers).
    config["random_state"] = config.get("random_state", 42)
    config["n_jobs"] = config.get("n_jobs", 1)

    X, y = load_data()

    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.33, random_state=config["random_state"]
    )

    preproc = minmaxstdscaler()
    X_train = preproc.fit_transform(X_train)
    X_test = preproc.transform(X_test)

    clf_class = REGRESSORS[config["regressor"]]

    # Keep only the hyperparameters accepted by the selected regressor and
    # drop inactive ("nan"/"NA") values coming from the conditional space.
    sig = signature(clf_class)
    clf_allowed_params = list(sig.parameters.keys())
    clf_params = {
        k: v
        for k, v in config.items()
        if k in clf_allowed_params and v not in ("nan", "NA")
    }

    try:  # good practice to manage the fail value yourself...
        clf = clf_class(**clf_params)
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            clf.fit(X_train, y_train)
        fit_is_complete = True
    except Exception:
        # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
        # are not swallowed.
        fit_is_complete = False

    if fit_is_complete:
        y_pred = clf.predict(X_test)
        r2 = r2_score(y_test, y_pred)
    else:
        r2 = -1.0

    return r2
if __name__ == "__main__":
    # Quick inspection helper: print the search space when run as a script.
    print(problem_autosklearn1)
| 6,472 | 30.8867 | 141 | py |
deephyper | deephyper-master/deephyper/ensemble/_bagging_ensemble.py | import os
import traceback
import tensorflow as tf
import numpy as np
import ray
from deephyper.nas.metrics import selectMetric
from deephyper.ensemble import BaseEnsemble
from deephyper.nas.run._util import set_memory_growth_for_visible_gpus
def mse(y_true, y_pred):
    """Element-wise squared error between targets and predictions."""
    return tf.math.squared_difference(y_true, y_pred)
@ray.remote(num_cpus=1)
def model_predict(model_path, X, batch_size=32, verbose=0):
    """Perform an inference of the model located at ``model_path``.

    :meta private:

    Args:
        model_path (str): Path to the ``h5`` file to load to perform the inferencec.
        X (array): array of input data for which we perform the inference.
        batch_size (int, optional): Batch size used to perform the inferencec. Defaults to 32.
        verbose (int, optional): Verbose option. Defaults to 0.

    Returns:
        array: The prediction based on the provided input data, or ``None``
        when the model could not be loaded.
    """
    # GPU Configuration if available
    set_memory_growth_for_visible_gpus(True)
    tf.keras.backend.clear_session()

    # Portable basename extraction (the original split on "/", which breaks
    # on Windows-style paths).
    model_file = os.path.basename(model_path)

    try:
        if verbose:
            print(f"Loading model {model_file}", flush=True)
        model = tf.keras.models.load_model(model_path, compile=False)
    except Exception:
        if verbose:
            print(f"Could not load model {model_file}", flush=True)
            traceback.print_exc()
        model = None

    # Explicit None check instead of relying on the model's truthiness.
    if model is not None:
        y = model.predict(X, batch_size=batch_size)
    else:
        y = None

    return y
class BaggingEnsemble(BaseEnsemble):
    """Ensemble based on uniform averaging of the predictions of each member.

    :meta private:

    Args:
        model_dir (str): Path to directory containing saved Keras models in .h5 format.
        loss (callable): a callable taking (y_true, y_pred) as input.
        size (int, optional): Number of unique models used in the ensemble. Defaults to 5.
        verbose (bool, optional): Verbose mode. Defaults to True.
        ray_address (str, optional): Address of the Ray cluster. If "auto" it will try to connect to an existing cluster. If "" it will start a local Ray cluster. Defaults to "".
        num_cpus (int, optional): Number of CPUs allocated to load one model and predict. Defaults to 1.
        num_gpus (int, optional): Number of GPUs allocated to load one model and predict. Defaults to None.
        batch_size (int, optional): Batch size used to batchify the inference of loaded models. Defaults to 32.
        selection (str, optional): Selection strategy to build the ensemble. Value in ``["topk"]``. Defaults to ``topk``.
        mode (str, optional): Value in ``["regression", "classification"]``. Defaults to ``"regression"``.
    """

    def __init__(
        self,
        model_dir,
        loss=mse,
        size=5,
        verbose=True,
        ray_address="",
        num_cpus=1,
        num_gpus=None,
        batch_size=32,
        selection="topk",
        mode="regression",
    ):
        super().__init__(
            model_dir,
            loss,
            size,
            verbose,
            ray_address,
            num_cpus,
            num_gpus,
            batch_size,
        )
        assert selection in ["topk"]
        self.selection = selection
        assert mode in ["regression", "classification"]
        self.mode = mode

    def __repr__(self) -> str:
        out = super().__repr__()
        out += f"Mode: {self.mode}\n"
        out += f"Selection: {self.selection}\n"
        return out

    def _model_path(self, model_file):
        """Absolute path of ``model_file`` inside ``self.model_dir``."""
        return os.path.join(self.model_dir, model_file)

    def _infer_all(self, X_id, model_files):
        """Run ``model_predict`` remotely for every file; drop models that failed to load.

        Args:
            X_id: a ``ray.put`` object reference to the input data.
            model_files (list): model file names relative to ``self.model_dir``.

        Returns:
            np.ndarray: stacked predictions of shape (n_loaded_models, ...).
        """
        y_pred = ray.get(
            [
                model_predict.options(
                    num_cpus=self.num_cpus, num_gpus=self.num_gpus
                ).remote(self._model_path(f), X_id, self.batch_size, self.verbose)
                for f in model_files
            ]
        )
        return np.array([arr for arr in y_pred if arr is not None])

    def fit(self, X, y):
        """Fit the current algorithm to the provided data.

        Args:
            X (array): The input data.
            y (array): The output data.

        Returns:
            BaseEnsemble: The current fitted instance.
        """
        X_id = ray.put(X)
        model_files = self._list_files_in_model_dir()
        y_pred = self._infer_all(X_id, model_files)
        members_indexes = topk(self.loss, y_true=y, y_pred=y_pred, k=self.size)
        self.members_files = [model_files[i] for i in members_indexes]
        return self

    def predict(self, X) -> np.ndarray:
        """Execute an inference of the ensemble for the provided data.

        Args:
            X (array): An array of input data.

        Returns:
            array: The prediction.
        """
        # BUGFIX: the previous inner ``model_path`` helper here was missing a
        # ``return`` statement, so every member was loaded from a ``None`` path.
        # Both fit() and predict() now share ``self._model_path`` via _infer_all.
        X_id = ray.put(X)
        y_pred = self._infer_all(X_id, self.members_files)
        return aggregate_predictions(y_pred, regression=(self.mode == "regression"))

    def evaluate(self, X, y, metrics=None):
        """Compute metrics based on the provided data.

        Args:
            X (array): An array of input data.
            y (array): An array of true output data.
            metrics (iterable, optional): Metric names understood by
                ``deephyper.nas.metrics``. Defaults to None.

        Returns:
            dict: metric name -> scalar value, always including ``"loss"``.
        """
        scores = {}
        y_pred = self.predict(X)
        scores["loss"] = tf.reduce_mean(self.loss(y, y_pred)).numpy()
        if metrics:
            for metric_name in metrics:
                scores[metric_name] = apply_metric(metric_name, y, y_pred)
        return scores
class BaggingEnsembleRegressor(BaggingEnsemble):
    """Ensemble for regression based on uniform averaging of the predictions of each member.

    Args:
        model_dir (str): Path to directory containing saved Keras models in .h5 format.
        loss (callable): a callable taking (y_true, y_pred) as input.
        size (int, optional): Number of unique models used in the ensemble. Defaults to 5.
        verbose (bool, optional): Verbose mode. Defaults to True.
        ray_address (str, optional): Address of the Ray cluster. If "auto" it will try to connect to an existing cluster. If "" it will start a local Ray cluster. Defaults to "".
        num_cpus (int, optional): Number of CPUs allocated to load one model and predict. Defaults to 1.
        num_gpus (int, optional): Number of GPUs allocated to load one model and predict. Defaults to None.
        selection (str, optional): Selection strategy to build the ensemble. Value in ``["topk"]``. Defaults to ``topk``.
    """

    def __init__(
        self,
        model_dir,
        loss=mse,
        size=5,
        verbose=True,
        ray_address="",
        num_cpus=1,
        num_gpus=None,
        selection="topk",
    ):
        # BUGFIX: arguments are now passed by keyword. The parent accepts
        # ``batch_size`` between ``num_gpus`` and ``selection``; the previous
        # positional call bound ``selection`` ("topk") to ``batch_size``.
        super().__init__(
            model_dir=model_dir,
            loss=loss,
            size=size,
            verbose=verbose,
            ray_address=ray_address,
            num_cpus=num_cpus,
            num_gpus=num_gpus,
            selection=selection,
            mode="regression",
        )
class BaggingEnsembleClassifier(BaggingEnsemble):
    """Ensemble for classification based on uniform averaging of the predictions of each member.

    Args:
        model_dir (str): Path to directory containing saved Keras models in .h5 format.
        loss (callable): a callable taking (y_true, y_pred) as input.
        size (int, optional): Number of unique models used in the ensemble. Defaults to 5.
        verbose (bool, optional): Verbose mode. Defaults to True.
        ray_address (str, optional): Address of the Ray cluster. If "auto" it will try to connect to an existing cluster. If "" it will start a local Ray cluster. Defaults to "".
        num_cpus (int, optional): Number of CPUs allocated to load one model and predict. Defaults to 1.
        num_gpus (int, optional): Number of GPUs allocated to load one model and predict. Defaults to None.
        selection (str, optional): Selection strategy to build the ensemble. Value in ``["topk"]``. Defaults to ``topk``.
    """

    def __init__(
        self,
        model_dir,
        loss=mse,
        size=5,
        verbose=True,
        ray_address="",
        num_cpus=1,
        num_gpus=None,
        selection="topk",
    ):
        # BUGFIX: arguments are now passed by keyword. The parent accepts
        # ``batch_size`` between ``num_gpus`` and ``selection``; the previous
        # positional call bound ``selection`` ("topk") to ``batch_size``.
        super().__init__(
            model_dir=model_dir,
            loss=loss,
            size=size,
            verbose=verbose,
            ray_address=ray_address,
            num_cpus=num_cpus,
            num_gpus=num_gpus,
            selection=selection,
            mode="classification",
        )
def apply_metric(metric_name, y_true, y_pred) -> float:
    """Compute the requested metric, averaged over all elements.

    :meta private:
    Args:
        metric_name (str|callable): If ``str`` then it needs to be a metric available in ``deephyper.nas.metrics``.
        y_true (array): Array of true predictions.
        y_pred (array): Array of predicted predictions.
    Returns:
        float: a scalar value of the computed metric.
    """
    metric_fn = selectMetric(metric_name)
    y_true_t = tf.convert_to_tensor(y_true, dtype=np.float32)
    y_pred_t = tf.convert_to_tensor(y_pred, dtype=np.float32)
    return tf.reduce_mean(metric_fn(y_true_t, y_pred_t)).numpy()
def aggregate_predictions(y_pred, regression=True):
    """Aggregate stacked member predictions into one ensemble prediction.

    :meta private:
    Args:
        y_pred (np.array): Predictions array of shape (n_models, n_samples, n_outputs).
        regression (bool): Boolean (True) if it is a regression (False) if it is a classification.
    Return:
        The uniform average of member outputs for regression, or the argmax
        over the summed class scores (axis 1) for classification.
    """
    summed = np.sum(y_pred, axis=0)
    if regression:
        # uniform average over the model axis
        return summed / np.shape(y_pred)[0]
    # classification: label with the highest aggregated score per sample
    return np.argmax(summed, axis=1)
def topk(loss_func, y_true, y_pred, k=2) -> list:
    """Select the Top-k models to be part of the ensemble. A model can appear only once in the ensemble for this strategy.

    :meta private:
    Args:
        loss_func (callable): loss function.
        y_true (array): Array of true predictions.
        y_pred (array): Array of predicted predictions.
        k (int, optional): Number of models composing the ensemble. Defaults to 2.
    Returns:
        list: a list of model indexes composing the ensemble.
    """
    # per-model average loss, shape (n_models, n_outputs)
    per_model_loss = tf.reduce_mean(loss_func(y_true, y_pred), axis=1).numpy()
    ranking = np.argsort(per_model_loss, axis=0)
    return ranking[:k].reshape(-1).tolist()
| 11,171 | 32.752266 | 178 | py |
deephyper | deephyper-master/deephyper/ensemble/__init__.py | """The ``ensemble`` module provides a way to build ensembles of checkpointed deep neural networks from ``tensorflow.keras``, with ``.h5`` format, to regularize and boost predictive performance as well as estimate better uncertainties.
"""
from deephyper.ensemble._base_ensemble import BaseEnsemble
from deephyper.ensemble._bagging_ensemble import (
BaggingEnsembleRegressor,
BaggingEnsembleClassifier,
)
from deephyper.ensemble._uq_bagging_ensemble import (
UQBaggingEnsembleRegressor,
UQBaggingEnsembleClassifier,
)
__all__ = [
"BaseEnsemble",
"BaggingEnsembleRegressor",
"BaggingEnsembleClassifier",
"UQBaggingEnsembleRegressor",
"UQBaggingEnsembleClassifier",
]
| 702 | 34.15 | 234 | py |
deephyper | deephyper-master/deephyper/ensemble/_uq_bagging_ensemble.py | import os
import traceback
import numpy as np
import ray
import tensorflow as tf
import tensorflow_probability as tfp
from deephyper.ensemble import BaseEnsemble
from deephyper.nas.metrics import selectMetric
from deephyper.nas.run._util import set_memory_growth_for_visible_gpus
from deephyper.core.exceptions import DeephyperRuntimeError
from pandas import DataFrame
def nll(y, rv_y):
    """Negative log-likelihood of ``y`` under the predicted distribution ``rv_y``."""
    log_likelihood = rv_y.log_prob(y)
    return -log_likelihood
# Reduction.NONE keeps one loss value per sample so callers can average
# per model themselves.
cce_obj = tf.keras.losses.CategoricalCrossentropy(
    reduction=tf.keras.losses.Reduction.NONE
)
def cce(y_true, y_pred):
    """Categorical cross-entropy loss.

    ``y_true`` is broadcast to ``y_pred``'s shape — presumably so a single
    label array can be compared against predictions stacked over several
    ensemble members (TODO confirm against callers).
    """
    return cce_obj(tf.broadcast_to(y_true, y_pred.shape), y_pred)
@ray.remote(num_cpus=1)
def model_predict(model_path, X, batch_size=32, verbose=0):
    """Perform an inference of the model located at ``model_path``.

    Ray remote task. Models whose output is a TFP distribution with ``loc``
    and ``scale`` have those two tensors concatenated on the last axis; plain
    models go through ``model.predict``.

    :meta private:
    Args:
        model_path (str): Path to the ``h5`` file to load to perform the inference.
        X (array): array of input data for which we perform the inference.
        batch_size (int, optional): Batch size used to perform the inference. Defaults to 32.
        verbose (int, optional): Verbose option. Defaults to 0.
    Returns:
        array: The prediction based on the provided input data, or ``None``
        when the model could not be loaded.
    """
    # local imports: the remote task runs in a fresh worker process
    import tensorflow as tf
    import tensorflow_probability as tfp
    # GPU Configuration if available
    set_memory_growth_for_visible_gpus(True)
    tf.keras.backend.clear_session()
    model_file = model_path.split("/")[-1]
    try:
        if verbose:
            print(f"Loading model {model_file}", end="\n", flush=True)
        model = tf.keras.models.load_model(model_path, compile=False)
    except Exception:
        # a broken checkpoint must not abort the whole ensemble
        if verbose:
            print(f"Could not load model {model_file}", flush=True)
        traceback.print_exc()
        model = None
    if model is None:
        return None
    # dataset: multi-input models receive a dict keyed "input_<i>"
    if type(X) is list:
        dataset = tf.data.Dataset.from_tensor_slices(
            {f"input_{i}": Xi for i, Xi in enumerate(X)}
        )
    else:
        dataset = tf.data.Dataset.from_tensor_slices(X)
    dataset = dataset.batch(batch_size)
    def batch_predict(dataset, convert_func=lambda x: x):
        # manual batched forward pass; convert_func maps each batch output
        # to a numpy-concatenable array
        y_list = []
        for batch in dataset:
            y = model(batch, training=False)
            y_list.append(convert_func(y))
        y = np.concatenate(y_list, axis=0)
        return y
    y_dist = model(
        next(iter(dataset)), training=False
    )  # just to test the type of the output
    if isinstance(y_dist, tfp.distributions.Distribution):
        if hasattr(y_dist, "loc") and hasattr(y_dist, "scale"):
            def convert_func(y_dist):
                # pack [means | stds] on the last axis
                return np.concatenate([y_dist.loc, y_dist.scale], axis=-1)
            y = batch_predict(dataset, convert_func)
        else:
            raise DeephyperRuntimeError(
                "Distribution doesn't have 'loc' or 'scale' attributes!"
            )
    else:
        y = model.predict(X, batch_size=batch_size)
    return y
class UQBaggingEnsemble(BaseEnsemble):
    """Ensemble with uncertainty quantification based on uniform averaging of the predictions of each member.

    :meta private:
    Args:
        model_dir (str): Path to directory containing saved Keras models in .h5 format.
        loss (callable): a callable taking (y_true, y_pred) as input.
        size (int, optional): Number of unique models used in the ensemble. Defaults to 5.
        verbose (bool, optional): Verbose mode. Defaults to True.
        ray_address (str, optional): Address of the Ray cluster. If "auto" it will try to connect to an existing cluster. If "" it will start a local Ray cluster. Defaults to "".
        num_cpus (int, optional): Number of CPUs allocated to load one model and predict. Defaults to 1.
        num_gpus (int, optional): Number of GPUs allocated to load one model and predict. Defaults to None.
        batch_size (int, optional): Batch size used to batchify the inference of loaded models. Defaults to 32.
        selection (str, optional): Selection strategy to build the ensemble. Value in ``["topk", "caruana"]``. Defaults to ``topk``.
        mode (str, optional): Value in ``["regression", "classification"]``. Defaults to ``"regression"``.
    """
    def __init__(
        self,
        model_dir,
        loss=nll,
        size=5,
        verbose=True,
        ray_address="",
        num_cpus=1,
        num_gpus=None,
        batch_size=32,
        selection="topk",
        mode="regression",
    ):
        super().__init__(
            model_dir,
            loss,
            size,
            verbose,
            ray_address,
            num_cpus,
            num_gpus,
            batch_size,
        )
        assert selection in ["topk", "caruana"]
        self.selection = selection
        assert mode in ["regression", "classification"]
        self.mode = mode
    def __repr__(self) -> str:
        out = super().__repr__()
        out += f"Mode: {self.mode}\n"
        out += f"Selection: {self.selection}\n"
        return out
    def _select_members(self, loss_func, y_true, y_pred, k=2, verbose=0):
        # dispatch to the configured member-selection strategy
        if self.selection == "topk":
            func = topk
        elif self.selection == "caruana":
            func = greedy_caruana
        else:
            raise NotImplementedError
        return func(loss_func, y_true, y_pred, k, verbose)
    def fit(self, X, y):
        """Select ensemble members by scoring every checkpoint on (X, y)."""
        X_id = ray.put(X)
        model_files = self._list_files_in_model_dir()
        def model_path(f):
            return os.path.join(self.model_dir, f)
        # one remote inference per checkpoint; failed loads come back as None
        y_pred = ray.get(
            [
                model_predict.options(
                    num_cpus=self.num_cpus, num_gpus=self.num_gpus
                ).remote(model_path(f), X_id, self.batch_size, self.verbose)
                for f in model_files
            ]
        )
        y_pred = np.array([arr for arr in y_pred if arr is not None])
        self._members_indexes = self._select_members(
            self.loss, y_true=y, y_pred=y_pred, k=self.size
        )
        self.members_files = [model_files[i] for i in self._members_indexes]
    def predict(self, X) -> np.ndarray:
        """Aggregate member predictions on X (distribution for regression, probabilities for classification)."""
        # make predictions
        X_id = ray.put(X)
        def model_path(f):
            return os.path.join(self.model_dir, f)
        y_pred = ray.get(
            [
                model_predict.options(
                    num_cpus=self.num_cpus, num_gpus=self.num_gpus
                ).remote(model_path(f), X_id, self.batch_size, self.verbose)
                for f in self.members_files
            ]
        )
        y_pred = np.array([arr for arr in y_pred if arr is not None])
        y = aggregate_predictions(y_pred, regression=(self.mode == "regression"))
        return y
    def evaluate(self, X, y, metrics=None, scaler_y=None):
        """Score the ensemble on (X, y); ``scaler_y`` is applied to both y and predictions before scoring."""
        scores = {}
        y_pred = self.predict(X)
        if scaler_y:
            y_pred = scaler_y(y_pred)
            y = scaler_y(y)
        scores["loss"] = tf.reduce_mean(self.loss(y, y_pred)).numpy()
        if metrics:
            if type(metrics) is list:
                for metric in metrics:
                    if callable(metric):
                        metric_name = metric.__name__
                    else:
                        metric_name = metric
                    scores[metric_name] = apply_metric(metric, y, y_pred)
            elif type(metrics) is dict:
                for metric_name, metric in metrics.items():
                    scores[metric_name] = apply_metric(metric, y, y_pred)
            else:
                raise ValueError("Metrics should be of type list or dict.")
        return scores
class UQBaggingEnsembleRegressor(UQBaggingEnsemble):
    """Ensemble with uncertainty quantification for regression based on uniform averaging of the predictions of each member.

    Args:
        model_dir (str): Path to directory containing saved Keras models in .h5 format.
        loss (callable): a callable taking (y_true, y_pred) as input.
        size (int, optional): Number of unique models used in the ensemble. Defaults to 5.
        verbose (bool, optional): Verbose mode. Defaults to True.
        ray_address (str, optional): Address of the Ray cluster. If "auto" it will try to connect to an existing cluster. If "" it will start a local Ray cluster. Defaults to "".
        num_cpus (int, optional): Number of CPUs allocated to load one model and predict. Defaults to 1.
        num_gpus (int, optional): Number of GPUs allocated to load one model and predict. Defaults to None.
        batch_size (int, optional): Batch size used to batchify the inference of loaded models. Defaults to 32.
        selection (str, optional): Selection strategy to build the ensemble. Value in ``["topk", "caruana"]``. Defaults to ``topk``.
    """
    def __init__(
        self,
        model_dir,
        loss=nll,
        size=5,
        verbose=True,
        ray_address="",
        num_cpus=1,
        num_gpus=None,
        batch_size=32,
        selection="topk",
    ):
        super().__init__(
            model_dir,
            loss,
            size,
            verbose,
            ray_address,
            num_cpus,
            num_gpus,
            batch_size,
            selection,
            mode="regression",
        )
    def predict_var_decomposition(self, X):
        """Execute an inference of the ensemble for the provided data with uncertainty quantification estimates. The **aleatoric uncertainty** corresponds to the expected value of learned variance of each model composing the ensemble :math:`\mathbf{E}[\sigma_\\theta^2(\mathbf{x})]`. The **epistemic uncertainty** corresponds to the variance of learned mean estimates of each model composing the ensemble :math:`\mathbf{V}[\mu_\\theta(\mathbf{x})]`.

        Args:
            X (array): An array of input data.

        Returns:
            y, u1, u2: where ``y`` is the mixture distribution, ``u1`` is the aleatoric component of the variance of ``y`` and ``u2`` is the epistemic component of the variance of ``y``.
        """
        # make predictions
        X_id = ray.put(X)
        def model_path(f):
            return os.path.join(self.model_dir, f)
        y_pred = ray.get(
            [
                model_predict.options(
                    num_cpus=self.num_cpus, num_gpus=self.num_gpus
                ).remote(model_path(f), X_id, self.batch_size, self.verbose)
                for f in self.members_files
            ]
        )
        y_pred = np.array([arr for arr in y_pred if arr is not None])
        y = aggregate_predictions(y_pred, regression=(self.mode == "regression"))
        # variance decomposition: the last axis of y_pred packs [means | stds]
        # (see model_predict's convert_func), so split it at the midpoint
        mid = np.shape(y_pred)[-1] // 2
        selection = [slice(0, s) for s in np.shape(y_pred)]
        selection_loc = selection[:]
        selection_std = selection[:]
        selection_loc[-1] = slice(0, mid)
        selection_std[-1] = slice(mid, np.shape(y_pred)[-1])
        loc = y_pred[tuple(selection_loc)]
        scale = y_pred[tuple(selection_std)]
        # aleatoric: mean over models of the per-model variance
        aleatoric_unc = np.mean(np.square(scale), axis=0)
        # epistemic: variance over models of the per-model means
        epistemic_unc = np.square(np.std(loc, axis=0))
        # dist, aleatoric uq, epistemic uq
        return y, aleatoric_unc, epistemic_unc
class UQBaggingEnsembleClassifier(UQBaggingEnsemble):
    """Ensemble with uncertainty quantification for classification based on uniform averaging of the predictions of each member.

    Args:
        model_dir (str): Path to directory containing saved Keras models in .h5 format.
        loss (callable): a callable taking (y_true, y_pred) as input.
        size (int, optional): Number of unique models used in the ensemble. Defaults to 5.
        verbose (bool, optional): Verbose mode. Defaults to True.
        ray_address (str, optional): Address of the Ray cluster. If "auto" it will try to connect to an existing cluster. If "" it will start a local Ray cluster. Defaults to "".
        num_cpus (int, optional): Number of CPUs allocated to load one model and predict. Defaults to 1.
        num_gpus (int, optional): Number of GPUs allocated to load one model and predict. Defaults to None.
        batch_size (int, optional): Batch size used to batchify the inference of loaded models. Defaults to 32.
        selection (str, optional): Selection strategy to build the ensemble. Value in ``["topk", "caruana"]``. Defaults to ``topk``.
    """
    def __init__(
        self,
        model_dir,
        loss=cce,
        size=5,
        verbose=True,
        ray_address="",
        num_cpus=1,
        num_gpus=None,
        batch_size=32,
        selection="topk",
    ):
        super().__init__(
            model_dir,
            loss,
            size,
            verbose,
            ray_address,
            num_cpus,
            num_gpus,
            batch_size,
            selection,
            mode="classification",
        )
def apply_metric(metric_name, y_true, y_pred) -> float:
    """Compute the requested metric, reduced to a scalar by averaging.

    :meta private:
    Args:
        metric_name (str|callable): If ``str`` then it needs to be a metric available in ``deephyper.nas.metrics``.
        y_true (array): Array of true predictions.
        y_pred (array): Array of predicted predictions.
    Returns:
        float: a scalar value of the computed metric.
    """
    metric_fn = selectMetric(metric_name)

    def _to_tensor(arr):
        # numpy inputs are converted; tensors/distributions pass through
        if type(arr) is np.ndarray:
            return tf.convert_to_tensor(arr, dtype=np.float32)
        return arr

    value = metric_fn(_to_tensor(y_true), _to_tensor(y_pred))
    if tf.size(value) >= 1:
        value = tf.reduce_mean(value)
    return value.numpy()
def aggregate_predictions(y_pred, regression=True):
    """Build an ensemble prediction from stacked member predictions.

    :meta private:
    Args:
        y_pred (np.array): Predictions array of shape (n_models, n_samples, n_outputs).
        regression (bool): Boolean (True) if it is a regression (False) if it is a classification.
    Return:
        A TFP Normal Distribution in the case of regression and a np.array with average probabilities
        in the case of classification.
    """
    if not regression:
        # classification: uniform average of member probabilities
        return np.mean(y_pred[:, :, :], axis=0)
    # regression: the last axis packs [means | stds]; split at the midpoint
    mid = np.shape(y_pred)[-1] // 2
    loc = y_pred[..., :mid]
    scale = y_pred[..., mid:]
    mean_loc = np.mean(loc, axis=0)
    # mixture std: sqrt(E[mu^2 + sigma^2] - E[mu]^2)
    second_moment = np.mean(np.square(loc) + np.square(scale), axis=0)
    mean_scale = np.sqrt(second_moment - np.square(mean_loc))
    return tfp.distributions.Normal(loc=mean_loc, scale=mean_scale)
def topk(loss_func, y_true, y_pred, k=2, verbose=0):
    """Select the top-k models to be part of the ensemble. A model can appear only once in the ensemble for this strategy.

    :meta private:
    """
    # when the prediction axis is twice the target axis, the last axis packs
    # [means | stds]: rebuild the Normal distributions before scoring
    if np.shape(y_true)[-1] * 2 == np.shape(y_pred)[-1]:  # regression
        mid = np.shape(y_true)[-1]
        y_pred = tfp.distributions.Normal(
            loc=y_pred[:, :, :mid], scale=y_pred[:, :, mid:]
        )
    # per-model mean loss, shape (n_models, n_outputs)
    per_model_loss = tf.reduce_mean(loss_func(y_true, y_pred), axis=1).numpy()
    if verbose:
        print(f"Top-{k} losses: {per_model_loss.reshape(-1)[:k]}")
    ranking = np.argsort(per_model_loss, axis=0)
    return ranking[:k].reshape(-1).tolist()
def greedy_caruana(loss_func, y_true, y_pred, k=2, verbose=0):
    """Greedy (Caruana-style) ensemble selection: start from the single best
    model, then repeatedly add whichever model lowers the ensemble loss,
    stopping early when no candidate improves it. Note models may repeat in
    the returned list (weighted averaging), unlike ``topk``.

    :meta private:
    """
    # twice as many prediction outputs as targets => [means | stds] packing
    regression = np.shape(y_true)[-1] * 2 == np.shape(y_pred)[-1]
    n_models = np.shape(y_pred)[0]
    if regression:  # rebuild per-model Normal distributions for scoring
        mid = np.shape(y_true)[-1]
        selection = [slice(0, s) for s in np.shape(y_pred)]
        selection_loc = selection[:]
        selection_std = selection[:]
        selection_loc[-1] = slice(0, mid)
        selection_std[-1] = slice(mid, np.shape(y_pred)[-1])
        y_pred_ = tfp.distributions.Normal(
            loc=y_pred[tuple(selection_loc)],
            scale=y_pred[tuple(selection_std)],
        )
    else:
        y_pred_ = y_pred
    # per-model mean loss, shape (n_models,)
    losses = tf.reduce_mean(
        tf.reshape(loss_func(y_true, y_pred_), [n_models, -1]), axis=1
    ).numpy()
    assert n_models == np.shape(losses)[0]
    # seed the ensemble with the single best model
    i_min = np.nanargmin(losses)
    loss_min = losses[i_min]
    ensemble_members = [i_min]
    if verbose:
        print(f"Loss: {loss_min:.3f} - Ensemble: {ensemble_members}")
    def loss(y_true, y_pred):
        return tf.reduce_mean(loss_func(y_true, y_pred)).numpy()
    # grow until k *unique* members or no candidate improves the loss
    while len(np.unique(ensemble_members)) < k:
        losses = [
            loss(
                y_true,
                aggregate_predictions(
                    y_pred[ensemble_members + [i]], regression=regression
                ),
            )
            for i in range(n_models)  # iterate over all models
        ]
        i_min_ = np.nanargmin(losses)
        loss_min_ = losses[i_min_]
        if loss_min_ < loss_min:
            if (
                len(np.unique(ensemble_members)) == 1 and ensemble_members[0] == i_min_
            ):  # numerical errors...
                return ensemble_members
            loss_min = loss_min_
            ensemble_members.append(i_min_)
            if verbose:
                print(f"Loss: {loss_min:.3f} - Ensemble: {ensemble_members}")
        else:
            return ensemble_members
    return ensemble_members
def __convert_to_block_df(a, y_col=None, group_col=None, block_col=None, melted=False):
    """Normalize block-design data into a long-format DataFrame with canonical
    column names ``groups``/``blocks``/``y``.

    Accepts a wide DataFrame (rows = blocks, columns = groups), an
    already-melted DataFrame (column names given by the *_col args), or any
    array-like (treated as wide). Returns ``(df, y_col, group_col, block_col)``
    where the three names are always ``"y"``, ``"groups"``, ``"blocks"``.
    """
    # TODO: refactor conversion of block data to DataFrame
    if melted and not all([i is not None for i in [block_col, group_col, y_col]]):
        raise ValueError(
            "`block_col`, `group_col`, `y_col` should be explicitly specified if using melted data"
        )
    if isinstance(a, DataFrame) and not melted:
        # wide DataFrame -> melt to long format
        x = a.copy(deep=True)
        group_col = "groups"
        block_col = "blocks"
        y_col = "y"
        x.columns.name = group_col
        x.index.name = block_col
        x = x.reset_index().melt(
            id_vars=block_col, var_name=group_col, value_name=y_col
        )
    elif isinstance(a, DataFrame) and melted:
        # already long: just rebuild with canonical column names
        x = DataFrame.from_dict(
            {"groups": a[group_col], "blocks": a[block_col], "y": a[y_col]}
        )
    elif not isinstance(a, DataFrame):
        # array-like: wrap in a DataFrame with positional row/column labels
        x = np.array(a)
        x = DataFrame(x, index=np.arange(x.shape[0]), columns=np.arange(x.shape[1]))
        if not melted:
            group_col = "groups"
            block_col = "blocks"
            y_col = "y"
            x.columns.name = group_col
            x.index.name = block_col
            x = x.reset_index().melt(
                id_vars=block_col, var_name=group_col, value_name=y_col
            )
        else:
            x.rename(
                columns={group_col: "groups", block_col: "blocks", y_col: "y"},
                inplace=True,
            )
            group_col = "groups"
            block_col = "blocks"
            y_col = "y"
    return x, y_col, group_col, block_col
| 19,600 | 34.703097 | 449 | py |
Pyramid-Attention-Networks | Pyramid-Attention-Networks-master/Demosaic/code/main.py | import torch
import utility
import data
import model
import loss
from option import args
from trainer import Trainer
torch.manual_seed(args.seed)  # fix RNG so runs are reproducible
checkpoint = utility.checkpoint(args)  # experiment directory / logging handle
def main():
    """Entry point: run video testing, or the train/test loop, per CLI args."""
    # NOTE(review): ``model`` below first names the imported module, then is
    # rebound (via ``global``) to a Model instance in the video branch.
    global model
    if args.data_test == ['video']:
        from videotester import VideoTester
        model = model.Model(args,checkpoint)
        print('total params: %.2fM' % (sum(p.numel() for p in model.parameters())/1000000.0))
        t = VideoTester(args, model, checkpoint)
        t.test()
    else:
        # checkpoint.ok is False when the experiment dir failed to initialize
        if checkpoint.ok:
            loader = data.Data(args)
            _model = model.Model(args, checkpoint)
            print('total params:%.2fM' % (sum(p.numel() for p in _model.parameters())/1000000.0))
            _loss = loss.Loss(args, checkpoint) if not args.test_only else None
            t = Trainer(args, loader, _model, _loss, checkpoint)
            # alternate train/test epochs until the trainer signals completion
            while not t.terminate():
                t.train()
                t.test()
            checkpoint.done()
if __name__ == '__main__':
main()
| 1,026 | 27.527778 | 97 | py |
Pyramid-Attention-Networks | Pyramid-Attention-Networks-master/Demosaic/code/utility.py | import os
import math
import time
import datetime
from multiprocessing import Process
from multiprocessing import Queue
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import imageio
import torch
import torch.optim as optim
import torch.optim.lr_scheduler as lrs
class timer():
    """Stopwatch that accumulates elapsed wall-clock time across intervals."""

    def __init__(self):
        self.acc = 0
        self.tic()

    def tic(self):
        """Start (or restart) timing the current interval."""
        self.t0 = time.time()

    def toc(self, restart=False):
        """Return seconds since the last ``tic``; optionally restart the clock."""
        elapsed = time.time() - self.t0
        if restart:
            self.t0 = time.time()
        return elapsed

    def hold(self):
        """Bank the current interval into the accumulator."""
        self.acc += self.toc()

    def release(self):
        """Return the accumulated time and zero the accumulator."""
        total = self.acc
        self.acc = 0
        return total

    def reset(self):
        """Discard any accumulated time."""
        self.acc = 0
class checkpoint():
    """Experiment bookkeeping: owns the experiment directory, the PSNR log,
    the text log files, the PSNR plots and a background pool of processes
    that write result images to disk."""
    def __init__(self, args):
        self.args = args
        self.ok = True
        self.log = torch.Tensor()  # PSNR history, grown via add_log
        now = datetime.datetime.now().strftime('%Y-%m-%d-%H:%M:%S')
        if not args.load:
            # fresh run: directory named after --save (or the timestamp)
            if not args.save:
                args.save = now
            self.dir = os.path.join('..', 'experiment', args.save)
        else:
            # resume: reload the PSNR log from the existing directory
            self.dir = os.path.join('..', 'experiment', args.load)
            if os.path.exists(self.dir):
                self.log = torch.load(self.get_path('psnr_log.pt'))
                print('Continue from epoch {}...'.format(len(self.log)))
            else:
                args.load = ''
        if args.reset:
            # wipe any previous experiment with the same name
            os.system('rm -rf ' + self.dir)
            args.load = ''
        os.makedirs(self.dir, exist_ok=True)
        os.makedirs(self.get_path('model'), exist_ok=True)
        for d in args.data_test:
            os.makedirs(self.get_path('results-{}'.format(d)), exist_ok=True)
        # append to the log when resuming, otherwise start fresh
        open_type = 'a' if os.path.exists(self.get_path('log.txt'))else 'w'
        self.log_file = open(self.get_path('log.txt'), open_type)
        with open(self.get_path('config.txt'), open_type) as f:
            f.write(now + '\n\n')
            for arg in vars(args):
                f.write('{}: {}\n'.format(arg, getattr(args, arg)))
            f.write('\n')
        self.n_processes = 8  # background image-writer processes
    def get_path(self, *subdir):
        """Join path components under the experiment directory."""
        return os.path.join(self.dir, *subdir)
    def save(self, trainer, epoch, is_best=False):
        """Persist model, loss state, optimizer and the PSNR log/plots."""
        trainer.model.save(self.get_path('model'), epoch, is_best=is_best)
        trainer.loss.save(self.dir)
        trainer.loss.plot_loss(self.dir, epoch)
        self.plot_psnr(epoch)
        trainer.optimizer.save(self.dir)
        torch.save(self.log, self.get_path('psnr_log.pt'))
    def add_log(self, log):
        """Append one epoch's PSNR entry to the history tensor."""
        self.log = torch.cat([self.log, log])
    def write_log(self, log, refresh=False):
        """Print and append a line to log.txt; ``refresh`` reopens the file to flush."""
        print(log)
        self.log_file.write(log + '\n')
        if refresh:
            self.log_file.close()
            self.log_file = open(self.get_path('log.txt'), 'a')
    def done(self):
        self.log_file.close()
    def plot_psnr(self, epoch):
        """Save a PSNR-vs-epoch PDF plot per test dataset (one line per scale)."""
        axis = np.linspace(1, epoch, epoch)
        for idx_data, d in enumerate(self.args.data_test):
            label = 'SR on {}'.format(d)
            fig = plt.figure()
            plt.title(label)
            for idx_scale, scale in enumerate(self.args.scale):
                plt.plot(
                    axis,
                    self.log[:, idx_data, idx_scale].numpy(),
                    label='Scale {}'.format(scale)
                )
            plt.legend()
            plt.xlabel('Epochs')
            plt.ylabel('PSNR')
            plt.grid(True)
            plt.savefig(self.get_path('test_{}.pdf'.format(d)))
            plt.close(fig)
    def begin_background(self):
        """Start worker processes that write queued (filename, tensor) images."""
        self.queue = Queue()
        def bg_target(queue):
            while True:
                if not queue.empty():
                    filename, tensor = queue.get()
                    if filename is None: break  # (None, None) is the stop sentinel
                    imageio.imwrite(filename, tensor.numpy())
        self.process = [
            Process(target=bg_target, args=(self.queue,)) \
            for _ in range(self.n_processes)
        ]
        for p in self.process: p.start()
    def end_background(self):
        """Send one stop sentinel per worker, drain the queue, then join."""
        for _ in range(self.n_processes): self.queue.put((None, None))
        while not self.queue.empty(): time.sleep(1)
        for p in self.process: p.join()
    def save_results(self, dataset, filename, save_list, scale):
        """Queue result tensors (demosaiced/low/high quality) for async saving as PNG."""
        if self.args.save_results:
            filename = self.get_path(
                'results-{}'.format(dataset.dataset.name),
                '{}_x{}_'.format(filename, scale)
            )
            postfix = ('DM', 'LQ', 'HQ')
            for v, p in zip(save_list, postfix):
                # rescale to 0-255 and convert CHW float -> HWC uint8
                normalized = v[0].mul(255 / self.args.rgb_range)
                tensor_cpu = normalized.byte().permute(1, 2, 0).cpu()
                self.queue.put(('{}{}.png'.format(filename, p), tensor_cpu))
def quantize(img, rgb_range):
    """Snap ``img`` onto the 8-bit intensity grid of ``rgb_range``, clamping
    out-of-range values, and return it in the original value range."""
    scale = 255 / rgb_range
    quantized = img.mul(scale).clamp(0, 255).round()
    return quantized.div(scale)
def calc_psnr(sr, hr, scale, rgb_range, dataset=None):
    """Compute the PSNR (dB) between prediction ``sr`` and reference ``hr``.

    Args:
        sr (Tensor): predicted image batch (N, C, H, W).
        hr (Tensor): reference batch; a single-element tensor means "no
            ground truth" and yields 0.
        scale (int): upscaling factor; kept for interface compatibility —
            this variant evaluates the full image without border shaving.
        rgb_range (float): maximum intensity value of the inputs.
        dataset: optional dataset wrapper; on benchmark datasets residuals
            with many channels are converted to luminance first.

    Returns:
        float: PSNR in dB.
    """
    if hr.nelement() == 1: return 0
    diff = (sr - hr) / rgb_range
    if dataset and dataset.dataset.benchmark:
        # NOTE(review): threshold ``> 5`` looks odd — the upstream EDSR code
        # uses ``> 1`` (any multi-channel image); confirm it is intentional.
        if diff.size(1) > 5:
            # RGB -> luminance using BT.601 coefficients (scaled by 256)
            gray_coeffs = [65.738, 129.057, 25.064]
            convert = diff.new_tensor(gray_coeffs).view(1, 3, 1, 1) / 256
            diff = diff.mul(convert).sum(dim=1)
    # The original computed a ``shave`` border width here but never applied it
    # (``diff[..., :, :]`` keeps every pixel); that dead code was removed.
    mse = diff.pow(2).mean()
    return -10 * math.log10(mse)
def make_optimizer(args, target):
    '''
    make optimizer and scheduler together

    Builds a torch optimizer (SGD / ADAM / RMSprop, chosen by args.optimizer)
    over the trainable parameters of ``target``, then wraps it in a subclass
    that carries its own MultiStepLR scheduler plus save/load helpers.
    '''
    # optimizer: only parameters with requires_grad are optimized
    trainable = filter(lambda x: x.requires_grad, target.parameters())
    kwargs_optimizer = {'lr': args.lr, 'weight_decay': args.weight_decay}
    if args.optimizer == 'SGD':
        optimizer_class = optim.SGD
        kwargs_optimizer['momentum'] = args.momentum
    elif args.optimizer == 'ADAM':
        optimizer_class = optim.Adam
        kwargs_optimizer['betas'] = args.betas
        kwargs_optimizer['eps'] = args.epsilon
    elif args.optimizer == 'RMSprop':
        optimizer_class = optim.RMSprop
        kwargs_optimizer['eps'] = args.epsilon
    # scheduler: LR decays by args.gamma at epochs parsed from "e1-e2-..."
    milestones = list(map(lambda x: int(x), args.decay.split('-')))
    kwargs_scheduler = {'milestones': milestones, 'gamma': args.gamma}
    scheduler_class = lrs.MultiStepLR
    class CustomOptimizer(optimizer_class):
        # optimizer subclass that owns its scheduler and (de)serialization
        def __init__(self, *args, **kwargs):
            super(CustomOptimizer, self).__init__(*args, **kwargs)
        def _register_scheduler(self, scheduler_class, **kwargs):
            self.scheduler = scheduler_class(self, **kwargs)
        def save(self, save_dir):
            torch.save(self.state_dict(), self.get_dir(save_dir))
        def load(self, load_dir, epoch=1):
            # restore optimizer state, then fast-forward the scheduler
            self.load_state_dict(torch.load(self.get_dir(load_dir)))
            if epoch > 1:
                for _ in range(epoch): self.scheduler.step()
        def get_dir(self, dir_path):
            return os.path.join(dir_path, 'optimizer.pt')
        def schedule(self):
            self.scheduler.step()
        def get_lr(self):
            return self.scheduler.get_lr()[0]
        def get_last_epoch(self):
            return self.scheduler.last_epoch
    optimizer = CustomOptimizer(trainable, **kwargs_optimizer)
    optimizer._register_scheduler(scheduler_class, **kwargs_scheduler)
    return optimizer
| 7,458 | 30.340336 | 77 | py |
Pyramid-Attention-Networks | Pyramid-Attention-Networks-master/Demosaic/code/dataloader.py | import threading
import random
import torch
import torch.multiprocessing as multiprocessing
from torch.utils.data import DataLoader
from torch.utils.data import SequentialSampler
from torch.utils.data import RandomSampler
from torch.utils.data import BatchSampler
from torch.utils.data import _utils
from torch.utils.data.dataloader import _DataLoaderIter
from torch.utils.data._utils import collate
from torch.utils.data._utils import signal_handling
from torch.utils.data._utils import MP_STATUS_CHECK_INTERVAL
from torch.utils.data._utils import ExceptionWrapper
from torch.utils.data._utils import IS_WINDOWS
from torch.utils.data._utils.worker import ManagerWatchdog
from torch._six import queue
def _ms_loop(dataset, index_queue, data_queue, done_event, collate_fn, scale, seed, init_fn, worker_id):
    """Multi-scale worker loop run in a child process.

    Behaves like torch's stock worker loop, except that during training with
    multiple scales it picks a random scale index per batch (via
    ``dataset.set_scale``) and appends that index to the collated samples.
    Work items arrive on *index_queue* as ``(idx, batch_indices)``; results
    (or an ExceptionWrapper) are put on *data_queue*.
    """
    # BUG FIX: sys.exc_info() is used in the exception path below, but `sys`
    # was never imported in this module, so any dataset error raised a
    # NameError inside the worker instead of being propagated. Import locally.
    import sys
    try:
        collate._use_shared_memory = True
        signal_handling._set_worker_signal_handlers()
        torch.set_num_threads(1)
        random.seed(seed)
        torch.manual_seed(seed)
        data_queue.cancel_join_thread()
        if init_fn is not None:
            init_fn(worker_id)
        watchdog = ManagerWatchdog()
        while watchdog.is_alive():
            try:
                r = index_queue.get(timeout=MP_STATUS_CHECK_INTERVAL)
            except queue.Empty:
                continue
            if r is None:
                # None is the shutdown sentinel sent by the main process.
                assert done_event.is_set()
                return
            elif done_event.is_set():
                # Shutting down: drain remaining work items without processing.
                continue
            idx, batch_indices = r
            try:
                idx_scale = 0
                if len(scale) > 1 and dataset.train:
                    # Randomize the SR scale per batch during training.
                    idx_scale = random.randrange(0, len(scale))
                    dataset.set_scale(idx_scale)
                samples = collate_fn([dataset[i] for i in batch_indices])
                # The consumer expects the chosen scale index as the last element.
                samples.append(idx_scale)
            except Exception:
                data_queue.put((idx, ExceptionWrapper(sys.exc_info())))
            else:
                data_queue.put((idx, samples))
                del samples
    except KeyboardInterrupt:
        # SIGINT is handled by the main process; workers exit quietly.
        pass
class _MSDataLoaderIter(_DataLoaderIter):
    """Iterator for MSDataLoader: a re-implementation of the (old) torch
    _DataLoaderIter multi-process setup that spawns `_ms_loop` workers so
    each batch can carry a randomly chosen SR scale index.

    NOTE(review): this __init__ does NOT call super().__init__ and only
    replicates the multi-worker branch of the parent; the num_workers == 0
    path appears to leave the iterator only partially initialized -- confirm
    this loader is always used with n_threads > 0.
    """
    def __init__(self, loader):
        self.dataset = loader.dataset
        self.scale = loader.scale
        self.collate_fn = loader.collate_fn
        self.batch_sampler = loader.batch_sampler
        self.num_workers = loader.num_workers
        self.pin_memory = loader.pin_memory and torch.cuda.is_available()
        self.timeout = loader.timeout
        self.sample_iter = iter(self.batch_sampler)
        base_seed = torch.LongTensor(1).random_().item()
        if self.num_workers > 0:
            self.worker_init_fn = loader.worker_init_fn
            self.worker_queue_idx = 0
            self.worker_result_queue = multiprocessing.Queue()
            self.batches_outstanding = 0
            self.worker_pids_set = False
            self.shutdown = False
            self.send_idx = 0
            self.rcvd_idx = 0
            self.reorder_dict = {}
            self.done_event = multiprocessing.Event()
            # NOTE(review): base_seed is recomputed here (as a 0-d tensor),
            # shadowing the value computed above -- looks like leftover
            # duplication from the copied torch source.
            base_seed = torch.LongTensor(1).random_()[0]
            self.index_queues = []
            self.workers = []
            for i in range(self.num_workers):
                index_queue = multiprocessing.Queue()
                index_queue.cancel_join_thread()
                # Each worker runs the scale-aware loop with seed base_seed + i.
                w = multiprocessing.Process(
                    target=_ms_loop,
                    args=(
                        self.dataset,
                        index_queue,
                        self.worker_result_queue,
                        self.done_event,
                        self.collate_fn,
                        self.scale,
                        base_seed + i,
                        self.worker_init_fn,
                        i
                    )
                )
                w.daemon = True
                w.start()
                self.index_queues.append(index_queue)
                self.workers.append(w)
            if self.pin_memory:
                # Extra thread moves worker results into pinned (page-locked) memory.
                self.data_queue = queue.Queue()
                pin_memory_thread = threading.Thread(
                    target=_utils.pin_memory._pin_memory_loop,
                    args=(
                        self.worker_result_queue,
                        self.data_queue,
                        torch.cuda.current_device(),
                        self.done_event
                    )
                )
                pin_memory_thread.daemon = True
                pin_memory_thread.start()
                self.pin_memory_thread = pin_memory_thread
            else:
                self.data_queue = self.worker_result_queue
            # Register worker PIDs so torch can clean them up on SIGCHLD.
            _utils.signal_handling._set_worker_pids(
                id(self), tuple(w.pid for w in self.workers)
            )
            _utils.signal_handling._set_SIGCHLD_handler()
            self.worker_pids_set = True
            # Prime the pipeline with two batches per worker.
            for _ in range(2 * self.num_workers):
                self._put_indices()
class MSDataLoader(DataLoader):
    """DataLoader variant that records the SR scale list and hands out a
    scale-aware iterator (_MSDataLoaderIter) instead of torch's default."""

    def __init__(self, cfg, *args, **kwargs):
        # The worker count always comes from the experiment config.
        super(MSDataLoader, self).__init__(
            *args, num_workers=cfg.n_threads, **kwargs
        )
        self.scale = cfg.scale

    def __iter__(self):
        return _MSDataLoaderIter(self)
| 5,259 | 32.081761 | 104 | py |
Pyramid-Attention-Networks | Pyramid-Attention-Networks-master/Demosaic/code/videotester.py | import os
import math
import utility
from data import common
import torch
import cv2
from tqdm import tqdm
class VideoTester():
    """Runs a trained SR model frame-by-frame over a video file
    (args.dir_demo) and writes one upscaled .avi per scale."""

    def __init__(self, args, my_model, ckp):
        self.args = args
        self.scale = args.scale
        self.ckp = ckp
        self.model = my_model
        # Base name of the input video, used to name the output files.
        self.filename, _ = os.path.splitext(os.path.basename(args.dir_demo))

    def test(self):
        """Decode, super-resolve and re-encode the demo video for every scale."""
        torch.set_grad_enabled(False)
        self.ckp.write_log('\nEvaluation on video:')
        self.model.eval()
        timer_test = utility.timer()
        for idx_scale, scale in enumerate(self.scale):
            # Re-open the capture per scale so each pass starts at frame 0.
            vidcap = cv2.VideoCapture(self.args.dir_demo)
            total_frames = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))
            # Output writer: same FPS, frame size scaled up by `scale`.
            vidwri = cv2.VideoWriter(
                self.ckp.get_path('{}_x{}.avi'.format(self.filename, scale)),
                cv2.VideoWriter_fourcc(*'XVID'),
                vidcap.get(cv2.CAP_PROP_FPS),
                (
                    int(scale * vidcap.get(cv2.CAP_PROP_FRAME_WIDTH)),
                    int(scale * vidcap.get(cv2.CAP_PROP_FRAME_HEIGHT))
                )
            )
            tqdm_test = tqdm(range(total_frames), ncols=80)
            for _ in tqdm_test:
                success, lr = vidcap.read()
                if not success: break
                lr, = common.set_channel(lr, n_channels=self.args.n_colors)
                lr, = common.np2Tensor(lr, rgb_range=self.args.rgb_range)
                lr, = self.prepare(lr.unsqueeze(0))
                sr = self.model(lr, idx_scale)
                sr = utility.quantize(sr, self.args.rgb_range).squeeze(0)
                # Back to HWC uint8 for the cv2 writer.
                normalized = sr * 255 / self.args.rgb_range
                ndarr = normalized.byte().permute(1, 2, 0).cpu().numpy()
                vidwri.write(ndarr)
            vidcap.release()
            vidwri.release()
        self.ckp.write_log(
            'Total: {:.2f}s\n'.format(timer_test.toc()), refresh=True
        )
        torch.set_grad_enabled(True)

    def prepare(self, *args):
        """Move tensors to the working device, optionally casting to half."""
        device = torch.device('cpu' if self.args.cpu else 'cuda')
        def _prepare(tensor):
            if self.args.precision == 'half': tensor = tensor.half()
            return tensor.to(device)
        return [_prepare(a) for a in args]
| 2,280 | 30.246575 | 77 | py |
Pyramid-Attention-Networks | Pyramid-Attention-Networks-master/Demosaic/code/trainer.py | import os
import math
from decimal import Decimal
import utility
import torch
import torch.nn.utils as utils
from tqdm import tqdm
class Trainer():
    """Drives one-epoch training and benchmark evaluation of an SR model,
    delegating logging/checkpointing to the `ckp` object."""

    def __init__(self, args, loader, my_model, my_loss, ckp):
        self.args = args
        self.scale = args.scale
        self.ckp = ckp
        self.loader_train = loader.loader_train
        self.loader_test = loader.loader_test
        self.model = my_model
        self.loss = my_loss
        self.optimizer = utility.make_optimizer(args, self.model)
        # When resuming, restore optimizer state and fast-forward its scheduler.
        if self.args.load != '':
            self.optimizer.load(ckp.dir, epoch=len(ckp.log))
        self.error_last = 1e8

    def train(self):
        """Run one training epoch over loader_train."""
        self.loss.step()
        epoch = self.optimizer.get_last_epoch() + 1
        lr = self.optimizer.get_lr()
        self.ckp.write_log(
            '[Epoch {}]\tLearning rate: {:.2e}'.format(epoch, Decimal(lr))
        )
        self.loss.start_log()
        self.model.train()
        timer_data, timer_model = utility.timer(), utility.timer()
        # TEMP: training is pinned to the first scale index.
        self.loader_train.dataset.set_scale(0)
        for batch, (lr, hr, _,) in enumerate(self.loader_train):
            # NOTE: `lr` is rebound here from learning rate to the LR batch.
            lr, hr = self.prepare(lr, hr)
            timer_data.hold()
            timer_model.tic()
            self.optimizer.zero_grad()
            sr = self.model(lr, 0)
            loss = self.loss(sr, hr)
            loss.backward()
            # Optional gradient value clipping.
            if self.args.gclip > 0:
                utils.clip_grad_value_(
                    self.model.parameters(),
                    self.args.gclip
                )
            self.optimizer.step()
            timer_model.hold()
            if (batch + 1) % self.args.print_every == 0:
                self.ckp.write_log('[{}/{}]\t{}\t{:.1f}+{:.1f}s'.format(
                    (batch + 1) * self.args.batch_size,
                    len(self.loader_train.dataset),
                    self.loss.display_loss(batch),
                    timer_model.release(),
                    timer_data.release()))
            timer_data.tic()
        self.loss.end_log(len(self.loader_train))
        self.error_last = self.loss.log[-1, -1]
        self.optimizer.schedule()

    def test(self):
        """Evaluate on every test set and scale, log PSNR, save best checkpoint."""
        torch.set_grad_enabled(False)
        epoch = self.optimizer.get_last_epoch()
        self.ckp.write_log('\nEvaluation:')
        # One PSNR slot per (dataset, scale) for this epoch.
        self.ckp.add_log(
            torch.zeros(1, len(self.loader_test), len(self.scale))
        )
        self.model.eval()
        timer_test = utility.timer()
        if self.args.save_results: self.ckp.begin_background()
        for idx_data, d in enumerate(self.loader_test):
            for idx_scale, scale in enumerate(self.scale):
                d.dataset.set_scale(idx_scale)
                for lr, hr, filename in tqdm(d, ncols=80):
                    lr, hr = self.prepare(lr, hr)
                    sr = self.model(lr, idx_scale)
                    sr = utility.quantize(sr, self.args.rgb_range)
                    save_list = [sr]
                    # Accumulate PSNR; averaged over the set below.
                    self.ckp.log[-1, idx_data, idx_scale] += utility.calc_psnr(
                        sr, hr, scale, self.args.rgb_range, dataset=d
                    )
                    if self.args.save_gt:
                        save_list.extend([lr, hr])
                    if self.args.save_results:
                        self.ckp.save_results(d, filename[0], save_list, scale)
                self.ckp.log[-1, idx_data, idx_scale] /= len(d)
                # Best PSNR (value, epoch index) over all logged epochs.
                best = self.ckp.log.max(0)
                self.ckp.write_log(
                    '[{} x{}]\tPSNR: {:.3f} (Best: {:.3f} @epoch {})'.format(
                        d.dataset.name,
                        scale,
                        self.ckp.log[-1, idx_data, idx_scale],
                        best[0][idx_data, idx_scale],
                        best[1][idx_data, idx_scale] + 1
                    )
                )
        self.ckp.write_log('Forward: {:.2f}s\n'.format(timer_test.toc()))
        self.ckp.write_log('Saving...')
        if self.args.save_results:
            self.ckp.end_background()
        if not self.args.test_only:
            # is_best keys off the first dataset/scale only.
            self.ckp.save(self, epoch, is_best=(best[1][0, 0] + 1 == epoch))
        self.ckp.write_log(
            'Total: {:.2f}s\n'.format(timer_test.toc()), refresh=True
        )
        torch.set_grad_enabled(True)

    def prepare(self, *args):
        """Move tensors to the working device, optionally casting to half."""
        device = torch.device('cpu' if self.args.cpu else 'cuda')
        def _prepare(tensor):
            if self.args.precision == 'half': tensor = tensor.half()
            return tensor.to(device)
        return [_prepare(a) for a in args]

    def terminate(self):
        """Return True when the run should stop (test-only, or epochs exhausted)."""
        if self.args.test_only:
            self.test()
            return True
        else:
            epoch = self.optimizer.get_last_epoch() + 1
            return epoch >= self.args.epochs
| 4,820 | 31.795918 | 79 | py |
Pyramid-Attention-Networks | Pyramid-Attention-Networks-master/Demosaic/code/loss/adversarial.py | import utility
from types import SimpleNamespace
from model import common
from loss import discriminator
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
class Adversarial(nn.Module):
    """Adversarial loss wrapper: owns the discriminator and its optimizer.

    Each forward() call performs `gan_k` discriminator updates on the spot,
    then returns the generator-side loss for the caller to backpropagate.
    Supported gan_type values: 'GAN', 'WGAN', 'WGAN_GP', 'RGAN'.
    """

    def __init__(self, args, gan_type):
        super(Adversarial, self).__init__()
        self.gan_type = gan_type
        self.gan_k = args.gan_k
        self.dis = discriminator.Discriminator(args)
        if gan_type == 'WGAN_GP':
            # see https://arxiv.org/pdf/1704.00028.pdf pp.4
            optim_dict = {
                'optimizer': 'ADAM',
                'betas': (0, 0.9),
                'epsilon': 1e-8,
                'lr': 1e-5,
                'weight_decay': args.weight_decay,
                'decay': args.decay,
                'gamma': args.gamma
            }
            optim_args = SimpleNamespace(**optim_dict)
        else:
            optim_args = args
        self.optimizer = utility.make_optimizer(optim_args, self.dis)

    def forward(self, fake, real):
        # updating discriminator...
        # self.loss accumulates the mean discriminator loss over gan_k steps.
        self.loss = 0
        fake_detach = fake.detach()  # do not backpropagate through G
        for _ in range(self.gan_k):
            self.optimizer.zero_grad()
            # d: B x 1 tensor
            d_fake = self.dis(fake_detach)
            d_real = self.dis(real)
            retain_graph = False
            if self.gan_type == 'GAN':
                loss_d = self.bce(d_real, d_fake)
            elif self.gan_type.find('WGAN') >= 0:
                loss_d = (d_fake - d_real).mean()
                if self.gan_type.find('GP') >= 0:
                    # Gradient penalty on random interpolates between real/fake.
                    epsilon = torch.rand_like(fake).view(-1, 1, 1, 1)
                    hat = fake_detach.mul(1 - epsilon) + real.mul(epsilon)
                    hat.requires_grad = True
                    d_hat = self.dis(hat)
                    gradients = torch.autograd.grad(
                        outputs=d_hat.sum(), inputs=hat,
                        retain_graph=True, create_graph=True, only_inputs=True
                    )[0]
                    gradients = gradients.view(gradients.size(0), -1)
                    gradient_norm = gradients.norm(2, dim=1)
                    gradient_penalty = 10 * gradient_norm.sub(1).pow(2).mean()
                    loss_d += gradient_penalty
            # from ESRGAN: Enhanced Super-Resolution Generative Adversarial Networks
            elif self.gan_type == 'RGAN':
                # Relativistic GAN: compare each score to the other batch's mean.
                better_real = d_real - d_fake.mean(dim=0, keepdim=True)
                better_fake = d_fake - d_real.mean(dim=0, keepdim=True)
                loss_d = self.bce(better_real, better_fake)
                # d_real's graph is reused for the generator loss below.
                retain_graph = True
            # Discriminator update
            self.loss += loss_d.item()
            loss_d.backward(retain_graph=retain_graph)
            self.optimizer.step()
            if self.gan_type == 'WGAN':
                # Plain WGAN enforces the Lipschitz constraint by weight clipping.
                for p in self.dis.parameters():
                    p.data.clamp_(-1, 1)
        self.loss /= self.gan_k
        # updating generator...
        d_fake_bp = self.dis(fake)  # for backpropagation, use fake as it is
        if self.gan_type == 'GAN':
            label_real = torch.ones_like(d_fake_bp)
            loss_g = F.binary_cross_entropy_with_logits(d_fake_bp, label_real)
        elif self.gan_type.find('WGAN') >= 0:
            loss_g = -d_fake_bp.mean()
        elif self.gan_type == 'RGAN':
            # NOTE(review): d_real here is from the LAST discriminator
            # iteration above -- confirm this reuse is intended.
            better_real = d_real - d_fake_bp.mean(dim=0, keepdim=True)
            better_fake = d_fake_bp - d_real.mean(dim=0, keepdim=True)
            loss_g = self.bce(better_fake, better_real)
        # Generator loss
        return loss_g

    def state_dict(self, *args, **kwargs):
        # Bundle discriminator weights and optimizer state in one dict.
        state_discriminator = self.dis.state_dict(*args, **kwargs)
        state_optimizer = self.optimizer.state_dict()
        return dict(**state_discriminator, **state_optimizer)

    def bce(self, real, fake):
        """Standard GAN BCE: real scores toward 1, fake scores toward 0."""
        label_real = torch.ones_like(real)
        label_fake = torch.zeros_like(fake)
        bce_real = F.binary_cross_entropy_with_logits(real, label_real)
        bce_fake = F.binary_cross_entropy_with_logits(fake, label_fake)
        bce_loss = bce_real + bce_fake
        return bce_loss
# Some references
# https://github.com/kuc2477/pytorch-wgan-gp/blob/master/model.py
# OR
# https://github.com/caogang/wgan-gp/blob/master/gan_cifar10.py
| 4,393 | 37.884956 | 84 | py |
Pyramid-Attention-Networks | Pyramid-Attention-Networks-master/Demosaic/code/loss/discriminator.py | from model import common
import torch.nn as nn
class Discriminator(nn.Module):
    '''
    CNN critic for the adversarial loss; forward() returns raw
    (unnormalized) scores, one per input image.
    '''
    def __init__(self, args):
        super(Discriminator, self).__init__()

        def conv_bn_lrelu(cin, cout, stride=1):
            # 3x3 conv -> BN -> LeakyReLU(0.2); bias omitted since BN absorbs it.
            return nn.Sequential(
                nn.Conv2d(cin, cout, 3, padding=1, stride=stride, bias=False),
                nn.BatchNorm2d(cout),
                nn.LeakyReLU(negative_slope=0.2, inplace=True)
            )

        depth = 7
        width = 64
        layers = [conv_bn_lrelu(args.n_colors, width)]
        cin = width
        for i in range(depth):
            # Even steps halve the spatial size; odd steps double the channels.
            if i % 2 == 1:
                stride, cout = 1, cin * 2
            else:
                stride, cout = 2, cin
            layers.append(conv_bn_lrelu(cin, cout, stride=stride))
            cin = cout
        self.features = nn.Sequential(*layers)

        # Spatial extent after (depth + 1) // 2 stride-2 stages.
        feat_hw = args.patch_size // (2**((depth + 1) // 2))
        self.classifier = nn.Sequential(
            nn.Linear(cin * feat_hw**2, 1024),
            nn.LeakyReLU(negative_slope=0.2, inplace=True),
            nn.Linear(1024, 1)
        )

    def forward(self, x):
        feats = self.features(x)
        return self.classifier(feats.view(feats.size(0), -1))
| 1,595 | 27.5 | 79 | py |
Pyramid-Attention-Networks | Pyramid-Attention-Networks-master/Demosaic/code/loss/vgg.py | from model import common
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models as models
class VGG(nn.Module):
    """Perceptual loss on frozen VGG19 features.

    conv_index containing '22' selects features up to relu2_2 (first 8
    layers); '54' selects up to relu5_4 (first 35 layers). Inputs are first
    normalized with ImageNet statistics scaled to rgb_range.
    """

    def __init__(self, conv_index, rgb_range=1):
        super(VGG, self).__init__()
        layers = [m for m in models.vgg19(pretrained=True).features]
        if conv_index.find('22') >= 0:
            self.vgg = nn.Sequential(*layers[:8])
        elif conv_index.find('54') >= 0:
            self.vgg = nn.Sequential(*layers[:35])
        # ImageNet mean/std, with std scaled to the working RGB range.
        vgg_mean = (0.485, 0.456, 0.406)
        vgg_std = (0.229 * rgb_range, 0.224 * rgb_range, 0.225 * rgb_range)
        self.sub_mean = common.MeanShift(rgb_range, vgg_mean, vgg_std)
        # The feature extractor is fixed; only the SR model is trained.
        for p in self.parameters():
            p.requires_grad = False

    def forward(self, sr, hr):
        def extract(t):
            return self.vgg(self.sub_mean(t))

        feat_sr = extract(sr)
        with torch.no_grad():
            feat_hr = extract(hr.detach())
        return F.mse_loss(feat_sr, feat_hr)
| 1,106 | 28.918919 | 75 | py |
Pyramid-Attention-Networks | Pyramid-Attention-Networks-master/Demosaic/code/loss/__init__.py | import os
from importlib import import_module
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
class Loss(nn.modules.loss._Loss):
    """Composite loss parsed from args.loss, e.g. '1*L1+0.1*VGG54+0.1*GAN'.

    Keeps a per-epoch log tensor with one column per component (plus 'DIS'
    for the discriminator loss and 'Total' when more than one component).
    """

    def __init__(self, args, ckp):
        super(Loss, self).__init__()
        print('Preparing loss function:')
        self.n_GPUs = args.n_GPUs
        self.loss = []
        self.loss_module = nn.ModuleList()
        # Each spec is 'weight*TYPE'.
        for loss in args.loss.split('+'):
            weight, loss_type = loss.split('*')
            if loss_type == 'MSE':
                loss_function = nn.MSELoss()
            elif loss_type == 'L1':
                loss_function = nn.L1Loss()
            elif loss_type.find('VGG') >= 0:
                # e.g. 'VGG54' -> conv_index '54'.
                module = import_module('loss.vgg')
                loss_function = getattr(module, 'VGG')(
                    loss_type[3:],
                    rgb_range=args.rgb_range
                )
            elif loss_type.find('GAN') >= 0:
                module = import_module('loss.adversarial')
                loss_function = getattr(module, 'Adversarial')(
                    args,
                    loss_type
                )
            self.loss.append({
                'type': loss_type,
                'weight': float(weight),
                'function': loss_function}
            )
            if loss_type.find('GAN') >= 0:
                # Extra log-only slot for the discriminator loss.
                self.loss.append({'type': 'DIS', 'weight': 1, 'function': None})
        if len(self.loss) > 1:
            # Log-only slot for the weighted sum.
            self.loss.append({'type': 'Total', 'weight': 0, 'function': None})
        for l in self.loss:
            if l['function'] is not None:
                print('{:.3f} * {}'.format(l['weight'], l['type']))
                self.loss_module.append(l['function'])
        self.log = torch.Tensor()
        device = torch.device('cpu' if args.cpu else 'cuda')
        self.loss_module.to(device)
        if args.precision == 'half': self.loss_module.half()
        if not args.cpu and args.n_GPUs > 1:
            self.loss_module = nn.DataParallel(
                self.loss_module, range(args.n_GPUs)
            )
        if args.load != '': self.load(ckp.dir, cpu=args.cpu)

    def forward(self, sr, hr):
        """Return the weighted sum of all components and update the log row."""
        losses = []
        for i, l in enumerate(self.loss):
            if l['function'] is not None:
                loss = l['function'](sr, hr)
                effective_loss = l['weight'] * loss
                losses.append(effective_loss)
                self.log[-1, i] += effective_loss.item()
            elif l['type'] == 'DIS':
                # The adversarial component (previous entry) stores its
                # discriminator loss on itself.
                self.log[-1, i] += self.loss[i - 1]['function'].loss
        loss_sum = sum(losses)
        if len(self.loss) > 1:
            self.log[-1, -1] += loss_sum.item()
        return loss_sum

    def step(self):
        # Advance LR schedulers of sub-losses that own one (e.g. Adversarial).
        for l in self.get_loss_module():
            if hasattr(l, 'scheduler'):
                l.scheduler.step()

    def start_log(self):
        # Append a fresh all-zero row for the coming epoch.
        self.log = torch.cat((self.log, torch.zeros(1, len(self.loss))))

    def end_log(self, n_batches):
        # Convert the accumulated sums into per-batch means.
        self.log[-1].div_(n_batches)

    def display_loss(self, batch):
        """Format running per-component averages after `batch + 1` batches."""
        n_samples = batch + 1
        log = []
        for l, c in zip(self.loss, self.log[-1]):
            log.append('[{}: {:.4f}]'.format(l['type'], c / n_samples))
        return ''.join(log)

    def plot_loss(self, apath, epoch):
        """Save one loss-vs-epoch PDF per component under *apath*."""
        axis = np.linspace(1, epoch, epoch)
        for i, l in enumerate(self.loss):
            label = '{} Loss'.format(l['type'])
            fig = plt.figure()
            plt.title(label)
            plt.plot(axis, self.log[:, i].numpy(), label=label)
            plt.legend()
            plt.xlabel('Epochs')
            plt.ylabel('Loss')
            plt.grid(True)
            plt.savefig(os.path.join(apath, 'loss_{}.pdf'.format(l['type'])))
            plt.close(fig)

    def get_loss_module(self):
        # Unwrap DataParallel when present.
        if self.n_GPUs == 1:
            return self.loss_module
        else:
            return self.loss_module.module

    def save(self, apath):
        torch.save(self.state_dict(), os.path.join(apath, 'loss.pt'))
        torch.save(self.log, os.path.join(apath, 'loss_log.pt'))

    def load(self, apath, cpu=False):
        """Restore loss weights and log; replay sub-loss schedulers."""
        if cpu:
            kwargs = {'map_location': lambda storage, loc: storage}
        else:
            kwargs = {}
        self.load_state_dict(torch.load(
            os.path.join(apath, 'loss.pt'),
            **kwargs
        ))
        self.log = torch.load(os.path.join(apath, 'loss_log.pt'))
        for l in self.get_loss_module():
            if hasattr(l, 'scheduler'):
                for _ in range(len(self.log)): l.scheduler.step()
| 4,659 | 31.361111 | 80 | py |
Pyramid-Attention-Networks | Pyramid-Attention-Networks-master/Demosaic/code/utils/tools.py | import os
import torch
import numpy as np
from PIL import Image
import torch.nn.functional as F
def normalize(x):
    """Rescale *x* in-place from [0, 1] to [-1, 1] and return it."""
    x.mul_(2)
    return x.add_(-1)
def same_padding(images, ksizes, strides, rates):
    """Zero-pad an NCHW batch TensorFlow-'SAME' style for a sliding window of
    size *ksizes*, step *strides* and dilation *rates*; return the padded tensor."""
    assert len(images.size()) == 4
    _, _, rows, cols = images.size()
    out_rows = (rows + strides[0] - 1) // strides[0]
    out_cols = (cols + strides[1] - 1) // strides[1]
    # Effective kernel extent once dilation is accounted for.
    eff_k_row = (ksizes[0] - 1) * rates[0] + 1
    eff_k_col = (ksizes[1] - 1) * rates[1] + 1
    pad_rows = max(0, (out_rows - 1) * strides[0] + eff_k_row - rows)
    pad_cols = max(0, (out_cols - 1) * strides[1] + eff_k_col - cols)
    # Split the padding; any odd remainder goes to the bottom/right edge.
    top = int(pad_rows / 2.)
    left = int(pad_cols / 2.)
    pad = (left, pad_cols - left, top, pad_rows - top)
    return torch.nn.ZeroPad2d(pad)(images)
def extract_image_patches(images, ksizes, strides, rates, padding='same'):
    """
    Slide a (possibly dilated) window over an NCHW batch and flatten each
    window into the channel dimension.
    :param images: [batch, channels, in_rows, in_cols] 4-D tensor
    :param ksizes: [ksize_rows, ksize_cols] window size
    :param strides: [stride_rows, stride_cols] window step
    :param rates: [dilation_rows, dilation_cols]
    :param padding: 'same' pads so every position is covered; 'valid' does not pad
    :return: tensor of shape [N, C*k*k, L], L = number of window positions
    """
    assert len(images.size()) == 4
    assert padding in ['same', 'valid']
    if padding == 'same':
        images = same_padding(images, ksizes, strides, rates)
    elif padding != 'valid':
        # Unreachable given the assert above; kept for parity with the original.
        raise NotImplementedError('Unsupported padding type: {}.\
                Only "same" or "valid" are supported.'.format(padding))
    unfolder = torch.nn.Unfold(kernel_size=ksizes,
                               dilation=rates,
                               padding=0,
                               stride=strides)
    return unfolder(images)  # [N, C*k*k, L]
def reduce_mean(x, axis=None, keepdim=False):
    """Mean of *x* over *axis* (an int or an iterable of ints; None = all dims).

    Fixes two defects of the original: ``if not axis`` treated ``axis=0``
    (falsy) as "reduce everything", and a bare int could not be iterated.
    """
    if axis is None:
        axis = range(x.dim())
    elif isinstance(axis, int):
        axis = (axis,)
    # Reduce highest dims first so remaining indices stay valid without keepdim.
    for dim in sorted(axis, reverse=True):
        x = torch.mean(x, dim=dim, keepdim=keepdim)
    return x
def reduce_std(x, axis=None, keepdim=False):
    """Standard deviation (torch's unbiased default) of *x* over *axis*
    (an int or an iterable of ints; None = all dims).

    Fixes two defects of the original: ``if not axis`` treated ``axis=0``
    (falsy) as "reduce everything", and a bare int could not be iterated.
    """
    if axis is None:
        axis = range(x.dim())
    elif isinstance(axis, int):
        axis = (axis,)
    # Reduce highest dims first so remaining indices stay valid without keepdim.
    for dim in sorted(axis, reverse=True):
        x = torch.std(x, dim=dim, keepdim=keepdim)
    return x
def reduce_sum(x, axis=None, keepdim=False):
    """Sum of *x* over *axis* (an int or an iterable of ints; None = all dims).

    Fixes two defects of the original: ``if not axis`` treated ``axis=0``
    (falsy) as "reduce everything", and a bare int could not be iterated.
    """
    if axis is None:
        axis = range(x.dim())
    elif isinstance(axis, int):
        axis = (axis,)
    # Reduce highest dims first so remaining indices stay valid without keepdim.
    for dim in sorted(axis, reverse=True):
        x = torch.sum(x, dim=dim, keepdim=keepdim)
    return x
| 2,777 | 32.878049 | 79 | py |
Pyramid-Attention-Networks | Pyramid-Attention-Networks-master/Demosaic/code/data/benchmark.py | import os
from data import common
from data import srdata
import numpy as np
import torch
import torch.utils.data as data
class Benchmark(srdata.SRData):
    """Benchmark test-set wrapper (Set5/Set14/...); only the on-disk
    directory layout differs from the generic SRData."""

    def __init__(self, args, name='', train=True, benchmark=True):
        # Always constructed with benchmark=True regardless of the argument.
        super(Benchmark, self).__init__(
            args, name=name, train=train, benchmark=True
        )

    def _set_filesystem(self, dir_data):
        # Layout: <dir_data>/benchmark/<name>/{HR, LR_bicubic[L]}
        self.apath = os.path.join(dir_data, 'benchmark', self.name)
        self.dir_hr = os.path.join(self.apath, 'HR')
        lr_dirname = 'LR_bicubicL' if self.input_large else 'LR_bicubic'
        self.dir_lr = os.path.join(self.apath, lr_dirname)
        # HR files carry no extra suffix; LR files end in .png.
        self.ext = ('', '.png')
| 703 | 26.076923 | 67 | py |
Pyramid-Attention-Networks | Pyramid-Attention-Networks-master/Demosaic/code/data/video.py | import os
from data import common
import cv2
import numpy as np
import imageio
import torch
import torch.utils.data as data
class Video(data.Dataset):
    """Frame-by-frame dataset over a single video file (args.dir_demo) for
    demo inference. Each item is (frame_tensor, -1, name); -1 stands in for
    the missing HR target."""

    def __init__(self, args, name='Video', train=False, benchmark=False):
        self.args = args
        self.name = name
        self.scale = args.scale
        self.idx_scale = 0
        self.train = False
        self.do_eval = False
        self.benchmark = benchmark
        self.filename, _ = os.path.splitext(os.path.basename(args.dir_demo))
        self.vidcap = cv2.VideoCapture(args.dir_demo)
        self.n_frames = 0
        self.total_frames = int(self.vidcap.get(cv2.CAP_PROP_FRAME_COUNT))

    def __getitem__(self, idx):
        # Frames are decoded sequentially; `idx` is intentionally ignored.
        success, lr = self.vidcap.read()
        if success:
            self.n_frames += 1
            lr, = common.set_channel(lr, n_channels=self.args.n_colors)
            lr_t, = common.np2Tensor(lr, rgb_range=self.args.rgb_range)
            return lr_t, -1, '{}_{:0>5}'.format(self.filename, self.n_frames)
        else:
            # BUG FIX: was `vidcap.release()` (NameError -- no local of that
            # name); the capture object is the instance attribute.
            self.vidcap.release()
            return None

    def __len__(self):
        return self.total_frames

    def set_scale(self, idx_scale):
        self.idx_scale = idx_scale
| 1,207 | 25.844444 | 77 | py |
Pyramid-Attention-Networks | Pyramid-Attention-Networks-master/Demosaic/code/data/srdata.py | import os
import glob
import random
import pickle
from data import common
import numpy as np
import imageio
import torch
import torch.utils.data as data
class SRData(data.Dataset):
    """Generic paired LR/HR dataset with optional on-disk binarization.

    With args.ext containing 'sep', images are pickled once to <apath>/bin
    and loaded from those .pt files afterwards; with 'img' (or for
    benchmarks) they are decoded from the original files every time.
    """

    def __init__(self, args, name='', train=True, benchmark=False):
        self.args = args
        self.name = name
        self.train = train
        self.split = 'train' if train else 'test'
        self.do_eval = True
        self.benchmark = benchmark
        # VDSR consumes pre-upscaled (input-large) LR images.
        self.input_large = (args.model == 'VDSR')
        self.scale = args.scale
        self.idx_scale = 0
        self._set_filesystem(args.dir_data)
        if args.ext.find('img') < 0:
            path_bin = os.path.join(self.apath, 'bin')
            os.makedirs(path_bin, exist_ok=True)
        list_hr, list_lr = self._scan()
        if args.ext.find('img') >= 0 or benchmark:
            # Decode from the original image files on every access.
            self.images_hr, self.images_lr = list_hr, list_lr
        elif args.ext.find('sep') >= 0:
            # Mirror the directory tree under bin/ and cache pickled arrays.
            os.makedirs(
                self.dir_hr.replace(self.apath, path_bin),
                exist_ok=True
            )
            for s in self.scale:
                os.makedirs(
                    os.path.join(
                        self.dir_lr.replace(self.apath, path_bin),
                        'X{}'.format(s)
                    ),
                    exist_ok=True
                )
            self.images_hr, self.images_lr = [], [[] for _ in self.scale]
            for h in list_hr:
                b = h.replace(self.apath, path_bin)
                b = b.replace(self.ext[0], '.pt')
                self.images_hr.append(b)
                self._check_and_load(args.ext, h, b, verbose=True)
            for i, ll in enumerate(list_lr):
                for l in ll:
                    b = l.replace(self.apath, path_bin)
                    b = b.replace(self.ext[1], '.pt')
                    self.images_lr[i].append(b)
                    self._check_and_load(args.ext, l, b, verbose=True)
        if train:
            # Repeat the dataset so one epoch yields batch_size * test_every patches.
            n_patches = args.batch_size * args.test_every
            n_images = len(args.data_train) * len(self.images_hr)
            if n_images == 0:
                self.repeat = 0
            else:
                self.repeat = max(n_patches // n_images, 1)

    # Below functions as used to prepare images
    def _scan(self):
        """Return (hr_paths, lr_paths_per_scale) derived from the HR listing."""
        names_hr = sorted(
            glob.glob(os.path.join(self.dir_hr, '*' + self.ext[0]))
        )
        names_lr = [[] for _ in self.scale]
        for f in names_hr:
            filename, _ = os.path.splitext(os.path.basename(f))
            for si, s in enumerate(self.scale):
                names_lr[si].append(os.path.join(
                    self.dir_lr, 'X{}/{}{}'.format(
                        s, filename, self.ext[1]
                    )
                ))
        return names_hr, names_lr

    def _set_filesystem(self, dir_data):
        # Default layout: <dir_data>/<name>/{HR, LR_bicubic[L]}
        self.apath = os.path.join(dir_data, self.name)
        self.dir_hr = os.path.join(self.apath, 'HR')
        self.dir_lr = os.path.join(self.apath, 'LR_bicubic')
        if self.input_large: self.dir_lr += 'L'
        self.ext = ('.png', '.png')

    def _check_and_load(self, ext, img, f, verbose=True):
        """Pickle *img* to *f* unless the cache exists ('reset' forces rebuild)."""
        if not os.path.isfile(f) or ext.find('reset') >= 0:
            if verbose:
                print('Making a binary: {}'.format(f))
            with open(f, 'wb') as _f:
                pickle.dump(imageio.imread(img), _f)

    def __getitem__(self, idx):
        lr, hr, filename = self._load_file(idx)
        pair = self.get_patch(lr, hr)
        pair = common.set_channel(*pair, n_channels=self.args.n_colors)
        pair_t = common.np2Tensor(*pair, rgb_range=self.args.rgb_range)
        return pair_t[0], pair_t[1], filename

    def __len__(self):
        if self.train:
            return len(self.images_hr) * self.repeat
        else:
            return len(self.images_hr)

    def _get_index(self, idx):
        # Map the virtually repeated index back to a real image index.
        if self.train:
            return idx % len(self.images_hr)
        else:
            return idx

    def _load_file(self, idx):
        """Load the (lr, hr) pair for *idx* from disk or the binary cache."""
        idx = self._get_index(idx)
        f_hr = self.images_hr[idx]
        f_lr = self.images_lr[self.idx_scale][idx]
        filename, _ = os.path.splitext(os.path.basename(f_hr))
        if self.args.ext == 'img' or self.benchmark:
            hr = imageio.imread(f_hr)
            lr = imageio.imread(f_lr)
        elif self.args.ext.find('sep') >= 0:
            with open(f_hr, 'rb') as _f:
                hr = pickle.load(_f)
            with open(f_lr, 'rb') as _f:
                lr = pickle.load(_f)
        return lr, hr, filename

    def get_patch(self, lr, hr):
        """Random aligned crop (+augment) when training; full frame when testing."""
        scale = self.scale[self.idx_scale]
        if self.train:
            lr, hr = common.get_patch(
                lr, hr,
                patch_size=self.args.patch_size,
                scale=scale,
                multi=(len(self.scale) > 1),
                input_large=self.input_large
            )
            if not self.args.no_augment: lr, hr = common.augment(lr, hr)
        else:
            # Trim HR so its size is an exact multiple of the LR size.
            ih, iw = lr.shape[:2]
            hr = hr[0:ih * scale, 0:iw * scale]
        return lr, hr

    def set_scale(self, idx_scale):
        if not self.input_large:
            self.idx_scale = idx_scale
        else:
            # Input-large models pick a random scale themselves.
            self.idx_scale = random.randint(0, len(self.scale) - 1)
| 5,337 | 32.78481 | 73 | py |
Pyramid-Attention-Networks | Pyramid-Attention-Networks-master/Demosaic/code/data/demo.py | import os
from data import common
import numpy as np
import imageio
import torch
import torch.utils.data as data
class Demo(data.Dataset):
    """Inference-only dataset: every .png / .jp(e)g file found directly in
    args.dir_demo, with no ground-truth target."""

    def __init__(self, args, name='Demo', train=False, benchmark=False):
        self.args = args
        self.name = name
        self.scale = args.scale
        self.idx_scale = 0
        self.train = False
        self.benchmark = benchmark
        # Keep only image files, in a deterministic (sorted) order.
        self.filelist = sorted(
            os.path.join(args.dir_demo, f)
            for f in os.listdir(args.dir_demo)
            if f.find('.png') >= 0 or f.find('.jp') >= 0
        )

    def __getitem__(self, idx):
        path = self.filelist[idx]
        filename = os.path.splitext(os.path.basename(path))[0]
        lr = imageio.imread(path)
        lr, = common.set_channel(lr, n_channels=self.args.n_colors)
        lr_t, = common.np2Tensor(lr, rgb_range=self.args.rgb_range)
        # -1 stands in for the missing HR target.
        return lr_t, -1, filename

    def __len__(self):
        return len(self.filelist)

    def set_scale(self, idx_scale):
        self.idx_scale = idx_scale
| 1,075 | 25.9 | 76 | py |
Pyramid-Attention-Networks | Pyramid-Attention-Networks-master/Demosaic/code/data/common.py | import random
import numpy as np
import skimage.color as sc
import torch
def get_patch(*args, patch_size=96, scale=1, multi=False, input_large=False):
    """Crop the same random patch_size x patch_size window from every HWC
    array in *args* and return the crops as a list.

    Dead-code cleanup: in this demosaicing variant LR and HR share the same
    resolution, so the scale-dependent coordinate mapping inherited from the
    parent EDSR code had degenerated (``p = 1 if multi else 1``, ``tp // 1``,
    ``1 * ix``) -- every branch produced tp == ip == patch_size and
    (tx, ty) == (ix, iy). The ``scale``/``multi``/``input_large`` parameters
    are kept (unused) for call-site compatibility.
    """
    ih, iw = args[0].shape[:2]
    # One shared random top-left corner keeps all crops aligned.
    ix = random.randrange(0, iw - patch_size + 1)
    iy = random.randrange(0, ih - patch_size + 1)
    return [a[iy:iy + patch_size, ix:ix + patch_size, :] for a in args]
def set_channel(*args, n_channels=3):
    """Coerce every image to shape (H, W, n_channels): grayscale is stacked
    to 3 channels; RGB is reduced to its YCbCr luma channel when 1 is asked."""
    def _convert(img):
        if img.ndim == 2:
            img = img[:, :, None]
        channels = img.shape[2]
        if n_channels == 1 and channels == 3:
            img = sc.rgb2ycbcr(img)[:, :, 0:1]
        elif n_channels == 3 and channels == 1:
            img = np.repeat(img, n_channels, axis=2)
        return img

    return [_convert(a) for a in args]
def np2Tensor(*args, rgb_range=255):
    """Convert HWC numpy images (0..255 scale) to CHW float tensors rescaled
    to [0, rgb_range]; returns one tensor per input."""
    def _convert(img):
        chw = np.ascontiguousarray(img.transpose((2, 0, 1)))
        t = torch.from_numpy(chw).float()
        return t.mul_(rgb_range / 255)

    return [_convert(a) for a in args]
def augment(*args, hflip=True, rot=True):
    """Apply one random dihedral transform (h-flip, v-flip, 90-degree rotate,
    each with p=0.5 when enabled) identically to every input array."""
    do_hflip = hflip and random.random() < 0.5
    do_vflip = rot and random.random() < 0.5
    do_rot90 = rot and random.random() < 0.5

    def _apply(img):
        if do_hflip: img = img[:, ::-1, :]
        if do_vflip: img = img[::-1, :, :]
        if do_rot90: img = img.transpose(1, 0, 2)
        return img

    return [_apply(a) for a in args]
| 1,770 | 23.260274 | 77 | py |
Pyramid-Attention-Networks | Pyramid-Attention-Networks-master/Demosaic/code/data/__init__.py | from importlib import import_module
#from dataloader import MSDataLoader
from torch.utils.data import dataloader
from torch.utils.data import ConcatDataset
# This is a simple wrapper function for ConcatDataset
class MyConcatDataset(ConcatDataset):
    """ConcatDataset that also carries the train flag and broadcasts the
    SR scale selection to every member dataset."""

    def __init__(self, datasets):
        super(MyConcatDataset, self).__init__(datasets)
        # All members belong to the same split; mirror the first one's flag.
        self.train = datasets[0].train

    def set_scale(self, idx_scale):
        for ds in self.datasets:
            if hasattr(ds, 'set_scale'):
                ds.set_scale(idx_scale)
class Data:
    """Builds the train loader (concatenated training sets, shuffled) and one
    test loader per entry in args.data_test, importing each dataset class
    dynamically from the `data` package."""

    def __init__(self, args):
        self.loader_train = None
        if not args.test_only:
            datasets = []
            for d in args.data_train:
                # 'DIV2K-Q<n>' names map to the DIV2KJPEG module/class.
                module_name = d if d.find('DIV2K-Q') < 0 else 'DIV2KJPEG'
                m = import_module('data.' + module_name.lower())
                datasets.append(getattr(m, module_name)(args, name=d))
            self.loader_train = dataloader.DataLoader(
                MyConcatDataset(datasets),
                batch_size=args.batch_size,
                shuffle=True,
                pin_memory=not args.cpu,
                num_workers=args.n_threads,
            )
        self.loader_test = []
        for d in args.data_test:
            # Known benchmark names go through the Benchmark wrapper.
            if d in ['CBSD68','Kodak24','McM','Set5', 'Set14', 'B100', 'Urban100']:
                m = import_module('data.benchmark')
                testset = getattr(m, 'Benchmark')(args, train=False, name=d)
            else:
                module_name = d if d.find('DIV2K-Q') < 0 else 'DIV2KJPEG'
                m = import_module('data.' + module_name.lower())
                testset = getattr(m, module_name)(args, train=False, name=d)
            # Evaluation runs one image at a time, in order.
            self.loader_test.append(
                dataloader.DataLoader(
                    testset,
                    batch_size=1,
                    shuffle=False,
                    pin_memory=not args.cpu,
                    num_workers=args.n_threads,
                )
            )
| 1,974 | 36.264151 | 83 | py |
Pyramid-Attention-Networks | Pyramid-Attention-Networks-master/Demosaic/code/model/rcan.py | ## ECCV-2018-Image Super-Resolution Using Very Deep Residual Channel Attention Networks
## https://arxiv.org/abs/1807.02758
from model import common
import torch.nn as nn
def make_model(args, parent=False):
    """Factory hook used by the framework's model loader; *parent* is unused here."""
    return RCAN(args)
## Channel Attention (CA) Layer
class CALayer(nn.Module):
    """Channel Attention layer: squeeze (global average pool) + excitation
    (channel bottleneck MLP with sigmoid), gating the input channel-wise."""

    def __init__(self, channel, reduction=16):
        super(CALayer, self).__init__()
        # Squeeze: each feature map collapses to one descriptor per channel.
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        # Excitation: channel -> channel/reduction -> channel, sigmoid weights.
        self.conv_du = nn.Sequential(
            nn.Conv2d(channel, channel // reduction, 1, padding=0, bias=True),
            nn.ReLU(inplace=True),
            nn.Conv2d(channel // reduction, channel, 1, padding=0, bias=True),
            nn.Sigmoid()
        )

    def forward(self, x):
        gate = self.conv_du(self.avg_pool(x))
        return x * gate
## Residual Channel Attention Block (RCAB)
class RCAB(nn.Module):
    """Residual Channel Attention Block: conv-act-conv + channel attention, with identity skip."""
    def __init__(
        self, conv, n_feat, kernel_size, reduction,
        bias=True, bn=False, act=nn.ReLU(True), res_scale=1):
        super(RCAB, self).__init__()
        layers = [conv(n_feat, n_feat, kernel_size, bias=bias)]
        if bn:
            layers.append(nn.BatchNorm2d(n_feat))
        layers.append(act)
        layers.append(conv(n_feat, n_feat, kernel_size, bias=bias))
        if bn:
            layers.append(nn.BatchNorm2d(n_feat))
        layers.append(CALayer(n_feat, reduction))
        self.body = nn.Sequential(*layers)
        # Kept for interface compatibility; the residual branch is not scaled here.
        self.res_scale = res_scale

    def forward(self, x):
        return self.body(x) + x
## Residual Group (RG)
class ResidualGroup(nn.Module):
    """A stack of RCABs plus a trailing conv, wrapped in a long skip connection."""
    def __init__(self, conv, n_feat, kernel_size, reduction, act, res_scale, n_resblocks):
        super(ResidualGroup, self).__init__()
        blocks = [
            RCAB(
                conv, n_feat, kernel_size, reduction,
                bias=True, bn=False, act=nn.ReLU(True), res_scale=1)
            for _ in range(n_resblocks)
        ]
        blocks.append(conv(n_feat, n_feat, kernel_size))
        self.body = nn.Sequential(*blocks)

    def forward(self, x):
        return self.body(x) + x
## Residual Channel Attention Network (RCAN)
class RCAN(nn.Module):
    """Residual Channel Attention Network (ECCV 2018).

    head (one conv) -> body (n_resgroups ResidualGroups + conv, global skip)
    -> tail (Upsampler + conv), with DIV2K mean subtraction/addition around it.
    """
    def __init__(self, args, conv=common.default_conv):
        super(RCAN, self).__init__()
        n_resgroups = args.n_resgroups
        n_resblocks = args.n_resblocks
        n_feats = args.n_feats
        kernel_size = 3
        reduction = args.reduction
        scale = args.scale[0]
        act = nn.ReLU(True)
        # RGB mean for DIV2K
        rgb_mean = (0.4488, 0.4371, 0.4040)
        rgb_std = (1.0, 1.0, 1.0)
        self.sub_mean = common.MeanShift(args.rgb_range, rgb_mean, rgb_std)
        # define head module
        modules_head = [conv(args.n_colors, n_feats, kernel_size)]
        # define body module
        modules_body = [
            ResidualGroup(
                conv, n_feats, kernel_size, reduction, act=act, res_scale=args.res_scale, n_resblocks=n_resblocks) \
            for _ in range(n_resgroups)]
        modules_body.append(conv(n_feats, n_feats, kernel_size))
        # define tail module
        modules_tail = [
            common.Upsampler(conv, scale, n_feats, act=False),
            conv(n_feats, args.n_colors, kernel_size)]
        # sign=1: adds the mean back after the network.
        self.add_mean = common.MeanShift(args.rgb_range, rgb_mean, rgb_std, 1)
        self.head = nn.Sequential(*modules_head)
        self.body = nn.Sequential(*modules_body)
        self.tail = nn.Sequential(*modules_tail)
    def forward(self, x):
        x = self.sub_mean(x)
        x = self.head(x)
        res = self.body(x)
        # Global residual connection over the whole body.
        res += x
        x = self.tail(res)
        x = self.add_mean(x)
        return x
    def load_state_dict(self, state_dict, strict=False):
        """Copy matching parameters; mismatched 'tail' entries (different scale)
        are tolerated so a pre-trained trunk can be reused with a new upsampler."""
        own_state = self.state_dict()
        for name, param in state_dict.items():
            if name in own_state:
                if isinstance(param, nn.Parameter):
                    param = param.data
                try:
                    own_state[name].copy_(param)
                except Exception:
                    if name.find('tail') >= 0:
                        print('Replace pre-trained upsampler to new one...')
                    else:
                        raise RuntimeError('While copying the parameter named {}, '
                                           'whose dimensions in the model are {} and '
                                           'whose dimensions in the checkpoint are {}.'
                                           .format(name, own_state[name].size(), param.size()))
            elif strict:
                if name.find('tail') == -1:
                    raise KeyError('unexpected key "{}" in state_dict'
                                   .format(name))
        if strict:
            missing = set(own_state.keys()) - set(state_dict.keys())
            if len(missing) > 0:
                raise KeyError('missing keys in state_dict: "{}"'.format(missing))
| 5,178 | 34.717241 | 116 | py |
Pyramid-Attention-Networks | Pyramid-Attention-Networks-master/Demosaic/code/model/ddbpn.py | # Deep Back-Projection Networks For Super-Resolution
# https://arxiv.org/abs/1803.02735
from model import common
import torch
import torch.nn as nn
def make_model(args, parent=False):
    """Factory hook used by the framework's model loader to build DDBPN."""
    network = DDBPN(args)
    return network
def projection_conv(in_channels, out_channels, scale, up=True):
    """Return the (transposed) convolution used by DBPN projection units.

    Kernel size, stride and padding are chosen so that the spatial size is
    multiplied (up=True, ConvTranspose2d) or divided (up=False, Conv2d) by
    `scale`. Only scales 2, 4 and 8 are supported; an unsupported scale now
    raises a descriptive ValueError instead of an opaque KeyError.
    """
    try:
        kernel_size, stride, padding = {
            2: (6, 2, 2),
            4: (8, 4, 2),
            8: (12, 8, 2)
        }[scale]
    except KeyError:
        raise ValueError(
            'projection_conv: unsupported scale {} (expected 2, 4 or 8)'.format(scale)
        ) from None
    conv_f = nn.ConvTranspose2d if up else nn.Conv2d
    return conv_f(
        in_channels, out_channels, kernel_size,
        stride=stride, padding=padding
    )
class DenseProjection(nn.Module):
    """DBPN projection unit: project, back-project, then correct with the residual error."""
    def __init__(self, in_channels, nr, scale, up=True, bottleneck=True):
        super(DenseProjection, self).__init__()
        if bottleneck:
            # 1x1 conv compressing the densely concatenated input features.
            self.bottleneck = nn.Sequential(
                nn.Conv2d(in_channels, nr, 1),
                nn.PReLU(nr),
            )
            inter_channels = nr
        else:
            self.bottleneck = None
            inter_channels = in_channels
        # Forward projection (up or down depending on `up`).
        self.conv_1 = nn.Sequential(
            projection_conv(inter_channels, nr, scale, up),
            nn.PReLU(nr),
        )
        # Back-projection in the opposite direction.
        self.conv_2 = nn.Sequential(
            projection_conv(nr, inter_channels, scale, not up),
            nn.PReLU(inter_channels),
        )
        # Projection of the back-projection error.
        self.conv_3 = nn.Sequential(
            projection_conv(inter_channels, nr, scale, up),
            nn.PReLU(nr),
        )

    def forward(self, x):
        if self.bottleneck is not None:
            x = self.bottleneck(x)
        projected = self.conv_1(x)
        back_projected = self.conv_2(projected)
        error = back_projected - x
        correction = self.conv_3(error)
        return projected + correction
class DDBPN(nn.Module):
    """Dense Deep Back-Projection Network (CVPR 2018).

    Alternates up- and down-projection units whose inputs are dense
    concatenations of all previous opposite-direction outputs; the final HR
    features from every up-projection are fused by the reconstruction conv.
    """
    def __init__(self, args):
        super(DDBPN, self).__init__()
        scale = args.scale[0]
        n0 = 128  # channels of the first feature extraction conv
        nr = 32   # channels produced by each projection unit
        self.depth = 6
        rgb_mean = (0.4488, 0.4371, 0.4040)
        rgb_std = (1.0, 1.0, 1.0)
        self.sub_mean = common.MeanShift(args.rgb_range, rgb_mean, rgb_std)
        initial = [
            nn.Conv2d(args.n_colors, n0, 3, padding=1),
            nn.PReLU(n0),
            nn.Conv2d(n0, nr, 1),
            nn.PReLU(nr)
        ]
        self.initial = nn.Sequential(*initial)
        self.upmodules = nn.ModuleList()
        self.downmodules = nn.ModuleList()
        # Input channels grow by nr per step because of dense concatenation;
        # the first unit of each direction skips the bottleneck.
        channels = nr
        for i in range(self.depth):
            self.upmodules.append(
                DenseProjection(channels, nr, scale, True, i > 1)
            )
            if i != 0:
                channels += nr
        channels = nr
        for i in range(self.depth - 1):
            self.downmodules.append(
                DenseProjection(channels, nr, scale, False, i != 0)
            )
            channels += nr
        reconstruction = [
            nn.Conv2d(self.depth * nr, args.n_colors, 3, padding=1)
        ]
        self.reconstruction = nn.Sequential(*reconstruction)
        self.add_mean = common.MeanShift(args.rgb_range, rgb_mean, rgb_std, 1)
    def forward(self, x):
        x = self.sub_mean(x)
        x = self.initial(x)
        # h_list: HR-space outputs of up-projections; l_list: LR-space outputs.
        h_list = []
        l_list = []
        for i in range(self.depth - 1):
            if i == 0:
                l = x
            else:
                l = torch.cat(l_list, dim=1)
            h_list.append(self.upmodules[i](l))
            l_list.append(self.downmodules[i](torch.cat(h_list, dim=1)))
        h_list.append(self.upmodules[-1](torch.cat(l_list, dim=1)))
        out = self.reconstruction(torch.cat(h_list, dim=1))
        out = self.add_mean(out)
        return out
| 3,629 | 26.5 | 78 | py |
Pyramid-Attention-Networks | Pyramid-Attention-Networks-master/Demosaic/code/model/rdn.py | # Residual Dense Network for Image Super-Resolution
# https://arxiv.org/abs/1802.08797
from model import common
import torch
import torch.nn as nn
def make_model(args, parent=False):
    """Factory hook used by the framework's model loader to build RDN."""
    network = RDN(args)
    return network
class RDB_Conv(nn.Module):
    """Conv + ReLU whose output is concatenated onto its input (dense growth)."""
    def __init__(self, inChannels, growRate, kSize=3):
        super(RDB_Conv, self).__init__()
        self.conv = nn.Sequential(
            nn.Conv2d(inChannels, growRate, kSize,
                      padding=(kSize - 1) // 2, stride=1),
            nn.ReLU(),
        )

    def forward(self, x):
        grown = self.conv(x)
        # Dense connectivity: pass the input through alongside the new features.
        return torch.cat((x, grown), 1)
class RDB(nn.Module):
    """Residual Dense Block: stacked dense conv layers, 1x1 fusion, local skip."""
    def __init__(self, growRate0, growRate, nConvLayers, kSize=3):
        super(RDB, self).__init__()
        self.convs = nn.Sequential(*[
            RDB_Conv(growRate0 + c * growRate, growRate)
            for c in range(nConvLayers)
        ])
        # Local Feature Fusion: squeeze the concatenated channels back to growRate0.
        self.LFF = nn.Conv2d(growRate0 + nConvLayers * growRate, growRate0, 1,
                             padding=0, stride=1)

    def forward(self, x):
        return x + self.LFF(self.convs(x))
class RDN(nn.Module):
    """Residual Dense Network (CVPR 2018): shallow extraction, D residual
    dense blocks, global feature fusion, and a PixelShuffle up-sampling net."""
    def __init__(self, args):
        super(RDN, self).__init__()
        r = args.scale[0]
        G0 = args.G0
        kSize = args.RDNkSize
        # number of RDB blocks (D), conv layers per block (C), growth rate (G)
        self.D, C, G = {
            'A': (20, 6, 32),
            'B': (16, 8, 64),
        }[args.RDNconfig]
        # Shallow feature extraction net
        self.SFENet1 = nn.Conv2d(args.n_colors, G0, kSize, padding=(kSize-1)//2, stride=1)
        self.SFENet2 = nn.Conv2d(G0, G0, kSize, padding=(kSize-1)//2, stride=1)
        # Residual dense blocks and dense feature fusion
        self.RDBs = nn.ModuleList()
        for i in range(self.D):
            self.RDBs.append(
                RDB(growRate0 = G0, growRate = G, nConvLayers = C)
            )
        # Global Feature Fusion: 1x1 squeeze of all D block outputs + conv.
        self.GFF = nn.Sequential(*[
            nn.Conv2d(self.D * G0, G0, 1, padding=0, stride=1),
            nn.Conv2d(G0, G0, kSize, padding=(kSize-1)//2, stride=1)
        ])
        # Up-sampling net (single shuffle for x2/x3, two x2 stages for x4)
        if r == 2 or r == 3:
            self.UPNet = nn.Sequential(*[
                nn.Conv2d(G0, G * r * r, kSize, padding=(kSize-1)//2, stride=1),
                nn.PixelShuffle(r),
                nn.Conv2d(G, args.n_colors, kSize, padding=(kSize-1)//2, stride=1)
            ])
        elif r == 4:
            self.UPNet = nn.Sequential(*[
                nn.Conv2d(G0, G * 4, kSize, padding=(kSize-1)//2, stride=1),
                nn.PixelShuffle(2),
                nn.Conv2d(G, G * 4, kSize, padding=(kSize-1)//2, stride=1),
                nn.PixelShuffle(2),
                nn.Conv2d(G, args.n_colors, kSize, padding=(kSize-1)//2, stride=1)
            ])
        else:
            raise ValueError("scale must be 2 or 3 or 4.")
    def forward(self, x):
        f__1 = self.SFENet1(x)
        x = self.SFENet2(f__1)
        RDBs_out = []
        for i in range(self.D):
            x = self.RDBs[i](x)
            RDBs_out.append(x)
        x = self.GFF(torch.cat(RDBs_out,1))
        # Global residual learning back to the first shallow features.
        x += f__1
        return self.UPNet(x)
| 3,202 | 29.216981 | 90 | py |
Pyramid-Attention-Networks | Pyramid-Attention-Networks-master/Demosaic/code/model/mdsr.py | from model import common
import torch.nn as nn
def make_model(args, parent=False):
    """Factory hook used by the framework's model loader to build MDSR."""
    network = MDSR(args)
    return network
class MDSR(nn.Module):
    """Multi-scale EDSR: one shared residual trunk plus per-scale
    pre-processing and up-sampling branches selected via set_scale()."""
    def __init__(self, args, conv=common.default_conv):
        super(MDSR, self).__init__()
        n_resblocks = args.n_resblocks
        n_feats = args.n_feats
        kernel_size = 3
        # Index into args.scale choosing the active branch; see set_scale().
        self.scale_idx = 0
        act = nn.ReLU(True)
        # DIV2K channel statistics used to center the input.
        rgb_mean = (0.4488, 0.4371, 0.4040)
        rgb_std = (1.0, 1.0, 1.0)
        self.sub_mean = common.MeanShift(args.rgb_range, rgb_mean, rgb_std)
        m_head = [conv(args.n_colors, n_feats, kernel_size)]
        # One shallow 5x5 residual pre-processing branch per supported scale.
        self.pre_process = nn.ModuleList([
            nn.Sequential(
                common.ResBlock(conv, n_feats, 5, act=act),
                common.ResBlock(conv, n_feats, 5, act=act)
            ) for _ in args.scale
        ])
        m_body = [
            common.ResBlock(
                conv, n_feats, kernel_size, act=act
            ) for _ in range(n_resblocks)
        ]
        m_body.append(conv(n_feats, n_feats, kernel_size))
        # One up-sampler per supported scale.
        self.upsample = nn.ModuleList([
            common.Upsampler(
                conv, s, n_feats, act=False
            ) for s in args.scale
        ])
        m_tail = [conv(n_feats, args.n_colors, kernel_size)]
        self.add_mean = common.MeanShift(args.rgb_range, rgb_mean, rgb_std, 1)
        self.head = nn.Sequential(*m_head)
        self.body = nn.Sequential(*m_body)
        self.tail = nn.Sequential(*m_tail)
    def forward(self, x):
        x = self.sub_mean(x)
        x = self.head(x)
        x = self.pre_process[self.scale_idx](x)
        res = self.body(x)
        # Global residual over the shared trunk.
        res += x
        x = self.upsample[self.scale_idx](res)
        x = self.tail(x)
        x = self.add_mean(x)
        return x
    def set_scale(self, scale_idx):
        # Called by the Model wrapper before each forward to pick the branch.
        self.scale_idx = scale_idx
| 1,837 | 25.637681 | 78 | py |
Pyramid-Attention-Networks | Pyramid-Attention-Networks-master/Demosaic/code/model/common.py | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
def default_conv(in_channels, out_channels, kernel_size, stride=1, bias=True):
    """2-D convolution with 'same'-style padding for odd kernel sizes."""
    padding = kernel_size // 2
    return nn.Conv2d(in_channels, out_channels, kernel_size,
                     padding=padding, stride=stride, bias=bias)
class MeanShift(nn.Conv2d):
    """Fixed 1x1 conv that shifts the RGB mean (sign=-1 subtracts, sign=1 adds)."""
    def __init__(
        self, rgb_range,
        rgb_mean=(0.4488, 0.4371, 0.4040), rgb_std=(1.0, 1.0, 1.0), sign=-1):
        super(MeanShift, self).__init__(3, 3, kernel_size=1)
        std_t = torch.Tensor(rgb_std)
        mean_t = torch.Tensor(rgb_mean)
        # Identity kernel scaled by 1/std; the bias carries the (scaled) mean shift.
        self.weight.data = torch.eye(3).view(3, 3, 1, 1) / std_t.view(3, 1, 1, 1)
        self.bias.data = sign * rgb_range * mean_t / std_t
        # The shift is a fixed preprocessing step, never trained.
        for param in self.parameters():
            param.requires_grad = False
class BasicBlock(nn.Sequential):
    """conv (+ optional BatchNorm) (+ optional activation) packaged as one Sequential.

    NOTE(review): `stride` is accepted but never forwarded to `conv` — confirm
    whether callers rely on that (kept as-is to preserve behavior).
    """
    def __init__(
        self, conv, in_channels, out_channels, kernel_size, stride=1, bias=True,
        bn=False, act=nn.PReLU()):
        layers = [conv(in_channels, out_channels, kernel_size, bias=bias)]
        if bn:
            layers.append(nn.BatchNorm2d(out_channels))
        if act is not None:
            layers.append(act)
        super(BasicBlock, self).__init__(*layers)
class ResBlock(nn.Module):
    """conv-act-conv residual block; the branch output is scaled by res_scale.

    NOTE(review): the default `act=nn.PReLU()` is evaluated once, so all
    instances built with the default share one PReLU parameter — confirm intended.
    """
    def __init__(
        self, conv, n_feats, kernel_size,
        bias=True, bn=False, act=nn.PReLU(), res_scale=1):
        super(ResBlock, self).__init__()
        layers = [conv(n_feats, n_feats, kernel_size, bias=bias)]
        if bn:
            layers.append(nn.BatchNorm2d(n_feats))
        layers.append(act)
        layers.append(conv(n_feats, n_feats, kernel_size, bias=bias))
        if bn:
            layers.append(nn.BatchNorm2d(n_feats))
        self.body = nn.Sequential(*layers)
        self.res_scale = res_scale

    def forward(self, x):
        return x + self.body(x).mul(self.res_scale)
class Upsampler(nn.Sequential):
    """Sub-pixel upsampler: conv to r^2 * n_feats channels then PixelShuffle,
    repeated per x2 stage; supports power-of-two scales and x3.

    Bug fix: `bias` is now passed to `conv` by keyword. Previously it was
    passed positionally and bound to `default_conv`'s `stride` parameter,
    which only worked by accident because True == 1 (and broke for bias=False).
    """
    def __init__(self, conv, scale, n_feats, bn=False, act=False, bias=True):
        m = []
        if (scale & (scale - 1)) == 0:  # power of two: stack log2(scale) x2 stages
            for _ in range(int(math.log(scale, 2))):
                m.append(conv(n_feats, 4 * n_feats, 3, bias=bias))
                m.append(nn.PixelShuffle(2))
                if bn:
                    m.append(nn.BatchNorm2d(n_feats))
                if act == 'relu':
                    m.append(nn.ReLU(True))
                elif act == 'prelu':
                    m.append(nn.PReLU(n_feats))
        elif scale == 3:
            m.append(conv(n_feats, 9 * n_feats, 3, bias=bias))
            m.append(nn.PixelShuffle(3))
            if bn:
                m.append(nn.BatchNorm2d(n_feats))
            if act == 'relu':
                m.append(nn.ReLU(True))
            elif act == 'prelu':
                m.append(nn.PReLU(n_feats))
        else:
            raise NotImplementedError
        super(Upsampler, self).__init__(*m)
| 2,799 | 30.460674 | 80 | py |
Pyramid-Attention-Networks | Pyramid-Attention-Networks-master/Demosaic/code/model/__init__.py | import os
from importlib import import_module
import torch
import torch.nn as nn
from torch.autograd import Variable
class Model(nn.Module):
    """Wrapper around the concrete network named by args.model.

    Adds device placement, optional half precision and DataParallel,
    checkpoint save/load, and the chop / x8 self-ensemble inference modes.
    """
    def __init__(self, args, ckp):
        super(Model, self).__init__()
        print('Making model...')
        self.scale = args.scale
        self.idx_scale = 0
        self.self_ensemble = args.self_ensemble
        self.chop = args.chop
        self.precision = args.precision
        self.cpu = args.cpu
        self.device = torch.device('cpu' if args.cpu else 'cuda')
        self.n_GPUs = args.n_GPUs
        self.save_models = args.save_models
        # Dynamically import model/<name>.py and build via its make_model hook.
        module = import_module('model.' + args.model.lower())
        self.model = module.make_model(args).to(self.device)
        if args.precision == 'half': self.model.half()
        if not args.cpu and args.n_GPUs > 1:
            self.model = nn.DataParallel(self.model, range(args.n_GPUs))
        self.load(
            ckp.dir,
            pre_train=args.pre_train,
            resume=args.resume,
            cpu=args.cpu
        )
        print(self.model, file=ckp.log_file)
    def forward(self, x, idx_scale):
        # Remember the active scale and propagate it to multi-scale models.
        self.idx_scale = idx_scale
        target = self.get_model()
        if hasattr(target, 'set_scale'):
            target.set_scale(idx_scale)
        # At eval time optionally apply x8 self-ensemble and/or chopped forward.
        if self.self_ensemble and not self.training:
            if self.chop:
                forward_function = self.forward_chop
            else:
                forward_function = self.model.forward
            return self.forward_x8(x, forward_function)
        elif self.chop and not self.training:
            return self.forward_chop(x)
        else:
            return self.model(x)
    def get_model(self):
        # Unwrap nn.DataParallel when running on more than one GPU.
        if self.n_GPUs == 1:
            return self.model
        else:
            return self.model.module
    def state_dict(self, **kwargs):
        # Always expose the underlying (unwrapped) model's state dict.
        target = self.get_model()
        return target.state_dict(**kwargs)
    def save(self, apath, epoch, is_best=False):
        """Write latest / best / per-epoch checkpoints under apath."""
        target = self.get_model()
        torch.save(
            target.state_dict(),
            os.path.join(apath, 'model_latest.pt')
        )
        if is_best:
            torch.save(
                target.state_dict(),
                os.path.join(apath, 'model_best.pt')
            )
        if self.save_models:
            torch.save(
                target.state_dict(),
                os.path.join(apath, 'model_{}.pt'.format(epoch))
            )
    def load(self, apath, pre_train='.', resume=-1, cpu=False):
        """Restore weights: resume == -1 loads the latest checkpoint,
        0 loads pre_train (if given), otherwise the numbered epoch file."""
        if cpu:
            kwargs = {'map_location': lambda storage, loc: storage}
        else:
            kwargs = {}
        if resume == -1:
            self.get_model().load_state_dict(
                torch.load(
                    os.path.join(apath, 'model_latest.pt'),
                    **kwargs
                ),
                strict=False
            )
        elif resume == 0:
            if pre_train != '.':
                print('Loading model from {}'.format(pre_train))
                self.get_model().load_state_dict(
                    torch.load(pre_train, **kwargs),
                    strict=False
                )
        else:
            self.get_model().load_state_dict(
                torch.load(
                    os.path.join(apath, 'model', 'model_{}.pt'.format(resume)),
                    **kwargs
                ),
                strict=False
            )
    def forward_chop(self, x, shave=10, min_size=6400):
        """Run inference on four overlapping quadrants (recursively) to bound memory."""
        scale = self.scale[self.idx_scale]
        # NOTE(review): scale is immediately overridden to 1 — the output keeps
        # the input resolution (demosaicking setting); confirm intended.
        scale = 1
        n_GPUs = min(self.n_GPUs, 4)
        b, c, h, w = x.size()
        h_half, w_half = h // 2, w // 2
        # Each quadrant is extended by `shave` pixels to avoid border artifacts.
        h_size, w_size = h_half + shave, w_half + shave
        lr_list = [
            x[:, :, 0:h_size, 0:w_size],
            x[:, :, 0:h_size, (w - w_size):w],
            x[:, :, (h - h_size):h, 0:w_size],
            x[:, :, (h - h_size):h, (w - w_size):w]]
        if w_size * h_size < min_size:
            sr_list = []
            for i in range(0, 4, n_GPUs):
                lr_batch = torch.cat(lr_list[i:(i + n_GPUs)], dim=0)
                sr_batch = self.model(lr_batch)
                sr_list.extend(sr_batch.chunk(n_GPUs, dim=0))
        else:
            # Quadrants still too large: recurse on each one.
            sr_list = [
                self.forward_chop(patch, shave=shave, min_size=min_size) \
                for patch in lr_list
            ]
        h, w = scale * h, scale * w
        h_half, w_half = scale * h_half, scale * w_half
        h_size, w_size = scale * h_size, scale * w_size
        shave *= scale
        output = x.new(b, c, h, w)
        # Stitch the four quadrants back, discarding the shaved overlap.
        output[:, :, 0:h_half, 0:w_half] \
            = sr_list[0][:, :, 0:h_half, 0:w_half]
        output[:, :, 0:h_half, w_half:w] \
            = sr_list[1][:, :, 0:h_half, (w_size - w + w_half):w_size]
        output[:, :, h_half:h, 0:w_half] \
            = sr_list[2][:, :, (h_size - h + h_half):h_size, 0:w_half]
        output[:, :, h_half:h, w_half:w] \
            = sr_list[3][:, :, (h_size - h + h_half):h_size, (w_size - w + w_half):w_size]
        return output
    def forward_x8(self, x, forward_function):
        """Geometric self-ensemble: average predictions over the 8 flip/transpose variants."""
        def _transform(v, op):
            if self.precision != 'single': v = v.float()
            v2np = v.data.cpu().numpy()
            if op == 'v':
                tfnp = v2np[:, :, :, ::-1].copy()
            elif op == 'h':
                tfnp = v2np[:, :, ::-1, :].copy()
            elif op == 't':
                tfnp = v2np.transpose((0, 1, 3, 2)).copy()
            ret = torch.Tensor(tfnp).to(self.device)
            if self.precision == 'half': ret = ret.half()
            return ret
        lr_list = [x]
        # Build all 8 dihedral variants by composing v-flip, h-flip, transpose.
        for tf in 'v', 'h', 't':
            lr_list.extend([_transform(t, tf) for t in lr_list])
        sr_list = [forward_function(aug) for aug in lr_list]
        # Undo each augmentation before averaging.
        for i in range(len(sr_list)):
            if i > 3:
                sr_list[i] = _transform(sr_list[i], 't')
            if i % 4 > 1:
                sr_list[i] = _transform(sr_list[i], 'h')
            if (i % 4) % 2 == 1:
                sr_list[i] = _transform(sr_list[i], 'v')
        output_cat = torch.cat(sr_list, dim=0)
        output = output_cat.mean(dim=0, keepdim=True)
        return output
| 6,200 | 31.465969 | 90 | py |
Pyramid-Attention-Networks | Pyramid-Attention-Networks-master/Demosaic/code/model/panet.py | from model import common
from model import attention
import torch.nn as nn
def make_model(args, parent=False):
    """Factory hook used by the framework's model loader to build PANET."""
    network = PANET(args)
    return network
class PANET(nn.Module):
    """Pyramid Attention Network for demosaicking: an EDSR-style trunk with a
    PyramidAttention module inserted mid-body; no upsampling in the tail
    (the output keeps the input resolution)."""
    def __init__(self, args, conv=common.default_conv):
        super(PANET, self).__init__()
        n_resblocks = args.n_resblocks
        n_feats = args.n_feats
        kernel_size = 3
        scale = args.scale[0]
        rgb_mean = (0.4488, 0.4371, 0.4040)
        rgb_std = (1.0, 1.0, 1.0)
        self.sub_mean = common.MeanShift(args.rgb_range, rgb_mean, rgb_std)
        msa = attention.PyramidAttention()
        # define head module
        m_head = [conv(args.n_colors, n_feats, kernel_size)]
        # define body module: half the ResBlocks, then attention, then the rest.
        # NOTE(review): nn.PReLU() is passed positionally into ResBlock's `bias`
        # slot, so the activation actually used is ResBlock's shared default —
        # looks like a bug; confirm before changing (affects checkpoints).
        m_body = [
            common.ResBlock(
                conv, n_feats, kernel_size, nn.PReLU(), res_scale=args.res_scale
            ) for _ in range(n_resblocks//2)
        ]
        m_body.append(msa)
        for i in range(n_resblocks//2):
            m_body.append(common.ResBlock(conv,n_feats,kernel_size,nn.PReLU(),res_scale=args.res_scale))
        m_body.append(conv(n_feats, n_feats, kernel_size))
        # define tail module (upsampling deliberately disabled for demosaicking)
        #m_tail = [
        #    common.Upsampler(conv, scale, n_feats, act=False),
        #    conv(n_feats, args.n_colors, kernel_size)
        #]
        m_tail = [
            conv(n_feats, args.n_colors, kernel_size)
        ]
        self.add_mean = common.MeanShift(args.rgb_range, rgb_mean, rgb_std, 1)
        self.head = nn.Sequential(*m_head)
        self.body = nn.Sequential(*m_body)
        self.tail = nn.Sequential(*m_tail)
    def forward(self, x):
        # Mean shift disabled here (mosaiced input statistics differ from RGB).
        #x = self.sub_mean(x)
        x = self.head(x)
        res = self.body(x)
        res += x
        x = self.tail(res)
        #x = self.add_mean(x)
        return x
    def load_state_dict(self, state_dict, strict=True):
        """Copy matching parameters; mismatched 'tail' entries are tolerated."""
        own_state = self.state_dict()
        for name, param in state_dict.items():
            if name in own_state:
                if isinstance(param, nn.Parameter):
                    param = param.data
                try:
                    own_state[name].copy_(param)
                except Exception:
                    if name.find('tail') == -1:
                        raise RuntimeError('While copying the parameter named {}, '
                                           'whose dimensions in the model are {} and '
                                           'whose dimensions in the checkpoint are {}.'
                                           .format(name, own_state[name].size(), param.size()))
            elif strict:
                if name.find('tail') == -1:
                    raise KeyError('unexpected key "{}" in state_dict'
                                   .format(name))
| 2,779 | 32.493976 | 104 | py |
Pyramid-Attention-Networks | Pyramid-Attention-Networks-master/Demosaic/code/model/attention.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import transforms
from torchvision import utils as vutils
from model import common
from utils.tools import extract_image_patches,\
reduce_mean, reduce_sum, same_padding
class PyramidAttention(nn.Module):
    """Cross-scale non-local attention.

    Query patches from the input are matched (via normalized correlation and a
    scaled softmax) against key patches extracted from a pyramid of bicubically
    downscaled copies of the feature map; the output is reconstructed by
    pasting the softmax-weighted value patches back, plus a residual skip.
    """
    def __init__(self, level=5, res_scale=1, channel=64, reduction=2, ksize=3, stride=1, softmax_scale=10, average=True, conv=common.default_conv):
        super(PyramidAttention, self).__init__()
        self.ksize = ksize
        self.stride = stride
        self.res_scale = res_scale
        self.softmax_scale = softmax_scale
        # Pyramid scale factors: 1.0, 0.9, ... (level entries, step 0.1).
        self.scale = [1-i/10 for i in range(level)]
        self.average = average
        # Small constant guarding the patch-norm division below.
        escape_NaN = torch.FloatTensor([1e-4])
        self.register_buffer('escape_NaN', escape_NaN)
        # Embeddings: query (theta), key (g) and value-assembly transforms.
        self.conv_match_L_base = common.BasicBlock(conv,channel,channel//reduction, 1, bn=False, act=nn.PReLU())
        self.conv_match = common.BasicBlock(conv,channel, channel//reduction, 1, bn=False, act=nn.PReLU())
        self.conv_assembly = common.BasicBlock(conv,channel, channel,1,bn=False, act=nn.PReLU())
    def forward(self, input):
        res = input
        #theta
        match_base = self.conv_match_L_base(input)
        shape_base = list(res.size())
        input_groups = torch.split(match_base,1,dim=0)
        # patch size for matching
        kernel = self.ksize
        # raw_w is for reconstruction
        raw_w = []
        # w is for matching
        w = []
        #build feature pyramid
        for i in range(len(self.scale)):
            ref = input
            if self.scale[i]!=1:
                ref = F.interpolate(input, scale_factor=self.scale[i], mode='bicubic')
            #feature transformation function f
            base = self.conv_assembly(ref)
            shape_input = base.shape
            #sampling
            raw_w_i = extract_image_patches(base, ksizes=[kernel, kernel],
                                  strides=[self.stride,self.stride],
                                  rates=[1, 1],
                                  padding='same') # [N, C*k*k, L]
            raw_w_i = raw_w_i.view(shape_input[0], shape_input[1], kernel, kernel, -1)
            raw_w_i = raw_w_i.permute(0, 4, 1, 2, 3)    # raw_shape: [N, L, C, k, k]
            raw_w_i_groups = torch.split(raw_w_i, 1, dim=0)
            raw_w.append(raw_w_i_groups)
            #feature transformation function g
            ref_i = self.conv_match(ref)
            shape_ref = ref_i.shape
            #sampling
            w_i = extract_image_patches(ref_i, ksizes=[self.ksize, self.ksize],
                                  strides=[self.stride, self.stride],
                                  rates=[1, 1],
                                  padding='same')
            w_i = w_i.view(shape_ref[0], shape_ref[1], self.ksize, self.ksize, -1)
            w_i = w_i.permute(0, 4, 1, 2, 3)    # w shape: [N, L, C, k, k]
            w_i_groups = torch.split(w_i, 1, dim=0)
            w.append(w_i_groups)
        y = []
        # Process the batch one sample at a time (each as a conv filter bank).
        for idx, xi in enumerate(input_groups):
            #group in a filter
            wi = torch.cat([w[i][idx][0] for i in range(len(self.scale))],dim=0)  # [L, C, k, k]
            #normalize keys to unit norm (guarded against division by ~0)
            max_wi = torch.max(torch.sqrt(reduce_sum(torch.pow(wi, 2),
                                                     axis=[1, 2, 3],
                                                     keepdim=True)),
                               self.escape_NaN)
            wi_normed = wi/ max_wi
            #matching: correlation of the query map with every key patch
            xi = same_padding(xi, [self.ksize, self.ksize], [1, 1], [1, 1])  # xi: 1*c*H*W
            yi = F.conv2d(xi, wi_normed, stride=1)   # [1, L, H, W] L = shape_ref[2]*shape_ref[3]
            yi = yi.view(1,wi.shape[0], shape_base[2], shape_base[3])  # (B=1, C=32*32, H=32, W=32)
            # softmax matching score
            yi = F.softmax(yi*self.softmax_scale, dim=1)
            if self.average == False:
                # Hard attention: keep only the best-matching patch.
                yi = (yi == yi.max(dim=1,keepdim=True)[0]).float()
            # deconv for patch pasting; /4 compensates for overlapping 3x3 pastes
            raw_wi = torch.cat([raw_w[i][idx][0] for i in range(len(self.scale))],dim=0)
            yi = F.conv_transpose2d(yi, raw_wi, stride=self.stride,padding=1)/4.
            y.append(yi)
        y = torch.cat(y, dim=0)+res*self.res_scale  # back to the mini-batch
        return y | 4,427 | 46.106383 | 147 | py |
Pyramid-Attention-Networks | Pyramid-Attention-Networks-master/Demosaic/code/model/vdsr.py | from model import common
import torch.nn as nn
import torch.nn.init as init
# Download locations for pre-trained weights, keyed by 'r{n_resblocks}f{n_feats}'
# (see VDSR.__init__). The entry is empty: no checkpoint URL is published here.
url = {
    'r20f64': ''
}
def make_model(args, parent=False):
return VDSR(args)
class VDSR(nn.Module):
    """VDSR: a deep stack of conv+ReLU layers that learns the global residual."""
    def __init__(self, args, conv=common.default_conv):
        super(VDSR, self).__init__()
        n_resblocks = args.n_resblocks
        n_feats = args.n_feats
        kernel_size = 3
        # Pre-trained weight URL for this exact configuration (KeyError otherwise).
        self.url = url['r{}f{}'.format(n_resblocks, n_feats)]
        self.sub_mean = common.MeanShift(args.rgb_range)
        self.add_mean = common.MeanShift(args.rgb_range, sign=1)

        def basic_block(in_channels, out_channels, act):
            return common.BasicBlock(
                conv, in_channels, out_channels, kernel_size,
                bias=True, bn=False, act=act
            )

        first = basic_block(args.n_colors, n_feats, nn.ReLU(True))
        middle = [basic_block(n_feats, n_feats, nn.ReLU(True))
                  for _ in range(n_resblocks - 2)]
        last = basic_block(n_feats, args.n_colors, None)
        self.body = nn.Sequential(first, *middle, last)

    def forward(self, x):
        x = self.sub_mean(x)
        # The network predicts the residual; add the input back before de-shifting.
        residual = self.body(x)
        return self.add_mean(residual + x)
| 1,275 | 26.148936 | 73 | py |
Pyramid-Attention-Networks | Pyramid-Attention-Networks-master/Demosaic/code/model/utils/tools.py | import os
import torch
import numpy as np
from PIL import Image
import torch.nn.functional as F
def normalize(x):
    """Map values from [0, 1] to [-1, 1] in place and return the same tensor."""
    return x.mul_(2).sub_(1)
def same_padding(images, ksizes, strides, rates):
    """Zero-pad an NCHW batch so a subsequent sliding window covers it 'SAME'-style."""
    assert len(images.size()) == 4
    _, _, rows, cols = images.size()
    out_rows = (rows + strides[0] - 1) // strides[0]
    out_cols = (cols + strides[1] - 1) // strides[1]
    # Effective kernel extent once dilation (rates) is applied.
    eff_rows = (ksizes[0] - 1) * rates[0] + 1
    eff_cols = (ksizes[1] - 1) * rates[1] + 1
    pad_rows = max(0, (out_rows - 1) * strides[0] + eff_rows - rows)
    pad_cols = max(0, (out_cols - 1) * strides[1] + eff_cols - cols)
    # Split between the two sides; any odd pixel goes to the bottom/right.
    pad_top = pad_rows // 2
    pad_left = pad_cols // 2
    pad_bottom = pad_rows - pad_top
    pad_right = pad_cols - pad_left
    return torch.nn.ZeroPad2d((pad_left, pad_right, pad_top, pad_bottom))(images)
def extract_image_patches(images, ksizes, strides, rates, padding='same'):
    """
    Extract sliding-window patches from an NCHW batch via nn.Unfold.

    :param images: [batch, channels, rows, cols] input tensor
    :param ksizes: [ksize_rows, ksize_cols] window size
    :param strides: [stride_rows, stride_cols]
    :param rates: [dilation_rows, dilation_cols]
    :param padding: 'same' (zero-pad first) or 'valid' (no padding)
    :return: tensor of shape [N, C*k*k, L], L = number of window positions
    """
    assert len(images.size()) == 4
    assert padding in ['same', 'valid']
    if padding == 'same':
        images = same_padding(images, ksizes, strides, rates)
    elif padding != 'valid':
        raise NotImplementedError('Unsupported padding type: {}.\
                Only "same" or "valid" are supported.'.format(padding))
    unfold = torch.nn.Unfold(kernel_size=ksizes,
                             dilation=rates,
                             padding=0,
                             stride=strides)
    return unfold(images)
def reduce_mean(x, axis=None, keepdim=False):
    """Mean-reduce x over the given axes (highest dim first).

    axis may be None (reduce over all dims), a single int, or an iterable of
    ints. Bug fix: the old truthiness check (`if not axis`) silently reduced
    over *all* dims when axis was 0 or an empty list; both are now explicit.
    """
    if axis is None:
        axis = range(x.dim())
    elif isinstance(axis, int):
        axis = [axis]
    for i in sorted(axis, reverse=True):
        x = torch.mean(x, dim=i, keepdim=keepdim)
    return x
def reduce_std(x, axis=None, keepdim=False):
    """Std-reduce x over the given axes (highest dim first; unbiased, as torch.std).

    axis may be None (reduce over all dims), a single int, or an iterable of
    ints. Bug fix: the old truthiness check (`if not axis`) silently reduced
    over *all* dims when axis was 0 or an empty list; both are now explicit.
    """
    if axis is None:
        axis = range(x.dim())
    elif isinstance(axis, int):
        axis = [axis]
    for i in sorted(axis, reverse=True):
        x = torch.std(x, dim=i, keepdim=keepdim)
    return x
def reduce_sum(x, axis=None, keepdim=False):
    """Sum-reduce x over the given axes (highest dim first).

    axis may be None (reduce over all dims), a single int, or an iterable of
    ints. Bug fix: the old truthiness check (`if not axis`) silently reduced
    over *all* dims when axis was 0 or an empty list; both are now explicit.
    """
    if axis is None:
        axis = range(x.dim())
    elif isinstance(axis, int):
        axis = [axis]
    for i in sorted(axis, reverse=True):
        x = torch.sum(x, dim=i, keepdim=keepdim)
    return x
| 2,777 | 32.878049 | 79 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.