repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
DaVinci | DaVinci-main/taming/modules/vqvae/quantize.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from torch import einsum
from einops import rearrange
class VectorQuantizer(nn.Module):
    """
    see https://github.com/MishaLaskin/vqvae/blob/d761a999e2267766400dc646d82d3ac3657771d4/models/quantizer.py
    ____________________________________________
    Discretization bottleneck part of the VQ-VAE.
    Inputs:
    - n_e : number of embeddings
    - e_dim : dimension of embedding
    - beta : commitment cost used in loss term, beta * ||z_e(x)-sg[e]||^2
    _____________________________________________
    """
    # NOTE: this class contains a bug regarding beta; see VectorQuantizer2 for
    # a fix and use legacy=False to apply that fix. VectorQuantizer2 can be
    # used wherever VectorQuantizer has been used before and is additionally
    # more efficient.
    def __init__(self, n_e, e_dim, beta):
        super(VectorQuantizer, self).__init__()
        self.n_e = n_e
        self.e_dim = e_dim
        self.beta = beta
        # codebook: n_e learnable vectors of size e_dim, uniformly initialized
        # in [-1/n_e, 1/n_e]
        self.embedding = nn.Embedding(self.n_e, self.e_dim)
        self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e)
    def forward(self, z):
        """
        Inputs the output of the encoder network z and maps it to a discrete
        one-hot vector that is the index of the closest embedding vector e_j
        z (continuous) -> z_q (discrete)
        z.shape = (batch, channel, height, width)
        quantization pipeline:
            1. get encoder input (B,C,H,W)
            2. flatten input to (B*H*W,C)
        """
        # reshape z -> (batch, height, width, channel) and flatten
        z = z.permute(0, 2, 3, 1).contiguous()
        z_flattened = z.view(-1, self.e_dim)
        # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
        d = torch.sum(z_flattened ** 2, dim=1, keepdim=True) + \
            torch.sum(self.embedding.weight**2, dim=1) - 2 * \
            torch.matmul(z_flattened, self.embedding.weight.t())
        ## could possible replace this here
        # #\start...
        # find closest encodings
        min_encoding_indices = torch.argmin(d, dim=1).unsqueeze(1)
        # one-hot encode the selected indices (float, on z's device/dtype)
        min_encodings = torch.zeros(
            min_encoding_indices.shape[0], self.n_e).to(z)
        min_encodings.scatter_(1, min_encoding_indices, 1)
        # dtype min encodings: torch.float32
        # min_encodings shape: torch.Size([2048, 512])
        # min_encoding_indices.shape: torch.Size([2048, 1])
        # get quantized latent vectors
        z_q = torch.matmul(min_encodings, self.embedding.weight).view(z.shape)
        #.........\end
        # with:
        # .........\start
        #min_encoding_indices = torch.argmin(d, dim=1)
        #z_q = self.embedding(min_encoding_indices)
        # ......\end......... (TODO)
        # compute loss for embedding
        # NOTE(review): beta here weights the (z_q - sg[z]) codebook term, not
        # the commitment term as in the VQ-VAE paper; this is the "bug" the
        # class-level NOTE refers to (kept for backwards compatibility).
        loss = torch.mean((z_q.detach()-z)**2) + self.beta * \
            torch.mean((z_q - z.detach()) ** 2)
        # preserve gradients: straight-through estimator, so the encoder
        # receives the identity gradient through the quantization step
        z_q = z + (z_q - z).detach()
        # perplexity: exp(entropy) of the average codebook usage distribution
        e_mean = torch.mean(min_encodings, dim=0)
        perplexity = torch.exp(-torch.sum(e_mean * torch.log(e_mean + 1e-10)))
        # reshape back to match original input shape
        z_q = z_q.permute(0, 3, 1, 2).contiguous()
        return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
    def get_codebook_entry(self, indices, shape):
        """Look up codebook vectors for flat ``indices``; if ``shape``
        (batch, height, width, channel) is given, return them as (B,C,H,W)."""
        # shape specifying (batch, height, width, channel)
        # TODO: check for more easy handling with nn.Embedding
        min_encodings = torch.zeros(indices.shape[0], self.n_e).to(indices)
        min_encodings.scatter_(1, indices[:,None], 1)
        # get quantized latent vectors
        z_q = torch.matmul(min_encodings.float(), self.embedding.weight)
        if shape is not None:
            z_q = z_q.view(shape)
            # reshape back to match original input shape
            z_q = z_q.permute(0, 3, 1, 2).contiguous()
        return z_q
class GumbelQuantize(nn.Module):
    """
    credit to @karpathy: https://github.com/karpathy/deep-vector-quantization/blob/main/model.py (thanks!)
    Gumbel Softmax trick quantizer
    Categorical Reparameterization with Gumbel-Softmax, Jang et al. 2016
    https://arxiv.org/abs/1611.01144
    """
    def __init__(self, num_hiddens, embedding_dim, n_embed, straight_through=True,
                 kl_weight=5e-4, temp_init=1.0, use_vqinterface=True,
                 remap=None, unknown_index="random"):
        super().__init__()
        self.embedding_dim = embedding_dim
        self.n_embed = n_embed
        self.straight_through = straight_through
        self.temperature = temp_init
        self.kl_weight = kl_weight
        # projects encoder features to one logit per codebook entry
        self.proj = nn.Conv2d(num_hiddens, n_embed, 1)
        self.embed = nn.Embedding(n_embed, embedding_dim)
        self.use_vqinterface = use_vqinterface
        # optional post-hoc remapping of codebook indices to a subset of
        # "used" entries, loaded from an .npy file of index values
        self.remap = remap
        if self.remap is not None:
            self.register_buffer("used", torch.tensor(np.load(self.remap)))
            self.re_embed = self.used.shape[0]
            self.unknown_index = unknown_index # "random" or "extra" or integer
            if self.unknown_index == "extra":
                self.unknown_index = self.re_embed
                self.re_embed = self.re_embed+1
            print(f"Remapping {self.n_embed} indices to {self.re_embed} indices. "
                  f"Using {self.unknown_index} for unknown indices.")
        else:
            self.re_embed = n_embed
    def remap_to_used(self, inds):
        """Map full-codebook indices to positions within ``self.used``;
        indices not present in ``used`` become ``unknown_index`` (or random)."""
        ishape = inds.shape
        assert len(ishape)>1
        inds = inds.reshape(ishape[0],-1)
        used = self.used.to(inds)
        match = (inds[:,:,None]==used[None,None,...]).long()
        new = match.argmax(-1)
        unknown = match.sum(2)<1
        if self.unknown_index == "random":
            new[unknown]=torch.randint(0,self.re_embed,size=new[unknown].shape).to(device=new.device)
        else:
            new[unknown] = self.unknown_index
        return new.reshape(ishape)
    def unmap_to_all(self, inds):
        """Inverse of remap_to_used; the optional "extra" token maps to 0."""
        ishape = inds.shape
        assert len(ishape)>1
        inds = inds.reshape(ishape[0],-1)
        used = self.used.to(inds)
        if self.re_embed > self.used.shape[0]: # extra token
            inds[inds>=self.used.shape[0]] = 0 # simply set to zero
        back=torch.gather(used[None,:][inds.shape[0]*[0],:], 1, inds)
        return back.reshape(ishape)
    def forward(self, z, temp=None, return_logits=False):
        # force hard = True when we are in eval mode, as we must quantize. actually, always true seems to work
        hard = self.straight_through if self.training else True
        temp = self.temperature if temp is None else temp
        logits = self.proj(z)
        if self.remap is not None:
            # continue only with used logits
            full_zeros = torch.zeros_like(logits)
            logits = logits[:,self.used,...]
        soft_one_hot = F.gumbel_softmax(logits, tau=temp, dim=1, hard=hard)
        if self.remap is not None:
            # go back to all entries but unused set to zero
            full_zeros[:,self.used,...] = soft_one_hot
            soft_one_hot = full_zeros
        # soft-weighted (or hard, straight-through) sum of codebook vectors
        z_q = einsum('b n h w, n d -> b d h w', soft_one_hot, self.embed.weight)
        # + kl divergence to the prior loss (uniform prior over the codebook)
        qy = F.softmax(logits, dim=1)
        diff = self.kl_weight * torch.sum(qy * torch.log(qy * self.n_embed + 1e-10), dim=1).mean()
        ind = soft_one_hot.argmax(dim=1)
        if self.remap is not None:
            ind = self.remap_to_used(ind)
        if self.use_vqinterface:
            # mimic VectorQuantizer's (z_q, loss, info) return signature
            if return_logits:
                return z_q, diff, (None, None, ind), logits
            return z_q, diff, (None, None, ind)
        return z_q, diff, ind
    def get_codebook_entry(self, indices, shape):
        """Decode flat ``indices`` to codebook vectors shaped (B,D,H,W);
        ``shape`` is (batch, height, width, channel)."""
        b, h, w, c = shape
        assert b*h*w == indices.shape[0]
        indices = rearrange(indices, '(b h w) -> b h w', b=b, h=h, w=w)
        if self.remap is not None:
            indices = self.unmap_to_all(indices)
        one_hot = F.one_hot(indices, num_classes=self.n_embed).permute(0, 3, 1, 2).float()
        z_q = einsum('b n h w, n d -> b d h w', one_hot, self.embed.weight)
        return z_q
class VectorQuantizer2(nn.Module):
    """
    Improved version over VectorQuantizer, can be used as a drop-in replacement. Mostly
    avoids costly matrix multiplications and allows for post-hoc remapping of indices.
    """
    # NOTE: due to a bug the beta term was applied to the wrong term. for
    # backwards compatibility we use the buggy version by default, but you can
    # specify legacy=False to fix it.
    def __init__(self, n_e, e_dim, beta, remap=None, unknown_index="random",
                 sane_index_shape=False, legacy=True):
        super().__init__()
        self.n_e = n_e
        self.e_dim = e_dim
        self.beta = beta
        self.legacy = legacy
        self.embedding = nn.Embedding(self.n_e, self.e_dim)
        self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e)
        # optional post-hoc remapping of codebook indices to a subset of
        # "used" entries, loaded from an .npy file of index values
        self.remap = remap
        if self.remap is not None:
            self.register_buffer("used", torch.tensor(np.load(self.remap)))
            self.re_embed = self.used.shape[0]
            self.unknown_index = unknown_index # "random" or "extra" or integer
            if self.unknown_index == "extra":
                self.unknown_index = self.re_embed
                self.re_embed = self.re_embed+1
            print(f"Remapping {self.n_e} indices to {self.re_embed} indices. "
                  f"Using {self.unknown_index} for unknown indices.")
        else:
            self.re_embed = n_e
        # if True, returned indices are reshaped to (batch, height, width)
        # instead of the flat (batch*height*width, 1) legacy layout
        self.sane_index_shape = sane_index_shape
    def remap_to_used(self, inds):
        """Map full-codebook indices to positions within ``self.used``;
        indices not present in ``used`` become ``unknown_index`` (or random)."""
        ishape = inds.shape
        assert len(ishape)>1
        inds = inds.reshape(ishape[0],-1)
        used = self.used.to(inds)
        match = (inds[:,:,None]==used[None,None,...]).long()
        new = match.argmax(-1)
        unknown = match.sum(2)<1
        if self.unknown_index == "random":
            new[unknown]=torch.randint(0,self.re_embed,size=new[unknown].shape).to(device=new.device)
        else:
            new[unknown] = self.unknown_index
        return new.reshape(ishape)
    def unmap_to_all(self, inds):
        """Inverse of remap_to_used; the optional "extra" token maps to 0."""
        ishape = inds.shape
        assert len(ishape)>1
        inds = inds.reshape(ishape[0],-1)
        used = self.used.to(inds)
        if self.re_embed > self.used.shape[0]: # extra token
            inds[inds>=self.used.shape[0]] = 0 # simply set to zero
        back=torch.gather(used[None,:][inds.shape[0]*[0],:], 1, inds)
        return back.reshape(ishape)
    def forward(self, z, temp=None, rescale_logits=False, return_logits=False):
        """Quantize ``z`` (B,C,H,W) to its nearest codebook entries.
        Returns (z_q, loss, (perplexity, min_encodings, min_encoding_indices));
        perplexity and min_encodings are always None in this implementation."""
        assert temp is None or temp==1.0, "Only for interface compatible with Gumbel"
        assert rescale_logits==False, "Only for interface compatible with Gumbel"
        assert return_logits==False, "Only for interface compatible with Gumbel"
        # reshape z -> (batch, height, width, channel) and flatten
        z = rearrange(z, 'b c h w -> b h w c').contiguous()
        z_flattened = z.view(-1, self.e_dim)
        # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
        d = torch.sum(z_flattened ** 2, dim=1, keepdim=True) + \
            torch.sum(self.embedding.weight**2, dim=1) - 2 * \
            torch.einsum('bd,dn->bn', z_flattened, rearrange(self.embedding.weight, 'n d -> d n'))
        min_encoding_indices = torch.argmin(d, dim=1)
        z_q = self.embedding(min_encoding_indices).view(z.shape)
        perplexity = None
        min_encodings = None
        # compute loss for embedding
        # legacy=True keeps the original (buggy) beta placement; legacy=False
        # applies beta to the commitment term as in the VQ-VAE paper
        if not self.legacy:
            loss = self.beta * torch.mean((z_q.detach()-z)**2) + \
                   torch.mean((z_q - z.detach()) ** 2)
        else:
            loss = torch.mean((z_q.detach()-z)**2) + self.beta * \
                   torch.mean((z_q - z.detach()) ** 2)
        # preserve gradients (straight-through estimator)
        z_q = z + (z_q - z).detach()
        # reshape back to match original input shape
        z_q = rearrange(z_q, 'b h w c -> b c h w').contiguous()
        if self.remap is not None:
            min_encoding_indices = min_encoding_indices.reshape(z.shape[0],-1) # add batch axis
            min_encoding_indices = self.remap_to_used(min_encoding_indices)
            min_encoding_indices = min_encoding_indices.reshape(-1,1) # flatten
        if self.sane_index_shape:
            min_encoding_indices = min_encoding_indices.reshape(
                z_q.shape[0], z_q.shape[2], z_q.shape[3])
        return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
    def get_codebook_entry(self, indices, shape):
        """Look up codebook vectors for ``indices``; if ``shape``
        (batch, height, width, channel) is given, return them as (B,C,H,W)."""
        # shape specifying (batch, height, width, channel)
        if self.remap is not None:
            indices = indices.reshape(shape[0],-1) # add batch axis
            indices = self.unmap_to_all(indices)
            indices = indices.reshape(-1) # flatten again
        # get quantized latent vectors
        z_q = self.embedding(indices)
        if shape is not None:
            z_q = z_q.view(shape)
            # reshape back to match original input shape
            z_q = z_q.permute(0, 3, 1, 2).contiguous()
        return z_q
| 13,259 | 39.181818 | 110 | py |
DaVinci | DaVinci-main/taming/modules/discriminator/model.py | import functools
import torch.nn as nn
from taming.modules.util import ActNorm
def weights_init(m):
    """DCGAN-style initializer: conv weights ~ N(0, 0.02); batch-norm weights
    ~ N(1, 0.02) with zero bias. Apply via ``model.apply(weights_init)``."""
    layer_name = type(m).__name__
    if 'Conv' in layer_name:
        nn.init.normal_(m.weight.data, 0.0, 0.02)
    elif 'BatchNorm' in layer_name:
        nn.init.normal_(m.weight.data, 1.0, 0.02)
        nn.init.constant_(m.bias.data, 0)
class NLayerDiscriminator(nn.Module):
    """Defines a PatchGAN discriminator as in Pix2Pix
    --> see https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/models/networks.py
    """
    def __init__(self, input_nc=3, ndf=64, n_layers=3, use_actnorm=False):
        """Construct a PatchGAN discriminator
        Parameters:
            input_nc (int)     -- the number of channels in input images
            ndf (int)          -- the number of filters in the first conv layer
            n_layers (int)     -- the number of downsampling conv blocks
            use_actnorm (bool) -- use ActNorm instead of BatchNorm2d
        """
        super(NLayerDiscriminator, self).__init__()
        norm_layer = ActNorm if use_actnorm else nn.BatchNorm2d
        # BatchNorm2d has affine parameters, so a conv bias would be redundant
        if type(norm_layer) == functools.partial:
            use_bias = norm_layer.func != nn.BatchNorm2d
        else:
            use_bias = norm_layer != nn.BatchNorm2d
        kw, padw = 4, 1
        layers = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw),
                  nn.LeakyReLU(0.2, True)]
        # gradually widen the filters while halving the resolution
        mult = 1
        for n in range(1, n_layers):
            prev_mult, mult = mult, min(2 ** n, 8)
            layers += [
                nn.Conv2d(ndf * prev_mult, ndf * mult, kernel_size=kw,
                          stride=2, padding=padw, bias=use_bias),
                norm_layer(ndf * mult),
                nn.LeakyReLU(0.2, True),
            ]
        # one more widening block at stride 1
        prev_mult, mult = mult, min(2 ** n_layers, 8)
        layers += [
            nn.Conv2d(ndf * prev_mult, ndf * mult, kernel_size=kw,
                      stride=1, padding=padw, bias=use_bias),
            norm_layer(ndf * mult),
            nn.LeakyReLU(0.2, True),
        ]
        # output a one-channel real/fake prediction map
        layers.append(nn.Conv2d(ndf * mult, 1, kernel_size=kw, stride=1, padding=padw))
        self.main = nn.Sequential(*layers)
    def forward(self, input):
        """Standard forward."""
        return self.main(input)
| 2,550 | 36.514706 | 116 | py |
DaVinci | DaVinci-main/taming/modules/misc/coord.py | import torch
class CoordStage(object):
    """Quantizes a single-channel coordinate map in [0, 1] by area-downsampling
    and rounding to ``n_embed`` discrete levels; mimics the VQ-model
    encode/decode interface."""
    def __init__(self, n_embed, down_factor):
        self.n_embed = n_embed
        self.down_factor = down_factor
    def eval(self):
        # nothing to switch; behave like nn.Module.eval() for API parity
        return self
    def encode(self, c):
        """fake vqmodel interface"""
        assert 0.0 <= c.min() and c.max() <= 1.0
        batch, channels, height, width = c.shape
        assert channels == 1
        pooled = torch.nn.functional.interpolate(
            c, scale_factor=1 / self.down_factor, mode="area").clamp(0.0, 1.0)
        # scale to [0, n_embed] and round to the nearest level
        c_quant = (self.n_embed * pooled).round()
        c_ind = c_quant.to(dtype=torch.long)
        return c_quant, None, (None, None, c_ind)
    def decode(self, c):
        # map levels back to [0, 1] and upsample with nearest neighbour
        scaled = c / self.n_embed
        return torch.nn.functional.interpolate(
            scaled, scale_factor=self.down_factor, mode="nearest")
| 904 | 27.28125 | 79 | py |
DaVinci | DaVinci-main/taming/modules/diffusionmodules/model.py | # pytorch_diffusion + derived encoder decoder
import math
import torch
import torch.nn as nn
import numpy as np
def get_timestep_embedding(timesteps, embedding_dim):
    """
    Sinusoidal timestep embeddings as in Denoising Diffusion Probabilistic
    Models (from Fairseq / tensor2tensor; differs slightly from the version
    described in Section 3.5 of "Attention Is All You Need").
    Takes a 1-D tensor of timesteps and returns a float tensor of shape
    (len(timesteps), embedding_dim).
    """
    assert len(timesteps.shape) == 1
    half_dim = embedding_dim // 2
    # geometric frequency ladder from 1 down to 1/10000
    freqs = torch.exp(
        torch.arange(half_dim, dtype=torch.float32)
        * -(math.log(10000) / (half_dim - 1))
    ).to(device=timesteps.device)
    args = timesteps.float()[:, None] * freqs[None, :]
    emb = torch.cat([torch.sin(args), torch.cos(args)], dim=1)
    if embedding_dim % 2 == 1:
        # odd dimension: zero-pad one column on the right
        emb = torch.nn.functional.pad(emb, (0, 1, 0, 0))
    return emb
def nonlinearity(x):
    """Swish / SiLU activation: x * sigmoid(x)."""
    return torch.sigmoid(x) * x
def Normalize(in_channels):
    """Build the normalization layer used throughout this module: a 32-group
    GroupNorm with affine parameters and eps=1e-6."""
    norm = torch.nn.GroupNorm(num_groups=32, num_channels=in_channels,
                              eps=1e-6, affine=True)
    return norm
class Upsample(nn.Module):
    """Doubles spatial resolution with nearest-neighbour interpolation,
    optionally followed by a channel-preserving 3x3 convolution."""
    def __init__(self, in_channels, with_conv):
        super().__init__()
        self.with_conv = with_conv
        if self.with_conv:
            self.conv = torch.nn.Conv2d(in_channels, in_channels,
                                        kernel_size=3, stride=1, padding=1)
    def forward(self, x):
        upsampled = torch.nn.functional.interpolate(
            x, scale_factor=2.0, mode="nearest")
        return self.conv(upsampled) if self.with_conv else upsampled
class Downsample(nn.Module):
    """Halves spatial resolution, either with a stride-2 3x3 convolution
    (after asymmetric zero padding) or with 2x2 average pooling."""
    def __init__(self, in_channels, with_conv):
        super().__init__()
        self.with_conv = with_conv
        if self.with_conv:
            # no asymmetric padding in torch conv, must do it ourselves
            self.conv = torch.nn.Conv2d(in_channels, in_channels,
                                        kernel_size=3, stride=2, padding=0)
    def forward(self, x):
        if not self.with_conv:
            return torch.nn.functional.avg_pool2d(x, kernel_size=2, stride=2)
        # pad one pixel on the right and bottom so the stride-2 conv halves H/W
        padded = torch.nn.functional.pad(x, (0, 1, 0, 1), mode="constant", value=0)
        return self.conv(padded)
class ResnetBlock(nn.Module):
    """Pre-activation residual block (GroupNorm -> swish -> 3x3 conv, twice)
    with optional timestep-embedding conditioning added after the first conv."""
    def __init__(self, *, in_channels, out_channels=None, conv_shortcut=False,
                 dropout, temb_channels=512):
        super().__init__()
        self.in_channels = in_channels
        out_channels = in_channels if out_channels is None else out_channels
        self.out_channels = out_channels
        self.use_conv_shortcut = conv_shortcut
        self.norm1 = Normalize(in_channels)
        self.conv1 = torch.nn.Conv2d(in_channels,
                                     out_channels,
                                     kernel_size=3,
                                     stride=1,
                                     padding=1)
        if temb_channels > 0:
            # projects the timestep embedding into this block's channel space
            self.temb_proj = torch.nn.Linear(temb_channels,
                                             out_channels)
        self.norm2 = Normalize(out_channels)
        self.dropout = torch.nn.Dropout(dropout)
        self.conv2 = torch.nn.Conv2d(out_channels,
                                     out_channels,
                                     kernel_size=3,
                                     stride=1,
                                     padding=1)
        if self.in_channels != self.out_channels:
            # the shortcut must change channels: either a full 3x3 conv or a
            # cheap 1x1 ("network-in-network") projection
            if self.use_conv_shortcut:
                self.conv_shortcut = torch.nn.Conv2d(in_channels,
                                                     out_channels,
                                                     kernel_size=3,
                                                     stride=1,
                                                     padding=1)
            else:
                self.nin_shortcut = torch.nn.Conv2d(in_channels,
                                                    out_channels,
                                                    kernel_size=1,
                                                    stride=1,
                                                    padding=0)
    def forward(self, x, temb):
        """x: (B,C,H,W) features; temb: (B, temb_channels) timestep embedding,
        or None when the block is used without conditioning."""
        h = x
        h = self.norm1(h)
        h = nonlinearity(h)
        h = self.conv1(h)
        if temb is not None:
            # broadcast-add the conditioning over both spatial dimensions
            h = h + self.temb_proj(nonlinearity(temb))[:,:,None,None]
        h = self.norm2(h)
        h = nonlinearity(h)
        h = self.dropout(h)
        h = self.conv2(h)
        if self.in_channels != self.out_channels:
            if self.use_conv_shortcut:
                x = self.conv_shortcut(x)
            else:
                x = self.nin_shortcut(x)
        return x+h
class AttnBlock(nn.Module):
    """Single-head self-attention over all spatial positions with a residual
    connection; q, k, v and the output projection are 1x1 convolutions."""
    def __init__(self, in_channels):
        super().__init__()
        self.in_channels = in_channels
        self.norm = Normalize(in_channels)
        self.q = torch.nn.Conv2d(in_channels,
                                 in_channels,
                                 kernel_size=1,
                                 stride=1,
                                 padding=0)
        self.k = torch.nn.Conv2d(in_channels,
                                 in_channels,
                                 kernel_size=1,
                                 stride=1,
                                 padding=0)
        self.v = torch.nn.Conv2d(in_channels,
                                 in_channels,
                                 kernel_size=1,
                                 stride=1,
                                 padding=0)
        self.proj_out = torch.nn.Conv2d(in_channels,
                                        in_channels,
                                        kernel_size=1,
                                        stride=1,
                                        padding=0)
    def forward(self, x):
        h_ = x
        h_ = self.norm(h_)
        q = self.q(h_)
        k = self.k(h_)
        v = self.v(h_)
        # compute attention
        b,c,h,w = q.shape
        q = q.reshape(b,c,h*w)
        q = q.permute(0,2,1) # b,hw,c
        k = k.reshape(b,c,h*w) # b,c,hw
        w_ = torch.bmm(q,k) # b,hw,hw w[b,i,j]=sum_c q[b,i,c]k[b,c,j]
        # scale logits by 1/sqrt(c), then softmax over the key positions
        w_ = w_ * (int(c)**(-0.5))
        w_ = torch.nn.functional.softmax(w_, dim=2)
        # attend to values
        v = v.reshape(b,c,h*w)
        w_ = w_.permute(0,2,1) # b,hw,hw (first hw of k, second of q)
        h_ = torch.bmm(v,w_) # b, c,hw (hw of q) h_[b,c,j] = sum_i v[b,c,i] w_[b,i,j]
        h_ = h_.reshape(b,c,h,w)
        h_ = self.proj_out(h_)
        return x+h_
class Model(nn.Module):
    """DDPM-style U-Net: a downsampling path, a middle block with attention,
    and an upsampling path with skip connections; attention is inserted at
    every resolution listed in ``attn_resolutions``. Optionally conditioned
    on a timestep ``t`` via a sinusoidal embedding and a two-layer MLP."""
    def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks,
                 attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels,
                 resolution, use_timestep=True):
        super().__init__()
        self.ch = ch
        self.temb_ch = self.ch*4
        self.num_resolutions = len(ch_mult)
        self.num_res_blocks = num_res_blocks
        self.resolution = resolution
        self.in_channels = in_channels
        self.use_timestep = use_timestep
        if self.use_timestep:
            # timestep embedding
            self.temb = nn.Module()
            self.temb.dense = nn.ModuleList([
                torch.nn.Linear(self.ch,
                                self.temb_ch),
                torch.nn.Linear(self.temb_ch,
                                self.temb_ch),
            ])
        # downsampling
        self.conv_in = torch.nn.Conv2d(in_channels,
                                       self.ch,
                                       kernel_size=3,
                                       stride=1,
                                       padding=1)
        curr_res = resolution
        # channel multiplier per level; prepend 1 for the input stem
        in_ch_mult = (1,)+tuple(ch_mult)
        self.down = nn.ModuleList()
        for i_level in range(self.num_resolutions):
            block = nn.ModuleList()
            attn = nn.ModuleList()
            block_in = ch*in_ch_mult[i_level]
            block_out = ch*ch_mult[i_level]
            for i_block in range(self.num_res_blocks):
                block.append(ResnetBlock(in_channels=block_in,
                                         out_channels=block_out,
                                         temb_channels=self.temb_ch,
                                         dropout=dropout))
                block_in = block_out
                if curr_res in attn_resolutions:
                    attn.append(AttnBlock(block_in))
            down = nn.Module()
            down.block = block
            down.attn = attn
            if i_level != self.num_resolutions-1:
                down.downsample = Downsample(block_in, resamp_with_conv)
                curr_res = curr_res // 2
            self.down.append(down)
        # middle
        self.mid = nn.Module()
        self.mid.block_1 = ResnetBlock(in_channels=block_in,
                                       out_channels=block_in,
                                       temb_channels=self.temb_ch,
                                       dropout=dropout)
        self.mid.attn_1 = AttnBlock(block_in)
        self.mid.block_2 = ResnetBlock(in_channels=block_in,
                                       out_channels=block_in,
                                       temb_channels=self.temb_ch,
                                       dropout=dropout)
        # upsampling
        self.up = nn.ModuleList()
        for i_level in reversed(range(self.num_resolutions)):
            block = nn.ModuleList()
            attn = nn.ModuleList()
            block_out = ch*ch_mult[i_level]
            # skip_in is the channel count of the skip connection concatenated
            # from the matching downsampling level
            skip_in = ch*ch_mult[i_level]
            for i_block in range(self.num_res_blocks+1):
                if i_block == self.num_res_blocks:
                    skip_in = ch*in_ch_mult[i_level]
                block.append(ResnetBlock(in_channels=block_in+skip_in,
                                         out_channels=block_out,
                                         temb_channels=self.temb_ch,
                                         dropout=dropout))
                block_in = block_out
                if curr_res in attn_resolutions:
                    attn.append(AttnBlock(block_in))
            up = nn.Module()
            up.block = block
            up.attn = attn
            if i_level != 0:
                up.upsample = Upsample(block_in, resamp_with_conv)
                curr_res = curr_res * 2
            self.up.insert(0, up) # prepend to get consistent order
        # end
        self.norm_out = Normalize(block_in)
        self.conv_out = torch.nn.Conv2d(block_in,
                                        out_ch,
                                        kernel_size=3,
                                        stride=1,
                                        padding=1)
    def forward(self, x, t=None):
        """x: (B, in_channels, H, W); t: 1-D tensor of timesteps, required
        when use_timestep=True. Returns a (B, out_ch, H, W) tensor."""
        #assert x.shape[2] == x.shape[3] == self.resolution
        if self.use_timestep:
            # timestep embedding
            assert t is not None
            temb = get_timestep_embedding(t, self.ch)
            temb = self.temb.dense[0](temb)
            temb = nonlinearity(temb)
            temb = self.temb.dense[1](temb)
        else:
            temb = None
        # downsampling; hs collects activations for the skip connections
        hs = [self.conv_in(x)]
        for i_level in range(self.num_resolutions):
            for i_block in range(self.num_res_blocks):
                h = self.down[i_level].block[i_block](hs[-1], temb)
                if len(self.down[i_level].attn) > 0:
                    h = self.down[i_level].attn[i_block](h)
                hs.append(h)
            if i_level != self.num_resolutions-1:
                hs.append(self.down[i_level].downsample(hs[-1]))
        # middle
        h = hs[-1]
        h = self.mid.block_1(h, temb)
        h = self.mid.attn_1(h)
        h = self.mid.block_2(h, temb)
        # upsampling; each block consumes one skip activation via concat
        for i_level in reversed(range(self.num_resolutions)):
            for i_block in range(self.num_res_blocks+1):
                h = self.up[i_level].block[i_block](
                    torch.cat([h, hs.pop()], dim=1), temb)
                if len(self.up[i_level].attn) > 0:
                    h = self.up[i_level].attn[i_block](h)
            if i_level != 0:
                h = self.up[i_level].upsample(h)
        # end
        h = self.norm_out(h)
        h = nonlinearity(h)
        h = self.conv_out(h)
        return h
class Encoder(nn.Module):
    """Downsampling half of the autoencoder: maps an image to a latent of
    ``z_channels`` channels (doubled when ``double_z`` — presumably to hold
    mean and log-variance of a diagonal Gaussian; verify against callers).
    Same building blocks as ``Model`` but without timestep conditioning."""
    def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks,
                 attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels,
                 resolution, z_channels, double_z=True, **ignore_kwargs):
        super().__init__()
        self.ch = ch
        self.temb_ch = 0
        self.num_resolutions = len(ch_mult)
        self.num_res_blocks = num_res_blocks
        self.resolution = resolution
        self.in_channels = in_channels
        # downsampling
        self.conv_in = torch.nn.Conv2d(in_channels,
                                       self.ch,
                                       kernel_size=3,
                                       stride=1,
                                       padding=1)
        curr_res = resolution
        # channel multiplier per level; prepend 1 for the input stem
        in_ch_mult = (1,)+tuple(ch_mult)
        self.down = nn.ModuleList()
        for i_level in range(self.num_resolutions):
            block = nn.ModuleList()
            attn = nn.ModuleList()
            block_in = ch*in_ch_mult[i_level]
            block_out = ch*ch_mult[i_level]
            for i_block in range(self.num_res_blocks):
                block.append(ResnetBlock(in_channels=block_in,
                                         out_channels=block_out,
                                         temb_channels=self.temb_ch,
                                         dropout=dropout))
                block_in = block_out
                if curr_res in attn_resolutions:
                    attn.append(AttnBlock(block_in))
            down = nn.Module()
            down.block = block
            down.attn = attn
            if i_level != self.num_resolutions-1:
                down.downsample = Downsample(block_in, resamp_with_conv)
                curr_res = curr_res // 2
            self.down.append(down)
        # middle
        self.mid = nn.Module()
        self.mid.block_1 = ResnetBlock(in_channels=block_in,
                                       out_channels=block_in,
                                       temb_channels=self.temb_ch,
                                       dropout=dropout)
        self.mid.attn_1 = AttnBlock(block_in)
        self.mid.block_2 = ResnetBlock(in_channels=block_in,
                                       out_channels=block_in,
                                       temb_channels=self.temb_ch,
                                       dropout=dropout)
        # end
        self.norm_out = Normalize(block_in)
        self.conv_out = torch.nn.Conv2d(block_in,
                                        2*z_channels if double_z else z_channels,
                                        kernel_size=3,
                                        stride=1,
                                        padding=1)
    def forward(self, x):
        #assert x.shape[2] == x.shape[3] == self.resolution, "{}, {}, {}".format(x.shape[2], x.shape[3], self.resolution)
        # timestep embedding (unused: temb_ch == 0)
        temb = None
        # downsampling
        hs = [self.conv_in(x)]
        for i_level in range(self.num_resolutions):
            for i_block in range(self.num_res_blocks):
                h = self.down[i_level].block[i_block](hs[-1], temb)
                if len(self.down[i_level].attn) > 0:
                    h = self.down[i_level].attn[i_block](h)
                hs.append(h)
            if i_level != self.num_resolutions-1:
                hs.append(self.down[i_level].downsample(hs[-1]))
        # middle
        h = hs[-1]
        h = self.mid.block_1(h, temb)
        h = self.mid.attn_1(h)
        h = self.mid.block_2(h, temb)
        # end
        h = self.norm_out(h)
        h = nonlinearity(h)
        h = self.conv_out(h)
        return h
class Decoder(nn.Module):
    """Upsampling half of the autoencoder: maps a latent ``z`` back to image
    space. ``give_pre_end`` returns the features before the final norm/conv.
    No timestep conditioning (temb_ch == 0) and no skip connections."""
    def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks,
                 attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels,
                 resolution, z_channels, give_pre_end=False, **ignorekwargs):
        super().__init__()
        self.ch = ch
        self.temb_ch = 0
        self.num_resolutions = len(ch_mult)
        self.num_res_blocks = num_res_blocks
        self.resolution = resolution
        self.in_channels = in_channels
        self.give_pre_end = give_pre_end
        # compute in_ch_mult, block_in and curr_res at lowest res
        in_ch_mult = (1,)+tuple(ch_mult)
        block_in = ch*ch_mult[self.num_resolutions-1]
        curr_res = resolution // 2**(self.num_resolutions-1)
        self.z_shape = (1,z_channels,curr_res,curr_res)
        print("Working with z of shape {} = {} dimensions.".format(
            self.z_shape, np.prod(self.z_shape)))
        # z to block_in
        self.conv_in = torch.nn.Conv2d(z_channels,
                                       block_in,
                                       kernel_size=3,
                                       stride=1,
                                       padding=1)
        # middle
        self.mid = nn.Module()
        self.mid.block_1 = ResnetBlock(in_channels=block_in,
                                       out_channels=block_in,
                                       temb_channels=self.temb_ch,
                                       dropout=dropout)
        self.mid.attn_1 = AttnBlock(block_in)
        self.mid.block_2 = ResnetBlock(in_channels=block_in,
                                       out_channels=block_in,
                                       temb_channels=self.temb_ch,
                                       dropout=dropout)
        # upsampling (built from the lowest resolution upwards)
        self.up = nn.ModuleList()
        for i_level in reversed(range(self.num_resolutions)):
            block = nn.ModuleList()
            attn = nn.ModuleList()
            block_out = ch*ch_mult[i_level]
            for i_block in range(self.num_res_blocks+1):
                block.append(ResnetBlock(in_channels=block_in,
                                         out_channels=block_out,
                                         temb_channels=self.temb_ch,
                                         dropout=dropout))
                block_in = block_out
                if curr_res in attn_resolutions:
                    attn.append(AttnBlock(block_in))
            up = nn.Module()
            up.block = block
            up.attn = attn
            if i_level != 0:
                up.upsample = Upsample(block_in, resamp_with_conv)
                curr_res = curr_res * 2
            self.up.insert(0, up) # prepend to get consistent order
        # end
        self.norm_out = Normalize(block_in)
        self.conv_out = torch.nn.Conv2d(block_in,
                                        out_ch,
                                        kernel_size=3,
                                        stride=1,
                                        padding=1)
    def forward(self, z):
        #assert z.shape[1:] == self.z_shape[1:]
        # remembered for debugging/inspection
        self.last_z_shape = z.shape
        # timestep embedding (unused: temb_ch == 0)
        temb = None
        # z to block_in
        h = self.conv_in(z)
        # middle
        h = self.mid.block_1(h, temb)
        h = self.mid.attn_1(h)
        h = self.mid.block_2(h, temb)
        # upsampling
        for i_level in reversed(range(self.num_resolutions)):
            for i_block in range(self.num_res_blocks+1):
                h = self.up[i_level].block[i_block](h, temb)
                if len(self.up[i_level].attn) > 0:
                    h = self.up[i_level].attn[i_block](h)
            if i_level != 0:
                h = self.up[i_level].upsample(h)
        # end
        if self.give_pre_end:
            return h
        h = self.norm_out(h)
        h = nonlinearity(h)
        h = self.conv_out(h)
        return h
class VUNet(nn.Module):
    """U-Net that fuses an image-space conditioning ``x`` (consumed by the
    downsampling path) with a latent ``z`` (injected at the bottleneck via a
    1x1 conv and channel concatenation), then decodes back to image space
    using skip connections from the downsampling path."""
    def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks,
                 attn_resolutions, dropout=0.0, resamp_with_conv=True,
                 in_channels, c_channels,
                 resolution, z_channels, use_timestep=False, **ignore_kwargs):
        super().__init__()
        self.ch = ch
        self.temb_ch = self.ch*4
        self.num_resolutions = len(ch_mult)
        self.num_res_blocks = num_res_blocks
        self.resolution = resolution
        self.use_timestep = use_timestep
        if self.use_timestep:
            # timestep embedding: two-layer MLP on sinusoidal features
            self.temb = nn.Module()
            self.temb.dense = nn.ModuleList([
                torch.nn.Linear(self.ch,
                                self.temb_ch),
                torch.nn.Linear(self.temb_ch,
                                self.temb_ch),
            ])
        # downsampling path consumes the conditioning (c_channels), not z
        self.conv_in = torch.nn.Conv2d(c_channels,
                                       self.ch,
                                       kernel_size=3,
                                       stride=1,
                                       padding=1)
        curr_res = resolution
        # channel multiplier per level; prepend 1 for the input stem
        in_ch_mult = (1,)+tuple(ch_mult)
        self.down = nn.ModuleList()
        for i_level in range(self.num_resolutions):
            block = nn.ModuleList()
            attn = nn.ModuleList()
            block_in = ch*in_ch_mult[i_level]
            block_out = ch*ch_mult[i_level]
            for i_block in range(self.num_res_blocks):
                block.append(ResnetBlock(in_channels=block_in,
                                         out_channels=block_out,
                                         temb_channels=self.temb_ch,
                                         dropout=dropout))
                block_in = block_out
                if curr_res in attn_resolutions:
                    attn.append(AttnBlock(block_in))
            down = nn.Module()
            down.block = block
            down.attn = attn
            if i_level != self.num_resolutions-1:
                down.downsample = Downsample(block_in, resamp_with_conv)
                curr_res = curr_res // 2
            self.down.append(down)
        # 1x1 projection bringing the latent z into the bottleneck width
        self.z_in = torch.nn.Conv2d(z_channels,
                                    block_in,
                                    kernel_size=1,
                                    stride=1,
                                    padding=0)
        # middle: block_1 takes the concat of features and projected z
        self.mid = nn.Module()
        self.mid.block_1 = ResnetBlock(in_channels=2*block_in,
                                       out_channels=block_in,
                                       temb_channels=self.temb_ch,
                                       dropout=dropout)
        self.mid.attn_1 = AttnBlock(block_in)
        self.mid.block_2 = ResnetBlock(in_channels=block_in,
                                       out_channels=block_in,
                                       temb_channels=self.temb_ch,
                                       dropout=dropout)
        # upsampling
        self.up = nn.ModuleList()
        for i_level in reversed(range(self.num_resolutions)):
            block = nn.ModuleList()
            attn = nn.ModuleList()
            block_out = ch*ch_mult[i_level]
            # skip_in is the channel count of the concatenated skip activation
            skip_in = ch*ch_mult[i_level]
            for i_block in range(self.num_res_blocks+1):
                if i_block == self.num_res_blocks:
                    skip_in = ch*in_ch_mult[i_level]
                block.append(ResnetBlock(in_channels=block_in+skip_in,
                                         out_channels=block_out,
                                         temb_channels=self.temb_ch,
                                         dropout=dropout))
                block_in = block_out
                if curr_res in attn_resolutions:
                    attn.append(AttnBlock(block_in))
            up = nn.Module()
            up.block = block
            up.attn = attn
            if i_level != 0:
                up.upsample = Upsample(block_in, resamp_with_conv)
                curr_res = curr_res * 2
            self.up.insert(0, up) # prepend to get consistent order
        # end
        self.norm_out = Normalize(block_in)
        self.conv_out = torch.nn.Conv2d(block_in,
                                        out_ch,
                                        kernel_size=3,
                                        stride=1,
                                        padding=1)
    def forward(self, x, z, t=None):
        """x: (B, c_channels, H, W) conditioning; z: latent at the bottleneck
        resolution; t: optional 1-D timestep tensor, required when
        use_timestep=True. Returns a (B, out_ch, H, W) tensor."""
        #assert x.shape[2] == x.shape[3] == self.resolution
        if self.use_timestep:
            # timestep embedding
            # BUGFIX: ``t`` was referenced here but never accepted as an
            # argument, so this branch always raised NameError; it is now an
            # optional keyword argument (backward compatible for callers with
            # use_timestep=False).
            assert t is not None
            temb = get_timestep_embedding(t, self.ch)
            temb = self.temb.dense[0](temb)
            temb = nonlinearity(temb)
            temb = self.temb.dense[1](temb)
        else:
            temb = None
        # downsampling; hs collects activations for the skip connections
        hs = [self.conv_in(x)]
        for i_level in range(self.num_resolutions):
            for i_block in range(self.num_res_blocks):
                h = self.down[i_level].block[i_block](hs[-1], temb)
                if len(self.down[i_level].attn) > 0:
                    h = self.down[i_level].attn[i_block](h)
                hs.append(h)
            if i_level != self.num_resolutions-1:
                hs.append(self.down[i_level].downsample(hs[-1]))
        # middle: inject the latent by concatenation
        h = hs[-1]
        z = self.z_in(z)
        h = torch.cat((h,z),dim=1)
        h = self.mid.block_1(h, temb)
        h = self.mid.attn_1(h)
        h = self.mid.block_2(h, temb)
        # upsampling; each block consumes one skip activation via concat
        for i_level in reversed(range(self.num_resolutions)):
            for i_block in range(self.num_res_blocks+1):
                h = self.up[i_level].block[i_block](
                    torch.cat([h, hs.pop()], dim=1), temb)
                if len(self.up[i_level].attn) > 0:
                    h = self.up[i_level].attn[i_block](h)
            if i_level != 0:
                h = self.up[i_level].upsample(h)
        # end
        h = self.norm_out(h)
        h = nonlinearity(h)
        h = self.conv_out(h)
        return h
class SimpleDecoder(nn.Module):
    """Small decoder: 1x1 conv, three ResnetBlocks that first widen then
    narrow the channel count, a 1x1 conv back to ``in_channels``, a 2x
    upsample, and a final normalized 3x3 projection to ``out_channels``."""
    def __init__(self, in_channels, out_channels, *args, **kwargs):
        super().__init__()
        self.model = nn.ModuleList([
            nn.Conv2d(in_channels, in_channels, 1),
            ResnetBlock(in_channels=in_channels,
                        out_channels=2 * in_channels,
                        temb_channels=0, dropout=0.0),
            ResnetBlock(in_channels=2 * in_channels,
                        out_channels=4 * in_channels,
                        temb_channels=0, dropout=0.0),
            ResnetBlock(in_channels=4 * in_channels,
                        out_channels=2 * in_channels,
                        temb_channels=0, dropout=0.0),
            nn.Conv2d(2 * in_channels, in_channels, 1),
            Upsample(in_channels, with_conv=True),
        ])
        # final projection
        self.norm_out = Normalize(in_channels)
        self.conv_out = torch.nn.Conv2d(in_channels, out_channels,
                                        kernel_size=3, stride=1, padding=1)
    def forward(self, x):
        for idx, layer in enumerate(self.model):
            # the ResnetBlocks (positions 1-3) expect a timestep-embedding
            # argument, which is always None here
            x = layer(x, None) if idx in (1, 2, 3) else layer(x)
        h = nonlinearity(self.norm_out(x))
        return self.conv_out(h)
class UpsampleDecoder(nn.Module):
    """Decoder that alternates stacks of ResnetBlocks with 2x upsampling until
    the target resolution is reached, then projects to ``out_channels``."""
    def __init__(self, in_channels, out_channels, ch, num_res_blocks, resolution,
                 ch_mult=(2,2), dropout=0.0):
        super().__init__()
        self.temb_ch = 0  # no timestep conditioning
        self.num_resolutions = len(ch_mult)
        self.num_res_blocks = num_res_blocks
        self.res_blocks = nn.ModuleList()
        self.upsample_blocks = nn.ModuleList()
        block_in = in_channels
        curr_res = resolution // 2 ** (self.num_resolutions - 1)
        for i_level, mult in enumerate(ch_mult):
            stage = []
            block_out = ch * mult
            for _ in range(self.num_res_blocks + 1):
                stage.append(ResnetBlock(in_channels=block_in,
                                         out_channels=block_out,
                                         temb_channels=self.temb_ch,
                                         dropout=dropout))
                block_in = block_out
            self.res_blocks.append(nn.ModuleList(stage))
            # upsample between stages, but not after the last one
            if i_level != self.num_resolutions - 1:
                self.upsample_blocks.append(Upsample(block_in, True))
                curr_res = curr_res * 2
        # final projection
        self.norm_out = Normalize(block_in)
        self.conv_out = torch.nn.Conv2d(block_in, out_channels,
                                        kernel_size=3, stride=1, padding=1)
    def forward(self, x):
        h = x
        for level, stage in enumerate(self.res_blocks):
            for block in stage:
                h = block(h, None)
            if level != self.num_resolutions - 1:
                h = self.upsample_blocks[level](h)
        h = self.norm_out(h)
        h = nonlinearity(h)
        return self.conv_out(h)
| 30,221 | 37.895753 | 121 | py |
DaVinci | DaVinci-main/taming/modules/transformer/mingpt.py | """
taken from: https://github.com/karpathy/minGPT/
GPT model:
- the initial stem consists of a combination of token encoding and a positional encoding
- the meat of it is a uniform sequence of Transformer blocks
- each Transformer is a sequential combination of a 1-hidden-layer MLP block and a self-attention block
- all blocks feed into a central residual pathway similar to resnets
- the final decoder is a linear projection into a vanilla Softmax classifier
"""
import math
import logging
import torch
import torch.nn as nn
from torch.nn import functional as F
from transformers import top_k_top_p_filtering
logger = logging.getLogger(__name__)
class GPTConfig:
    """Base GPT configuration: hyper-parameters common to all GPT variants.

    Class attributes supply dropout defaults; any extra keyword arguments
    passed to the constructor are attached verbatim as instance attributes
    (e.g. n_layer, n_head, n_embd, n_unmasked).
    """
    embd_pdrop = 0.1
    resid_pdrop = 0.1
    attn_pdrop = 0.1

    def __init__(self, vocab_size, block_size, **kwargs):
        self.vocab_size = vocab_size
        self.block_size = block_size
        # attach every extra keyword as an instance attribute
        vars(self).update(kwargs)
class GPT1Config(GPTConfig):
    """ GPT-1 like network roughly 125M params """
    # Architecture defaults at GPT-1 scale; dropout rates inherited from GPTConfig.
    n_layer = 12
    n_head = 12
    n_embd = 768
class CausalSelfAttention(nn.Module):
    """
    A vanilla multi-head masked self-attention layer with a projection at the end.
    It is possible to use torch.nn.MultiheadAttention here but I am including an
    explicit implementation here to show that there is nothing too scary here.
    """
    def __init__(self, config):
        super().__init__()
        assert config.n_embd % config.n_head == 0
        # key, query, value projections for all heads
        self.key = nn.Linear(config.n_embd, config.n_embd)
        self.query = nn.Linear(config.n_embd, config.n_embd)
        self.value = nn.Linear(config.n_embd, config.n_embd)
        # regularization
        self.attn_drop = nn.Dropout(config.attn_pdrop)
        self.resid_drop = nn.Dropout(config.resid_pdrop)
        # output projection
        self.proj = nn.Linear(config.n_embd, config.n_embd)
        # causal mask to ensure that attention is only applied to the left in the input sequence
        mask = torch.tril(torch.ones(config.block_size,
                                     config.block_size))
        if hasattr(config, "n_unmasked"):
            # allow full (bidirectional) attention within the first n_unmasked
            # positions, e.g. a conditioning prefix
            mask[:config.n_unmasked, :config.n_unmasked] = 1
        self.register_buffer("mask", mask.view(1, 1, config.block_size, config.block_size))
        self.n_head = config.n_head
    def forward(self, x, layer_past=None):
        # x: (B, T, C); layer_past: optional cached (key, value) pair from a
        # previous call, used for incremental decoding.
        B, T, C = x.size()
        # calculate query, key, values for all heads in batch and move head forward to be the batch dim
        k = self.key(x).view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
        q = self.query(x).view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
        v = self.value(x).view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
        # cache of this step's keys/values, stacked before the past is merged in
        present = torch.stack((k, v))
        if layer_past is not None:
            past_key, past_value = layer_past
            k = torch.cat((past_key, k), dim=-2)
            v = torch.cat((past_value, v), dim=-2)
        # causal self-attention; Self-attend: (B, nh, T, hs) x (B, nh, hs, T) -> (B, nh, T, T)
        att = (q @ k.transpose(-2, -1)) * (1.0 / math.sqrt(k.size(-1)))
        if layer_past is None:
            # no mask needed when decoding with a cache: the query may attend
            # to every cached (strictly past) position
            att = att.masked_fill(self.mask[:,:,:T,:T] == 0, float('-inf'))
        att = F.softmax(att, dim=-1)
        att = self.attn_drop(att)
        y = att @ v # (B, nh, T, T) x (B, nh, T, hs) -> (B, nh, T, hs)
        y = y.transpose(1, 2).contiguous().view(B, T, C) # re-assemble all head outputs side by side
        # output projection
        y = self.resid_drop(self.proj(y))
        return y, present   # TODO: check that this does not break anything
class Block(nn.Module):
    """Standard pre-norm Transformer block: causal self-attention followed by
    a 4x-expansion MLP, each wrapped in a residual connection."""

    def __init__(self, config):
        super().__init__()
        self.ln1 = nn.LayerNorm(config.n_embd)
        self.ln2 = nn.LayerNorm(config.n_embd)
        self.attn = CausalSelfAttention(config)
        hidden = 4 * config.n_embd
        self.mlp = nn.Sequential(
            nn.Linear(config.n_embd, hidden),
            nn.GELU(),
            nn.Linear(hidden, config.n_embd),
            nn.Dropout(config.resid_pdrop),
        )

    def forward(self, x, layer_past=None, return_present=False):
        if return_present:
            # key/value caching is an inference-time feature only
            assert not self.training
        # layer_past: tuple of length two with B, nh, T, hs
        attn_out, present = self.attn(self.ln1(x), layer_past=layer_past)
        x = x + attn_out
        x = x + self.mlp(self.ln2(x))
        if layer_past is not None or return_present:
            return x, present
        return x
class GPT(nn.Module):
    """  the full GPT language model, with a context size of block_size """
    def __init__(self, vocab_size, block_size, n_layer=12, n_head=8, n_embd=256,
                 embd_pdrop=0., resid_pdrop=0., attn_pdrop=0., n_unmasked=0):
        super().__init__()
        config = GPTConfig(vocab_size=vocab_size, block_size=block_size,
                           embd_pdrop=embd_pdrop, resid_pdrop=resid_pdrop, attn_pdrop=attn_pdrop,
                           n_layer=n_layer, n_head=n_head, n_embd=n_embd,
                           n_unmasked=n_unmasked)
        # input embedding stem
        self.tok_emb = nn.Embedding(config.vocab_size, config.n_embd)
        self.pos_emb = nn.Parameter(torch.zeros(1, config.block_size, config.n_embd))
        self.drop = nn.Dropout(config.embd_pdrop)
        # transformer
        self.blocks = nn.Sequential(*[Block(config) for _ in range(config.n_layer)])
        # decoder head
        self.ln_f = nn.LayerNorm(config.n_embd)
        self.head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
        self.block_size = config.block_size
        self.apply(self._init_weights)
        self.config = config
        logger.info("number of parameters: %e", sum(p.numel() for p in self.parameters()))
    def get_block_size(self):
        # maximum sequence length the positional embedding supports
        return self.block_size
    def _init_weights(self, module):
        # minGPT init: N(0, 0.02) for linear/embedding weights, zero biases,
        # identity LayerNorm
        if isinstance(module, (nn.Linear, nn.Embedding)):
            module.weight.data.normal_(mean=0.0, std=0.02)
            if isinstance(module, nn.Linear) and module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
    def forward(self, idx, embeddings=None, targets=None):
        # forward the GPT model
        token_embeddings = self.tok_emb(idx) # each index maps to a (learnable) vector
        if embeddings is not None: # prepend explicit embeddings
            token_embeddings = torch.cat((embeddings, token_embeddings), dim=1)
        t = token_embeddings.shape[1]
        assert t <= self.block_size, "Cannot forward, model block size is exhausted."
        position_embeddings = self.pos_emb[:, :t, :] # each position maps to a (learnable) vector
        x = self.drop(token_embeddings + position_embeddings)
        x = self.blocks(x)
        x = self.ln_f(x)
        logits = self.head(x)
        # if we are given some desired targets also calculate the loss
        loss = None
        if targets is not None:
            loss = F.cross_entropy(logits.view(-1, logits.size(-1)), targets.view(-1))
        return logits, loss
    def forward_with_past(self, idx, embeddings=None, targets=None, past=None, past_length=None):
        # inference only: incremental decoding with a per-layer key/value cache
        assert not self.training
        token_embeddings = self.tok_emb(idx)    # each index maps to a (learnable) vector
        if embeddings is not None:              # prepend explicit embeddings
            token_embeddings = torch.cat((embeddings, token_embeddings), dim=1)
        if past is not None:
            assert past_length is not None
            past = torch.cat(past, dim=-2)   # n_layer, 2, b, nh, len_past, dim_head
            past_shape = list(past.shape)
            expected_shape = [self.config.n_layer, 2, idx.shape[0], self.config.n_head, past_length, self.config.n_embd//self.config.n_head]
            assert past_shape == expected_shape, f"{past_shape} =/= {expected_shape}"
            # integer index selects only the position currently being decoded
            position_embeddings = self.pos_emb[:, past_length, :]  # each position maps to a (learnable) vector
        else:
            position_embeddings = self.pos_emb[:, :token_embeddings.shape[1], :]
        x = self.drop(token_embeddings + position_embeddings)
        presents = []  # accumulate over layers
        for i, block in enumerate(self.blocks):
            x, present = block(x, layer_past=past[i, ...] if past is not None else None, return_present=True)
            presents.append(present)
        x = self.ln_f(x)
        logits = self.head(x)
        # if we are given some desired targets also calculate the loss
        loss = None
        if targets is not None:
            loss = F.cross_entropy(logits.view(-1, logits.size(-1)), targets.view(-1))
        return logits, loss, torch.stack(presents)  # _, _, n_layer, 2, b, nh, 1, dim_head
class DummyGPT(nn.Module):
    """Debugging stand-in for GPT: returns the input shifted by a constant,
    mimicking GPT's (logits, loss) return signature with loss=None."""

    def __init__(self, add_value=1):
        super().__init__()
        self.add_value = add_value

    def forward(self, idx):
        shifted = self.add_value + idx
        return shifted, None
class CodeGPT(nn.Module):
    """GPT variant whose inputs are continuous features ("semi-embeddings"):
    the token-embedding lookup is replaced by a linear projection from
    ``in_channels`` to the model width.

    The interface mirrors GPT: ``forward(idx, embeddings=None, targets=None)``
    returns ``(logits, loss)`` with ``loss=None`` when no targets are given.
    """
    def __init__(self, vocab_size, block_size, in_channels, n_layer=12, n_head=8, n_embd=256,
                 embd_pdrop=0., resid_pdrop=0., attn_pdrop=0., n_unmasked=0):
        super().__init__()
        config = GPTConfig(vocab_size=vocab_size, block_size=block_size,
                           embd_pdrop=embd_pdrop, resid_pdrop=resid_pdrop, attn_pdrop=attn_pdrop,
                           n_layer=n_layer, n_head=n_head, n_embd=n_embd,
                           n_unmasked=n_unmasked)
        # input embedding stem: a linear projection instead of an nn.Embedding
        self.tok_emb = nn.Linear(in_channels, config.n_embd)
        self.pos_emb = nn.Parameter(torch.zeros(1, config.block_size, config.n_embd))
        self.drop = nn.Dropout(config.embd_pdrop)
        # transformer
        self.blocks = nn.Sequential(*[Block(config) for _ in range(config.n_layer)])
        # decoder head
        self.ln_f = nn.LayerNorm(config.n_embd)
        self.head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
        self.block_size = config.block_size
        self.apply(self._init_weights)
        self.config = config
        logger.info("number of parameters: %e", sum(p.numel() for p in self.parameters()))
    def get_block_size(self):
        return self.block_size
    def _init_weights(self, module):
        # minGPT init: N(0, 0.02) weights, zero biases, identity LayerNorm
        if isinstance(module, (nn.Linear, nn.Embedding)):
            module.weight.data.normal_(mean=0.0, std=0.02)
            if isinstance(module, nn.Linear) and module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
    def forward(self, idx, embeddings=None, targets=None):
        # forward the GPT model
        token_embeddings = self.tok_emb(idx)  # project input features to model width
        if embeddings is not None: # prepend explicit embeddings
            token_embeddings = torch.cat((embeddings, token_embeddings), dim=1)
        t = token_embeddings.shape[1]
        assert t <= self.block_size, "Cannot forward, model block size is exhausted."
        position_embeddings = self.pos_emb[:, :t, :] # each position maps to a (learnable) vector
        x = self.drop(token_embeddings + position_embeddings)
        x = self.blocks(x)
        # BUG FIX: was `x = self.taming_cinln_f(x)` -- that attribute does not
        # exist (AttributeError on every forward); the final LayerNorm is
        # registered above as `ln_f`, matching GPT.forward.
        x = self.ln_f(x)
        logits = self.head(x)
        # if we are given some desired targets also calculate the loss
        loss = None
        if targets is not None:
            loss = F.cross_entropy(logits.view(-1, logits.size(-1)), targets.view(-1))
        return logits, loss
#### sampling utils
def top_k_logits(logits, k):
    """Return a copy of `logits` with all but the k largest entries per row
    replaced by -inf (rows are dim 0); the input is left untouched."""
    topv, _ = torch.topk(logits, k)
    kth_best = topv[:, [-1]]  # per-row k-th largest, kept 2-D for broadcasting
    result = logits.clone()
    result[result < kth_best] = float('-inf')
    return result
@torch.no_grad()
def sample(model, x, steps, temperature=1.0, sample=False, top_k=None):
    """
    take a conditioning sequence of indices in x (of shape (b,t)) and predict the next token in
    the sequence, feeding the predictions back into the model each time. Clearly the sampling
    has quadratic complexity unlike an RNN that is only linear, and has a finite context window
    of block_size, unlike an RNN that has an infinite context window.
    """
    # NOTE: the returned sequence includes the original conditioning prefix.
    block_size = model.get_block_size()
    model.eval()
    for k in range(steps):
        x_cond = x if x.size(1) <= block_size else x[:, -block_size:] # crop context if needed
        logits, _ = model(x_cond)
        # pluck the logits at the final step and scale by temperature
        logits = logits[:, -1, :] / temperature
        # optionally crop probabilities to only the top k options
        if top_k is not None:
            logits = top_k_logits(logits, top_k)
        # apply softmax to convert to probabilities
        probs = F.softmax(logits, dim=-1)
        # sample from the distribution or take the most likely
        if sample:
            ix = torch.multinomial(probs, num_samples=1)
        else:
            _, ix = torch.topk(probs, k=1, dim=-1)
        # append to the sequence and continue
        x = torch.cat((x, ix), dim=1)
    return x
@torch.no_grad()
def sample_with_past(x, model, steps, temperature=1., sample_logits=True,
                     top_k=None, top_p=None, callback=None):
    # x is conditioning
    # Incremental decoding: after the first step only the newly sampled token
    # is fed to the model, together with the accumulated key/value cache
    # returned by model.forward_with_past.
    sample = x
    cond_len = x.shape[1]
    past = None
    for n in range(steps):
        if callback is not None:
            callback(n)
        logits, _, present = model.forward_with_past(x, past=past, past_length=(n+cond_len-1))
        if past is None:
            past = [present]
        else:
            past.append(present)
        logits = logits[:, -1, :] / temperature
        if top_k is not None:
            logits = top_k_top_p_filtering(logits, top_k=top_k, top_p=top_p)
        probs = F.softmax(logits, dim=-1)
        if not sample_logits:
            _, x = torch.topk(probs, k=1, dim=-1)
        else:
            x = torch.multinomial(probs, num_samples=1)
        # append to the sequence and continue
        sample = torch.cat((sample, x), dim=1)
    del past
    sample = sample[:, cond_len:]  # cut conditioning off
    return sample
#### clustering utils
class KMeans(nn.Module):
    """K-means codebook over per-pixel feature vectors.

    `initialize` fits `ncluster` centroids with Lloyd's algorithm; `forward`
    maps pixels to nearest-centroid indices (encode) or indices back to
    centroid vectors (decode, reverse=True).
    """
    def __init__(self, ncluster=512, nc=3, niter=10):
        super().__init__()
        self.ncluster = ncluster
        self.nc = nc
        self.niter = niter
        self.shape = (3,32,32)  # default decode shape when none is given
        self.register_buffer("C", torch.zeros(self.ncluster,nc))
        self.register_buffer('initialized', torch.tensor(0, dtype=torch.uint8))
    def is_initialized(self):
        return self.initialized.item() == 1
    @torch.no_grad()
    def initialize(self, x):
        # x: (N, nc) flat collection of vectors to cluster
        N, D = x.shape
        assert D == self.nc, D
        c = x[torch.randperm(N)[:self.ncluster]] # init clusters at random
        for i in range(self.niter):
            # assign all pixels to the closest codebook element
            a = ((x[:, None, :] - c[None, :, :])**2).sum(-1).argmin(1)
            # move each codebook element to be the mean of the pixels that assigned to it
            c = torch.stack([x[a==k].mean(0) for k in range(self.ncluster)])
            # re-assign any poorly positioned codebook elements
            nanix = torch.any(torch.isnan(c), dim=1)
            ndead = nanix.sum().item()
            print('done step %d/%d, re-initialized %d dead clusters' % (i+1, self.niter, ndead))
            c[nanix] = x[torch.randperm(N)[:ndead]] # re-init dead clusters
        self.C.copy_(c)
        self.initialized.fill_(1)
    def forward(self, x, reverse=False, shape=None):
        if not reverse:
            # encode: (bs, c, h, w) -> (bs, h*w) nearest-centroid indices
            bs,c,h,w = x.shape
            assert c == self.nc
            x = x.reshape(bs,c,h*w,1)
            C = self.C.permute(1,0)
            C = C.reshape(1,c,1,self.ncluster)
            a = ((x-C)**2).sum(1).argmin(-1) # bs, h*w indices
            return a
        else:
            # decode: (bs, HW) indices -> (bs, *shape) centroid vectors
            bs, HW = x.shape
            """
            c = self.C.reshape( 1, self.nc, 1, self.ncluster)
            c = c[bs*[0],:,:,:]
            c = c[:,:,HW*[0],:]
            x = x.reshape(bs, 1, HW, 1)
            x = x[:,3*[0],:,:]
            x = torch.gather(c, dim=3, index=x)
            """
            # simple index lookup replaces the gather-based variant above
            x = self.C[x]
            x = x.permute(0,2,1)
            shape = shape if shape is not None else self.shape
            x = x.reshape(bs, *shape)
            return x
| 16,836 | 39.473558 | 140 | py |
DaVinci | DaVinci-main/taming/modules/transformer/permuter.py | import torch
import torch.nn as nn
import numpy as np
class AbstractPermuter(nn.Module):
    """Interface for sequence permuters; subclasses must implement forward(),
    which maps x (batch, positions) through a fixed permutation (and its
    inverse when reverse=True)."""

    def __init__(self, *args, **kwargs):
        # extra arguments are accepted for subclass signature compatibility
        super().__init__()

    def forward(self, x, reverse=False):
        raise NotImplementedError
class Identity(AbstractPermuter):
    """No-op permuter: both directions return the input unchanged."""

    def __init__(self):
        super().__init__()

    def forward(self, x, reverse=False):
        # forward and reverse are both the identity mapping
        return x
class Subsample(AbstractPermuter):
    """Hierarchical subsampling order: the H x W index grid is recursively
    regrouped into 2x2 cells until a 1x1 grid remains.
    Requires H and W to be powers of two (asserted via the halving loop)."""
    def __init__(self, H, W):
        super().__init__()
        C = 1
        indices = np.arange(H*W).reshape(C,H,W)
        while min(H, W) > 1:
            # split each axis into (coarse, fine) pairs and move the fine
            # (within-cell) offsets into the channel dimension
            indices = indices.reshape(C,H//2,2,W//2,2)
            indices = indices.transpose(0,2,4,1,3)
            indices = indices.reshape(C*4,H//2, W//2)
            H = H//2
            W = W//2
            C = C*4
        assert H == W == 1
        idx = torch.tensor(indices.ravel())
        # NOTE(review): wrapping the tensor in nn.Parameter(requires_grad=False)
        # inside register_buffer is unusual; a plain tensor buffer would
        # behave the same, but this is kept for state_dict compatibility.
        self.register_buffer('forward_shuffle_idx',
                             nn.Parameter(idx, requires_grad=False))
        self.register_buffer('backward_shuffle_idx',
                             nn.Parameter(torch.argsort(idx), requires_grad=False))
    def forward(self, x, reverse=False):
        if not reverse:
            return x[:, self.forward_shuffle_idx]
        else:
            return x[:, self.backward_shuffle_idx]
def mortonify(i, j):
    """Interleave the bits of (i, j) into a linear Morton (Z-order) code.

    Bit ``pos`` of ``j`` lands at output position ``2*pos`` and bit ``pos``
    of ``i`` at position ``2*pos + 1``, i.e. ``i`` occupies the odd bits.
    Returns a numpy unsigned integer.
    """
    i = np.uint64(i)
    j = np.uint64(j)
    z = np.uint(0)
    one = np.uint64(1)
    for pos in range(32):
        p = np.uint64(pos)
        j_bit = (j & (one << p)) << p
        i_bit = (i & (one << p)) << np.uint64(pos + 1)
        z = z | j_bit | i_bit
    return z
class ZCurve(AbstractPermuter):
    """Z-order (Morton curve) scan of the H x W grid."""
    def __init__(self, H, W):
        super().__init__()
        # reverseidx[p] = Morton code of flat position p. When the codes form
        # a permutation of 0..H*W-1, reverseidx is exactly the inverse of
        # idx = argsort(reverseidx), so it can serve as the backward index.
        reverseidx = [np.int64(mortonify(i,j)) for i in range(H) for j in range(W)]
        idx = np.argsort(reverseidx)
        idx = torch.tensor(idx)
        reverseidx = torch.tensor(reverseidx)
        self.register_buffer('forward_shuffle_idx',
                             idx)
        self.register_buffer('backward_shuffle_idx',
                             reverseidx)
    def forward(self, x, reverse=False):
        if not reverse:
            return x[:, self.forward_shuffle_idx]
        else:
            return x[:, self.backward_shuffle_idx]
class SpiralOut(AbstractPermuter):
    """Square-grid scan that starts near the center and spirals outwards.
    Requires H == W; the walk visits all size*size cells exactly once."""
    def __init__(self, H, W):
        super().__init__()
        assert H == W
        size = W
        indices = np.arange(size*size).reshape(size,size)
        # start just left of the center cell
        i0 = size//2
        j0 = size//2-1
        i = i0
        j = j0
        idx = [indices[i0, j0]]
        step_mult = 0
        for c in range(1, size//2+1):
            step_mult += 1
            # steps left
            for k in range(step_mult):
                i = i - 1
                j = j
                idx.append(indices[i, j])
            # step down
            for k in range(step_mult):
                i = i
                j = j + 1
                idx.append(indices[i, j])
            step_mult += 1
            if c < size//2:
                # step right
                for k in range(step_mult):
                    i = i + 1
                    j = j
                    idx.append(indices[i, j])
                # step up
                for k in range(step_mult):
                    i = i
                    j = j - 1
                    idx.append(indices[i, j])
            else:
                # end reached: final partial edge closes the spiral
                for k in range(step_mult-1):
                    i = i + 1
                    idx.append(indices[i, j])
        assert len(idx) == size*size
        idx = torch.tensor(idx)
        self.register_buffer('forward_shuffle_idx', idx)
        self.register_buffer('backward_shuffle_idx', torch.argsort(idx))
    def forward(self, x, reverse=False):
        if not reverse:
            return x[:, self.forward_shuffle_idx]
        else:
            return x[:, self.backward_shuffle_idx]
class SpiralIn(AbstractPermuter):
    """Square-grid scan that spirals inwards: the SpiralOut walk reversed,
    so the scan ends near the grid center. Requires H == W."""
    def __init__(self, H, W):
        super().__init__()
        assert H == W
        size = W
        indices = np.arange(size*size).reshape(size,size)
        # same outward walk as SpiralOut; the index list is reversed below
        i0 = size//2
        j0 = size//2-1
        i = i0
        j = j0
        idx = [indices[i0, j0]]
        step_mult = 0
        for c in range(1, size//2+1):
            step_mult += 1
            # steps left
            for k in range(step_mult):
                i = i - 1
                j = j
                idx.append(indices[i, j])
            # step down
            for k in range(step_mult):
                i = i
                j = j + 1
                idx.append(indices[i, j])
            step_mult += 1
            if c < size//2:
                # step right
                for k in range(step_mult):
                    i = i + 1
                    j = j
                    idx.append(indices[i, j])
                # step up
                for k in range(step_mult):
                    i = i
                    j = j - 1
                    idx.append(indices[i, j])
            else:
                # end reached
                for k in range(step_mult-1):
                    i = i + 1
                    idx.append(indices[i, j])
        assert len(idx) == size*size
        # reverse the outward walk to obtain the inward scan
        idx = idx[::-1]
        idx = torch.tensor(idx)
        self.register_buffer('forward_shuffle_idx', idx)
        self.register_buffer('backward_shuffle_idx', torch.argsort(idx))
    def forward(self, x, reverse=False):
        if not reverse:
            return x[:, self.forward_shuffle_idx]
        else:
            return x[:, self.backward_shuffle_idx]
class Random(nn.Module):
    """Fixed pseudo-random permutation of the H*W positions (seed 1, so the
    same permutation is rebuilt on every construction)."""

    def __init__(self, H, W):
        super().__init__()
        perm = np.random.RandomState(1).permutation(H * W)
        idx = torch.tensor(perm.ravel())
        self.register_buffer('forward_shuffle_idx', idx)
        self.register_buffer('backward_shuffle_idx', torch.argsort(idx))

    def forward(self, x, reverse=False):
        order = self.backward_shuffle_idx if reverse else self.forward_shuffle_idx
        return x[:, order]
class AlternateParsing(AbstractPermuter):
    """Boustrophedon ("snake") scan: even rows are read left-to-right and
    odd rows right-to-left."""

    def __init__(self, H, W):
        super().__init__()
        grid = np.arange(W*H).reshape(H, W)
        # flip every odd row in place
        grid[1::2, :] = grid[1::2, ::-1]
        idx = grid.flatten()
        assert len(idx) == H*W
        idx = torch.tensor(idx)
        self.register_buffer('forward_shuffle_idx', idx)
        self.register_buffer('backward_shuffle_idx', torch.argsort(idx))

    def forward(self, x, reverse=False):
        if reverse:
            return x[:, self.backward_shuffle_idx]
        return x[:, self.forward_shuffle_idx]
if __name__ == "__main__":
    # quick sanity checks for the permuters
    p0 = AlternateParsing(16, 16)
    print(p0.forward_shuffle_idx)
    print(p0.backward_shuffle_idx)
    # round trip: applying the permutation then its inverse must be a no-op
    x = torch.randint(0, 768, size=(11, 256))
    y = p0(x)
    xre = p0(y, reverse=True)
    assert torch.equal(x, xre)
    p1 = SpiralOut(2, 2)
    print(p1.forward_shuffle_idx)
    print(p1.backward_shuffle_idx)
| 7,093 | 27.48996 | 83 | py |
DaVinci | DaVinci-main/taming/modules/losses/lpips.py | """Stripped version of https://github.com/richzhang/PerceptualSimilarity/tree/master/models"""
import torch
import torch.nn as nn
from torchvision import models
from collections import namedtuple
from taming.util import get_ckpt_path
class LPIPS(nn.Module):
    """Learned Perceptual Image Patch Similarity metric.

    Compares two images via unit-normalized VGG16 feature differences,
    weighted by pretrained 1x1 "linear" layers, spatially averaged and summed
    over the five feature stages. All parameters are frozen on construction.
    """
    def __init__(self, use_dropout=True):
        super().__init__()
        self.scaling_layer = ScalingLayer()
        self.chns = [64, 128, 256, 512, 512]  # vg16 features
        self.net = vgg16(pretrained=True, requires_grad=False)
        # one learned 1x1 weighting per VGG stage
        self.lin0 = NetLinLayer(self.chns[0], use_dropout=use_dropout)
        self.lin1 = NetLinLayer(self.chns[1], use_dropout=use_dropout)
        self.lin2 = NetLinLayer(self.chns[2], use_dropout=use_dropout)
        self.lin3 = NetLinLayer(self.chns[3], use_dropout=use_dropout)
        self.lin4 = NetLinLayer(self.chns[4], use_dropout=use_dropout)
        self.load_from_pretrained()
        for param in self.parameters():
            param.requires_grad = False

    def load_from_pretrained(self, name="vgg_lpips"):
        """Load the pretrained LPIPS weighting checkpoint into this module."""
        ckpt = get_ckpt_path(name, "taming/modules/autoencoder/lpips")
        self.load_state_dict(torch.load(ckpt, map_location=torch.device("cpu")), strict=False)
        print("loaded pretrained LPIPS loss from {}".format(ckpt))

    @classmethod
    def from_pretrained(cls, name="vgg_lpips"):
        """Construct an LPIPS instance and load the named checkpoint."""
        # BUG FIX: was `if name is not "vgg_lpips"` -- identity comparison with
        # a string literal is unreliable (and a SyntaxWarning on modern
        # CPython); equality is the intended check.
        if name != "vgg_lpips":
            raise NotImplementedError
        model = cls()
        ckpt = get_ckpt_path(name)
        model.load_state_dict(torch.load(ckpt, map_location=torch.device("cpu")), strict=False)
        return model

    def forward(self, input, target):
        """Return the LPIPS distance between `input` and `target` (N,C,H,W)."""
        in0_input, in1_input = (self.scaling_layer(input), self.scaling_layer(target))
        outs0, outs1 = self.net(in0_input), self.net(in1_input)
        feats0, feats1, diffs = {}, {}, {}
        lins = [self.lin0, self.lin1, self.lin2, self.lin3, self.lin4]
        for kk in range(len(self.chns)):
            # unit-normalize each channel vector before differencing
            feats0[kk], feats1[kk] = normalize_tensor(outs0[kk]), normalize_tensor(outs1[kk])
            diffs[kk] = (feats0[kk] - feats1[kk]) ** 2
        # weighted spatial average per stage, then sum over stages
        res = [spatial_average(lins[kk].model(diffs[kk]), keepdim=True) for kk in range(len(self.chns))]
        val = res[0]
        for l in range(1, len(self.chns)):
            val += res[l]
        return val
class ScalingLayer(nn.Module):
    """Per-channel affine input normalization used by LPIPS:
    out = (x - shift) / scale, broadcast over N, H, W."""

    def __init__(self):
        super(ScalingLayer, self).__init__()
        shift = torch.Tensor([-.030, -.088, -.188])[None, :, None, None]
        scale = torch.Tensor([.458, .448, .450])[None, :, None, None]
        self.register_buffer('shift', shift)
        self.register_buffer('scale', scale)

    def forward(self, inp):
        return (inp - self.shift) / self.scale
class NetLinLayer(nn.Module):
    """ A single linear layer which does a 1x1 conv """

    def __init__(self, chn_in, chn_out=1, use_dropout=False):
        super(NetLinLayer, self).__init__()
        layers = []
        if use_dropout:
            layers.append(nn.Dropout())
        # bias-free 1x1 conv acts as a learned per-channel weighting
        layers.append(nn.Conv2d(chn_in, chn_out, 1, stride=1, padding=0, bias=False))
        self.model = nn.Sequential(*layers)
class vgg16(torch.nn.Module):
    """VGG16 feature extractor returning the activations after the
    relu1_2, relu2_2, relu3_3, relu4_3 and relu5_3 stages.

    Slices the torchvision pretrained feature stack into five sequential
    stages; with requires_grad=False (the default) all weights are frozen.
    """
    def __init__(self, requires_grad=False, pretrained=True):
        super(vgg16, self).__init__()
        vgg_pretrained_features = models.vgg16(pretrained=pretrained).features
        self.slice1 = torch.nn.Sequential()
        self.slice2 = torch.nn.Sequential()
        self.slice3 = torch.nn.Sequential()
        self.slice4 = torch.nn.Sequential()
        self.slice5 = torch.nn.Sequential()
        self.N_slices = 5
        # layer index ranges delimit the five relu stages of VGG16
        for x in range(4):
            self.slice1.add_module(str(x), vgg_pretrained_features[x])
        for x in range(4, 9):
            self.slice2.add_module(str(x), vgg_pretrained_features[x])
        for x in range(9, 16):
            self.slice3.add_module(str(x), vgg_pretrained_features[x])
        for x in range(16, 23):
            self.slice4.add_module(str(x), vgg_pretrained_features[x])
        for x in range(23, 30):
            self.slice5.add_module(str(x), vgg_pretrained_features[x])
        if not requires_grad:
            for param in self.parameters():
                param.requires_grad = False
    def forward(self, X):
        # run the stages sequentially, keeping each intermediate activation
        h = self.slice1(X)
        h_relu1_2 = h
        h = self.slice2(h)
        h_relu2_2 = h
        h = self.slice3(h)
        h_relu3_3 = h
        h = self.slice4(h)
        h_relu4_3 = h
        h = self.slice5(h)
        h_relu5_3 = h
        vgg_outputs = namedtuple("VggOutputs", ['relu1_2', 'relu2_2', 'relu3_3', 'relu4_3', 'relu5_3'])
        out = vgg_outputs(h_relu1_2, h_relu2_2, h_relu3_3, h_relu4_3, h_relu5_3)
        return out
def normalize_tensor(x, eps=1e-10):
    """Scale x to unit L2 norm along the channel dimension (dim=1);
    eps guards against division by zero."""
    norm = x.pow(2).sum(dim=1, keepdim=True).sqrt()
    return x / (norm + eps)
def spatial_average(x, keepdim=True):
    """Mean over the spatial dimensions (dims 2 and 3) of an NCHW tensor."""
    return x.mean(dim=[2, 3], keepdim=keepdim)
| 4,836 | 38.008065 | 104 | py |
DaVinci | DaVinci-main/taming/modules/losses/segmentation.py | import torch.nn as nn
import torch.nn.functional as F
class BCELoss(nn.Module):
    """Binary cross-entropy on logits, returned together with an (empty)
    log dictionary to match the other loss modules' interface."""

    def forward(self, prediction, target):
        bce = F.binary_cross_entropy_with_logits(prediction, target)
        return bce, {}
class BCELossWithQuant(nn.Module):
    """BCE-with-logits reconstruction loss plus a weighted codebook
    (quantization) term; returns (total_loss, log_dict)."""

    def __init__(self, codebook_weight=1.):
        super().__init__()
        self.codebook_weight = codebook_weight

    def forward(self, qloss, target, prediction, split):
        bce_loss = F.binary_cross_entropy_with_logits(prediction, target)
        total = bce_loss + self.codebook_weight * qloss
        log = {
            "{}/total_loss".format(split): total.clone().detach().mean(),
            "{}/bce_loss".format(split): bce_loss.detach().mean(),
            "{}/quant_loss".format(split): qloss.detach().mean(),
        }
        return total, log
| 816 | 34.521739 | 82 | py |
DaVinci | DaVinci-main/taming/modules/losses/vqperceptual.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from taming.modules.losses.lpips import LPIPS
from taming.modules.discriminator.model import NLayerDiscriminator, weights_init
class DummyLoss(nn.Module):
    """Placeholder loss module: carries no parameters and adds no behavior."""

    def __init__(self):
        super().__init__()
def adopt_weight(weight, global_step, threshold=0, value=0.):
    """Return `weight` once `global_step` has reached `threshold`,
    otherwise `value` (used to keep the adversarial term off during warm-up)."""
    return weight if global_step >= threshold else value
def hinge_d_loss(logits_real, logits_fake):
    """Hinge discriminator loss:
    0.5 * (mean(relu(1 - real)) + mean(relu(1 + fake)))."""
    real_term = F.relu(1. - logits_real).mean()
    fake_term = F.relu(1. + logits_fake).mean()
    return 0.5 * (real_term + fake_term)
def vanilla_d_loss(logits_real, logits_fake):
    """Vanilla (softplus / non-saturating) discriminator loss:
    0.5 * (mean(softplus(-real)) + mean(softplus(fake)))."""
    real_term = torch.mean(F.softplus(-logits_real))
    fake_term = torch.mean(F.softplus(logits_fake))
    return 0.5 * (real_term + fake_term)
class VQLPIPSWithDiscriminator(nn.Module):
    """VQGAN training loss: pixel + LPIPS reconstruction terms, a codebook
    term, and an adversarial term with an adaptively weighted generator loss.

    forward() serves both optimizers of the GAN setup:
    optimizer_idx == 0 computes the autoencoder/generator loss,
    optimizer_idx == 1 the discriminator loss.
    """
    def __init__(self, disc_start, codebook_weight=1.0, pixelloss_weight=1.0,
                 disc_num_layers=3, disc_in_channels=3, disc_factor=1.0, disc_weight=1.0,
                 perceptual_weight=1.0, use_actnorm=False, disc_conditional=False,
                 disc_ndf=64, disc_loss="hinge"):
        super().__init__()
        assert disc_loss in ["hinge", "vanilla"]
        self.codebook_weight = codebook_weight
        self.pixel_weight = pixelloss_weight
        self.perceptual_loss = LPIPS().eval()
        self.perceptual_weight = perceptual_weight
        self.discriminator = NLayerDiscriminator(input_nc=disc_in_channels,
                                                 n_layers=disc_num_layers,
                                                 use_actnorm=use_actnorm,
                                                 ndf=disc_ndf
                                                 ).apply(weights_init)
        # global step at which the adversarial term is switched on
        self.discriminator_iter_start = disc_start
        if disc_loss == "hinge":
            self.disc_loss = hinge_d_loss
        elif disc_loss == "vanilla":
            self.disc_loss = vanilla_d_loss
        else:
            raise ValueError(f"Unknown GAN loss '{disc_loss}'.")
        print(f"VQLPIPSWithDiscriminator running with {disc_loss} loss.")
        self.disc_factor = disc_factor
        self.discriminator_weight = disc_weight
        self.disc_conditional = disc_conditional
    def calculate_adaptive_weight(self, nll_loss, g_loss, last_layer=None):
        # Balance the generator loss against the reconstruction loss via the
        # ratio of their gradient norms at the decoder's last layer.
        if last_layer is not None:
            nll_grads = torch.autograd.grad(nll_loss, last_layer, retain_graph=True)[0]
            g_grads = torch.autograd.grad(g_loss, last_layer, retain_graph=True)[0]
        else:
            nll_grads = torch.autograd.grad(nll_loss, self.last_layer[0], retain_graph=True)[0]
            g_grads = torch.autograd.grad(g_loss, self.last_layer[0], retain_graph=True)[0]
        d_weight = torch.norm(nll_grads) / (torch.norm(g_grads) + 1e-4)
        d_weight = torch.clamp(d_weight, 0.0, 1e4).detach()
        d_weight = d_weight * self.discriminator_weight
        return d_weight
    def forward(self, codebook_loss, inputs, reconstructions, optimizer_idx,
                global_step, last_layer=None, cond=None, split="train"):
        # reconstruction loss: L1 in pixel space, optionally + LPIPS
        rec_loss = torch.abs(inputs.contiguous() - reconstructions.contiguous())
        if self.perceptual_weight > 0:
            p_loss = self.perceptual_loss(inputs.contiguous(), reconstructions.contiguous())
            rec_loss = rec_loss + self.perceptual_weight * p_loss
        else:
            p_loss = torch.tensor([0.0])
        nll_loss = rec_loss
        #nll_loss = torch.sum(nll_loss) / nll_loss.shape[0]
        nll_loss = torch.mean(nll_loss)
        # now the GAN part
        if optimizer_idx == 0:
            # generator update
            if cond is None:
                assert not self.disc_conditional
                logits_fake = self.discriminator(reconstructions.contiguous())
            else:
                assert self.disc_conditional
                logits_fake = self.discriminator(torch.cat((reconstructions.contiguous(), cond), dim=1))
            g_loss = -torch.mean(logits_fake)
            try:
                d_weight = self.calculate_adaptive_weight(nll_loss, g_loss, last_layer=last_layer)
            except RuntimeError:
                # autograd.grad fails without a graph (e.g. in eval mode)
                assert not self.training
                d_weight = torch.tensor(0.0)
            disc_factor = adopt_weight(self.disc_factor, global_step, threshold=self.discriminator_iter_start)
            loss = nll_loss + d_weight * disc_factor * g_loss + self.codebook_weight * codebook_loss.mean()
            log = {"{}/total_loss".format(split): loss.clone().detach().mean(),
                   "{}/quant_loss".format(split): codebook_loss.detach().mean(),
                   "{}/nll_loss".format(split): nll_loss.detach().mean(),
                   "{}/rec_loss".format(split): rec_loss.detach().mean(),
                   "{}/p_loss".format(split): p_loss.detach().mean(),
                   "{}/d_weight".format(split): d_weight.detach(),
                   "{}/disc_factor".format(split): torch.tensor(disc_factor),
                   "{}/g_loss".format(split): g_loss.detach().mean(),
                   }
            return loss, log
        if optimizer_idx == 1:
            # second pass for discriminator update
            if cond is None:
                logits_real = self.discriminator(inputs.contiguous().detach())
                logits_fake = self.discriminator(reconstructions.contiguous().detach())
            else:
                logits_real = self.discriminator(torch.cat((inputs.contiguous().detach(), cond), dim=1))
                logits_fake = self.discriminator(torch.cat((reconstructions.contiguous().detach(), cond), dim=1))
            disc_factor = adopt_weight(self.disc_factor, global_step, threshold=self.discriminator_iter_start)
            d_loss = disc_factor * self.disc_loss(logits_real, logits_fake)
            log = {"{}/disc_loss".format(split): d_loss.clone().detach().mean(),
                   "{}/logits_real".format(split): logits_real.detach().mean(),
                   "{}/logits_fake".format(split): logits_fake.detach().mean()
                   }
            return d_loss, log
| 6,179 | 44.109489 | 113 | py |
DaVinci | DaVinci-main/taming/models/vqgan.py | import torch
import torch.nn.functional as F
import pytorch_lightning as pl
from taming.main import instantiate_from_config
from taming.modules.diffusionmodules.model import Encoder, Decoder
from taming.modules.vqvae.quantize import VectorQuantizer2 as VectorQuantizer
from taming.modules.vqvae.quantize import GumbelQuantize
class VQModel(pl.LightningModule):
    """VQGAN first-stage model: Encoder -> VectorQuantizer -> Decoder.

    Trained adversarially with two optimizers (autoencoder params and a
    discriminator owned by ``self.loss``); the loss module supplied via
    ``lossconfig`` computes both parts, selected by ``optimizer_idx``.
    """
    def __init__(self,
                 ddconfig,
                 lossconfig,
                 n_embed,
                 embed_dim,
                 ckpt_path=None,
                 ignore_keys=[],
                 image_key="image",
                 colorize_nlabels=None,
                 monitor=None,
                 remap=None,
                 sane_index_shape=False,  # tell vector quantizer to return indices as bhw
                 ):
        super().__init__()
        self.image_key = image_key
        self.encoder = Encoder(**ddconfig)
        self.decoder = Decoder(**ddconfig)
        self.loss = instantiate_from_config(lossconfig)
        # beta=0.25: commitment-cost weight inside the VQ loss
        self.quantize = VectorQuantizer(n_embed, embed_dim, beta=0.25,
                                        remap=remap, sane_index_shape=sane_index_shape)
        # 1x1 convs map between encoder channel count and codebook dimension
        self.quant_conv = torch.nn.Conv2d(ddconfig["z_channels"], embed_dim, 1)
        self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig["z_channels"], 1)
        if ckpt_path is not None:
            self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)
        self.image_key = image_key  # NOTE(review): redundant, already assigned above
        if colorize_nlabels is not None:
            assert type(colorize_nlabels)==int
            # fixed random projection used by to_rgb() for segmentation inputs
            self.register_buffer("colorize", torch.randn(3, colorize_nlabels, 1, 1))
        if monitor is not None:
            self.monitor = monitor
    def init_from_ckpt(self, path, ignore_keys=list()):
        """Load weights from *path*, dropping keys matching any ignore prefix."""
        sd = torch.load(path, map_location="cpu")["state_dict"]
        # snapshot keys first: sd is mutated inside the loop
        keys = list(sd.keys())
        for k in keys:
            for ik in ignore_keys:
                if k.startswith(ik):
                    print("Deleting key {} from state_dict.".format(k))
                    del sd[k]
        self.load_state_dict(sd, strict=False)
        print(f"Restored from {path}")
    def encode(self, x):
        """Encode image batch to (quantized latents, emb loss, quantizer info)."""
        h = self.encoder(x).float()
        h = self.quant_conv(h).float()
        quant, emb_loss, info = self.quantize(h)
        return quant, emb_loss, info
    def decode(self, quant):
        """Decode quantized latents back to image space."""
        quant = self.post_quant_conv(quant)
        dec = self.decoder(quant)
        return dec
    def decode_code(self, code_b):
        """Decode straight from codebook indices."""
        quant_b = self.quantize.embed_code(code_b)
        dec = self.decode(quant_b)
        return dec
    def forward(self, input):
        """Full autoencode pass; returns (reconstruction, quantization loss)."""
        quant, diff, _ = self.encode(input)
        dec = self.decode(quant)
        return dec, diff
    def get_input(self, batch, k):
        """Fetch batch[k] as a contiguous float NCHW tensor (adds a channel dim to HxW inputs)."""
        x = batch[k]
        if len(x.shape) == 3:
            x = x[..., None]
        x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format)
        return x.float()
    def training_step(self, batch, batch_idx, optimizer_idx):
        """Alternate generator (idx 0) and discriminator (idx 1) updates."""
        x = self.get_input(batch, self.image_key)
        xrec, qloss = self(x)
        if optimizer_idx == 0:
            # autoencode
            aeloss, log_dict_ae = self.loss(qloss, x, xrec, optimizer_idx, self.global_step,
                                            last_layer=self.get_last_layer(), split="train")
            self.log("train/aeloss", aeloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)
            self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=True)
            return aeloss
        if optimizer_idx == 1:
            # discriminator
            discloss, log_dict_disc = self.loss(qloss, x, xrec, optimizer_idx, self.global_step,
                                            last_layer=self.get_last_layer(), split="train")
            self.log("train/discloss", discloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)
            self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=True)
            return discloss
    def validation_step(self, batch, batch_idx):
        """Compute both loss parts on validation data and log them."""
        x = self.get_input(batch, self.image_key)
        xrec, qloss = self(x)
        aeloss, log_dict_ae = self.loss(qloss, x, xrec, 0, self.global_step,
                                            last_layer=self.get_last_layer(), split="val")
        discloss, log_dict_disc = self.loss(qloss, x, xrec, 1, self.global_step,
                                            last_layer=self.get_last_layer(), split="val")
        rec_loss = log_dict_ae["val/rec_loss"]
        self.log("val/rec_loss", rec_loss,
                   prog_bar=True, logger=True, on_step=True, on_epoch=True, sync_dist=True)
        self.log("val/aeloss", aeloss,
                   prog_bar=True, logger=True, on_step=True, on_epoch=True, sync_dist=True)
        self.log_dict(log_dict_ae)
        self.log_dict(log_dict_disc)
        return self.log_dict
    def configure_optimizers(self):
        """Two Adam optimizers: autoencoder params and discriminator params."""
        lr = self.learning_rate
        opt_ae = torch.optim.Adam(list(self.encoder.parameters())+
                                  list(self.decoder.parameters())+
                                  list(self.quantize.parameters())+
                                  list(self.quant_conv.parameters())+
                                  list(self.post_quant_conv.parameters()),
                                  lr=lr, betas=(0.5, 0.9))
        opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(),
                                    lr=lr, betas=(0.5, 0.9))
        return [opt_ae, opt_disc], []
    def get_last_layer(self):
        # used by the loss for the adaptive generator/discriminator weight
        return self.decoder.conv_out.weight
    def log_images(self, batch, **kwargs):
        """Return a dict of input/reconstruction tensors for image logging."""
        log = dict()
        x = self.get_input(batch, self.image_key)
        x = x.to(self.device)
        xrec, _ = self(x)
        if x.shape[1] > 3:
            # colorize with random projection
            assert xrec.shape[1] > 3
            x = self.to_rgb(x)
            xrec = self.to_rgb(xrec)
        log["inputs"] = x
        log["reconstructions"] = xrec
        return log
    def to_rgb(self, x):
        """Project a multi-channel segmentation tensor to 3 channels in [-1, 1]."""
        assert self.image_key == "segmentation"
        if not hasattr(self, "colorize"):
            self.register_buffer("colorize", torch.randn(3, x.shape[1], 1, 1).to(x))
        x = F.conv2d(x, weight=self.colorize)
        x = 2.*(x-x.min())/(x.max()-x.min()) - 1.
        return x
class VQSegmentationModel(VQModel):
    """VQModel for segmentation maps: single optimizer, no discriminator step."""
    def __init__(self, n_labels, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # random projection used by to_rgb() to render n_labels channels as RGB
        self.register_buffer("colorize", torch.randn(3, n_labels, 1, 1))
    def configure_optimizers(self):
        """Single Adam optimizer over all autoencoder parameters."""
        lr = self.learning_rate
        opt_ae = torch.optim.Adam(list(self.encoder.parameters())+
                                  list(self.decoder.parameters())+
                                  list(self.quantize.parameters())+
                                  list(self.quant_conv.parameters())+
                                  list(self.post_quant_conv.parameters()),
                                  lr=lr, betas=(0.5, 0.9))
        return opt_ae
    def training_step(self, batch, batch_idx):
        # no optimizer_idx: loss is called without the adversarial part
        x = self.get_input(batch, self.image_key)
        xrec, qloss = self(x)
        aeloss, log_dict_ae = self.loss(qloss, x, xrec, split="train")
        self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=True)
        return aeloss
    def validation_step(self, batch, batch_idx):
        x = self.get_input(batch, self.image_key)
        xrec, qloss = self(x)
        aeloss, log_dict_ae = self.loss(qloss, x, xrec, split="val")
        self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=True)
        total_loss = log_dict_ae["val/total_loss"]
        self.log("val/total_loss", total_loss,
                 prog_bar=True, logger=True, on_step=True, on_epoch=True, sync_dist=True)
        return aeloss
    @torch.no_grad()
    def log_images(self, batch, **kwargs):
        """Visualize inputs/reconstructions; logits are argmax'd to colored masks."""
        log = dict()
        x = self.get_input(batch, self.image_key)
        x = x.to(self.device)
        xrec, _ = self(x)
        if x.shape[1] > 3:
            # colorize with random projection
            assert xrec.shape[1] > 3
            # convert logits to indices
            xrec = torch.argmax(xrec, dim=1, keepdim=True)
            xrec = F.one_hot(xrec, num_classes=x.shape[1])
            xrec = xrec.squeeze(1).permute(0, 3, 1, 2).float()
            x = self.to_rgb(x)
            xrec = self.to_rgb(xrec)
        log["inputs"] = x
        log["reconstructions"] = xrec
        return log
class VQNoDiscModel(VQModel):
    """VQModel trained without a discriminator (single-loss autoencoding).

    NOTE(review): uses ``pl.TrainResult`` / ``pl.EvalResult``, which existed
    only in early PyTorch Lightning (~0.9.x) and were removed in 1.x —
    confirm against the pinned Lightning version before relying on this class.
    """
    def __init__(self,
                 ddconfig,
                 lossconfig,
                 n_embed,
                 embed_dim,
                 ckpt_path=None,
                 ignore_keys=[],
                 image_key="image",
                 colorize_nlabels=None
                 ):
        super().__init__(ddconfig=ddconfig, lossconfig=lossconfig, n_embed=n_embed, embed_dim=embed_dim,
                         ckpt_path=ckpt_path, ignore_keys=ignore_keys, image_key=image_key,
                         colorize_nlabels=colorize_nlabels)
    def training_step(self, batch, batch_idx):
        x = self.get_input(batch, self.image_key)
        xrec, qloss = self(x)
        # autoencode
        aeloss, log_dict_ae = self.loss(qloss, x, xrec, self.global_step, split="train")
        output = pl.TrainResult(minimize=aeloss)
        output.log("train/aeloss", aeloss,
                   prog_bar=True, logger=True, on_step=True, on_epoch=True)
        output.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=True)
        return output
    def validation_step(self, batch, batch_idx):
        x = self.get_input(batch, self.image_key)
        xrec, qloss = self(x)
        aeloss, log_dict_ae = self.loss(qloss, x, xrec, self.global_step, split="val")
        rec_loss = log_dict_ae["val/rec_loss"]
        # checkpoint selection is driven by the reconstruction loss
        output = pl.EvalResult(checkpoint_on=rec_loss)
        output.log("val/rec_loss", rec_loss,
                   prog_bar=True, logger=True, on_step=True, on_epoch=True)
        output.log("val/aeloss", aeloss,
                   prog_bar=True, logger=True, on_step=True, on_epoch=True)
        output.log_dict(log_dict_ae)
        return output
    def configure_optimizers(self):
        """Single Adam optimizer over all autoencoder parameters."""
        optimizer = torch.optim.Adam(list(self.encoder.parameters())+
                                     list(self.decoder.parameters())+
                                     list(self.quantize.parameters())+
                                     list(self.quant_conv.parameters())+
                                     list(self.post_quant_conv.parameters()),
                                     lr=self.learning_rate, betas=(0.5, 0.9))
        return optimizer
class GumbelVQ(VQModel):
    """VQModel variant with Gumbel-softmax quantization.

    Replaces the parent's VectorQuantizer with GumbelQuantize and anneals its
    softmax temperature over training via ``temperature_scheduler_config``.
    """
    def __init__(self,
                 ddconfig,
                 lossconfig,
                 n_embed,
                 embed_dim,
                 temperature_scheduler_config,
                 ckpt_path=None,
                 ignore_keys=[],
                 image_key="image",
                 colorize_nlabels=None,
                 monitor=None,
                 kl_weight=1e-8,
                 remap=None,
                 ):
        z_channels = ddconfig["z_channels"]
        # Pass ckpt_path=None here so the checkpoint is loaded only *after*
        # the quantizer has been swapped below; otherwise the restored weights
        # would land in the wrong quantizer module.
        super().__init__(ddconfig,
                         lossconfig,
                         n_embed,
                         embed_dim,
                         ckpt_path=None,
                         ignore_keys=ignore_keys,
                         image_key=image_key,
                         colorize_nlabels=colorize_nlabels,
                         monitor=monitor,
                         )
        self.loss.n_classes = n_embed
        self.vocab_size = n_embed
        self.quantize = GumbelQuantize(z_channels, embed_dim,
                                       n_embed=n_embed,
                                       kl_weight=kl_weight, temp_init=1.0,
                                       remap=remap)
        self.temperature_scheduler = instantiate_from_config(temperature_scheduler_config)  # annealing of temp
        if ckpt_path is not None:
            self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)

    def temperature_scheduling(self):
        """Set the Gumbel-softmax temperature for the current global step."""
        self.quantize.temperature = self.temperature_scheduler(self.global_step)

    def encode_to_prequant(self, x):
        """Encode to the continuous feature map just before quantization."""
        h = self.encoder(x)
        h = self.quant_conv(h)
        return h

    def decode_code(self, code_b):
        raise NotImplementedError

    def training_step(self, batch, batch_idx, optimizer_idx):
        """Anneal the temperature, then run the generator/discriminator step."""
        self.temperature_scheduling()
        x = self.get_input(batch, self.image_key)
        xrec, qloss = self(x)
        if optimizer_idx == 0:
            # autoencode
            aeloss, log_dict_ae = self.loss(qloss, x, xrec, optimizer_idx, self.global_step,
                                            last_layer=self.get_last_layer(), split="train")
            self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=True)
            self.log("temperature", self.quantize.temperature, prog_bar=False, logger=True, on_step=True, on_epoch=True)
            return aeloss
        if optimizer_idx == 1:
            # discriminator
            discloss, log_dict_disc = self.loss(qloss, x, xrec, optimizer_idx, self.global_step,
                                                last_layer=self.get_last_layer(), split="train")
            self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=True)
            return discloss

    def validation_step(self, batch, batch_idx):
        """Compute both loss parts on validation data and log them."""
        x = self.get_input(batch, self.image_key)
        # BUGFIX: was ``self(x, return_pred_indices=True)`` — VQModel.forward
        # is ``forward(self, input)`` and rejects that keyword with a
        # TypeError, so validation could never run.
        xrec, qloss = self(x)
        aeloss, log_dict_ae = self.loss(qloss, x, xrec, 0, self.global_step,
                                        last_layer=self.get_last_layer(), split="val")
        discloss, log_dict_disc = self.loss(qloss, x, xrec, 1, self.global_step,
                                            last_layer=self.get_last_layer(), split="val")
        rec_loss = log_dict_ae["val/rec_loss"]
        self.log("val/rec_loss", rec_loss,
                 prog_bar=True, logger=True, on_step=False, on_epoch=True, sync_dist=True)
        self.log("val/aeloss", aeloss,
                 prog_bar=True, logger=True, on_step=False, on_epoch=True, sync_dist=True)
        self.log_dict(log_dict_ae)
        self.log_dict(log_dict_disc)
        return self.log_dict

    def log_images(self, batch, **kwargs):
        """Encode/decode a batch and return tensors for image logging."""
        log = dict()
        x = self.get_input(batch, self.image_key)
        x = x.to(self.device)
        # encode
        h = self.encoder(x)
        h = self.quant_conv(h)
        quant, _, _ = self.quantize(h)
        # decode
        x_rec = self.decode(quant)
        log["inputs"] = x
        log["reconstructions"] = x_rec
        return log
| 14,908 | 39.958791 | 120 | py |
DaVinci | DaVinci-main/taming/models/cond_transformer.py | import os, math
import torch
import torch.nn.functional as F
import pytorch_lightning as pl
from taming.main import instantiate_from_config
from taming.modules.util import SOSProvider
def disabled_train(self, mode=True):
    """Replacement for ``nn.Module.train`` that ignores *mode*, so a frozen
    module can never be flipped back into training mode."""
    _ = mode  # intentionally ignored
    return self
class Net2NetTransformer(pl.LightningModule):
    """Autoregressive transformer over the VQ codes of a frozen first stage,
    conditioned on the codes of a (possibly identical or dummy) cond stage.
    """
    def __init__(self,
                 transformer_config,
                 first_stage_config,
                 cond_stage_config,
                 permuter_config=None,
                 ckpt_path=None,
                 ignore_keys=[],
                 first_stage_key="image",
                 cond_stage_key="depth",
                 downsample_cond_size=-1,
                 pkeep=1.0,
                 sos_token=0,
                 unconditional=False,
                 ):
        super().__init__()
        self.be_unconditional = unconditional
        self.sos_token = sos_token
        self.first_stage_key = first_stage_key
        self.cond_stage_key = cond_stage_key
        self.init_first_stage_from_ckpt(first_stage_config)
        self.init_cond_stage_from_ckpt(cond_stage_config)
        if permuter_config is None:
            permuter_config = {"target": "taming.modules.transformer.permuter.Identity"}
        self.permuter = instantiate_from_config(config=permuter_config)
        self.transformer = instantiate_from_config(config=transformer_config)
        if ckpt_path is not None:
            self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)
        self.downsample_cond_size = downsample_cond_size
        # probability of keeping a ground-truth token during training
        self.pkeep = pkeep

    def init_from_ckpt(self, path, ignore_keys=list()):
        """Load weights from *path*, dropping keys matching any ignore prefix."""
        sd = torch.load(path, map_location="cpu")["state_dict"]
        # BUGFIX: iterate over a snapshot of the keys. Deleting from ``sd``
        # while iterating ``sd.keys()`` directly raises "RuntimeError:
        # dictionary changed size during iteration" as soon as an ignore key
        # matches (VQModel.init_from_ckpt already does it this way).
        for k in list(sd.keys()):
            for ik in ignore_keys:
                if k.startswith(ik):
                    self.print("Deleting key {} from state_dict.".format(k))
                    del sd[k]
        self.load_state_dict(sd, strict=False)
        print(f"Restored from {path}")

    def init_first_stage_from_ckpt(self, config):
        """Instantiate the first stage frozen in eval mode."""
        model = instantiate_from_config(config)
        model = model.eval()
        model.train = disabled_train
        self.first_stage_model = model

    def init_cond_stage_from_ckpt(self, config):
        """Instantiate the conditioning stage (shared, dummy SOS, or its own model)."""
        if config == "__is_first_stage__":
            print("Using first stage also as cond stage.")
            self.cond_stage_model = self.first_stage_model
        elif config == "__is_unconditional__" or self.be_unconditional:
            print(f"Using no cond stage. Assuming the training is intended to be unconditional. "
                  f"Prepending {self.sos_token} as a sos token.")
            self.be_unconditional = True
            self.cond_stage_key = self.first_stage_key
            self.cond_stage_model = SOSProvider(self.sos_token)
        else:
            model = instantiate_from_config(config)
            model = model.eval()
            model.train = disabled_train
            self.cond_stage_model = model

    def forward(self, x, c):
        """Return next-token logits over z given conditioning c, plus targets."""
        # one step to produce the logits
        _, z_indices = self.encode_to_z(x)
        _, c_indices = self.encode_to_c(c)
        if self.training and self.pkeep < 1.0:
            # randomly replace (1 - pkeep) of the ground-truth tokens
            mask = torch.bernoulli(self.pkeep*torch.ones(z_indices.shape,
                                                         device=z_indices.device))
            mask = mask.round().to(dtype=torch.int64)
            r_indices = torch.randint_like(z_indices, self.transformer.config.vocab_size)
            a_indices = mask*z_indices+(1-mask)*r_indices
        else:
            a_indices = z_indices
        cz_indices = torch.cat((c_indices, a_indices), dim=1)
        # target includes all sequence elements (no need to handle first one
        # differently because we are conditioning)
        target = z_indices
        # make the prediction
        logits, _ = self.transformer(cz_indices[:, :-1])
        # cut off conditioning outputs - output i corresponds to p(z_i | z_{<i}, c)
        logits = logits[:, c_indices.shape[1]-1:]
        return logits, target

    def top_k_logits(self, logits, k):
        """Mask all but the k largest logits to -inf."""
        v, ix = torch.topk(logits, k)
        out = logits.clone()
        out[out < v[..., [-1]]] = -float('Inf')
        return out

    @torch.no_grad()
    def sample(self, x, c, steps, temperature=1.0, sample=False, top_k=None,
               callback=lambda k: None):
        """Autoregressively extend x by *steps* tokens under conditioning c."""
        x = torch.cat((c,x),dim=1)
        block_size = self.transformer.get_block_size()
        assert not self.transformer.training
        if self.pkeep <= 0.0:
            # one pass suffices since input is pure noise anyway
            assert len(x.shape)==2
            noise_shape = (x.shape[0], steps-1)
            #noise = torch.randint(self.transformer.config.vocab_size, noise_shape).to(x)
            noise = c.clone()[:,x.shape[1]-c.shape[1]:-1]
            x = torch.cat((x,noise),dim=1)
            logits, _ = self.transformer(x)
            # take all logits for now and scale by temp
            logits = logits / temperature
            # optionally crop probabilities to only the top k options
            if top_k is not None:
                logits = self.top_k_logits(logits, top_k)
            # apply softmax to convert to probabilities
            probs = F.softmax(logits, dim=-1)
            # sample from the distribution or take the most likely
            if sample:
                shape = probs.shape
                probs = probs.reshape(shape[0]*shape[1],shape[2])
                ix = torch.multinomial(probs, num_samples=1)
                probs = probs.reshape(shape[0],shape[1],shape[2])
                ix = ix.reshape(shape[0],shape[1])
            else:
                _, ix = torch.topk(probs, k=1, dim=-1)
            # cut off conditioning
            x = ix[:, c.shape[1]-1:]
        else:
            for k in range(steps):
                callback(k)
                assert x.size(1) <= block_size  # make sure model can see conditioning
                x_cond = x if x.size(1) <= block_size else x[:, -block_size:]  # crop context if needed
                logits, _ = self.transformer(x_cond)
                # pluck the logits at the final step and scale by temperature
                logits = logits[:, -1, :] / temperature
                # optionally crop probabilities to only the top k options
                if top_k is not None:
                    logits = self.top_k_logits(logits, top_k)
                # apply softmax to convert to probabilities
                probs = F.softmax(logits, dim=-1)
                # sample from the distribution or take the most likely
                if sample:
                    ix = torch.multinomial(probs, num_samples=1)
                else:
                    _, ix = torch.topk(probs, k=1, dim=-1)
                # append to the sequence and continue
                x = torch.cat((x, ix), dim=1)
            # cut off conditioning
            x = x[:, c.shape[1]:]
        return x

    @torch.no_grad()
    def encode_to_z(self, x):
        """Quantize x with the first stage; return (latents, permuted indices)."""
        quant_z, _, info = self.first_stage_model.encode(x)
        indices = info[2].view(quant_z.shape[0], -1)
        indices = self.permuter(indices)
        return quant_z, indices

    @torch.no_grad()
    def encode_to_c(self, c):
        """Quantize conditioning c (optionally downsampled first)."""
        if self.downsample_cond_size > -1:
            c = F.interpolate(c, size=(self.downsample_cond_size, self.downsample_cond_size))
        quant_c, _, [_,_,indices] = self.cond_stage_model.encode(c)
        if len(indices.shape) > 2:
            indices = indices.view(c.shape[0], -1)
        return quant_c, indices

    @torch.no_grad()
    def decode_to_img(self, index, zshape):
        """Map code indices (in permuted order) back to an image."""
        index = self.permuter(index, reverse=True)
        bhwc = (zshape[0],zshape[2],zshape[3],zshape[1])
        quant_z = self.first_stage_model.quantize.get_codebook_entry(
            index.reshape(-1), shape=bhwc)
        x = self.first_stage_model.decode(quant_z)
        return x

    @torch.no_grad()
    def log_images(self, batch, temperature=None, top_k=None, callback=None, lr_interface=False, **kwargs):
        """Log inputs, reconstructions and several sampling variants."""
        log = dict()
        N = 4
        if lr_interface:
            # NOTE(review): get_xc() does not accept diffuse/upsample_factor —
            # this branch relies on a subclass overriding get_xc; confirm.
            x, c = self.get_xc(batch, N, diffuse=False, upsample_factor=8)
        else:
            x, c = self.get_xc(batch, N)
        x = x.to(device=self.device)
        c = c.to(device=self.device)
        quant_z, z_indices = self.encode_to_z(x)
        quant_c, c_indices = self.encode_to_c(c)
        # create a "half" sample: complete the second half from the first
        z_start_indices = z_indices[:,:z_indices.shape[1]//2]
        index_sample = self.sample(z_start_indices, c_indices,
                                   steps=z_indices.shape[1]-z_start_indices.shape[1],
                                   temperature=temperature if temperature is not None else 1.0,
                                   sample=True,
                                   top_k=top_k if top_k is not None else 100,
                                   callback=callback if callback is not None else lambda k: None)
        x_sample = self.decode_to_img(index_sample, quant_z.shape)
        # sample from scratch (stochastic)
        z_start_indices = z_indices[:, :0]
        index_sample = self.sample(z_start_indices, c_indices,
                                   steps=z_indices.shape[1],
                                   temperature=temperature if temperature is not None else 1.0,
                                   sample=True,
                                   top_k=top_k if top_k is not None else 100,
                                   callback=callback if callback is not None else lambda k: None)
        x_sample_nopix = self.decode_to_img(index_sample, quant_z.shape)
        # deterministic (greedy) sample
        z_start_indices = z_indices[:, :0]
        index_sample = self.sample(z_start_indices, c_indices,
                                   steps=z_indices.shape[1],
                                   sample=False,
                                   callback=callback if callback is not None else lambda k: None)
        x_sample_det = self.decode_to_img(index_sample, quant_z.shape)
        # reconstruction
        x_rec = self.decode_to_img(z_indices, quant_z.shape)
        log["inputs"] = x
        log["reconstructions"] = x_rec
        if self.cond_stage_key != "image":
            cond_rec = self.cond_stage_model.decode(quant_c)
            if self.cond_stage_key == "segmentation":
                # get image from segmentation mask
                num_classes = cond_rec.shape[1]
                c = torch.argmax(c, dim=1, keepdim=True)
                c = F.one_hot(c, num_classes=num_classes)
                c = c.squeeze(1).permute(0, 3, 1, 2).float()
                c = self.cond_stage_model.to_rgb(c)
                cond_rec = torch.argmax(cond_rec, dim=1, keepdim=True)
                cond_rec = F.one_hot(cond_rec, num_classes=num_classes)
                cond_rec = cond_rec.squeeze(1).permute(0, 3, 1, 2).float()
                cond_rec = self.cond_stage_model.to_rgb(cond_rec)
            log["conditioning_rec"] = cond_rec
            log["conditioning"] = c
        log["samples_half"] = x_sample
        log["samples_nopix"] = x_sample_nopix
        log["samples_det"] = x_sample_det
        return log

    def get_input(self, key, batch):
        """Fetch batch[key] as a contiguous float NCHW tensor."""
        x = batch[key]
        if len(x.shape) == 3:
            x = x[..., None]
        if len(x.shape) == 4:
            x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format)
        if x.dtype == torch.double:
            x = x.float()
        return x

    def get_xc(self, batch, N=None):
        """Return (first-stage input, conditioning input), truncated to N items."""
        x = self.get_input(self.first_stage_key, batch)
        c = self.get_input(self.cond_stage_key, batch)
        if N is not None:
            x = x[:N]
            c = c[:N]
        return x, c

    def shared_step(self, batch, batch_idx):
        """Cross-entropy of transformer logits against the true code indices."""
        x, c = self.get_xc(batch)
        logits, target = self(x, c)
        loss = F.cross_entropy(logits.reshape(-1, logits.size(-1)), target.reshape(-1))
        return loss

    def training_step(self, batch, batch_idx):
        loss = self.shared_step(batch, batch_idx)
        self.log("train/loss", loss, prog_bar=True, logger=True, on_step=True, on_epoch=True)
        return loss

    def validation_step(self, batch, batch_idx):
        loss = self.shared_step(batch, batch_idx)
        self.log("val/loss", loss, prog_bar=True, logger=True, on_step=True, on_epoch=True)
        return loss

    def configure_optimizers(self):
        """
        Following minGPT:
        This long function is unfortunately doing something very simple and is being very defensive:
        We are separating out all parameters of the model into two buckets: those that will experience
        weight decay for regularization and those that won't (biases, and layernorm/embedding weights).
        We are then returning the PyTorch optimizer object.
        """
        # separate out all parameters to those that will and won't experience regularizing weight decay
        decay = set()
        no_decay = set()
        whitelist_weight_modules = (torch.nn.Linear, )
        blacklist_weight_modules = (torch.nn.LayerNorm, torch.nn.Embedding)
        for mn, m in self.transformer.named_modules():
            for pn, p in m.named_parameters():
                fpn = '%s.%s' % (mn, pn) if mn else pn  # full param name
                if pn.endswith('bias'):
                    # all biases will not be decayed
                    no_decay.add(fpn)
                elif pn.endswith('weight') and isinstance(m, whitelist_weight_modules):
                    # weights of whitelist modules will be weight decayed
                    decay.add(fpn)
                elif pn.endswith('weight') and isinstance(m, blacklist_weight_modules):
                    # weights of blacklist modules will NOT be weight decayed
                    no_decay.add(fpn)
        # special case the position embedding parameter in the root GPT module as not decayed
        no_decay.add('pos_emb')
        # validate that we considered every parameter
        param_dict = {pn: p for pn, p in self.transformer.named_parameters()}
        inter_params = decay & no_decay
        union_params = decay | no_decay
        assert len(inter_params) == 0, "parameters %s made it into both decay/no_decay sets!" % (str(inter_params), )
        assert len(param_dict.keys() - union_params) == 0, "parameters %s were not separated into either decay/no_decay set!" \
                                                    % (str(param_dict.keys() - union_params), )
        # create the pytorch optimizer object
        optim_groups = [
            {"params": [param_dict[pn] for pn in sorted(list(decay))], "weight_decay": 0.01},
            {"params": [param_dict[pn] for pn in sorted(list(no_decay))], "weight_decay": 0.0},
        ]
        optimizer = torch.optim.AdamW(optim_groups, lr=self.learning_rate, betas=(0.9, 0.95))
        return optimizer
| 15,002 | 42.613372 | 127 | py |
HeadlineCause | HeadlineCause-main/headline_cause/labse.py | import gc
import numpy as np
import tensorflow as tf
import tensorflow_hub as hub
import tensorflow_text as text
from tqdm import tqdm
from util import gen_batch
DEFAULT_ENCODER_PATH = "https://tfhub.dev/google/LaBSE/2"
DEFAULT_PREPROCESSOR_PATH = "https://tfhub.dev/google/universal-sentence-encoder-cmlm/multilingual-preprocess/2"
def normalization(embeds):
    """L2-normalize each row of *embeds* (unit-length embedding vectors)."""
    return embeds / np.linalg.norm(embeds, 2, axis=1, keepdims=True)
class LaBSE:
    """Thin wrapper around the TF-Hub LaBSE sentence encoder.

    Produces L2-normalized 768-dimensional embeddings for lists of sentences.
    """
    def __init__(self, encoder_path=DEFAULT_ENCODER_PATH, preprocessor_path=DEFAULT_PREPROCESSOR_PATH):
        self.preprocessor = hub.KerasLayer(preprocessor_path)
        self.model = hub.KerasLayer(encoder_path)
    def embed_batch(self, texts):
        # the "default" output of LaBSE is the pooled sentence embedding
        return normalization(self.model(self.preprocessor(texts))["default"])
    def labse_get_embeddings(self, sentences, batch_size):
        """Embed all sentences batch-by-batch into a [len(sentences), 768] array."""
        embeddings = np.zeros((len(sentences), 768))
        current_index = 0
        for batch in gen_batch(sentences, batch_size):
            batch_embeddings = self.embed_batch(batch)
            # numpy clamps the slice end, so a short final batch still lands
            # in exactly its own rows
            embeddings[current_index:current_index+batch_size, :] = batch_embeddings
            current_index += batch_size
            # free TF graph/session memory between batches
            tf.keras.backend.clear_session()
            gc.collect()
        return embeddings
    def __call__(self, sentences, batch_size=16):
        return self.labse_get_embeddings(sentences, batch_size)
| 1,380 | 33.525 | 112 | py |
HeadlineCause | HeadlineCause-main/headline_cause/util.py | import os
import json
import csv
import random
from urllib.parse import urlparse
import torch
import numpy as np
def write_tsv(records, header, path):
    """Serialize dict *records* to *path* as TSV.

    *header* fixes both the first row and the column order of every record.
    """
    with open(path, "w") as out_file:
        writer = csv.writer(out_file, delimiter="\t", quotechar='"')
        writer.writerow(header)
        writer.writerows([record[column] for column in header] for record in records)
def read_tsv(file_name):
    """Load a tab-separated file into a list of dicts keyed by its header row."""
    with open(file_name, "r") as in_file:
        reader = csv.reader(in_file, delimiter="\t")
        header = next(reader)
        return [dict(zip(header, row)) for row in reader]
def write_jsonl(records, path):
    """Write *records* to *path* in JSON-lines format (one object per line)."""
    with open(path, "w") as out_file:
        out_file.writelines(json.dumps(record, ensure_ascii=False) + "\n" for record in records)
def read_jsonl(file_name):
    """Parse a JSON-lines file into a list of Python objects."""
    with open(file_name, "r") as in_file:
        return [json.loads(line) for line in in_file]
def get_host(url):
    """Return the ``scheme://netloc/`` prefix of *url*."""
    parsed = urlparse(url)
    return f"{parsed.scheme}://{parsed.netloc}/"
def set_random_seed(seed):
    """Seed every RNG in use (python, numpy, torch) and force deterministic cuDNN.

    Also exports the seed-related environment variables expected by
    PyTorch Lightning and cuBLAS determinism.
    """
    seed_str = str(seed)
    os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":4096:2"
    os.environ["PL_GLOBAL_SEED"] = seed_str
    os.environ["PYTHONHASHSEED"] = seed_str
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
def gen_batch(records, batch_size):
    """Yield consecutive slices of *records*, each at most *batch_size* long."""
    start = 0
    while start < len(records):
        yield records[start:start + batch_size]
        start += batch_size
| 1,720 | 23.585714 | 67 | py |
HeadlineCause | HeadlineCause-main/headline_cause/predict.py | import argparse
import torch
from tqdm import tqdm
from transformers import AutoModelForSequenceClassification, AutoTokenizer, pipeline
import numpy as np
from util import read_jsonl, write_jsonl
def get_batch(data, batch_size):
    """Yield consecutive chunks of *data*, each at most *batch_size* items.

    NOTE: duplicate of ``util.gen_batch``; kept for script independence.
    """
    cursor = 0
    while cursor < len(data):
        yield data[cursor:cursor + batch_size]
        cursor += batch_size
def pipe_predict(data, pipe, batch_size=64):
    """Run a HF text-classification pipeline over *data* in batches.

    Returns (predicted class ids, per-class probability matrix).
    """
    raw_preds = []
    for batch in tqdm(get_batch(data, batch_size), desc="predict"):
        raw_preds.extend(pipe(batch))
    # label strings look like "LABEL_3"; the trailing character is the class
    # id (assumes fewer than 10 classes — holds for this 3/7-way task)
    preds = np.array([
        int(max(labels, key=lambda item: item["score"])["label"][-1])
        for labels in raw_preds
    ])
    probs = np.array([[label["score"] for label in labels] for labels in raw_preds])
    return preds, probs
def predict(model_path, input_path, output_path):
    """Label title pairs from a JSONL file with a fine-tuned classifier.

    Reads records with ``left_title``/``right_title``, adds ``pred_label``
    (argmax class) and ``pred_prob`` (its probability) to each record, and
    writes the enriched records to *output_path*.
    """
    model = AutoModelForSequenceClassification.from_pretrained(model_path)
    tokenizer = AutoTokenizer.from_pretrained(model_path)
    model.eval()
    pipe = pipeline(
        "text-classification",
        model=model,
        tokenizer=tokenizer,
        framework="pt",
        device=(0 if torch.cuda.is_available() else -1),  # GPU 0 if available, else CPU
        return_all_scores=True
    )
    data = read_jsonl(input_path)
    test_pairs = [(r["left_title"], r["right_title"]) for r in data]
    labels, probs = pipe_predict(test_pairs, pipe)
    for r, l, p in zip(data, labels, probs):
        r["pred_label"] = int(l)
        r["pred_prob"] = float(p[l])  # probability of the predicted class
    write_jsonl(data, output_path)
if __name__ == "__main__":
    # CLI entry point: annotate title pairs from a JSONL file with predictions.
    parser = argparse.ArgumentParser()
    parser.add_argument("--model-path", type=str, required=True)
    parser.add_argument("--input-path", type=str, required=True)
    parser.add_argument("--output-path", type=str, required=True)
    args = parser.parse_args()
    predict(**vars(args))
| 1,850 | 31.473684 | 103 | py |
HeadlineCause | HeadlineCause-main/headline_cause/train_clf.py | import argparse
import json
import random
from statistics import mean
import torch
from torch.utils.data import Dataset
from transformers import AutoTokenizer, AutoModelForSequenceClassification
from transformers import Trainer, TrainingArguments, EarlyStoppingCallback
from scipy.stats import entropy
from sklearn.metrics import classification_report
from augment import augment
from util import read_jsonl
class PairsDataset(Dataset):
    """Tokenized dataset of title pairs for sequence-pair classification.

    Each input record must already carry an integer ``label`` (see
    ``calc_labels``). ``result_key`` is accepted but currently unused here.
    """
    def __init__(self, records, max_tokens, tokenizer, result_key):
        self.tokenizer = tokenizer
        self.max_tokens = max_tokens
        self.records = list()
        # pre-tokenize everything up front; memory-heavy but keeps
        # __getitem__ trivial
        for r in records:
            inputs = self.embed_pair(r)
            inputs["labels"] = r["label"]
            self.records.append(inputs)
    def __len__(self):
        return len(self.records)
    def embed_pair(self, pair):
        """Tokenize (left_title, right_title) into fixed-length model inputs."""
        inputs = self.tokenizer(
            text=pair["left_title"],
            text_pair=pair["right_title"],
            add_special_tokens=True,
            max_length=self.max_tokens,
            padding="max_length",
            truncation="longest_first",
            return_tensors="pt"
        )
        # drop the singleton batch dim added by return_tensors="pt"
        return {key: value.squeeze(0) for key, value in inputs.items()}
    def __getitem__(self, index):
        return self.records[index]
def calc_labels(records, result_key):
    """Map annotation strings under *result_key* to integer labels in place.

    Keys containing "simple" use the 3-way task mapping, all others the
    7-way mapping. Returns the same list with a ``label`` field added.
    """
    full_result_mapping = {
        "bad": 0,
        "same": 1,
        "rel": 2,
        "left_right_cause": 3,
        "right_left_cause": 4,
        "left_right_refute": 5,
        "right_left_refute": 6
    }
    simple_result_mapping = {
        "not_cause": 0,
        "left_right": 1,
        "right_left": 2
    }
    mapping = simple_result_mapping if "simple" in result_key else full_result_mapping
    for record in records:
        record["label"] = mapping[record[result_key]]
    return records
def main(
    input_path,
    min_agreement,
    max_tokens,
    model_name,
    epochs,
    eval_steps,
    warmup_steps,
    lr,
    task,
    batch_size,
    grad_accum_steps,
    seed,
    out_dir
):
    """Fine-tune a sequence-pair classifier on annotated headline pairs.

    Filters records by annotator agreement, splits 80/10/10 into
    train/val/test, augments train and val, trains with HF Trainer, saves
    the model to *out_dir*, and prints test metrics plus prediction
    entropies.
    """
    res_key = "{}_result".format(task)
    agreement_key = "{}_agreement".format(task)
    records = read_jsonl(input_path)
    # keep only confidently-annotated pairs
    records = [r for r in records if r[agreement_key] > min_agreement]
    records = calc_labels(records, res_key)
    random.seed(seed)
    random.shuffle(records)
    # 80% train / 10% val / 10% test split
    val_border = int(len(records) * 0.8)
    test_border = int(len(records) * 0.9)
    train_records = records[:val_border]
    val_records = records[val_border:test_border]
    test_records = records[test_border:]
    # test set is deliberately NOT augmented
    train_records, val_records = augment(train_records, task), augment(val_records, task)
    labels_count = len({r[res_key] for r in train_records})
    assert labels_count >= 3
    print("Train records: ", len(train_records))
    print("Val records: ", len(val_records))
    print("Test records: ", len(test_records))
    print("Labels: ", labels_count)
    tokenizer = AutoTokenizer.from_pretrained(model_name, do_lower_case=False)
    train_dataset = PairsDataset(train_records, max_tokens, tokenizer, res_key)
    val_dataset = PairsDataset(val_records, max_tokens, tokenizer, res_key)
    test_dataset = PairsDataset(test_records, max_tokens, tokenizer, res_key)
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model = AutoModelForSequenceClassification.from_pretrained(model_name, num_labels=labels_count)
    model = model.to(device)
    training_args = TrainingArguments(
        output_dir="checkpoints",
        evaluation_strategy="steps",
        save_strategy="steps",
        per_device_train_batch_size=batch_size,
        per_device_eval_batch_size=batch_size,
        logging_steps=eval_steps,
        save_steps=eval_steps,
        warmup_steps=warmup_steps,
        learning_rate=lr,
        num_train_epochs=epochs,
        gradient_accumulation_steps=grad_accum_steps,
        report_to="none",
        load_best_model_at_end=True,
        save_total_limit=1
    )
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=val_dataset
    )
    trainer.train()
    model.save_pretrained(out_dir)
    train_dataset.tokenizer.save_pretrained(out_dir)
    # manual evaluation loop: collect predictions plus prediction entropies,
    # split by whether the prediction was correct
    y_true, y_pred = [], []
    true_entropies = []
    false_entropies = []
    with torch.no_grad():
        for item in test_dataset:
            input_ids = item["input_ids"].unsqueeze(0).to(device)
            mask = item["attention_mask"].unsqueeze(0).to(device)
            label = item["labels"]
            outputs = model(input_ids, mask, return_dict=True)
            logits = outputs.logits[0]
            pred = torch.argmax(logits).item()
            y_pred.append(pred)
            y_true.append(label)
            scores = torch.softmax(logits, dim=0).cpu().numpy()
            ent = entropy(scores)
            if pred == label:
                true_entropies.append(ent)
            else:
                false_entropies.append(ent)
    print("Avg true entropy: {}".format(mean(true_entropies)))
    if false_entropies:
        print("Avg false entropy: {}".format(mean(false_entropies)))
    print(classification_report(y_true, y_pred))
if __name__ == "__main__":
    # CLI entry point: train a headline-pair classifier on annotated data.
    parser = argparse.ArgumentParser()
    parser.add_argument("--input-path", type=str, required=True)
    parser.add_argument("--out-dir", type=str, required=True)
    parser.add_argument("--max-tokens", type=int, default=60)
    parser.add_argument("--min-agreement", type=float, default=0.69)
    parser.add_argument("--epochs", type=int, default=4)
    parser.add_argument("--eval-steps", type=int, default=32)
    parser.add_argument("--warmup-steps", type=int, default=16)
    parser.add_argument("--lr", type=float, default=2e-05)
    parser.add_argument("--batch-size", type=int, default=8)
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--task", type=str, choices=("full", "simple"), default="simple")
    parser.add_argument("--grad-accum-steps", type=int, default=16)
    parser.add_argument("--model-name", type=str, default="xlm-roberta-large")
    args = parser.parse_args()
    main(**vars(args))
| 6,161 | 31.603175 | 99 | py |
HeadlineCause | HeadlineCause-main/headline_cause/active_learning/infer_clf.py | import argparse
import json
import torch
import numpy as np
from scipy.stats import entropy
from scipy.special import softmax
from tqdm import tqdm
from transformers import AutoTokenizer, AutoModelForSequenceClassification
from util import write_jsonl, read_jsonl, gen_batch
def main(
    input_path,
    model_path,
    output_path,
    max_tokens,
    models_count,
    batch_size
):
    """Score unlabeled pairs with MC-Dropout uncertainty for active learning.

    Runs the classifier *models_count* times with dropout enabled, then
    stores per-record average class scores plus three uncertainty measures
    (predictive entropy, expected entropy, and their difference — BALD).
    """
    device = "cuda" if torch.cuda.is_available() else "cpu"
    tokenizer = AutoTokenizer.from_pretrained(model_path)
    model = AutoModelForSequenceClassification.from_pretrained(model_path)
    model = model.to(device)
    model.train()  # Monte-Carlo Dropout
    output_records = []
    records = list(read_jsonl(input_path))
    for batch in tqdm(gen_batch(records, batch_size)):
        s1 = [r["left_title"] for r in batch]
        s2 = [r["right_title"] for r in batch]
        inputs = tokenizer(
            text=s1,
            text_pair=s2,
            add_special_tokens=True,
            max_length=max_tokens,
            padding="max_length",
            truncation="longest_first",
            return_tensors="pt"
        )
        inputs = inputs.to(device)
        num_labels = model.num_labels
        # scores from each stochastic forward pass: [batch, passes, classes]
        all_scores = torch.zeros((len(batch), models_count, num_labels))
        with torch.no_grad():
            for model_num in range(models_count):
                output = model(**inputs)
                logits = output.logits
                scores = torch.softmax(logits, dim=1).cpu()
                all_scores[:, model_num, :] = scores
        for sample_num in range(len(batch)):
            sample = batch[sample_num]
            sample_scores = all_scores[sample_num]
            avg_scores = torch.mean(sample_scores, dim=0).tolist()
            # entropy of the mean prediction (total/predictive uncertainty)
            entropy_over_avg = float(entropy(avg_scores))
            # mean of per-pass entropies (aleatoric part)
            entropies = [float(entropy(scores)) for scores in sample_scores]
            avg_entropy = float(np.mean(entropies))
            # BALD = mutual information between prediction and model weights
            bald_score = entropy_over_avg - avg_entropy
            sample["entropy"] = entropy_over_avg
            sample["avg_entropy"] = avg_entropy
            sample["bald"] = bald_score
            sample["scores"] = avg_scores
            output_records.append(sample)
    write_jsonl(output_records, output_path)
if __name__ == "__main__":
    # CLI entry point: score a pool of pairs for active-learning selection.
    parser = argparse.ArgumentParser()
    parser.add_argument("--input-path", type=str, required=True)
    parser.add_argument("--model-path", type=str, required=True)
    parser.add_argument("--output-path", type=str, required=True)
    parser.add_argument("--batch-size", type=int, default=128)
    parser.add_argument("--models-count", type=int, default=3)
    parser.add_argument("--max-tokens", type=int, default=60)
    args = parser.parse_args()
    main(**vars(args))
| 2,774 | 32.035714 | 76 | py |
opioid-repurposing | opioid-repurposing-main/generate_bt_fps_mean.py | from fairseq.models.roberta import RobertaModel
import argparse
import sys
import numpy as np
import torch
def load_pretrain_model(model_name_or_path, checkpoint_file, data_name_or_path, bpe='smi'):
    """Load a pretrained RoBERTa checkpoint in eval mode.

    NOTE: the model is loaded on CPU only for now.
    """
    model = RobertaModel.from_pretrained(model_name_or_path,
                                         checkpoint_file,
                                         data_name_or_path,
                                         bpe=bpe)
    # disable dropout etc. for deterministic feature extraction
    model.eval()
    return model
def extract_hidden(pretrain_model, target_file):
    """Run *pretrain_model* over every non-empty line of *target_file*.

    Returns a dict mapping sample index (0-based, counting non-empty lines
    only) to a numpy array of shape [n_tokens, hidden_dim] with the last
    transformer layer's hidden states for that line.
    """
    hidden_features = {}
    sample_num = 0  # index over non-empty lines only
    with open(target_file) as f:
        for line in f:
            line = line.strip()
            if len(line) == 0:
                continue
            tokens = pretrain_model.encode(line)
            # truncate over-long inputs but keep the final (EOS) token
            if len(tokens) > pretrain_model.args.max_positions:
                tokens = torch.cat(
                    (tokens[:pretrain_model.args.max_positions - 1], tokens[-1].unsqueeze(0)))
            _, all_layer_hiddens = pretrain_model.model(
                tokens.unsqueeze(0), features_only=True, return_all_hiddens=True)
            # last entry of 'inner_states' is the final layer:
            # shape [tokens_num, sample_num(default=1), hidden_dim]
            hidden_info = all_layer_hiddens['inner_states'][-1]
            # BUG FIX: the original keyed this dict by the raw file line
            # number, so any blank line left a None entry behind and pushed
            # later keys past range(sample_num); key by the running sample
            # counter instead so keys are exactly 0..n_samples-1.
            hidden_features[sample_num] = hidden_info.squeeze(1).cpu().detach().numpy()
            sample_num += 1
    # hidden_features type: dict, length: samples_num
    return hidden_features
def extract_features_from_hidden(hidden_info):
    """Mean-pool per-token hidden states into one feature vector per sample.

    *hidden_info* maps sample index -> array of shape [n_tokens, hidden_dim];
    returns an array of shape [n_samples, hidden_dim].
    """
    num_samples = len(hidden_info)
    hidden_dim = np.shape(hidden_info[0])[-1]
    features = np.zeros([num_samples, hidden_dim])
    for sample_idx, token_states in hidden_info.items():
        # token_states shape: [tokens, embed_dim] -> average over tokens
        features[sample_idx, :] = token_states.mean(axis=0)
    return features
def main(args):
    """End-to-end pipeline: load model, extract hidden states, pool, save."""
    model = load_pretrain_model(args.model_name_or_path,
                                args.checkpoint_file,
                                args.data_name_or_path,
                                args.bpe)
    hidden_info = extract_hidden(model, args.target_file)
    print('Generate features from hidden information')
    features = extract_features_from_hidden(hidden_info)
    print(f'Features shape: {np.shape(features)}')
    np.save(args.save_feature_path, features)
def parse_args(args):
    """Parse command-line options for feature extraction.

    Args:
        args: list of argument strings, e.g. ``sys.argv[1:]``.

    Returns:
        argparse.Namespace with the parsed options.
    """
    parser = argparse.ArgumentParser(description="Tools kit for downstream jobs")
    parser.add_argument('--model_name_or_path', default=None, type=str,
                        help='Pretrained model folder')
    parser.add_argument('--checkpoint_file', default='checkpoint_best.pt', type=str,
                        help='Pretrained model name')
    parser.add_argument('--data_name_or_path', default=None, type=str,
                        help="Pre-training dataset folder")
    parser.add_argument('--dict_file', default='dict.txt', type=str,
                        help="Pre-training dict filename(full path)")
    parser.add_argument('--bpe', default='smi', type=str)
    parser.add_argument('--target_file', default=None, type=str,
                        help="Target file for feature extraction, default format is .smi")
    parser.add_argument('--save_feature_path', default='extract_f1.npy', type=str,
                        help="Saving feature filename(path)")
    # BUG FIX: the original called parser.parse_args() with no arguments,
    # which ignored the *args* parameter and silently re-parsed sys.argv.
    return parser.parse_args(args)
def cli_main():
    """Command-line entry point: parse argv, echo the options, run main()."""
    options = parse_args(sys.argv[1:])
    print(options)
    main(options)
if __name__ == '__main__':
    # Script entry point.
    cli_main()
    print('End!')
| 3,724 | 33.490741 | 91 | py |
LAMOL | LAMOL-master/test.py | import torch
import csv
import os
import json
import logging
from fp16 import FP16_Module
import GPUtil
from collections import OrderedDict
from settings import args, MODEL_CLASS, TOKENIZER, SPECIAL_TOKEN_IDS, init_logging
from settings import MEMORY_FACTOR, LEN_FACTOR, TASK_DICT, MODEL_CONFIG, DATA_ATTRS, SPECIAL_TOKENS, CONFIG_CLASS, CONFIG_NAME
from utils import QADataset, top_k_top_p_filtering, create_dataloader, logits_to_tokens, get_model_dir
from utils import sample_sequence, remove_id, get_gen_token, lll_unbound_setting
from metrics import compute_metrics
logger = logging.getLogger(__name__)
def test_one_to_one(task_load, task_eval, model, score_dict):
    """Evaluate *model* (trained on *task_load*) on *task_eval*'s test set.

    Decodes one answer per test example (first token here, remainder via
    sample_sequence), scores the predictions, writes them to a CSV next to
    the checkpoint, and stores the metrics in *score_dict* in place.
    NOTE(review): assumes a single GPU — cqs[0]/len_cqs[0] below drop the
    per-GPU split produced by the collate function.
    """
    logger.info("start to test { task: %s (load) %s (eval), seq train type: %s }" % (task_load, task_eval, args.seq_train_type))
    test_qadata = QADataset(TASK_DICT[task_eval]["test"] , "test", SPECIAL_TOKEN_IDS[task_load]).sort()
    max_a_len = test_qadata.max_a_len
    test_dataloader = create_dataloader(test_qadata, "test")
    n_examples = len(test_qadata)
    logger.info("len of test dataset: {}".format(n_examples))
    # Per-example decoding state: indices still generating, partial outputs,
    # cached attention "pasts" per layer, and per-example length limits.
    need_process = OrderedDict()
    qa_results = [0 for _ in range(n_examples)]
    all_pasts = [[0 for _ in range(n_examples)] for __ in range(MODEL_CONFIG.n_layer)]
    max_tot_lens = [0 for _ in range(n_examples)]
    cnt = 0
    for n_steps, (cqs, len_cqs, _, _, _, _, _) in enumerate(test_dataloader):
        # assume n_gpus == 1
        cqs = cqs[0]
        len_cqs = len_cqs[0]
        n_inputs = cqs.shape[0]
        all_outputs = model(input_ids=cqs.cuda())
        outputs = all_outputs[0]
        if args.model_name == "gpt2":
            pasts = all_outputs[1]
        # logits at the last context position give the first answer token
        next_logits = outputs[range(n_inputs), len_cqs-1, :] / args.temperature_qa
        next_tokens = logits_to_tokens(next_logits).cpu()
        for i in range(n_inputs):
            # max answer length + this example's context length
            max_tot_lens[cnt] = max_a_len + test_qadata[cnt][1]
            qa_results[cnt] = cqs[i][:len_cqs[i]]
            if next_tokens[i] != SPECIAL_TOKEN_IDS["eos_token"]:
                qa_results[cnt] = torch.cat((cqs[i][:len_cqs[i]], next_tokens[i]))
                if len(qa_results[cnt]) not in [max_tot_lens[cnt], args.max_len]:
                    # not finished: queue for continued sampling with cached pasts
                    need_process.update([[cnt, None]])
                    if args.model_name == "gpt2":
                        for layer_id in range(MODEL_CONFIG.n_layer):
                            all_pasts[layer_id][cnt] = pasts[layer_id][:, i, ..., :len_cqs[i], :].type(torch.float32 if args.fp32 else torch.half)
            cnt += 1
        if len(need_process) > int(12 * args.memory_sizes[0] / cqs.shape[1]): # dynamic threshold to avoid out of memory
            sample_sequence(model, need_process, qa_results, all_pasts, max_tot_lens)
    sample_sequence(model, need_process, qa_results, all_pasts, max_tot_lens)
    if task_eval in ['wikisql','woz.en','multinli.in.out']:
        # these tasks must be scored in original dataset order
        ids = test_qadata.get_indices()
        test_qadata.sort_by_index()
        qa_results = [x[1] for x in sorted([(i, g) for i, g in zip(ids, qa_results)])]
    for i in range(len(test_qadata)):
        _, len_cq, _, _, Y, _, _, _ = test_qadata[i]
        if task_eval in ['wikisql','woz.en']:
            Y = test_qadata.answers[i]
        else:
            Y = list(filter(lambda x: x != -1, Y))[:-1]  # remove eos
            Y = ' '.join([str(y) for y in Y]).split(str(SPECIAL_TOKEN_IDS["pad_token"]))
            Y = [TOKENIZER.decode(list(map(int, y.split()))) for y in Y]
        # pair (decoded prediction past the context, gold answer(s))
        qa_results[i] = [TOKENIZER.decode(qa_results[i].tolist()[len_cq:]), Y]
    get_test_score(task_eval, qa_results, score_dict)
    model_dir = model.model_dir
    ep = model.ep
    results_path = os.path.join(model_dir,"qa_{}_{}.csv".format(task_eval,ep+1))
    if not args.debug:
        with open(results_path, "w",encoding="utf-8") as f:
            qa_writer = csv.writer(f,delimiter=',')
            qa_writer.writerow(["y","pred"])
            for pred, y in qa_results:
                if task_eval == 'wikisql':
                    y = y["answer"]
                elif task_eval == 'woz.en':
                    y = y[1]
                qa_writer.writerow([y,pred])
    return model, score_dict
def get_test_score(task_eval,qa_results,score_dict):
    """Compute task-appropriate metrics for *qa_results* and store them
    under *task_eval* in *score_dict* (mutated in place)."""
    metric_flags = dict(
        bleu='iwslt.en.de' in task_eval or 'multinli.in.out' in task_eval,
        dialogue='woz.en' in task_eval,
        rouge='cnn_dailymail' in task_eval,
        logical_form='wikisql' in task_eval,
        corpus_f1='zre' in task_eval,
    )
    score_dict[task_eval] = compute_metrics(qa_results, **metric_flags)
def test_one_to_many(task_load):
    """Evaluate every saved epoch checkpoint of *task_load* on all tasks.

    For each epoch, loads the checkpoint, evaluates it on args.tasks via
    test_one_to_one, and finally dumps all per-epoch score dicts to
    metrics.json in the checkpoint directory.
    NOTE(review): model_dir is referenced after the loop, so this assumes
    at least one training epoch — confirm n_train_epochs[task_load] >= 1.
    """
    score_dicts = []
    for ep in range(args.n_train_epochs[task_load]):
        model_dir = get_model_dir([task_load])
        model_path = os.path.join(model_dir, 'model-{}'.format(ep+1))
        config_path = os.path.join(model_dir,CONFIG_NAME)
        # register this task's generation token so the tokenizer can decode it
        gen_token = get_gen_token(task_load)
        TOKENIZER.add_tokens([gen_token])
        SPECIAL_TOKENS[task_load] = gen_token
        SPECIAL_TOKEN_IDS[task_load] = TOKENIZER.convert_tokens_to_ids(gen_token)
        model_config = CONFIG_CLASS.from_json_file(config_path)
        model = MODEL_CLASS(model_config).cuda().eval()
        state_dict = torch.load(model_path, map_location='cuda:0')
        model.load_state_dict(state_dict)
        if not args.fp32:
            model = FP16_Module(model)
        # stash bookkeeping used by test_one_to_one when writing the CSV
        model.ep = ep
        model.model_dir = model_dir
        logger.info("task: {}, epoch: {}".format(task_load, ep+1))
        score_dict = {k:None for k in args.tasks}
        with torch.no_grad():
            for task_eval in args.tasks:
                test_one_to_one(task_load, task_eval, model, score_dict)
        logger.info("score: {}".format(score_dict))
        score_dicts.append(score_dict)
    with open(os.path.join(model_dir, "metrics.json"),"w") as f:
        json.dump(score_dicts, f)
if __name__ == '__main__':
    # Test-time entry point: single-GPU only.
    if args.n_gpus > 1:
        raise NotImplementedError("test can be run with only one gpu currently!")
    if args.model_name == "gpt2":
        args.fp32 = False # always use fp16 in testing
    if not args.debug:
        # silence noisy transformer-library logging unless debugging
        logging.getLogger("pytorch_transformers").setLevel(logging.WARNING)
        logging.getLogger("pytorch_transformers.tokenization_utils").setLevel(logging.CRITICAL)
    init_logging(os.path.join(args.model_dir_root, 'log_test.txt'))
    logger.info('args = {}'.format(args))
    if args.seq_train_type == "multitask":
        # a single multitask model covers all tasks at once
        test_one_to_many('_'.join(args.tasks))
    else:
        if args.unbound:
            # re-split the task data for the "unbound" LLL setting
            TASK_DICT = lll_unbound_setting(split_size=args.unbound, data_type="test",test_target="origin")
            for task_load in args.splitted_tasks:
                test_one_to_many(task_load)
        else:
            for task_load in args.tasks:
                test_one_to_many(task_load)
| 6,865 | 41.645963 | 146 | py |
LAMOL | LAMOL-master/settings.py | import os
import json
import argparse
import logging
import datetime
logger = logging.getLogger(__name__)
import GPUtil
from pytorch_transformers import OpenAIGPTLMHeadModel, OpenAIGPTTokenizer, OpenAIGPTConfig
from pytorch_transformers import GPT2LMHeadModel, GPT2Tokenizer, GPT2Config, CONFIG_NAME
import torch
# Repository root (directory containing this file).
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
# Label value used to mask out positions ignored by the loss.
FILL_VAL = -1
# Exponent used when estimating memory cost of a batch from sequence length.
LEN_FACTOR = 1.163
# Per-strategy scaling of GPU memory -> batch-size budget.
MEMORY_FACTOR = {
    "finetune": 0.58,
    "multitask": 0.58,
    "lll": 0.35,
    "ewc": 0.30,
    "mas": 0.18,
    "gem": 0.50,
}
# GPU name fragments considered fp16-capable.
TURING_ARCHS = {'Tesla V100', '2080 Ti'}
# model name -> (model class, tokenizer class, config class)
MODEL_CLASSES = {
    'gpt2': (GPT2LMHeadModel, GPT2Tokenizer, GPT2Config),
    'openai-gpt': (OpenAIGPTLMHeadModel, OpenAIGPTTokenizer, OpenAIGPTConfig),
}
# Checkpoint filename prefixes.
SAVE_NAME = 'model-'
FINAL_SAVE_NAME = 'model-finish'
def parse_args():
    """Parse CLI options, probe available GPUs, and build the shared
    training configuration.

    Returns the tuple unpacked at module level:
    (args, model_config, model_class, tokenizer, config_class,
     special_token_ids, special_tokens, data_attrs, tokens_weight)
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--adam_epsilon", default=1e-4, type=float)
    parser.add_argument("--add_task_tokens", action="store_true")
    parser.add_argument("--data_dir", type=str, required=True)
    parser.add_argument("--debug", action="store_true")
    parser.add_argument("--decay_style", type=str, default="linear")
    parser.add_argument("--fp32", action="store_true")
    parser.add_argument("--real_sample", action="store_true")
    parser.add_argument("--unbound", type=int, default=0)
    parser.add_argument("--gen_lm_sample_percentage", type=float, default=0.05)
    parser.add_argument("--learning_rate", type=float, default=6.25e-5)
    parser.add_argument("--logging_steps", type=int, default=1000)
    parser.add_argument("--lm_lambda", type=float, default=0.25)
    parser.add_argument("--lr_schedule", type=str, default="warmup_linear")
    parser.add_argument("--max_grad_norm", type=int, default=1)
    parser.add_argument("--max_n_epochs", type=int, default=9)
    parser.add_argument("--min_batch_size", type=int, default=4)
    parser.add_argument("--min_n_steps", type=int, default=1500)
    parser.add_argument("--model_dir_root", type=str, required=True)
    parser.add_argument("--model_name", type=str, default="gpt2", choices=["gpt2", "openai-gpt"])
    parser.add_argument("--n_gpus", type=int, default=1)
    parser.add_argument("--n_train_epochs", type=int, default=3)
    parser.add_argument("--dynamic_epochs", action="store_true")
    parser.add_argument("--n_warmup_ratio", type=float, default=0.005)
    parser.add_argument("--n_workers", type=int, default=4)
    parser.add_argument("--use_sep", action="store_true")
    parser.add_argument("--reg_lambda", type=float, default=1.)
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--seq_train_type", type=str, default="lll", choices=["lll","finetune","multitask","mas","ewc","gem"])
    parser.add_argument("--tasks", nargs='+', default=["squad2"])
    parser.add_argument("--skip_tasks", nargs='+')
    parser.add_argument("--temperature_lm", type=float, default=1.0)
    parser.add_argument("--temperature_qa", type=float, default=1.0)
    parser.add_argument("--test_batch_size", type=int, default=0)
    parser.add_argument("--tokens_weight", type=float, default=5)
    parser.add_argument("--top_k_lm", type=int, default=20)
    parser.add_argument("--top_k_qa", type=int, default=20)
    parser.add_argument("--top_p_lm", type=float, default=0.)
    parser.add_argument("--top_p_qa", type=float, default=0.)
    parser.add_argument("--train_batch_size", type=int, default=0)
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--qp_margin", type=float, default=0.5)
    args = parser.parse_args()
    if args.debug:
        args.logging_steps = 1
        torch.manual_seed(0)
        # BUG FIX: was "deterministric" (typo), which silently created an
        # unused module attribute instead of enabling deterministic cuDNN.
        torch.backends.cudnn.deterministic = True
    args.model_dir_root = os.path.join(args.model_dir_root, args.model_name,
            args.seq_train_type, "{}_{}".format("_".join(args.tasks),
            args.gen_lm_sample_percentage) if "lll" in args.seq_train_type else "_".join(args.tasks))
    # pick GPUs that are essentially idle
    args.device_ids = GPUtil.getAvailable(maxLoad=0.05, maxMemory=0.05, limit=args.n_gpus)
    if len(args.device_ids) == 0:
        logger.error('No available GPUs!')
        raise NotImplementedError("No CPU mode available!")
    if len(args.device_ids) < args.n_gpus:
        logger.warning('Available number of GPU = {} < n_gpus = {}'.format(len(args.device_ids), args.n_gpus))
        args.n_gpus = len(args.device_ids)
        logger.warning('Continue training with {} GPUs'.format(args.n_gpus))
    torch.cuda.set_device(args.device_ids[0])
    gpus = GPUtil.getGPUs()
    gpu_names = [gpus[device_id].name for device_id in args.device_ids]
    # fall back to fp32 unless every selected GPU is fp16-capable
    if not all(any(turing_arch in gpu_name for turing_arch in TURING_ARCHS) for gpu_name in gpu_names):
        logger.warning('Not all gpus support fp16 training! Will use fp32 instead.')
        args.fp32 = True
    if args.model_name == "openai-gpt":
        args.fp32 = True # openai-gpt currently doesn't support fp16
    if not args.fp32:
        # fp16 halves activation memory, so the batch budget grows
        global MEMORY_FACTOR
        MEMORY_FACTOR = dict([k, v*1.4] for k, v in MEMORY_FACTOR.items())
    args.memory_sizes = [gpus[device_id].memoryTotal for device_id in args.device_ids]
    # first GPU also holds optimizer state, give it a smaller share
    args.memory_sizes[0] = args.memory_sizes[0] * (1 - 0.04 * (args.n_gpus-1))
    for i in range(1, args.n_gpus):
        args.memory_sizes[i] = args.memory_sizes[i] * 1.04
    if args.train_batch_size <= 0:
        args.train_batch_size = [int(memory_size * MEMORY_FACTOR[args.seq_train_type]) for memory_size in args.memory_sizes]
    if args.test_batch_size <= 0:
        args.test_batch_size = [int(memory_size * MEMORY_FACTOR[args.seq_train_type]) for memory_size in args.memory_sizes]
    special_tokens = {"ans_token":'__ans__', "pad_token":'__pad__', "unk_token":'__unk__', "eos_token": '<|endoftext|>'}
    if args.use_sep:
        special_tokens["sep_token"] = '__sep__'
    model_class, tokenizer_class, config_class = MODEL_CLASSES[args.model_name]
    tokenizer = tokenizer_class.from_pretrained(args.model_name)
    tokenizer.add_tokens(list(special_tokens.values()))
    special_token_ids = {k:tokenizer.convert_tokens_to_ids(v) for k,v in special_tokens.items()}
    model_config = config_class.from_pretrained(args.model_name)
    model_config.vocab_size = len(tokenizer)
    # up-weight the answer (and optional separator) markers in the loss
    tokens_weight = torch.ones([model_config.vocab_size], dtype=torch.float).cuda()
    tokens_weight[special_token_ids["ans_token"]] = args.tokens_weight
    if args.use_sep:
        tokens_weight[special_token_ids["sep_token"]] = args.tokens_weight
    args.max_len = model_config.n_positions
    data_attrs_path = os.path.join(BASE_DIR,"data_attrs.json")
    assert os.path.exists(data_attrs_path)
    with open(data_attrs_path, "r") as f:
        data_attrs = json.load(f)
    if args.seq_train_type == "multitask":
        args.n_train_epochs = {'_'.join(args.tasks): args.n_train_epochs}
    elif args.unbound:
        pass
    else:
        if "gem" in args.seq_train_type:
            args.memory_data = []
        if args.dynamic_epochs:
            # scale epochs so every task sees roughly the same number of examples
            data_sizes = {task: data_attrs[task]["train"]["data_size"] for task in args.tasks}
            max_total_data_size = max(data_sizes.values()) * args.n_train_epochs
            args.n_train_epochs = {d[0]: min(args.max_n_epochs, max_total_data_size//d[1]) for d in data_sizes.items()}
        else:
            args.n_train_epochs = {task: args.n_train_epochs for task in args.tasks}
    return args, model_config, model_class, tokenizer, config_class, special_token_ids, special_tokens, data_attrs, tokens_weight
class TimeFilter(logging.Filter):
    """Logging filter that annotates each record with 'uptime' (total run
    time) and 'relative' (seconds since the previous record)."""

    def filter(self, record):
        # first record has no predecessor -> delta of 0.0
        previous = getattr(self, "last", record.relativeCreated)
        elapsed = record.relativeCreated / 1000 - previous / 1000
        record.relative = "{:.1f}".format(elapsed)
        record.uptime = str(datetime.timedelta(seconds=record.relativeCreated // 1000))
        self.last = record.relativeCreated
        return True
def init_logging(filename):
    """Configure root logging to append to *filename* and mirror to stderr,
    with TimeFilter timing annotations on every handler."""
    fmt = "%(asctime)s - %(uptime)s - %(relative)ss - %(levelname)s - %(name)s - %(message)s"
    logging.basicConfig(format=fmt, filename=filename, filemode='a', level=logging.INFO)
    console = logging.StreamHandler()
    console.setFormatter(logging.Formatter(fmt))
    root = logging.getLogger()
    root.addHandler(console)
    # both the file handler and the console handler get the timing filter
    for handler in root.handlers:
        handler.addFilter(TimeFilter())
# Build the shared configuration once at import time; every other module
# imports these names from settings.
args, MODEL_CONFIG, MODEL_CLASS, TOKENIZER, CONFIG_CLASS, SPECIAL_TOKEN_IDS, SPECIAL_TOKENS, DATA_ATTRS, TOKENS_WEIGHT = parse_args()
# Per-task dataset locations (all converted to SQuAD-style JSON). For some
# tasks no separate test split exists, so eval/test point at the same file.
TASK_DICT = {
    "squad1": {
        "train":os.path.join(args.data_dir,"squad-train-v1.1.json"),
        "eval":os.path.join(args.data_dir,"squad-dev-v1.1.json"),
        "test":os.path.join(args.data_dir,"squad-dev-v1.1.json"),
        "n_train_epochs": args.n_train_epochs
    },
    "squad2": {
        "train":os.path.join(args.data_dir,"squad-train-v2.0.json"),
        "eval":os.path.join(args.data_dir,"squad-dev-v2.0.json"),
        "test":os.path.join(args.data_dir,"squad-dev-v2.0.json"),
        "n_train_epochs": args.n_train_epochs
    },
    "iwslt.en.de": {
        "train":os.path.join(args.data_dir,"iwslt.en.de_to_squad-train-v2.0.json"),
        "eval":os.path.join(args.data_dir,"iwslt.en.de_to_squad-dev-v2.0.json"),
        "test":os.path.join(args.data_dir,"iwslt.en.de_to_squad-test-v2.0.json"),
        "n_train_epochs": args.n_train_epochs
    },
    "cnn_dailymail": {
        "train":os.path.join(args.data_dir,"cnn_dailymail_to_squad-train-v2.0.json"),
        "eval":os.path.join(args.data_dir,"cnn_dailymail_to_squad-dev-v2.0.json"),
        "test":os.path.join(args.data_dir,"cnn_dailymail_to_squad-test-v2.0.json"),
        "n_train_epochs": args.n_train_epochs
    },
    "multinli.in.out": {
        "train":os.path.join(args.data_dir,"multinli.in.out_to_squad-train-v2.0.json"),
        "eval":os.path.join(args.data_dir,"multinli.in.out_to_squad-dev-v2.0.json"),
        "test":os.path.join(args.data_dir,"multinli.in.out_to_squad-dev-v2.0.json"),
        "n_train_epochs": args.n_train_epochs
    },
    "sst": {
        "train":os.path.join(args.data_dir,"sst_to_squad-train-v2.0.json"),
        "eval":os.path.join(args.data_dir,"sst_to_squad-dev-v2.0.json"),
        "test":os.path.join(args.data_dir,"sst_to_squad-test-v2.0.json"),
        "n_train_epochs": args.n_train_epochs
    },
    "srl": {
        "train":os.path.join(args.data_dir,"srl_to_squad-train-v2.0.json"),
        "eval":os.path.join(args.data_dir,"srl_to_squad-dev-v2.0.json"),
        "test":os.path.join(args.data_dir,"srl_to_squad-test-v2.0.json"),
        "n_train_epochs": args.n_train_epochs
    },
    "zre": {
        "train":os.path.join(args.data_dir,"zre_to_squad-train-v2.0.json"),
        "eval":os.path.join(args.data_dir,"zre_to_squad-dev-v2.0.json"),
        "test":os.path.join(args.data_dir,"zre_to_squad-test-v2.0.json"),
        "n_train_epochs": args.n_train_epochs
    },
    "woz.en": {
        "train":os.path.join(args.data_dir,"woz.en_to_squad-train-v2.0.json"),
        "eval":os.path.join(args.data_dir,"woz.en_to_squad-dev-v2.0.json"),
        "test":os.path.join(args.data_dir,"woz.en_to_squad-test-v2.0.json"),
        "n_train_epochs": args.n_train_epochs
    },
    "wikisql": {
        "train":os.path.join(args.data_dir,"wikisql_to_squad-train-v2.0.json"),
        "eval":os.path.join(args.data_dir,"wikisql_to_squad-dev-v2.0.json"),
        "test":os.path.join(args.data_dir,"wikisql_to_squad-test-v2.0.json"),
        "n_train_epochs": args.n_train_epochs
    },
    "schema": {
        "train":os.path.join(args.data_dir,"schema_to_squad-train-v2.0.json"),
        "eval":os.path.join(args.data_dir,"schema_to_squad-dev-v2.0.json"),
        "test":os.path.join(args.data_dir,"schema_to_squad-test-v2.0.json"),
        "n_train_epochs": args.n_train_epochs
    },
    "ag": {
        "train":os.path.join(args.data_dir,"ag_to_squad-train-v2.0.json"),
        "eval":os.path.join(args.data_dir,"ag_to_squad-test-v2.0.json"),
        "test":os.path.join(args.data_dir,"ag_to_squad-test-v2.0.json"),
        "n_train_epochs": args.n_train_epochs
    },
    "dbpedia": {
        "train":os.path.join(args.data_dir,"dbpedia_to_squad-train-v2.0.json"),
        "eval":os.path.join(args.data_dir,"dbpedia_to_squad-test-v2.0.json"),
        "test":os.path.join(args.data_dir,"dbpedia_to_squad-test-v2.0.json"),
        "n_train_epochs": args.n_train_epochs
    },
    "yahoo": {
        "train":os.path.join(args.data_dir,"yahoo_to_squad-train-v2.0.json"),
        "eval":os.path.join(args.data_dir,"yahoo_to_squad-test-v2.0.json"),
        "test":os.path.join(args.data_dir,"yahoo_to_squad-test-v2.0.json"),
        "n_train_epochs": args.n_train_epochs
    },
    "amazon": {
        "train":os.path.join(args.data_dir,"amazon_to_squad-train-v2.0.json"),
        "eval":os.path.join(args.data_dir,"amazon_to_squad-test-v2.0.json"),
        "test":os.path.join(args.data_dir,"amazon_to_squad-test-v2.0.json"),
        "n_train_epochs": args.n_train_epochs
    },
    "yelp": {
        "train":os.path.join(args.data_dir,"yelp_to_squad-train-v2.0.json"),
        "eval":os.path.join(args.data_dir,"yelp_to_squad-test-v2.0.json"),
        "test":os.path.join(args.data_dir,"yelp_to_squad-test-v2.0.json"),
        "n_train_epochs": args.n_train_epochs
    },
}
| 13,903 | 47.957746 | 133 | py |
LAMOL | LAMOL-master/utils.py | import torch
from torch.utils.data import Dataset, DataLoader, Sampler
import torch.nn.functional as F
import re
import csv
import json
import uuid
import pickle as pkl
import numpy as np
from copy import deepcopy
import os
from glob import glob
import logging
import pathlib
from collections import OrderedDict
from settings import args, TASK_DICT, SPECIAL_TOKENS, SPECIAL_TOKEN_IDS, FILL_VAL
from settings import TOKENIZER, LEN_FACTOR, DATA_ATTRS, MEMORY_FACTOR, MODEL_CONFIG, MODEL_CLASS
from multiprocessing import Pool
import sys
import time
import quadprog
import io
sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding="UTF-8")
logger = logging.getLogger(__name__)
def make_dir(d):
    """Create directory *d* (including parents); no-op if it already exists."""
    path = pathlib.Path(d)
    path.mkdir(parents=True, exist_ok=True)
def get_gen_token(task):
    """Return the LM-generation token: per-task if enabled, else shared."""
    return '__' + task + '__' if args.add_task_tokens else '__gen__'
def get_model_dir(tasks):
    """Checkpoint directory for *tasks*; multitask runs share the root dir."""
    if args.seq_train_type == "multitask":
        return args.model_dir_root
    return os.path.join(args.model_dir_root, tasks[0])
def get_losses(parallel_model, cqa, Y, gen_X, gen_Y, loss_fct):
    """Return (mean QA loss, weighted mean LM loss).

    Outside LLL training the LM objective is skipped and a zero scalar is
    returned in its place.
    """
    qa_logits = parallel_model(cqa)
    qa_loss = torch.mean(loss_fct([torch.transpose(l, 1, 2) for l in qa_logits], Y))
    if "lll" not in args.seq_train_type:
        return qa_loss, torch.tensor(0.)
    # LLL: additionally train the LM replay objective, scaled by lm_lambda
    lm_logits = parallel_model(gen_X)
    lm_loss = torch.mean(loss_fct([torch.transpose(l, 1, 2) for l in lm_logits], gen_Y))
    return qa_loss, args.lm_lambda * lm_loss
def pad_to_max_len(l, pad_len, val):
    """Return *l* extended with *pad_len* copies of *val*."""
    padding = [val] * pad_len
    return l + padding
def pad_all_to_max_len(ls, val):
    """Right-pad every list in *ls* with *val* up to the longest list's length."""
    target = max(len(l) for l in ls)
    return [l + [val] * (target - len(l)) for l in ls]
def top_k_top_p_filtering(logits, top_k=0, top_p=0.0, filter_value=-float('Inf')):
    """Mask (in place) all but the top-k logits, returning the same tensor.

    Args:
        logits: logits distribution shape (vocabulary size)
        top_k > 0: keep only the k tokens with highest probability.
        top_p: accepted for API compatibility but currently IGNORED — the
            nucleus (top-p) branch is disabled in this codebase.
        filter_value: value written into masked positions.

    Adapted from: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317
    """
    top_k = min(top_k, logits.size(-1))  # never ask for more than vocab size
    if top_k > 0:
        # everything strictly below the k-th best logit gets masked
        kth_best = torch.topk(logits, top_k)[0][..., -1, None]
        logits[logits < kth_best] = filter_value
    return logits
def varlen_collate_fn(data):
    """Collate dataset tuples into per-GPU chunks of padded tensors.

    Each datum is (cq, len_cq, cqa, len_cqa, Y, gen_X, gen_Y); inputs are
    padded with the pad token, targets with FILL_VAL, and every tensor is
    split into ceil(len(data)/n_gpus)-sized pieces (one per GPU).
    """
    batch_size = (len(data) + args.n_gpus - 1) // args.n_gpus
    cqs = torch.tensor(pad_all_to_max_len([datum[0] for datum in data], SPECIAL_TOKEN_IDS["pad_token"])).split(batch_size)
    len_cqs = torch.tensor([datum[1] for datum in data]).split(batch_size)
    cqas = torch.tensor(pad_all_to_max_len([datum[2] for datum in data], SPECIAL_TOKEN_IDS["pad_token"])).split(batch_size)
    len_cqas = torch.tensor([datum[3] for datum in data]).split(batch_size)
    Ys = torch.tensor(pad_all_to_max_len([datum[4] for datum in data], FILL_VAL)).split(batch_size)
    gen_Xs = torch.tensor(pad_all_to_max_len([datum[5] for datum in data], SPECIAL_TOKEN_IDS["pad_token"])).split(batch_size)
    gen_Ys = torch.tensor(pad_all_to_max_len([datum[6] for datum in data], FILL_VAL)).split(batch_size)
    return list(cqs), list(len_cqs), list(cqas), list(len_cqas), list(Ys), list(gen_Xs), list(gen_Ys)
def dynamic_collate_fn(data, batch_size):
    """Collate *data* into variable-size batches bounded by a memory budget.

    Examples are greedily grouped while max_len**LEN_FACTOR * group_size stays
    under batch_size[cnt] (one budget per GPU); each flushed group becomes one
    set of padded tensors appended to the per-field output lists.
    NOTE(review): assumes data is non-empty — `ed` below is only bound inside
    the loop, so an empty input would raise NameError.
    """
    def local_collate():
        # Flush examples data[st:ed] as one padded batch; reads st/ed and
        # cqa_max_len from the enclosing scope, appends to the outer lists.
        null_counter = 0
        _cqs, _len_cqs, _cqas, _len_cqas, _Ys, _gen_Xs, _gen_Ys = [], [], [], [], [], [], []
        Y_max_len = max(len(data[j][4]) for j in range(st, ed))
        cq_max_len = max(len(data[j][0]) for j in range(st, ed))
        for j in range(st, ed):
            if None in data[j] or [] in data[j]:
                # malformed example: skip it rather than crash the batch
                null_counter+=1
                logger.warning('null example in collate_fn, count: {}'.format(null_counter))
                continue
            pad_len = cqa_max_len - len(data[j][2])
            _cqs.append(pad_to_max_len(data[j][0], cq_max_len-len(data[j][0]), SPECIAL_TOKEN_IDS["pad_token"]))
            _len_cqs.append(data[j][1])
            _cqas.append(pad_to_max_len(data[j][2], pad_len, SPECIAL_TOKEN_IDS["pad_token"]))
            _len_cqas.append(data[j][3])
            _Ys.append(pad_to_max_len(data[j][4], Y_max_len - len(data[j][4]), FILL_VAL))
            _gen_Xs.append(pad_to_max_len(data[j][5], pad_len, SPECIAL_TOKEN_IDS["pad_token"]))
            _gen_Ys.append(pad_to_max_len(data[j][6], pad_len, FILL_VAL))
        cqs.append(torch.tensor(_cqs))
        len_cqs.append(torch.tensor(_len_cqs))
        cqas.append(torch.tensor(_cqas))
        len_cqas.append(torch.tensor(_len_cqas))
        Ys.append(torch.tensor(_Ys))
        gen_Xs.append(torch.tensor(_gen_Xs))
        gen_Ys.append(torch.tensor(_gen_Ys))
    cqs, len_cqs, cqas, len_cqas, Ys, gen_Xs, gen_Ys = [], [], [], [], [], [], []
    cqa_max_len, cnt, st = 0, 0, 0
    for ed, datum in enumerate(data):
        ln = len(datum[2]) # use cqas to calibrate
        # flush the current group when adding this example would blow the budget
        if max(cqa_max_len, ln)**LEN_FACTOR * (ed - st + 1) > batch_size[cnt]:
            local_collate()
            cnt += 1
            cqa_max_len = 0
            st = ed
        cqa_max_len = max(cqa_max_len, ln)
    ed += 1 # otherwise ed will be len(data)-1
    local_collate()
    return cqs, len_cqs, cqas, len_cqas, Ys, gen_Xs, gen_Ys
class QADataset(Dataset):
    """SQuAD-format QA dataset tokenized into LAMOL training tuples.

    Each item is (cq, len_cq, cqa, len_cqa, Y, gen_X, gen_Y, id):
    context+question input, the same with the answer appended, the
    answer-only target, and the LM-replay input/target pair.
    """
    def __init__(self, data_paths, data_type, gen_token, extra_data=[]):
        # NOTE: the mutable default for extra_data is never mutated here
        # (it is only rebound), so it is harmless in practice.
        self.data_type = data_type
        self.gen_token = gen_token
        if args.use_sep:
            self.sep_token = SPECIAL_TOKEN_IDS["sep_token"]
        self.ans_token = SPECIAL_TOKEN_IDS["ans_token"]
        self.eos_token = SPECIAL_TOKEN_IDS["eos_token"]
        self.pad_token = SPECIAL_TOKEN_IDS["pad_token"]
        if not isinstance(data_paths, list):
            data_paths = [data_paths]
        data = []
        for data_path in data_paths:
            if not data_path:
                continue
            with open(data_path, "r") as f:
                raw_ds = json.load(f)
            # flatten SQuAD layout: data -> paragraphs
            raw_ds = map(lambda x: x["paragraphs"], raw_ds["data"])
            d = []
            for raw_d in raw_ds:
                d.extend(raw_d)
            data += d
        self.data = []
        self.max_a_len = 0
        # wikisql / woz store gold answers in a side file keyed by example order
        if len(data_paths)==1 and data_paths[0] is not None and ('wiki' in data_paths[0] or 'woz' in data_paths[0]):
            #data = self._sort_by_index(data)
            #args.n_workers = 1
            if 'wiki' in data_paths[0]:
                answers_file = "wikisql_answers.json"
            elif 'woz' in data_paths[0]:
                answers_file = "woz.en_answers.json"
            with open(os.path.join(args.data_dir,answers_file),"r") as f:
                self.answers = json.load(f)
        if len(data) > 0:
            self.data_tokenization(data)
        if len(extra_data) > 0:
            # extra_data holds LM-generated replay examples (token id lists)
            extra_data = map(lambda x: self.etl_single_extra_data(x), extra_data)
            extra_data = list(filter(lambda x:x, extra_data))
            if args.gen_lm_sample_percentage > 0. and len(extra_data) == 0:
                logger.warning("No good extra data but sample percentage > 0!")
        self.data += extra_data
    def etl_single_extra_data(self, data):
        # Parse one generated replay sample (gen_token followed by token ids)
        # back into a training tuple; returns None on malformed samples.
        gen_token = data[0]
        data = ' '.join([str(datum) for datum in data[1:]])
        try:
            if args.use_sep:
                context, qa = re.split(str(SPECIAL_TOKEN_IDS["sep_token"]), data)
            else:
                context = ""
                qa = data
            question, answer = re.split(str(SPECIAL_TOKEN_IDS["ans_token"]), qa)
            context = [int(c) for c in context.strip().split()]
            question = [int(q) for q in question.strip().split()]
            answer = [int(a) for a in re.sub(str(SPECIAL_TOKEN_IDS["eos_token"]), "", answer).strip().split()]
            uid = uuid.uuid1().hex
            data = self.parse_example(gen_token, context, question, answer, uid)
        except ValueError:
            return
        return data
    def concat_example(self, gen_token, c, sep_token, q, ans_token, a, eos_token):
        # Join token-id segments, truncating the context so the whole example
        # fits in args.max_len. Returns None (implicitly) if even the
        # non-context part is too long — callers crash on len(None); this
        # appears to rely on such examples never surviving upstream filtering.
        example = sep_token + q + ans_token + a
        if len(example) + 1 > args.max_len:
            logger.warning('an example with len {} is too long!'.format(len(example) + 1))
            return
        example = gen_token + c[:args.max_len-len(example)-1] + example + eos_token
        return example
    def parse_example(self, gen_token, context, question, answer, idx):
        # Build all model views of one QA pair (see class docstring).
        if args.use_sep:
            cq_example = self.concat_example([], context, [self.sep_token], question, [self.ans_token], [], [])
            cqa_example = self.concat_example([], context, [self.sep_token], question, [self.ans_token], answer, [])
        else:
            cq_example = self.concat_example([], context, [], question, [self.ans_token], [], [])
            cqa_example = self.concat_example([], context, [], question, [self.ans_token], answer, [])
        # Y masks everything except the answer (+eos) with FILL_VAL
        Y_example = self.concat_example([], [], [], [], [], answer, [self.eos_token])
        Y_example = [FILL_VAL] * (len(cqa_example) - len(Y_example)) + Y_example
        if args.use_sep:
            gen_X_example = self.concat_example([gen_token], context, [self.sep_token], question, [self.ans_token], answer, [])
            gen_Y_example = self.concat_example([], context, [self.sep_token], question, [self.ans_token], answer, [self.eos_token])
        else:
            gen_X_example = self.concat_example([gen_token], context, [], question, [self.ans_token], answer, [])
            gen_Y_example = self.concat_example([], context, [], question, [self.ans_token], answer, [self.eos_token])
        return cq_example, len(cq_example), cqa_example, len(cqa_example), Y_example, gen_X_example, gen_Y_example, idx
    def parallel_tokenization(self, d):
        # Tokenize one paragraph dict; multiple gold answers are joined with
        # the pad token as separator. Returns (examples, longest answer len).
        examples = []
        context = TOKENIZER.encode(d["context"])
        max_a_len = 0
        for qa in d["qas"]:
            question = TOKENIZER.encode(qa["question"])
            raw_answers = qa["answers"]
            if len(raw_answers) == 0:
                assert qa["is_impossible"]
                raw_answers.append({"text": ""})
            answer = []
            for i, raw_answer in enumerate(raw_answers):
                answer.extend(TOKENIZER.encode(raw_answer["text"]))
                if i != len(raw_answers) - 1:
                    answer.append(self.pad_token)
            max_a_len = max(max_a_len, len(answer))
            examples.append(self.parse_example(self.gen_token, context, question, answer, qa.get("id", 0)))
        return examples, max_a_len
    def data_tokenization(self, data):
        # Tokenize serially in debug mode (first 10 items), else in parallel.
        if args.debug:
            data = data[:10]
            new_data = []
            for datum in data:
                new_data.append(self.parallel_tokenization(datum))
            data = new_data
        else:
            with Pool(args.n_workers) as pool:
                data = pool.map(self.parallel_tokenization, data)
        for datum, max_a_len in data:
            self.data.extend(datum)
            self.max_a_len = max(self.max_a_len, max_a_len)
    def sort(self):
        # Sort by context+question length (shortest first); returns self.
        self.data.sort(key=lambda x: len(x[0]))
        return self
    def sort_by_index(self):
        self.data.sort(key=lambda x: x[-1])
    def get_indices(self):
        # Example ids in current storage order.
        return [d[-1] for d in self.data]
    #def _sort_by_index(self,data):
    #    datum = []
    #    for d in data:
    #        for qa in d["qas"]:
    #            datum.append({"context":d["context"], "qas":[qa]})
    #    datum.sort(key=lambda x:x["qas"][0]["id"])
    #    return datum
    def __len__(self):
        return len(self.data)
    def __getitem__(self, index):
        return self.data[index]
class EarlyStopping:
    """Stop training when validation loss has not improved for *patience*
    consecutive checks; checkpoints the model on every improvement."""

    def __init__(self, logger, patience=7, verbose=False):
        self.patience = patience
        self.verbose = verbose
        self.counter = 0
        self.best_score = None
        self.early_stop = False
        self.val_loss_min = np.Inf
        self.logger = logger

    def __call__(self, val_loss, model, model_dir):
        score = -val_loss  # higher is better
        if self.best_score is not None and score < self.best_score:
            # no improvement: count toward the patience budget
            self.counter += 1
            self.logger.info(f'EarlyStopping counter: {self.counter} out of {self.patience}')
            if self.counter >= self.patience:
                self.early_stop = True
        else:
            # first evaluation, or a new best: record and checkpoint
            self.best_score = score
            self.save_checkpoint(val_loss, model, model_dir)
            self.counter = 0

    def save_checkpoint(self, val_loss, model, model_dir):
        '''Saves model when validation loss decrease.'''
        if self.verbose:
            self.logger.info(f'Validation loss decreased ({self.val_loss_min:.6f} --> {val_loss:.6f}). Saving model ...')
        model.save_pretrained(model_dir)
        TOKENIZER.save_pretrained(model_dir)
        self.val_loss_min = val_loss
class TrainStep:
    """One optimization step: backward pass, gradient clipping, optional GEM
    gradient projection, optimizer step (regularized or plain) and LR
    scheduling, handling both fp16 (apex-style optimizer) and fp32 paths."""
    def __init__(self, model, optimizer, scheduler):
        self.model = model
        self.optimizer = optimizer
        self.scheduler = scheduler
    def __call__(self, loss, scheduler_steps):
        """Backpropagate `loss`, then advance the scheduler `scheduler_steps` times."""
        if not args.fp32:
            # fp16: defer the fp16->fp32 master-grad copy until after backward.
            self.optimizer.backward(loss, update_master_grads=False)
        else:
            loss.backward()
        if not args.fp32:
            self.optimizer.update_master_grads()
            self.optimizer.clip_master_grads(args.max_grad_norm)
        else:
            torch.nn.utils.clip_grad_norm_(self.model.parameters(), args.max_grad_norm)
        # GEM: if the current gradient conflicts with any stored past-task
        # gradient (negative dot product), project it into the feasible cone.
        if "gem" in args.seq_train_type and self.model.task_id >0:
            store_grad(self.model.parameters, self.model.grads, self.model.grad_dims,self.model.task_id)
            indx = torch.cuda.LongTensor([i for i in range(self.model.task_id)])
            dotp = torch.mm(self.model.grads[:, self.model.task_id].unsqueeze(0),
                            self.model.grads.index_select(1, indx))
            if (dotp < 0).sum() != 0:
                project2cone2(self.model.grads[:, self.model.task_id].unsqueeze(1),
                              self.model.grads.index_select(1, indx), args.qp_margin)
                # copy gradients back
                overwrite_grad(self.model.parameters,
                               self.model.grads[:, self.model.task_id],
                               self.model.grad_dims)
        if args.seq_train_type in args.REG_TYPE_KEYS:
            # MAS/EWC optimizers take the importance dict as an extra argument.
            self.optimizer.step(self.model.reg_params)
        else:
            self.optimizer.step()
        # Skip LR updates on fp16 overflow steps (the optimizer skipped them too).
        if args.fp32 or (not self.optimizer.overflow):
            for i in range(scheduler_steps):
                self.scheduler.step()
        self.optimizer.zero_grad()
class GEMStep:
    """Recompute and store reference gradients for every past task's memory
    data, as required by GEM before taking the current task's step."""
    def __init__(self, model, parallel_model, train_loss_fct, optimizer):
        self.model = model
        self.parallel_model = parallel_model
        self.train_loss_fct = train_loss_fct
        self.optimizer = optimizer
    def __call__(self,current_task_id):
        """Fill self.model.grads[:, t] for every past task t < current_task_id."""
        for past_task_id, md in enumerate(args.memory_data):
            # Not saving current task's grads.
            # NOTE: this `return` (not `continue`) ends the whole loop once the
            # current task index is reached — past tasks come first in order.
            if past_task_id >= current_task_id: return
            qadata = QADataset(None, "test", "gen", md)
            dataloader = create_dataloader(qadata, "test")
            grads_tmp = torch.zeros(sum(self.model.grad_dims),).cuda()
            if not args.fp32:
                grads_tmp = grads_tmp.half()
            for _, _, cqa, _, Y, gen_X, gen_Y in dataloader:
                #CHECK
                n_inputs = sum(_cqa.shape[0] for _cqa in cqa)
                self.optimizer.zero_grad()
                # Move each per-GPU shard of the batch onto its device.
                for i in range(len(cqa)):
                    cqa[i] = (cqa[i].to(args.device_ids[i]),)
                    Y[i] = Y[i].to(args.device_ids[i])
                    gen_X[i] = (gen_X[i].to(args.device_ids[i]),)
                    gen_Y[i] = gen_Y[i].to(args.device_ids[i])
                losses = get_losses(self.parallel_model, cqa, Y, gen_X, gen_Y, self.train_loss_fct)
                loss = sum(losses)
                if not args.fp32:
                    self.optimizer.backward(loss, update_master_grads=False)
                else:
                    loss.backward()
                if not args.fp32:
                    #copy fp16 grads to fp32 grads
                    self.optimizer.update_master_grads()
                    self.optimizer.clip_master_grads(args.max_grad_norm)
                else:
                    torch.nn.utils.clip_grad_norm_(self.model.parameters(), args.max_grad_norm)
                # Accumulate the flattened gradient, weighted by batch size.
                i = 0
                for param in self.model.parameters():
                    if param.grad is not None:
                        beg = 0 if i == 0 else sum(self.model.grad_dims[:i])
                        end = sum(self.model.grad_dims[:i+1])
                        grads_tmp[beg: end] += param.grad.data.view(-1)*n_inputs
                        i += 1
            # Average over the memory dataset to get the reference gradient.
            grads_tmp /= len(qadata)
            self.model.grads[:, past_task_id].copy_(grads_tmp)
            self.optimizer.zero_grad()
class DynamicBatchSampler(Sampler):
    """Batch sampler that packs variable-length examples under a per-GPU
    token budget: a batch is cut when max_len**LEN_FACTOR * batch_count would
    exceed the budget for the current GPU, or when max_batch_size is hit."""
    def __init__(self, dataset, data_type, max_batch_size):
        self.dataset = dataset
        self.data_type = data_type
        # batch_size is a per-GPU list of budgets (see create_dataloader).
        if data_type == "train":
            self.batch_size = args.train_batch_size
        else:
            self.batch_size = args.test_batch_size
        self.n_samples = len(dataset)
        self.max_batch_size = max_batch_size
    def __iter__(self):
        # Deterministic order for test/debug; random permutation for training.
        if args.debug or self.data_type == "test":
            indices = range(self.n_samples)
        else:
            indices = np.random.permutation(self.n_samples)
        max_len, cnt, st = 0, 0, 0
        batch = []
        for ed, idx in enumerate(indices):
            # Example cost is driven by its cqa field length (index 2).
            ln = len(self.dataset[idx][2])
            if max(max_len, ln)**LEN_FACTOR * (ed - st + 1) > self.batch_size[cnt]:
                st = ed
                cnt += 1
                max_len = 0
                # Yield only after budgets for all GPUs have been filled.
                if cnt == args.n_gpus:
                    yield batch
                    cnt = 0
                    batch = []
            max_len = max(max_len, ln)
            batch.append(idx)
            if len(batch) == self.max_batch_size and self.data_type == "train":
                yield batch
                cnt, max_len, st = 0, 0, ed
                batch = []
        if len(batch) > 0:
            yield batch
    def __len__(self):
        # Number of batches is data-dependent and unknown ahead of iteration.
        raise NotImplementedError
def create_dataloader(dataset, data_type, max_batch_size=1000000000):
    """Build a DataLoader for `dataset`.

    If the configured batch size is a list (one budget per GPU), dynamic
    batching via DynamicBatchSampler is used (shuffle/batch_size must then be
    left at their defaults); otherwise a plain fixed-size loader is created.
    """
    if data_type == "train":
        batch_size = args.train_batch_size
    else:
        batch_size = args.test_batch_size
    if isinstance(batch_size, list):
        collate_fn=lambda x,bs=batch_size: dynamic_collate_fn(x, bs)
        shuffle = False
        batch_size = 1
        batch_sampler = DynamicBatchSampler(dataset, data_type, max_batch_size)
    else:
        collate_fn=lambda x: varlen_collate_fn(x)
        # Shuffle only for (non-debug) training.
        shuffle = not (data_type != "train" or args.debug)
        batch_sampler = None
    dataloader = DataLoader(dataset, num_workers=args.n_workers,
                            collate_fn=collate_fn,
                            shuffle=shuffle,
                            batch_size=batch_size,
                            batch_sampler=batch_sampler)
    return dataloader
class WrapModel(torch.nn.Module):
    """Thin wrapper that forwards to `model` and keeps only the first output
    (e.g. the logits of a transformers model that returns a tuple)."""
    def __init__(self, model):
        super(WrapModel, self).__init__()
        self.model = model
    def forward(self, input_ids):
        """Run the wrapped model and return its first output."""
        return self.model(input_ids)[0]
def remove_id(idx, need_process, all_pasts):
    """Drop a finished sequence id from the decode queue and release its
    cached per-layer past tensors (replaced by the scalar 0)."""
    assert idx in need_process
    need_process.pop(idx)
    for layer in range(MODEL_CONFIG.n_layer):
        all_pasts[layer][idx] = 0
def sample_sequence(model, need_process, qa_results, all_pasts, max_tot_lens):
    """Autoregressively extend every pending sequence in `need_process`.

    Sequences are decoded in groups of equal current length so they can be
    batched; the batch size is derived from GPU memory and the sequence
    length. For gpt2, per-layer `past` caches in `all_pasts` let each step
    feed only the last token. A sequence is retired when it emits the eos
    token or reaches its maximum length.
    """
    while len(need_process) > 0:
        first_id = next(iter(need_process))
        shortest_len = len(qa_results[first_id])
        # Memory-based batch size: longer sequences -> smaller batches.
        decode_batch_size = int(args.memory_sizes[0] * MEMORY_FACTOR[args.seq_train_type] // (shortest_len+1)**LEN_FACTOR)
        it = iter(need_process)
        stop = False
        remove_ids = []
        while not stop:
            batch_ids, input_ids, past = [], [], [[] for _ in range(MODEL_CONFIG.n_layer)]
            while True:
                try:
                    cur_id = next(it)
                    # Only sequences with the current shortest length are batched
                    # together; a longer one signals the end of this group.
                    if len(qa_results[cur_id]) > shortest_len:
                        stop = True
                        break
                    batch_ids.append(cur_id)
                    if args.model_name == "gpt2":
                        # With a past cache, only the last token is fed.
                        input_ids.append(qa_results[cur_id][-1:])
                        for layer_id in range(MODEL_CONFIG.n_layer):
                            past[layer_id].append(all_pasts[layer_id][cur_id])
                    else:
                        input_ids.append(qa_results[cur_id])
                    if len(input_ids) == decode_batch_size:
                        break
                except StopIteration:
                    stop = True
                    break
            n_inputs = len(input_ids)
            if n_inputs == 0:
                break
            input_ids = torch.stack(input_ids)
            if args.model_name == "gpt2":
                for layer_id in range(MODEL_CONFIG.n_layer):
                    past[layer_id] = torch.stack(past[layer_id], dim=1)
                all_outputs = model(input_ids=input_ids.cuda(), past=past)
            else:
                all_outputs = model(input_ids=input_ids.cuda())
            outputs = all_outputs[0]
            if args.model_name == "gpt2":
                pasts = all_outputs[1]
            next_logits = outputs[..., -1, :] / args.temperature_qa
            next_tokens = logits_to_tokens(next_logits).cpu()
            for i, cur_id in enumerate(batch_ids):
                if next_tokens[i] == SPECIAL_TOKEN_IDS["eos_token"]:
                    remove_ids.append(cur_id)
                else:
                    qa_results[cur_id] = torch.cat((qa_results[cur_id], next_tokens[i]))
                    if len(qa_results[cur_id]) in [max_tot_lens[cur_id], args.max_len]:
                        remove_ids.append(cur_id)
                    elif args.model_name == "gpt2":
                        # Keep only this sequence's slice of the new past cache.
                        for layer_id in range(MODEL_CONFIG.n_layer):
                            all_pasts[layer_id][cur_id] = pasts[layer_id][:, i].type(torch.float if args.fp32 else torch.half)
        for idx in remove_ids:
            remove_id(idx, need_process, all_pasts)
def write_extra_data(dump_path, qa_results):
    """Dump generated (or sampled real) pseudo-examples to a one-column csv
    whose header row is "gen"; one decoded example string per row."""
    logger.info(f"writing extra data in {dump_path} ...")
    with open(dump_path,"w",newline="",encoding="utf-8") as f:
        lm_writer = csv.writer(f,delimiter=',')
        lm_writer.writerow(["gen"])
        for l in qa_results:
            lm_writer.writerow([l])
def parse_single_real_data(data, task):
    """Render one SQuAD-style document (first paragraph / first QA pair only)
    as a flat training string for `task`, using the configured special tokens."""
    paragraph = data["paragraphs"][0]
    context = paragraph["context"]
    qa = paragraph["qas"][0]
    question = qa["question"]
    answer = qa["answers"][0]["text"]
    if args.use_sep:
        return "{}{}{}{}{}{}{}".format(SPECIAL_TOKENS[task], context, SPECIAL_TOKENS["sep_token"], question, SPECIAL_TOKENS["ans_token"], answer, SPECIAL_TOKENS["eos_token"])
    return "{}{} {}{}{}{}".format(SPECIAL_TOKENS[task], context, question, SPECIAL_TOKENS["ans_token"], answer, SPECIAL_TOKENS["eos_token"])
def get_real_data(task, train_extra_data, accum=True, encode=True):
    """Sample real examples from previous tasks as replay data for `task`.

    With accum=True the sample budget is split evenly over all previous
    tasks; otherwise only the immediately preceding task is sampled.
    Sampled strings are appended to train_extra_data (encoded) when
    encode=True, and always dumped to "real.csv". Returns the dump path.
    """
    task_idx = args.tasks.index(task)
    gen_size = DATA_ATTRS[task]["train"]["data_size"]
    if accum:
        prev_tasks = args.tasks[:task_idx]
        gen_size = int(np.ceil(gen_size * args.gen_lm_sample_percentage))//len(prev_tasks)
    else:
        prev_tasks = [args.tasks[task_idx-1]]
        gen_size = int(gen_size * args.gen_lm_sample_percentage)
    datum = []
    for prev_task in prev_tasks:
        with open(TASK_DICT[prev_task]["train"],"r") as f:
            data = data_expand(json.load(f)["data"])
        # Sampling WITH replacement (np.random.choice default).
        indices = np.random.choice(range(len(data)), gen_size)
        for i in indices:
            d = parse_single_real_data(data[i],prev_task)
            datum.append(d)
            if encode:
                train_extra_data.append(TOKENIZER.encode(d))
    # NOTE(review): `prev_task` leaks from the loop — the csv is written under
    # the LAST previous task's model dir; confirm this is intended.
    model_dir = get_model_dir([prev_task])
    dump_path = os.path.join(model_dir,"real.csv")
    write_extra_data(dump_path, datum)
    return dump_path
def read_extra_data(gen_path, train_extra_data):
    """Append the TOKENIZER-encoded rows of a previously generated csv
    (skipping its header row) to `train_extra_data`."""
    with open(gen_path,"r") as lm_file:
        rows = csv.reader(lm_file,delimiter=',')
        next(rows)  # skip the "gen" header row
        for fields in rows:
            train_extra_data.append(TOKENIZER.encode(fields[0].strip()))
def create_extra_data(task, prev_task, model, train_extra_data):
    """Produce pseudo replay data for `task` by sampling from `model`.

    Falls back to real-data sampling when args.real_sample is set, and to
    reading a previously generated "lm.csv" when one exists. Otherwise it
    generates gen_size sequences (split evenly over earlier tasks, each
    seeded with that task's special token), extends train_extra_data with
    the token ids, and writes the decoded strings to lm.csv.
    """
    if args.real_sample:
        logger.info(f"using real data as extra data")
        return get_real_data(task, train_extra_data)
    task_cnt = args.tasks.index(task)
    model_dir = get_model_dir([prev_task])
    gen_path = os.path.join(model_dir,"lm.csv")
    if os.path.exists(gen_path):
        logger.info(f"extra data exists in {gen_path}, read it!")
        return read_extra_data(gen_path, train_extra_data)
    gen_size = DATA_ATTRS[task]["train"]["data_size"]
    gen_size = int(np.ceil(gen_size * args.gen_lm_sample_percentage))
    # Make gen_size divisible by the number of previous tasks.
    gen_size -= (gen_size % task_cnt)
    if args.debug:
        gen_size = task_cnt
    model.eval()
    need_process = OrderedDict()
    qa_results = []
    # Each pending sequence starts with its task's special token id.
    for task_name in args.tasks[:task_cnt]:
        qa_results.extend([torch.tensor([SPECIAL_TOKEN_IDS[task_name]]) for _ in range(gen_size//task_cnt)])
    # Empty per-layer past caches: [n_layer][gen_size] tensors of length 0.
    all_pasts = [[
        torch.empty(2, MODEL_CONFIG.n_head, 0, MODEL_CONFIG.n_embd//MODEL_CONFIG.n_head,
                    dtype=torch.float if args.fp32 else torch.half).cuda()
        for _ in range(gen_size)
    ] for __ in range(MODEL_CONFIG.n_layer)]
    max_tot_lens = [args.max_len for _ in range(gen_size)]
    for i in range(gen_size):
        need_process.update([[i, None]])
        # Flush periodically so the past caches do not exhaust GPU memory.
        if len(need_process) > int(args.memory_sizes[0] * 0.12):
            sample_sequence(model, need_process, qa_results, all_pasts, max_tot_lens)
    sample_sequence(model, need_process, qa_results, all_pasts, max_tot_lens)
    model.train()
    qa_results = [res.tolist() for res in qa_results]
    train_extra_data.extend(qa_results)
    qa_results = [TOKENIZER.decode(res) for res in qa_results]
    write_extra_data(gen_path, qa_results)
def logits_to_tokens(next_logits):
    """Sample one token id per row after top-k / top-p filtering the logits."""
    filtered_logits = top_k_top_p_filtering(next_logits, top_k=args.top_k_qa, top_p=args.top_p_qa)
    # softmax yields probabilities, which is what multinomial expects.
    probs = F.softmax(filtered_logits, dim=-1)
    return torch.multinomial(probs, num_samples=1)
def lll_unbound_setting(split_size=10,data_type="train",test_target="self"):
    """Configure TASK_DICT/args for the "unbounded" LLL setting.

    For training it materializes the split data files via
    create_lll_unbound_data; for testing it registers the split tasks'
    test paths and (depending on test_target) appends them to or replaces
    args.tasks. Returns the updated TASK_DICT.
    """
    data_dir = os.path.join(args.data_dir,"{}_{}".format("_".join(args.tasks),args.gen_lm_sample_percentage))
    if data_type == "test":
        args.splitted_tasks = [f"task_{i}" for i in range(split_size)]
        args.n_train_epochs = {task: args.n_train_epochs for task in args.splitted_tasks}
        if test_target in ["self","all"]:
            for no in range(split_size):
                task = f"task_{no}"
                test_data_path = os.path.join(data_dir,f"{task}-test.json")
                TASK_DICT[task] = {}
                TASK_DICT[task]["test"] = test_data_path
            if test_target == "all":
                args.tasks += args.splitted_tasks
            else:
                args.tasks = args.splitted_tasks
    elif data_type == "train":
        create_lll_unbound_data(split_size)
        args.n_train_epochs = {task: args.n_train_epochs for task in args.tasks}
    return TASK_DICT
def create_lll_unbound_data(split_size=10):
    """Concatenate all tasks' training data and re-split it into
    `split_size` equal chunks ("task_0" .. "task_N"), writing train/test
    json files and updating TASK_DICT/DATA_ATTRS/args.tasks in place.

    Test data is re-split proportionally: each new chunk's test set is the
    fraction of each original task's test data that the chunk covers
    (computed by get_split_indices).
    """
    data_dir = os.path.join(args.data_dir,"{}_{}".format("_".join(args.tasks),args.gen_lm_sample_percentage))
    pathlib.Path(data_dir).mkdir(parents=True, exist_ok=True)
    datum = []
    test_datum = []
    data_sizes = []
    chunk_sizes = []
    for task in args.tasks:
        train_data_path = TASK_DICT[task]["train"]
        with open(train_data_path, "r") as f:
            data = json.load(f)["data"]
        data = data_expand(data)
        data_sizes.append(len(data))
        datum += data
        test_data_path = TASK_DICT[task]["test"]
        with open(test_data_path, "r") as f:
            data = json.load(f)["data"]
        data = data_expand(data)
        test_datum.append(data)
    chunk_size = int(np.ceil(len(datum)/split_size))
    tasks = []
    for no, i in enumerate(range(0, len(datum), chunk_size)):
        task = f"task_{no}"
        tasks.append(task)
        # Last chunk takes the remainder.
        chunk = datum[i:i + chunk_size] if i < len(datum)-chunk_size else datum[i:]
        chunk_sizes.append(len(chunk))
        DATA_ATTRS[task] = {"train":{"data_size":None}}
        DATA_ATTRS[task]["train"]["data_size"] = len(chunk)
        train_data_path = os.path.join(data_dir,f"{task}-train.json")
        with open(train_data_path,"w") as f:
            json.dump({"data":chunk},f)
        TASK_DICT[task] = {}
        TASK_DICT[task]["train"] = train_data_path
    args.tasks = tasks
    # NOTE: get_split_indices consumes data_sizes/chunk_sizes in place.
    sis = get_split_indices(data_sizes,chunk_sizes)
    test_split = []
    for dic in sis.values():
        merged_data = []
        for k,v in dic.items():
            from_index = int(len(test_datum[k])*v[0])
            to_index = int(len(test_datum[k])*v[1])
            merged_data+= test_datum[k][from_index:to_index]
        test_split.append(merged_data)
    for no, chunk in enumerate(test_split):
        task = f"task_{no}"
        test_data_path = os.path.join(data_dir,f"{task}-test.json")
        with open(test_data_path,"w") as f:
            json.dump({"data":chunk},f)
        TASK_DICT[task]["test"] = test_data_path
def data_expand(data):
    """Flatten SQuAD-style documents so that every returned document holds
    exactly one paragraph with exactly one QA pair."""
    expanded = []
    for doc in data:
        for paragraph in doc["paragraphs"]:
            for qa in paragraph["qas"]:
                single = {"context": paragraph["context"], "qas": [qa]}
                expanded.append({"paragraphs": [single]})
    return expanded
def get_split_indices(data_sizes, chunk_sizes):
    """Map each chunk index to {dataset index: (start_frac, end_frac)}.

    Walks the dataset sizes and chunk sizes in parallel, recording which
    fraction of each original dataset every chunk covers. Both input lists
    are consumed (emptied) in place, matching the original contract.
    """
    totals = deepcopy(data_sizes)   # original sizes, for fraction denominators
    records = {}
    consumed = {}                   # dataset index -> fraction already assigned
    ds_idx = 0                      # index into totals (original dataset order)
    chunk_idx = 0                   # index of the chunk being filled
    while data_sizes:
        remaining = data_sizes[0]
        capacity = chunk_sizes[0]
        taken = min(remaining, capacity)
        frac = taken / totals[ds_idx]
        start = consumed.get(ds_idx, 0)
        records.setdefault(chunk_idx, {})[ds_idx] = (start, start + frac)
        consumed[ds_idx] = start + frac
        if remaining > capacity:
            # Chunk filled; dataset continues into the next chunk.
            data_sizes[0] -= capacity
            chunk_sizes.pop(0)
            chunk_idx += 1
        else:
            # Dataset exhausted; chunk may still have room.
            chunk_sizes[0] -= remaining
            data_sizes.pop(0)
            ds_idx += 1
            if remaining == capacity:
                chunk_sizes.pop(0)
                chunk_idx += 1
    return records
def store_grad(get_ps, grads, grad_dims, task_id):
    """Flatten every gradient-bearing parameter from get_ps() into column
    `task_id` of the GEM gradient matrix `grads`, using `grad_dims` for the
    per-parameter offsets. Parameters without gradients are skipped (and do
    not advance the offset counter, matching the original behavior)."""
    seen = 0
    for p in get_ps():
        if p.grad is None:
            continue
        offset = 0 if seen == 0 else sum(grad_dims[:seen])
        stop = sum(grad_dims[:seen + 1])
        grads[offset:stop, task_id].copy_(p.grad.data.view(-1))
        seen += 1
def overwrite_grad(pp, newgrad, grad_dims):
    """Write the flat vector `newgrad` back into the .grad of every
    gradient-bearing parameter from pp(), slicing by `grad_dims` offsets
    (inverse of store_grad)."""
    seen = 0
    for p in pp():
        if p.grad is None:
            continue
        offset = 0 if seen == 0 else sum(grad_dims[:seen])
        stop = sum(grad_dims[:seen + 1])
        chunk = newgrad[offset:stop].contiguous().view(p.grad.data.size())
        p.grad.data.copy_(chunk)
        seen += 1
def project2cone2(gradient, memories, margin=0.5, eps=1e-3):
    """GEM projection: find the closest gradient to `gradient` whose dot
    product with every past-task gradient in `memories` is >= margin, by
    solving the dual quadratic program, and overwrite `gradient` in place.

    Requires the third-party `quadprog` solver. `eps` regularizes the
    Gram matrix diagonal to keep the QP positive definite.
    """
    memories_np = memories.cpu().t().double().numpy()
    gradient_np = gradient.cpu().contiguous().view(-1).double().numpy()
    t = memories_np.shape[0]
    # Symmetrized Gram matrix of past gradients, regularized on the diagonal.
    P = np.dot(memories_np, memories_np.transpose())
    P = 0.5 * (P + P.transpose()) + np.eye(t) * eps
    q = np.dot(memories_np, gradient_np) * -1
    G = np.eye(t)
    h = np.zeros(t) + margin
    v = quadprog.solve_qp(P, q, G, h)[0]
    # Primal solution: original gradient plus a combination of past gradients.
    x = np.dot(v, memories_np) + gradient_np
    gradient.copy_(torch.Tensor(x).view(-1, 1))
| 33,442 | 38.577515 | 150 | py |
LAMOL | LAMOL-master/scheduler.py | # coding=utf-8
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch DataLoader for TFRecords"""
import torch
from torch.optim.lr_scheduler import _LRScheduler
import math
class AnnealingLR(_LRScheduler):
    """Anneals the learning rate from start to zero along a cosine curve.

    Supports linear warmup followed by linear / cosine / constant decay.
    NOTE: this class deliberately does NOT call _LRScheduler.__init__; it
    keeps its own iteration counter and overrides step()/get_lr() entirely.
    """
    DECAY_STYLES = ['linear', 'cos', 'exp', 'const', 'None']
    def __init__(self, optimizer, start_lr, warmup_iter, num_iters, decay_style=None, last_iter=-1):
        self.optimizer = optimizer
        self.start_lr = start_lr
        self.warmup_iter = warmup_iter
        self.num_iters = last_iter + 1
        self.end_iter = num_iters
        self.decay_style = decay_style.lower() if isinstance(decay_style, str) else None
        # Apply the initial LR immediately (also supports resuming mid-run).
        self.step(self.num_iters)
    def get_lr(self):
        # https://openreview.net/pdf?id=BJYwwY9ll pg. 4
        # Linear warmup phase: scale LR with progress through warmup_iter.
        if self.warmup_iter > 0 and self.num_iters <= self.warmup_iter:
            return float(self.start_lr) * self.num_iters / self.warmup_iter
        else:
            if self.decay_style == self.DECAY_STYLES[0]:
                return self.start_lr*((self.end_iter-(self.num_iters-self.warmup_iter))/self.end_iter)
            elif self.decay_style == self.DECAY_STYLES[1]:
                return self.start_lr / 2.0 * (math.cos(math.pi * (self.num_iters - self.warmup_iter) / self.end_iter) + 1)
            elif self.decay_style == self.DECAY_STYLES[2]:
                raise NotImplementedError("Exponential decay not yet implemented")
            else:
                # 'const', 'None' or unrecognized style: keep the start LR.
                return self.start_lr
    def step(self, step_num=None):
        """Advance to `step_num` (default: one past the current iteration)
        and write the new LR into every optimizer param group."""
        if step_num is None:
            step_num = self.num_iters + 1
        self.num_iters = step_num
        new_lr = self.get_lr()
        for group in self.optimizer.param_groups:
            group['lr'] = new_lr
    def state_dict(self):
        # Only scheduler-owned scalars; the optimizer is checkpointed separately.
        sd = {
                'start_lr': self.start_lr,
                'warmup_iter': self.warmup_iter,
                'num_iters': self.num_iters,
                'decay_style': self.decay_style,
                'end_iter': self.end_iter
        }
        return sd
    def load_state_dict(self, sd):
        self.start_lr = sd['start_lr']
        self.warmup_iter = sd['warmup_iter']
        self.num_iters = sd['num_iters']
        self.end_iter = sd['end_iter']
        self.decay_style = sd['decay_style']
        # Re-apply the restored LR to the optimizer.
        self.step(self.num_iters)
| 2,930 | 38.608108 | 122 | py |
LAMOL | LAMOL-master/fp16util.py | # coding=utf-8
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors
# import mpu
class tofp16(nn.Module):
    """nn.Module that casts its input tensor to half precision (FP16).

    Typically used as the first stage of an FP16 pipeline, e.g. in
    network_to_half.
    """
    def __init__(self):
        super(tofp16, self).__init__()
    def forward(self, input):
        """Return `input` converted to torch.float16."""
        return input.half()
def BN_convert_float(module):
    """Recursively restore FP32 on every affine batch-norm layer inside a
    half-precision model (batch-norm statistics are unstable in FP16).

    Utility for network_to_half(); retained for legacy purposes.
    Returns `module` itself after the in-place conversion.
    """
    if isinstance(module, torch.nn.modules.batchnorm._BatchNorm) and module.affine is True:
        module.float()
    for submodule in module.children():
        BN_convert_float(submodule)
    return module
def network_to_half(network):
    """
    Convert model to half precision in a batchnorm-safe way: inputs are cast
    to FP16 by a leading tofp16() stage while batch-norm layers stay FP32.
    Retained for legacy purposes. It is recommended to use FP16Model.
    """
    half_network = BN_convert_float(network.half())
    return nn.Sequential(tofp16(), half_network)
def convert_module(module, dtype):
    """
    Cast `module`'s own (non-recursive) parameters, their gradients, and
    buffers to `dtype`; non-floating-point tensors are left untouched.
    """
    for p in module.parameters(recurse=False):
        if p is None:
            continue
        if p.data.dtype.is_floating_point:
            p.data = p.data.to(dtype=dtype)
        if p._grad is not None and p._grad.data.dtype.is_floating_point:
            p._grad.data = p._grad.data.to(dtype=dtype)
    for buf in module.buffers(recurse=False):
        if buf is not None and buf.data.dtype.is_floating_point:
            buf.data = buf.data.to(dtype=dtype)
def convert_network(network, dtype):
    """
    Cast every submodule's parameters and buffers to `dtype`, skipping
    affine batch-norm layers (kept in FP32 for numerical stability).
    Returns the converted `network`.
    """
    for submodule in network.modules():
        if isinstance(submodule, torch.nn.modules.batchnorm._BatchNorm) and submodule.affine is True:
            continue
        convert_module(submodule, dtype)
    return network
class FP16Model(nn.Module):
    """
    Wraps a network converted to half precision (batch-norm kept in FP32)
    and casts all forward inputs to FP16 before running it.
    """
    def __init__(self, network):
        super(FP16Model, self).__init__()
        self.network = convert_network(network, dtype=torch.half)
    def forward(self, *inputs):
        """Cast every positional input to FP16 and run the wrapped network."""
        half_inputs = tuple(t.half() for t in inputs)
        return self.network(*half_inputs)
def backwards_debug_hook(grad):
    """Debug hook for master params: they should never receive gradients
    directly (gradients are copied over explicitly), so always raise.
    Fix: corrected the "recieved" typo in the error message."""
    raise RuntimeError("master_params received a gradient in the backward pass!")
def prep_param_lists(model, flat_master=False):
    """
    Creates a list of FP32 master parameters for a given model, as in
    `Training Neural Networks with Mixed Precision: Real Examples`_.
    Args:
        model (torch.nn.Module): Existing Pytorch model
        flat_master (bool, optional, default=False): Flatten the master parameters into a single tensor, as a performance optimization.
    Returns:
        A tuple (``model_params``, ``master_params``). ``model_params`` is a list of the model's parameters for later use with :func:`model_grads_to_master_grads` and :func:`master_params_to_model_params`.  ``master_params`` is a list of FP32 master gradients.  If ``flat_master=True``, ``master_params`` will be a list with one element.
    Example::
        model_params, master_params = prep_param_lists(model)
    .. warning::
        Currently, if ``flat_master=True``, all the model's parameters must be the same type.  If the model has parameters of different types, use ``flat_master=False``, or use :class:`FP16_Optimizer`.
    .. _`Training Neural Networks with Mixed Precision: Real Examples`:
        http://on-demand.gputechconf.com/gtc/2018/video/S81012/
    """
    # Only trainable parameters participate in the fp16/fp32 mirroring.
    model_params = [param for param in model.parameters() if param.requires_grad]
    if flat_master:
        # Give the user some more useful error messages
        try:
            # flatten_dense_tensors returns a contiguous flat array.
            # http://pytorch.org/docs/master/_modules/torch/_utils.html
            master_params = _flatten_dense_tensors([param.data for param in model_params]).float()
        except:
            print("Error in prep_param_lists:  model may contain a mixture of parameters "
                      "of different types.  Use flat_master=False, or use F16_Optimizer.")
            raise
        master_params = torch.nn.Parameter(master_params)
        master_params.requires_grad = True
        # master_params.register_hook(backwards_debug_hook)
        # Pre-allocate the flat gradient so later copies have a destination.
        if master_params.grad is None:
            master_params.grad = master_params.new(*master_params.size())
        return model_params, [master_params]
    else:
        # One detached FP32 clone per model parameter.
        master_params = [param.clone().float().detach() for param in model_params]
        for param in master_params:
            param.requires_grad = True
        return model_params, master_params
def model_grads_to_master_grads(model_params, master_params, flat_master=False):
    """
    Copy (FP16) model gradients into the FP32 master gradients.
    Args:
        model_params: List of model parameters created by :func:`prep_param_lists`.
        master_params: List of FP32 master parameters created by :func:`prep_param_lists`.  If ``master_params`` was created with ``flat_master=True``, ``flat_master=True`` should also be supplied to :func:`model_grads_to_master_grads`.
    """
    if flat_master:
        # Single flat master tensor: flatten all model grads into it.
        # The flattening may incur one more deep copy than is necessary.
        flat_grads = _flatten_dense_tensors([p.grad.data for p in model_params])
        master_params[0].grad.data.copy_(flat_grads)
    else:
        for model_p, master_p in zip(model_params, master_params):
            if model_p.grad is None:
                master_p.grad = None
            else:
                if master_p.grad is None:
                    # Lazily allocate the master gradient buffer.
                    master_p.grad = Variable(master_p.data.new(*master_p.data.size()))
                master_p.grad.data.copy_(model_p.grad.data)
def master_params_to_model_params(model_params, master_params, flat_master=False):
    """
    Copy FP32 master weights back into the (possibly FP16) model weights.
    Args:
        model_params: List of model parameters created by :func:`prep_param_lists`.
        master_params: List of FP32 master parameters created by :func:`prep_param_lists`.  If ``master_params`` was created with ``flat_master=True``, ``flat_master=True`` should also be supplied to :func:`master_params_to_model_params`.
    """
    if flat_master:
        # Unflatten the single master tensor back into per-parameter chunks.
        chunks = _unflatten_dense_tensors(master_params[0].data, model_params)
        for model_p, master_chunk in zip(model_params, chunks):
            model_p.data.copy_(master_chunk)
    else:
        for model_p, master_p in zip(model_params, master_params):
            model_p.data.copy_(master_p.data)
# Backward compatibility fixes
def to_python_float(t):
    """Return a plain Python number from a 0-dim tensor (via .item()) or,
    for containers without .item(), its first element. Backward-compat shim."""
    try:
        return t.item()
    except AttributeError:
        return t[0]
TORCH_MAJOR = int(torch.__version__.split('.')[0])
TORCH_MINOR = int(torch.__version__.split('.')[1])
# clip_grad_norm = mpu.clip_grad_norm
#elif TORCH_MAJOR == 0 and TORCH_MINOR <= 4:
# clip_grad_norm = torch.nn.utils.clip_grad_norm
#else:
clip_grad_norm = torch.nn.utils.clip_grad_norm_
| 7,680 | 36.468293 | 337 | py |
LAMOL | LAMOL-master/regularizers.py | import abc
import math
import torch
from torch.optim import Optimizer, SGD
from settings import args, FILL_VAL, TOKENS_WEIGHT
from utils import get_losses, get_model_dir
from parallel import DataParallelCriterion
from torch.nn import CrossEntropyLoss, MSELoss
import pickle as pkl
import os
from torch.nn.functional import softmax
class Regularizer(abc.ABC):
    """Base class for parameter-importance regularization (MAS / EWC) over a
    sequence of tasks. Subclasses implement the per-task start/end hooks;
    this base provides pickling of model.reg_params to the task's model dir.

    Fix: removed a stray debugging `input()` left in load_reg_params, which
    blocked execution waiting on stdin after loading the pickled params.
    """
    def __init__(self, model, parallel_model, dataloaders, task, prev_task=None):
        self.model = model
        self.parallel_model = parallel_model
        self.dataloaders = dataloaders
        self.task = task
        self.prev_task = prev_task
    @abc.abstractmethod
    def task_start_do(self):
        """Prepare importance bookkeeping before training on self.task."""
        return NotImplemented
    @abc.abstractmethod
    def task_end_do(self):
        """Update and persist importance weights after training on self.task."""
        return NotImplemented
    def save_reg_params(self):
        """Pickle model.reg_params into this task's model directory."""
        model_dir = get_model_dir([self.task])
        reg_params_path = os.path.join(model_dir, "reg_params.pkl")
        with open(reg_params_path, 'wb') as f:
            pkl.dump(self.model.reg_params,f)
    def load_reg_params(self):
        """Restore model.reg_params from the previous task's model directory, if any."""
        if self.prev_task:
            model_dir = get_model_dir([self.prev_task])
            reg_params_path = os.path.join(model_dir, "reg_params.pkl")
            with open(reg_params_path, 'rb') as f:
                self.model.reg_params = pkl.load(f)
class MAS(Regularizer):
    """Memory Aware Synapses: accumulates |gradient of the output L2 norm|
    per parameter as its importance after each task."""
    def task_start_do(self, freeze_layers=[]):
        #self.load_reg_params()
        task_start_do(self.model, freeze_layers)
    def task_end_do(self):
        # Accumulate omega over this task's data, then fold it into the
        # running importance and persist it.
        omega_updater = Omega_update(self.model.parameters(), lr=0.0001, momentum=0.9)
        compute_importance(self.model, self.parallel_model, omega_updater, self.dataloaders)
        accumulate_reg_params(self.model)
        self.save_reg_params()
class EWC(Regularizer):
    """Elastic Weight Consolidation: parameter importance from squared
    gradients (diagonal Fisher approximation), via loss_type="ewc"."""
    def task_start_do(self, freeze_layers=[]):
        #self.load_reg_params()
        task_start_do(self.model, freeze_layers)
    def task_end_do(self):
        # Accumulate squared-gradient omega, fold into the running
        # importance, and persist it.
        omega_updater = Omega_update(self.model.parameters(), lr=0.0001, momentum=0.9)
        compute_importance(self.model, self.parallel_model, omega_updater, self.dataloaders, loss_type="ewc")
        accumulate_reg_params(self.model)
        self.save_reg_params()
REG_TYPES = {
"mas": MAS,
"ewc": EWC,
}
args.REG_TYPE_KEYS = REG_TYPE_KEYS = list(REG_TYPES.keys())
def task_start_do(model, freeze_layers=[]):
    """Prepare model.reg_params at the start of a task: create the structure
    on the first task, otherwise just reset the running omega sums."""
    if hasattr(model, "reg_params"):
        clean_omega_sum(model, freeze_layers)
    else:
        initialize_reg_params(model, freeze_layers)
def initialize_reg_params(model,freeze_layers=[]):
    """initialize an omega for each parameter to zero

    Builds model.reg_params: a dict keyed by Parameter objects mapping to
    {'omega', 'omega_sum', 'init_val'}, plus the scalar entries
    'data_count' and 'lambda'.
    """
    reg_params={}
    for name, param in model.named_parameters():
        if not name in freeze_layers:
            # print('initializing param',name)
            omega=torch.FloatTensor(param.size()).zero_()
            omega=omega.cuda()
            init_val=param.data.clone()
            init_val=init_val.cuda()
            reg_param={}
            reg_param['omega'] = omega
            # NOTE(review): 'omega' and 'omega_sum' alias the SAME tensor
            # here; harmless as long as updates are out-of-place (Omega_update
            # uses .add, which allocates) — confirm accumulate_reg_params
            # does not mutate either in place.
            reg_param['omega_sum'] = omega
            #initialize the initial value to that before starting training
            reg_param['init_val'] = init_val
            reg_params[param]=reg_param
            # Scalar bookkeeping entries live in the same dict, set once.
            if 'data_count' not in reg_params:
                reg_params['data_count'] = 0
                reg_params['lambda'] = args.reg_lambda
    model.reg_params = reg_params
def clean_omega_sum(model,freeze_layers=[]):
    """Reset the per-parameter running 'omega_sum' tensors to zero and the
    sample counter to 0, before accumulating importance for a new task."""
    for name, param in model.named_parameters():
        if not name in freeze_layers:
            omega=torch.FloatTensor(param.size()).zero_()
            omega=omega.cuda()
            reg_param = model.reg_params.get(param)
            reg_param['omega_sum'] = omega
            model.reg_params[param]=reg_param
    model.reg_params['data_count'] = 0
class Weight_Regularized_AdamW(Optimizer):
    """ Implements Adam algorithm with weight decay fix, extended with a
    MAS/EWC importance penalty applied directly to the weights at each step.
    Parameters:
        lr (float): learning rate. Default 1e-3.
        betas (tuple of 2 floats): Adams beta parameters (b1, b2). Default: (0.9, 0.999)
        eps (float): Adams epsilon. Default: 1e-6
        weight_decay (float): Weight decay. Default: 0.0
        correct_bias (bool): can be set to False to avoid correcting bias in Adam (e.g. like in Bert TF repository). Default True.
    NOTE(review): the positional ``add_``/``addcmul_``/``addcdiv_`` call
    signatures below are the pre-1.5 PyTorch style — confirm against the
    torch version pinned by this project.
    """
    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-6, weight_decay=0.0, correct_bias=True):
        if lr < 0.0:
            raise ValueError("Invalid learning rate: {} - should be >= 0.0".format(lr))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError("Invalid beta parameter: {} - should be in [0.0, 1.0[".format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError("Invalid beta parameter: {} - should be in [0.0, 1.0[".format(betas[1]))
        if not 0.0 <= eps:
            raise ValueError("Invalid epsilon value: {} - should be >= 0.0".format(eps))
        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay,
                        correct_bias=correct_bias)
        super(Weight_Regularized_AdamW, self).__init__(params, defaults)
    def step(self, reg_params, closure=None):
        """Performs a single optimization step.
        Arguments:
            reg_params: dict mapping Parameter -> {'omega', 'init_val', ...}
                plus the scalar 'lambda'; parameters present here receive the
                importance-weighted pull toward their pre-task values.
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()
        reg_lambda=reg_params.get('lambda')
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data
                if grad.is_sparse:
                    raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
                state = self.state[p]
                # State initialization
                if len(state) == 0:
                    state['step'] = 0
                    # Exponential moving average of gradient values
                    state['exp_avg'] = torch.zeros_like(p.data)
                    # Exponential moving average of squared gradient values
                    state['exp_avg_sq'] = torch.zeros_like(p.data)
                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                beta1, beta2 = group['betas']
                state['step'] += 1
                # Decay the first and second moment running average coefficient
                # In-place operations to update the averages at the same time
                exp_avg.mul_(beta1).add_(1.0 - beta1, grad)
                exp_avg_sq.mul_(beta2).addcmul_(1.0 - beta2, grad, grad)
                denom = exp_avg_sq.sqrt().add_(group['eps'])
                step_size = group['lr']
                if group['correct_bias']:  # No bias correction for Bert
                    bias_correction1 = 1.0 - beta1 ** state['step']
                    bias_correction2 = 1.0 - beta2 ** state['step']
                    step_size = step_size * math.sqrt(bias_correction2) / bias_correction1
                p.data.addcdiv_(-step_size, exp_avg, denom)
                # Just adding the square of the weights to the loss function is *not*
                # the correct way of using L2 regularization/weight decay with Adam,
                # since that will interact with the m and v parameters in strange ways.
                #
                # Instead we want to decay the weights in a manner that doesn't interact
                # with the m/v parameters. This is equivalent to adding the square
                # of the weights to the loss with plain (non-momentum) SGD.
                # Add weight decay at the end (fixed version)
                #Regularize PART CODE GOES HERE
                if p in reg_params:
                    reg_param=reg_params.get(p)
                    #get omega for this parameter
                    omega=reg_param.get('omega')
                    #initial value when the training start
                    init_val=reg_param.get('init_val')
                    curr_weight_val=p.data
                    #get the difference
                    weight_dif=curr_weight_val.add(-1,init_val)
                    #compute the MAS penalty: 2*lambda*omega*(w - w_init)
                    regulizer=weight_dif.mul(2*reg_lambda*omega)
                    del weight_dif
                    del curr_weight_val
                    del omega
                    del init_val
                    #add the MAS regulizer to the gradient
                    # grad.add_(regulizer)
                    # Applied directly to the weights (decoupled, AdamW-style).
                    p.data.add_(-group['lr'], regulizer)
                    del regulizer
                #Regularize PART CODE ENDS
                if group['weight_decay'] > 0.0:
                    p.data.add_(-group['lr'] * group['weight_decay'], p.data)
        return loss
# update omega for one task; use in compute_importance
class Omega_update(SGD):
    """
    Optimizer whose ``step`` accumulates parameter importance instead of
    updating weights. Used at "deployment" time: after backpropagating the
    output norm (MAS) or the task loss (EWC), each parameter's gradient
    magnitude is folded into ``reg_params[p]['omega_sum']`` while
    ``reg_params['data_count']`` keeps a running count of seen samples for
    later averaging.
    """

    def __init__(self, params, lr=0.001, momentum=0, dampening=0, weight_decay=0, nesterov=False):
        super(Omega_update, self).__init__(params, lr, momentum, dampening, weight_decay, nesterov)

    def __setstate__(self, state):
        super(Omega_update, self).__setstate__(state)

    def step(self, reg_params, batch_size, closure=None):
        """Accumulate one batch's importance estimates; returns closure()'s loss if given."""
        reg_params['data_count'] += batch_size
        loss = closure() if closure is not None else None

        for group in self.param_groups:
            for p in group['params']:
                # Only parameters that were back-propagated through and that
                # have an importance entry participate.
                if p.grad is None or p not in reg_params:
                    continue
                grad_copy = p.grad.data.clone()
                entry = reg_params.get(p)
                running = entry.get('omega_sum')
                if args.seq_train_type == "ewc":
                    # EWC: Fisher-style squared gradients.
                    running = running.add(grad_copy ** 2)
                else:
                    # MAS: absolute gradients.
                    running = running.add(grad_copy.abs())
                entry['omega_sum'] = running
                reg_params[p] = entry
        # No weight update is performed here by design.
        return loss
# update omega for one task
def compute_importance(model, parallel_model, updater, dataloaders, loss_type="l2"):
    """Mimic the deployment setup: run the model on samples and update importance params.

    Uses the L2 norm of the function output by default (MAS's default choice);
    gradients of the chosen objective are accumulated into ``model.reg_params``
    by ``updater.step``.

    Args:
        model: underlying (single-device) model holding ``reg_params``.
        parallel_model: DataParallelModel wrapper used for the forward pass.
        updater: an Omega_update optimizer; its ``step`` accumulates importance.
        dataloaders: iterable of dataloaders yielding (cq, len_cq, cqa, len_cqa, Y, _, _).
        loss_type: "l2" (MAS default), "l1", or "ewc".

    Raises:
        ValueError: if ``loss_type`` is not one of "l2", "l1", "ewc"
            (previously this surfaced later as a NameError on ``loss_fct``).
    """
    softmax = torch.nn.Softmax(dim=-1)

    # Pick the objective whose gradient magnitude defines parameter importance.
    if loss_type == "l2":
        loss_fct = DataParallelCriterion(torch.nn.MSELoss(reduction='mean'), args.device_ids)
    elif loss_type == "l1":
        loss_fct = DataParallelCriterion(torch.nn.L1Loss(reduction='mean'), args.device_ids)
    elif loss_type == "ewc":
        # NOTE(review): TOKEN_WEIGHT — confirm this name exists in settings
        # (train.py imports TOKENS_WEIGHT).
        CELoss = CrossEntropyLoss(ignore_index=FILL_VAL, reduction='mean', weight=TOKEN_WEIGHT)
        loss_fct = DataParallelCriterion(CELoss, args.device_ids)
    else:
        raise ValueError("unsupported loss_type: {}".format(loss_type))

    # Iterate over data.
    for dataloader in dataloaders:
        for cq, len_cq, cqa, len_cqa, Y, _, _ in dataloader:
            n_inputs = sum(len(_cq) for _cq in cq)

            # Move each shard to its device (one shard per GPU).
            for i in range(len(cqa)):
                cq[i] = (cq[i].to(args.device_ids[i]),)
                len_cq[i] = len_cq[i].to(args.device_ids[i])
                cqa[i] = (cqa[i].to(args.device_ids[i]),)
                len_cqa[i] = len_cqa[i].to(args.device_ids[i])
                Y[i] = Y[i].to(args.device_ids[i])

            # Zero the parameter gradients before the importance pass.
            updater.zero_grad()

            if loss_type != "ewc":
                # MAS: norm of the softmax output at the last context token.
                logits = parallel_model(cq)
                logits = [logit[range(len(logit)), len_cq[i]-1, :] for i, logit in enumerate(logits)]
                target_zeros = [torch.zeros(logit.size()).to(args.device_ids[i]) for i, logit in enumerate(logits)]
                logits = [softmax(logit) for logit in logits]
                # "l1" and "l2" differ only in loss_fct chosen above
                # (they used to be two identical branches here).
                targets = loss_fct(logits, target_zeros)
            else:
                # EWC: gradient of the task loss itself.
                targets, _ = get_losses(parallel_model, cqa, Y, None, None, loss_fct)
            targets /= n_inputs

            # Compute the gradients, then fold them into the importance accumulators.
            targets.backward()
            updater.step(model.reg_params, n_inputs)
# omega of task1 + omega of task2 ...
# new_omega=omega_sum/data_count; omega=new_omega+prev_omega
def accumulate_reg_params(model, freeze_layers=None):
    """Accumulate the newly computed omega with the one stored from previous tasks.

    For every trainable parameter with an entry in ``model.reg_params``:
    new_omega = omega_sum / data_count, then omega += new_omega and the
    transient ``omega_sum`` accumulator is dropped. Parameters listed in
    ``freeze_layers`` have their importance entry removed instead.

    Args:
        model: model whose ``reg_params`` dict maps parameter tensors to
            {'omega', 'omega_sum', ...} plus the scalar key "data_count".
        freeze_layers: optional collection of parameter names to exclude
            (was a mutable default ``[]``; now None-guarded).
    """
    if freeze_layers is None:
        freeze_layers = []
    for name, param in model.named_parameters():
        if name not in freeze_layers:
            if param in model.reg_params:
                reg_param = model.reg_params.get(param)
                # Average this task's accumulated importance over the samples seen,
                # then add it on top of the omega carried over from earlier tasks.
                prev_omega = reg_param.get('omega')
                new_omega = reg_param.get('omega_sum') / model.reg_params["data_count"]
                reg_param['omega'] = prev_omega + new_omega
                del reg_param['omega_sum']
                model.reg_params[param] = reg_param
        else:
            if param in model.reg_params:
                # Frozen layer: its importance is no longer tracked.
                reg_param = model.reg_params.get(param)
                del reg_param['omega']
                del model.reg_params[param]
class Weight_Regularized_SGD(SGD):
    r"""SGD with a Memory Aware Synapses (MAS) importance-weighted penalty.

    Behaves like plain ``torch.optim.SGD`` (optionally with momentum/Nesterov),
    except that before the usual update each parameter's gradient receives an
    extra term ``2 * lambda * omega * (w - w_init)``, pulling weights deemed
    important for earlier tasks back toward the values they had when those
    tasks ended.
    """

    def __init__(self, params, lr=0.001, momentum=0, dampening=0, weight_decay=0, nesterov=False):
        super(Weight_Regularized_SGD, self).__init__(params, lr,momentum,dampening,weight_decay,nesterov)

    def __setstate__(self, state):
        super(Weight_Regularized_SGD, self).__setstate__(state)

    def step(self, reg_params,closure=None):
        """Perform one SGD step with the MAS penalty folded into the gradients.

        Args:
            reg_params: dict keyed by parameter tensors mapping to
                {'omega': importance, 'init_val': weight at previous task end},
                plus the scalar entry 'lambda' (regularization strength).
            closure: optional callable that re-evaluates the loss; its value
                is returned.
        """
        loss = None
        if closure is not None:
            loss = closure()

        # Global MAS strength shared by all parameters.
        reg_lambda=reg_params.get('lambda')

        for group in self.param_groups:
            weight_decay = group['weight_decay']
            momentum = group['momentum']
            dampening = group['dampening']
            nesterov = group['nesterov']
            for p in group['params']:
                if p.grad is None:
                    continue
                d_p = p.grad.data

                #MAS PART CODE GOES HERE
                # Only parameters with an importance entry are regularized.
                if p in reg_params:
                    reg_param=reg_params.get(p)
                    # Per-parameter importance (omega).
                    omega=reg_param.get('omega')
                    # Weight value when training on the previous task ended.
                    init_val=reg_param.get('init_val')
                    curr_wegiht_val=p.data
                    # Move the stored tensors to the GPU alongside p.
                    init_val=init_val.cuda()
                    omega=omega.cuda()
                    # w - w_init.
                    # NOTE(review): old-style Tensor.add(alpha, tensor) overload —
                    # deprecated since torch 1.5 (modern form: add(t, alpha=a)).
                    weight_dif=curr_wegiht_val.add(-1,init_val)
                    # MAS penalty gradient: 2 * lambda * omega * (w - w_init).
                    regulizer=weight_dif.mul(2*reg_lambda*omega)
                    del weight_dif
                    del curr_wegiht_val
                    del omega
                    del init_val
                    # Add the MAS regularizer to the gradient in place.
                    d_p.add_(regulizer)
                    del regulizer
                #MAS PART CODE ENDS

                if weight_decay != 0:
                    # NOTE(review): decays with sign(w) (an L1-style decay),
                    # not with w as in standard SGD — looks deliberate; confirm.
                    d_p.add_(weight_decay,p.data.sign())
                if momentum != 0:
                    # Standard SGD momentum buffer handling (old-style overloads).
                    param_state = self.state[p]
                    if 'momentum_buffer' not in param_state:
                        buf = param_state['momentum_buffer'] = d_p.clone()
                    else:
                        buf = param_state['momentum_buffer']
                        buf.mul_(momentum).add_(1 - dampening, d_p)
                    if nesterov:
                        d_p = d_p.add(momentum, buf)
                    else:
                        d_p = buf
                # w <- w - lr * d_p (gradient already includes the MAS term).
                p.data.add_(-group['lr'], d_p)

        return loss
| 17,354 | 39.549065 | 137 | py |
LAMOL | LAMOL-master/loss_scaler.py | # coding=utf-8
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
# import mpu
# item() is a recent addition, so this helps with backward compatibility.
def to_python_float(t):
    """Return *t* as a plain Python number.

    Tensors expose ``.item()`` (a relatively recent addition); older objects
    and sequences fall back to their first element for backward compatibility.
    """
    item_fn = getattr(t, 'item', None)
    return item_fn() if item_fn is not None else t[0]
class LossScaler:
    """
    Class that manages a static loss scale. This class is intended to interact with
    :class:`FP16_Optimizer`, and should not be directly manipulated by the user.

    Use of :class:`LossScaler` is enabled via the ``static_loss_scale`` argument to
    :class:`FP16_Optimizer`'s constructor.

    Args:
        scale (float, optional, default=1.0): The loss scale.
    """

    def __init__(self, scale=1):
        self.cur_scale = scale

    # `params` is a list / generator of torch.Variable
    def has_overflow(self, params):
        # A static scaler never reports overflow.
        return False

    # `x` is a torch.Tensor
    @staticmethod
    def _has_inf_or_nan(x):
        # Fix: was a method missing `self`, so any instance call raised
        # TypeError; @staticmethod makes both instance and class calls work.
        return False

    def update_scale(self, overflow):
        # Static scale: nothing to adjust.
        pass

    @property
    def loss_scale(self):
        return self.cur_scale

    def scale_gradient(self, module, grad_in, grad_out):
        # Backward hook signature: scale every incoming gradient.
        return tuple(self.loss_scale * g for g in grad_in)

    def backward(self, loss, retain_graph=False):
        # Backpropagate the scaled loss so fp16 gradients don't underflow.
        scaled_loss = loss*self.loss_scale
        scaled_loss.backward(retain_graph=retain_graph)
class DynamicLossScaler:
    """
    Class that manages dynamic loss scaling. It is recommended to use :class:`DynamicLossScaler`
    indirectly, by supplying ``dynamic_loss_scale=True`` to the constructor of
    :class:`FP16_Optimizer`. However, it's important to understand how :class:`DynamicLossScaler`
    operates, because the default options can be changed using
    the ``dynamic_loss_args`` argument to :class:`FP16_Optimizer`'s constructor.

    Loss scaling is designed to combat the problem of underflowing gradients encountered at long
    times when training fp16 networks. Dynamic loss scaling begins by attempting a very high loss
    scale. Ironically, this may result in OVERflowing gradients. If overflowing gradients are
    encountered, :class:`DynamicLossScaler` informs :class:`FP16_Optimizer` that an overflow has
    occurred; the update step is then skipped and the loss scale lowered. If ``scale_window``
    iterations pass without overflow, the loss scale is raised again, so the scaler "rides the
    edge" of the highest usable scale.

    Args:
        init_scale (float, optional, default=2**32): Initial loss scale attempted.
        scale_factor (float, optional, default=2.0): Factor used when adjusting the loss scale.
        scale_window (int, optional, default=1000): Consecutive overflow-free iterations to wait
            before increasing the loss scale.
        min_scale (float, optional, default=1): Lower bound on the loss scale.
        delayed_shift (int, optional, default=1): Number of overflows tolerated (hysteresis)
            before actually lowering the scale.
        consecutive_hysteresis (bool, optional, default=False): If True, the hysteresis counter
            resets on every overflow-free iteration.
    """

    def __init__(self,
                 init_scale=2**32,
                 scale_factor=2.,
                 scale_window=1000,
                 min_scale=1,
                 delayed_shift=1,
                 consecutive_hysteresis=False):
        self.cur_scale = init_scale
        self.cur_iter = 0
        # Iteration index of the most recent overflow; -1 means "never".
        self.last_overflow_iter = -1
        self.scale_factor = scale_factor
        self.scale_window = scale_window
        self.min_scale = min_scale
        self.delayed_shift = delayed_shift
        self.cur_hysteresis = delayed_shift
        self.consecutive_hysteresis = consecutive_hysteresis

    # `params` is a list / generator of torch.Variable
    def has_overflow_serial(self, params):
        """Return True if any gradient in `params` contains an inf or NaN."""
        for p in params:
            if p.grad is not None and DynamicLossScaler._has_inf_or_nan(p.grad.data):
                return True
        return False

    def has_overflow(self, params):
        # Model-parallel synchronization of the overflow flag was removed here;
        # only the local (serial) check remains.
        overflow = self.has_overflow_serial(params)
        return bool(overflow)

    # `x` is a torch.Tensor
    @staticmethod
    def _has_inf_or_nan(x):
        # Fix: was a method missing `self`; it only worked when called via the
        # class. @staticmethod makes instance calls work too.
        try:
            # if x is half, the .float() incurs an additional deep copy, but it's necessary if
            # Pytorch's .sum() creates a one-element tensor of the same type as x
            # (which is true for some recent version of pytorch).
            cpu_sum = float(x.float().sum())
        except RuntimeError as instance:
            # We want to check if inst is actually an overflow exception.
            # RuntimeError could come from a different error.
            # If so, we still want the exception to propagate.
            if "value cannot be converted" not in instance.args[0]:
                raise
            return True
        else:
            # NaN compares unequal to itself, hence the `cpu_sum != cpu_sum` test.
            if cpu_sum == float('inf') or cpu_sum == -float('inf') or cpu_sum != cpu_sum:
                return True
            return False

    # `overflow` is boolean indicating whether the gradient overflowed
    def update_scale(self, overflow):
        # Backfill attributes that may be missing on instances restored from
        # checkpoints written by an older version of this class.
        if not hasattr(self, 'min_scale'):
            self.min_scale = 1
        if not hasattr(self, 'delayed_shift'):
            self.delayed_shift = 1
        if not hasattr(self, 'cur_hysteresis'):
            self.cur_hysteresis = 1
        if not hasattr(self, 'consecutive_hysteresis'):
            self.consecutive_hysteresis = True
        if overflow:
            if self.delayed_shift == 1 or self.cur_hysteresis == 1:
                # Lower the scale, but never below min_scale.
                self.cur_scale = max(self.cur_scale/self.scale_factor, self.min_scale)
            else:
                # Spend one unit of hysteresis instead of lowering the scale.
                self.cur_hysteresis -= 1
            self.last_overflow_iter = self.cur_iter
        else:
            if self.consecutive_hysteresis:
                self.cur_hysteresis = self.delayed_shift
            if (self.cur_iter - self.last_overflow_iter) % self.scale_window == 0:
                # A full overflow-free window has passed: try a higher scale.
                if not self.consecutive_hysteresis:
                    self.cur_hysteresis = self.delayed_shift
                self.cur_scale *= self.scale_factor
        self.cur_iter += 1

    @property
    def loss_scale(self):
        return self.cur_scale

    def scale_gradient(self, module, grad_in, grad_out):
        # Backward hook signature: scale every incoming gradient.
        return tuple(self.loss_scale * g for g in grad_in)

    def backward(self, loss, retain_graph=False):
        # Backpropagate the scaled loss so fp16 gradients don't underflow.
        scaled_loss = loss*self.loss_scale
        scaled_loss.backward(retain_graph=retain_graph)
##############################################################
# Example usage below here -- assuming it's in a separate file
##############################################################
"""
TO-DO separate out into an example.
if __name__ == "__main__":
import torch
from torch.autograd import Variable
from dynamic_loss_scaler import DynamicLossScaler
# N is batch size; D_in is input dimension;
# H is hidden dimension; D_out is output dimension.
N, D_in, H, D_out = 64, 1000, 100, 10
# Create random Tensors to hold inputs and outputs, and wrap them in Variables.
x = Variable(torch.randn(N, D_in), requires_grad=False)
y = Variable(torch.randn(N, D_out), requires_grad=False)
w1 = Variable(torch.randn(D_in, H), requires_grad=True)
w2 = Variable(torch.randn(H, D_out), requires_grad=True)
parameters = [w1, w2]
learning_rate = 1e-6
optimizer = torch.optim.SGD(parameters, lr=learning_rate)
loss_scaler = DynamicLossScaler()
for t in range(500):
y_pred = x.mm(w1).clamp(min=0).mm(w2)
loss = (y_pred - y).pow(2).sum() * loss_scaler.loss_scale
print('Iter {} loss scale: {}'.format(t, loss_scaler.loss_scale))
print('Iter {} scaled loss: {}'.format(t, loss.data[0]))
print('Iter {} unscaled loss: {}'.format(t, loss.data[0] / loss_scaler.loss_scale))
# Run backprop
optimizer.zero_grad()
loss.backward()
# Check for overflow
has_overflow = DynamicLossScaler.has_overflow(parameters)
# If no overflow, unscale grad and update as usual
if not has_overflow:
for param in parameters:
param.grad.data.mul_(1. / loss_scaler.loss_scale)
optimizer.step()
# Otherwise, don't do anything -- ie, skip iteration
else:
print('OVERFLOW!')
# Update loss scale for next iteration
loss_scaler.update_scale(has_overflow)
"""
| 9,788 | 40.130252 | 326 | py |
LAMOL | LAMOL-master/train.py | import torch
from torch.utils.data import DataLoader
from torch import nn
from pytorch_transformers import AdamW, WEIGHTS_NAME, WarmupLinearSchedule
import csv
import numpy as np
import os
import logging
from fp16 import FP16_Module, FP16_Optimizer
from parallel import DataParallelModel, DataParallelCriterion
from collections import OrderedDict
from utils import *
from settings import args, TASK_DICT, init_logging, MODEL_CONFIG, MODEL_CLASS, SPECIAL_TOKENS, CONFIG_CLASS
from settings import TOKENIZER, SPECIAL_TOKEN_IDS, FILL_VAL, SAVE_NAME, FINAL_SAVE_NAME, TOKENS_WEIGHT, CONFIG_NAME
from scheduler import AnnealingLR
from regularizers import REG_TYPES, REG_TYPE_KEYS, Weight_Regularized_AdamW, Weight_Regularized_SGD
from torch.nn import CrossEntropyLoss
logger = logging.getLogger(__name__)
def train(task_ids, model):
    """Train `model` on the tasks named by `task_ids` and return the trained model.

    In sequential (lifelong) settings `task_ids` is a single-element list and
    `model` carries over from the previous task (or None for the first task);
    in the "multitask" setting all task ids are passed at once. The function
    also handles pseudo-sample generation (LAMOL), GEM episodic memory,
    regularizer-based methods (EWC/MAS), and per-epoch checkpointing into the
    task's model directory.
    """
    tasks = [args.tasks[task_id] for task_id in task_ids]

    logger.info("start to train { task: %s, seq train type: %s }" % (tasks, args.seq_train_type))
    model_dir = get_model_dir(tasks)
    make_dir(model_dir)

    train_dataset = [TASK_DICT[t]["train"] for t in tasks]
    train_extra_data = []
    # LAMOL ("lll"): before training task k>0, let the current model generate
    # pseudo samples of the previous task to rehearse on.
    if "lll" in args.seq_train_type and task_ids[0] > 0 and not args.skip_tasks:
        prev_task = args.tasks[task_ids[0]-1]
        with torch.no_grad():
            create_extra_data(tasks[0], prev_task, model, train_extra_data)
    elif "gem" in args.seq_train_type and task_ids[0] > 0:
        # GEM keeps real samples of past tasks in memory instead of training on them.
        get_real_data(tasks[0], train_extra_data, accum=False, encode=True)
        args.memory_data.append(train_extra_data)
        train_extra_data = []
    logger.info('extra training data size: {}'.format(len(train_extra_data)))

    if not model:
        # First task: start from the pretrained LM.
        # which_model_to_load = model_dir if os.path.isfile(os.path.join(model_dir, FINAL_SAVE_NAME)) else args.model_name
        model = MODEL_CLASS.from_pretrained(args.model_name).cuda()
        model.resize_token_embeddings(len(TOKENIZER))
        if not args.fp32:
            model = FP16_Module(model)

    # Register a task-specific generation token and grow the vocabulary.
    gen_token = get_gen_token(tasks[0])
    TOKENIZER.add_tokens([gen_token])
    TOKENIZER.save_pretrained(model_dir)
    SPECIAL_TOKENS[tasks[0]] = gen_token
    SPECIAL_TOKEN_IDS[tasks[0]] = TOKENIZER.convert_tokens_to_ids(gen_token)
    logger.info('gen token = {} , gen token id = {}'.format(gen_token, SPECIAL_TOKEN_IDS[tasks[0]]))
    MODEL_CONFIG.vocab_size = len(TOKENIZER)
    MODEL_CONFIG.to_json_file(os.path.join(model_dir,CONFIG_NAME))
    global TOKENS_WEIGHT
    # Keep the per-token loss weights aligned with the (grown) vocabulary.
    if len(TOKENIZER) != TOKENS_WEIGHT.shape[0]:
        TOKENS_WEIGHT = torch.cat((TOKENS_WEIGHT, torch.ones([1]).cuda()))

    if args.skip_tasks and len(tasks) == 1:
        logger.info("*********** skip task: {} ***********".format(tasks[0]))
        if tasks[0] in args.skip_tasks:
            if len(args.skip_tasks) == 1:
                # Last task to skip: load its saved checkpoint instead of training.
                model_dir = get_model_dir(tasks)
                model_path = os.path.join(model_dir, FINAL_SAVE_NAME)
                config_path = os.path.join(model_dir,CONFIG_NAME)
                model_config = CONFIG_CLASS.from_json_file(config_path)
                model = MODEL_CLASS(model_config).cuda()
                state_dict = torch.load(model_path)
                model.load_state_dict(state_dict)
                if not args.fp32:
                    model = FP16_Module(model)
                if args.seq_train_type in REG_TYPE_KEYS:
                    # Regularizer methods still need importance params for the
                    # skipped task so later tasks can be penalized correctly.
                    logger.info("calulating reg_params ...")
                    train_qadata = QADataset(train_dataset, "train", SPECIAL_TOKEN_IDS[tasks[0]], train_extra_data)
                    max_train_batch_size = max(len(train_qadata) // args.min_n_steps, args.min_batch_size)
                    train_dataloader = create_dataloader(train_qadata, "train", max_train_batch_size)
                    parallel_model = DataParallelModel(WrapModel(model), args.device_ids)
                    regularizer = REG_TYPES[args.seq_train_type](model, parallel_model, [train_dataloader], tasks[0])
                    regularizer.task_start_do()
                    regularizer.task_end_do()
                    torch.save(model.state_dict(), os.path.join(model_dir, FINAL_SAVE_NAME))
                    logger.info("done reg_params!")
            args.skip_tasks.remove(tasks[0])
            return model

    model.resize_token_embeddings(len(TOKENIZER))

    if not args.fp32:  # again because resize_token_embeddings makes embedding layer fp32
        model = FP16_Module(model)

    parallel_model = DataParallelModel(WrapModel(model), args.device_ids)

    train_qadata = QADataset(train_dataset, "train", SPECIAL_TOKEN_IDS[tasks[0]], train_extra_data)
    max_train_batch_size = max(len(train_qadata) // args.min_n_steps, args.min_batch_size)
    train_dataloader = create_dataloader(train_qadata, "train", max_train_batch_size)

    if not args.unbound and args.seq_train_type != "multitask":
        #n_train_epochs = TASK_DICT[tasks[0]]["n_train_epochs"]
        n_train_epochs = args.n_train_epochs[tasks[0]]
    else:
        n_train_epochs = args.n_train_epochs['_'.join(tasks)]
    n_train_optimization_steps = len(train_qadata) * n_train_epochs
    logger.info('len of train dataset: {} , max train batch size {} , num of opt steps: {}'.format(
        len(train_qadata), max_train_batch_size, n_train_optimization_steps))

    # Standard transformer recipe: no weight decay on biases and LayerNorm params.
    param_optimizer = list(model.named_parameters())
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [
        {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': args.weight_decay},
        {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
    ]

    if "gem" in args.seq_train_type:
        # GEM bookkeeping: flattened gradient sizes and one gradient column per task.
        model.task_id = task_ids[0]
        if not hasattr(model, "grad_dims"):
            model.grad_dims = []
            for param in model.parameters():
                model.grad_dims.append(param.data.numel())
        if not hasattr(model, "grads"):
            model.grads = torch.zeros(sum(model.grad_dims),len(args.tasks))
            model.grads = model.grads.cuda()

    if args.seq_train_type in REG_TYPE_KEYS:
        # EWC/MAS use an AdamW variant that adds the importance penalty in step().
        optimizer = Weight_Regularized_AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
    else:
        optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
    if not args.fp32:
        optimizer = FP16_Optimizer(optimizer, static_loss_scale=None, dynamic_loss_scale=True,
                                   dynamic_loss_args={'scale_window': 100, 'min_scale': 1, 'delayed_shift': 2})

    scheduler = AnnealingLR(optimizer, start_lr=args.learning_rate, warmup_iter=int(args.n_warmup_ratio*len(train_qadata)),
                            num_iters=int(n_train_optimization_steps), decay_style=args.decay_style)
    train_loss_fct = DataParallelCriterion(CrossEntropyLoss(ignore_index=FILL_VAL, weight=TOKENS_WEIGHT), args.device_ids)

    if args.seq_train_type in REG_TYPE_KEYS:
        # Snapshot importance params at task start (uses a fresh dataloader copy).
        copy_train_dataloader = create_dataloader(train_qadata, "train", max_train_batch_size)
        prev_task = args.tasks[task_ids[0]-1]
        regularizer = REG_TYPES[args.seq_train_type](model, parallel_model, [copy_train_dataloader], tasks[0], prev_task)
        regularizer.task_start_do()

    tot_n_steps = 0
    train_once = TrainStep(model, optimizer, scheduler)
    if "gem" in args.seq_train_type and task_ids[0] != 0:
        gem_step = GEMStep(model, parallel_model, train_loss_fct, optimizer)
    model.train()
    for ep in range(n_train_epochs):
        cum_loss, cum_qa_loss, cum_lm_loss, cur_n_inputs = 0, 0, 0, 0
        for n_steps, (_, _, cqa, _, Y, gen_X, gen_Y) in enumerate(train_dataloader):

            n_inputs = sum(_cqa.shape[0] for _cqa in cqa)

            # Move each shard to its device (one shard per GPU).
            for i in range(len(cqa)):
                cqa[i] = (cqa[i].to(args.device_ids[i]),)
                Y[i] = Y[i].to(args.device_ids[i])
                gen_X[i] = (gen_X[i].to(args.device_ids[i]),)
                gen_Y[i] = gen_Y[i].to(args.device_ids[i])

            # losses = (QA loss, LM loss); both are optimized jointly.
            losses = get_losses(parallel_model, cqa, Y, gen_X, gen_Y, train_loss_fct)
            loss = sum(losses)
            if "gem" in args.seq_train_type and task_ids[0] != 0:
                # Project the gradient so it doesn't increase loss on past tasks.
                gem_step(task_ids[0])
            train_once(loss, n_inputs)

            qa_loss = losses[0].item() * n_inputs
            lm_loss = losses[1].item() * n_inputs
            cum_loss += (qa_loss + lm_loss)
            cum_qa_loss += qa_loss
            cum_lm_loss += lm_loss
            cur_n_inputs += n_inputs

            if (n_steps + 1 ) % args.logging_steps == 0:
                logger.info('progress {:.3f} , lr {:.1E} , loss {:.3f} , qa loss {:.3f} , lm loss {:.3f} , avg batch size {:.1f}'.format(
                    ep + cur_n_inputs/len(train_qadata), scheduler.get_lr(), cum_loss/cur_n_inputs, cum_qa_loss/cur_n_inputs, cum_lm_loss/cur_n_inputs,
                    cur_n_inputs/(n_steps + 1)
                ))

        # Checkpoint after every epoch (SAVE_NAME + epoch number).
        torch.save(model.state_dict(), os.path.join(model_dir, SAVE_NAME+str(ep+1)))
        tot_n_steps += (n_steps + 1)
        logger.info('epoch {}/{} done , tot steps {} , lr {:.1E} , loss {:.2f} , qa loss {:.2f} , lm loss {:.2f} , avg batch size {:.1f}'.format(
            ep+1, n_train_epochs, tot_n_steps, scheduler.get_lr(), cum_loss/cur_n_inputs, cum_qa_loss/cur_n_inputs, cum_lm_loss/cur_n_inputs, cur_n_inputs/(n_steps+1)
        ))

    # task end do for reg
    if args.seq_train_type in REG_TYPE_KEYS:
        regularizer.task_end_do()
    torch.save(model.state_dict(), os.path.join(model_dir, FINAL_SAVE_NAME))

    return model
if __name__ == '__main__':
    # Quiet the transformers library's loggers unless debugging.
    if not args.debug:
        logging.getLogger("pytorch_transformers").setLevel(logging.WARNING)
        logging.getLogger("pytorch_transformers.tokenization_utils").setLevel(logging.CRITICAL)

    make_dir(args.model_dir_root)

    init_logging(os.path.join(args.model_dir_root, 'log_train.txt'))
    logger.info('args = {}'.format(str(args)))

    model = None
    if args.seq_train_type == "multitask":
        # Multitask baseline: one joint run over all tasks at once.
        model = train(list(range(len(args.tasks))), model)
    else:
        if args.unbound:
            # "Unbound" LLL setting: re-split the task data before training.
            TASK_DICT = lll_unbound_setting(split_size=args.unbound)
        # Sequential training: each task continues from the previous model.
        for task_id in range(len(args.tasks)):
            model = train([task_id], model)
LAMOL | LAMOL-master/parallel.py | import threading
import torch
from torch.nn.parallel import DataParallel
from torch.nn.parallel.parallel_apply import get_a_var
from torch.nn.parallel.scatter_gather import scatter
torch_ver = torch.__version__[:3]
__all__ = ['DataParallelModel', 'DataParallelCriterion']
class DataParallelModel(DataParallel):
    """DataParallel variant that returns one output per device (no gather),
    and runs each module's ``__data_parallel_replicate__`` hook on replication."""

    def forward(self, inputs, **kwargs):
        target_devices = self.device_ids[:len(inputs)]
        scattered_kwargs = scatter(kwargs, target_devices, self.dim)
        if len(self.device_ids) == 1:
            # Single device: call the wrapped module directly, still returning a tuple.
            return (self.module(*inputs[0], **scattered_kwargs[0]),)
        copies = self.replicate(self.module, target_devices)
        return self.parallel_apply(copies, inputs, scattered_kwargs)

    def replicate(self, module, device_ids):
        copies = super(DataParallelModel, self).replicate(module, device_ids)
        # Let sub-modules coordinate across replicas (e.g. synchronized BN).
        execute_replication_callbacks(copies)
        return copies
class DataParallelCriterion(DataParallel):
    """Criterion wrapper whose inputs arrive pre-scattered (one chunk per
    device); only the targets and kwargs are scattered here, and the
    per-device losses are gathered onto the output device."""

    def forward(self, inputs, targets, **kwargs):
        if not self.device_ids:
            return self.module(inputs, *targets, **kwargs)
        active_devices = self.device_ids[:len(inputs)]
        scattered_kwargs = scatter(kwargs, active_devices, self.dim)
        if len(self.device_ids) == 1:
            return self.module(inputs[0], targets[0], **scattered_kwargs[0])
        copies = self.replicate(self.module, active_devices)
        per_device_losses = _criterion_parallel_apply(copies, inputs, targets, scattered_kwargs)
        return self.gather(per_device_losses, self.output_device)
def _criterion_parallel_apply(modules, inputs, targets, kwargs_tup=None, devices=None):
    """Apply each criterion replica to its (input, target) pair, one thread per device.

    Mirrors torch's ``parallel_apply`` but passes a target alongside the input.
    Raises the first exception captured in a worker thread; otherwise returns
    the list of per-replica outputs in input order.
    """
    assert len(modules) == len(inputs)
    assert len(targets) == len(inputs)
    if kwargs_tup:
        assert len(modules) == len(kwargs_tup)
    else:
        kwargs_tup = ({},) * len(modules)
    if devices is not None:
        assert len(modules) == len(devices)
    else:
        devices = [None] * len(modules)

    # `results` maps replica index -> output (or captured exception).
    lock = threading.Lock()
    results = {}
    if torch_ver != "0.3":
        # Capture the caller's grad mode so worker threads inherit it.
        grad_enabled = torch.is_grad_enabled()

    def _worker(i, module, input, target, kwargs, device=None):
        if torch_ver != "0.3":
            torch.set_grad_enabled(grad_enabled)
        if device is None:
            # Fall back to the device of the first tensor found in `input`.
            device = get_a_var(input).get_device()
        try:
            with torch.cuda.device(device):
                # this also avoids accidental slicing of `input` if it is a Tensor
                if not isinstance(input, (list, tuple)):
                    input = (input,)
                if not isinstance(target, (list, tuple)):
                    target = (target,)
                output = module(*(input + target), **kwargs)
            with lock:
                results[i] = output
        except Exception as e:
            # Store the exception; it is re-raised on the caller's thread below.
            with lock:
                results[i] = e

    if len(modules) > 1:
        threads = [threading.Thread(target=_worker,
                                    args=(i, module, input, target,
                                          kwargs, device),)
                   for i, (module, input, target, kwargs, device) in
                   enumerate(zip(modules, inputs, targets, kwargs_tup, devices))]

        for thread in threads:
            thread.start()
        for thread in threads:
            thread.join()
    else:
        # Single replica: no threading overhead needed.
        _worker(0, modules[0], inputs[0], targets[0], kwargs_tup[0], devices[0])

    outputs = []
    for i in range(len(inputs)):
        output = results[i]
        if isinstance(output, Exception):
            raise output
        outputs.append(output)
    return outputs
###########################################################################
# Adapted from Synchronized-BatchNorm-PyTorch.
# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch
#
class CallbackContext(object):
    """Empty attribute bag shared by corresponding sub-modules across replicas."""
    pass
def execute_replication_callbacks(modules):
    """
    Invoke the `__data_parallel_replicate__` hook on every sub-module of every replica.

    The hook is called as ``__data_parallel_replicate__(ctx, copy_id)``. All
    replicas are structurally identical, so sub-module j of each replica shares
    a single CallbackContext object, letting the copies exchange information.
    Because the master replica (modules[0]) is visited first, its hook always
    runs before the hook of any slave copy for the same sub-module.
    """
    num_submodules = len(list(modules[0].modules()))
    shared_ctxs = [CallbackContext() for _ in range(num_submodules)]

    for copy_id, replica in enumerate(modules):
        for ctx_id, sub in enumerate(replica.modules()):
            if hasattr(sub, '__data_parallel_replicate__'):
                sub.__data_parallel_replicate__(shared_ctxs[ctx_id], copy_id)
| 4,814 | 36.038462 | 91 | py |
LAMOL | LAMOL-master/fp16.py | # coding=utf-8
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Stable version of apex FP16 Optimizer"""
import torch
from torch import nn
from torch.autograd import Variable
from torch.nn.parameter import Parameter
from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors
from loss_scaler import DynamicLossScaler, LossScaler
from fp16util import model_grads_to_master_grads, master_params_to_model_params, clip_grad_norm
FLOAT_TYPES = (torch.FloatTensor, torch.cuda.FloatTensor)
HALF_TYPES = (torch.HalfTensor, torch.cuda.HalfTensor)
def conversion_helper(val, conversion):
    """Recursively apply `conversion` to `val`, preserving tuple/list nesting."""
    if isinstance(val, (tuple, list)):
        converted = [conversion_helper(item, conversion) for item in val]
        return tuple(converted) if isinstance(val, tuple) else converted
    # Leaf value: apply the conversion directly.
    return conversion(val)
def fp32_to_fp16(val):
    """Recursively cast every fp32 tensor/parameter in `val` to fp16."""
    def to_half(item):
        # Parameters/Variables are type-checked via their underlying .data tensor.
        payload = item.data if isinstance(item, (Parameter, Variable)) else item
        return item.half() if isinstance(payload, FLOAT_TYPES) else item
    return conversion_helper(val, to_half)
def fp16_to_fp32(val):
    """Recursively cast every fp16 tensor/parameter in `val` to fp32."""
    def to_float(item):
        # Parameters/Variables are type-checked via their underlying .data tensor.
        payload = item.data if isinstance(item, (Parameter, Variable)) else item
        return item.float() if isinstance(payload, HALF_TYPES) else item
    return conversion_helper(val, to_float)
class FP16_Module(nn.Module):
    """Half-precision wrapper: stores `module` in fp16 but exposes an fp32 interface.

    Inputs are cast fp32->fp16 before the wrapped forward pass and outputs are
    cast back fp16->fp32, so callers interact with the model purely in fp32.
    State-dict access is delegated so checkpoints match the wrapped module's.
    """

    def __init__(self, module):
        super(FP16_Module, self).__init__()
        self.add_module('module', module.half())

    def forward(self, *inputs, **kwargs):
        half_inputs = fp32_to_fp16(inputs)
        raw_out = self.module(*half_inputs, **kwargs)
        return fp16_to_fp32(raw_out)

    def state_dict(self, destination=None, prefix='', keep_vars=False):
        return self.module.state_dict(destination, prefix, keep_vars)

    def load_state_dict(self, state_dict, strict=True):
        self.module.load_state_dict(state_dict, strict=strict)

    def resize_token_embeddings(self, len_tokenizer):
        self.module.resize_token_embeddings(len_tokenizer)
# TODO: Update overflow check + downscale to use Carl's fused kernel.
class FP16_Optimizer(object):
    """
    :class:`FP16_Optimizer` is designed to wrap an existing PyTorch optimizer,
    and manage static or dynamic loss scaling and master weights in a manner transparent to the user.
    For standard use, only two lines must be changed: creating the :class:`FP16_Optimizer` instance,
    and changing the call to ``backward``.
    Example::
        model = torch.nn.Linear(D_in, D_out).cuda().half()
        optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
        # Name the FP16_Optimizer instance to replace the existing optimizer
        # (recommended but not required):
        optimizer = FP16_Optimizer(optimizer, static_loss_scale = 128.0)
        ...
        # loss.backward() becomes:
        optimizer.backward(loss)
        ...
    Example with dynamic loss scaling::
        ...
        optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
                                   # optional arg to control dynamic loss scaling behavior
                                   # dynamic_loss_args={'scale_window' : 500})
                                   # Usually, dynamic_loss_args is not necessary.
    Args:
        init_optimizer (torch.optim.optimizer):  Existing optimizer created with the parameters to optimize.  Internally, :class:`FP16_Optimizer` replaces the passed optimizer's fp16 parameters, if any, with fp32 master parameters copied from the original ones.  :class:`FP16_Optimizer` also stores references to the original fp16 parameters, and updates these fp16 parameters from the master fp32 copy at the end of each :attr:`step`.
        static_loss_scale (float, optional, default=1.0):  Loss scale used internally to scale gradients computed by the model.  Any fp16 gradients will be copied to fp32, then downscaled before being applied to the fp32 master params, so ``static_loss_scale`` should not affect learning rate.
        dynamic_loss_scale (bool, optional, default=False):  Use dynamic loss scaling.  If True, this will override any ``static_loss_scale`` option.
        dynamic_loss_args (dict, optional, default=None):  Dict of kwargs that will be forwarded to the internal :class:`DynamicLossScaler` instance's constructor.  Keys of this dict must match kwargs accepted by :class:`DynamicLossScaler`'s constructor.  If ``dynamic_loss_args`` is unspecified, :class:`DynamicLossScaler`'s defaults will be used.
        verbose (bool, optional, default=True):  By default, FP16_Optimizer's constructor prints out the parameters and parameter groups it is ingesting, as a sanity check.  If this becomes annoying (e.g. for large models), it can be disabled by passing ``verbose=False``.  ``verbose=False`` will not disable printing when the loss scale is readjusted during dynamic loss scaling.
    ``init_optimizer`` is expected to have been constructed in the ordinary way.
    It is recommended (although not required) that the newly constructed :class:`FP16_Optimizer` instance be
    named to replace ``init_optimizer``, for two reasons:
    First, it means that references to the same name
    later in the file will not have to change.
    Second, :class:`FP16_Optimizer` reserves the right (as an implementation detail) to
    modify ``init_optimizer``.  If you do choose a unique name for the new
    :class:`FP16_Optimizer` instance, you should only work with this new instance,
    because the preexisting optimizer might no longer behave as expected.
    ``init_optimizer`` may be any Pytorch optimizer.
    It may contain a mixture of fp16 and fp32 parameters organized into any number of
    ``param_groups`` with different hyperparameters.  The :class:`FP16_Optimizer` constructor will
    ingest these ``param_groups`` and remember them.
    Calls to ::
        loss.backward()
    must be replaced with ::
        optimizer.backward(loss)
    because :class:`FP16_Optimizer` requires ownership of the backward pass to implement
    loss scaling and copies to master gradients.
    .. note::
        Loss scaling, either static or dynamic, is orthogonal to learning rate, because gradients
        are downscaled before being applied.  This means that adjusting the loss scale, or using
        dynamic loss scaling, should not require retuning the learning rate or any other
        hyperparameters.
    **Advanced options**
    **Closures**:  :class:`FP16_Optimizer` can wrap a Pytorch optimizer that receives a closure.
    See docstring for :attr:`step`.
    **Gradient clipping**:  Use :attr:`clip_master_grads`.
    **Multiple losses**:  If your model accumulates gradients from multiple losses,
    this can be made more efficient by supplying ``update_master_grads=False``
    to :attr:`backward`.  See docstring for :attr:`backward`.
    **Manually adjusting loss scale**:  The current loss scale can be retrieved or set via ::
        print(optimizer.loss_scale)
        optimizer.loss_scale = new_loss_scale
    For static loss scaling, manually adjusting the loss scale over time is a reasonable
    thing to do.  During later epochs, gradients may become smaller, and a
    higher loss scale may be required, analogous to scheduling the learning rate.  Dynamic loss
    scaling is more subtle (see :class:`DynamicLossScaler`) and in this case, manually adjusting
    the loss scale is not recommended.
    **Multi_GPU training**:  If the wrapped ``init_optimizer`` was created from a model wrapped in
    Pytorch DistributedDataParallel or Apex DistributedDataParallel, :class:`FP16_Optimizer`
    should still work as intended.
    """
    def __init__(self,
                 init_optimizer,
                 static_loss_scale=1.0,
                 dynamic_loss_scale=False,
                 dynamic_loss_args=None,
                 verbose=False):
        # BUGFIX: the original tested `torch.cuda.is_available` (the function
        # object itself, which is always truthy), so this guard could never
        # fire. It must be *called*.
        if not torch.cuda.is_available():
            raise SystemError("Cannot use fp16 without CUDA.")
        self.verbose = verbose
        self.optimizer = init_optimizer
        # init_state_dict sets up an alternative way to cast per-param state tensors.
        # Stashing here in case https://github.com/pytorch/pytorch/issues/7733 makes it necessary.
        # init_state_dict = init_optimizer.state_dict()
        self.fp16_groups = []
        self.fp32_from_fp16_groups = []
        self.fp32_from_fp32_groups = []
        for i, param_group in enumerate(self.optimizer.param_groups):
            self.maybe_print("FP16_Optimizer processing param group {}:".format(i))
            fp16_params_this_group = []
            fp32_params_this_group = []
            fp32_from_fp16_params_this_group = []
            # The inner index is `j`; the original shadowed the outer `i`,
            # which worked only by accident of statement ordering.
            for j, param in enumerate(param_group['params']):
                if param.requires_grad:
                    if param.type() == 'torch.cuda.HalfTensor':
                        self.maybe_print("FP16_Optimizer received torch.cuda.HalfTensor with {}"
                                         .format(param.size()))
                        fp16_params_this_group.append(param)
                        master_param = param.detach().clone().float()
                        master_param.requires_grad = True
                        # Copy the model parallel flag.
                        # master_param.model_parallel = param.model_parallel
                        param_group['params'][j] = master_param
                        fp32_from_fp16_params_this_group.append(master_param)
                        # Reset existing state dict key to the new master param.
                        # We still need to recast per-param state tensors, if any, to FP32.
                        if param in self.optimizer.state:
                            self.optimizer.state[master_param] = self.optimizer.state.pop(param)
                    elif param.type() == 'torch.cuda.FloatTensor':
                        self.maybe_print("FP16_Optimizer received torch.cuda.FloatTensor with {}"
                                         .format(param.size()))
                        fp32_params_this_group.append(param)
                        param_group['params'][j] = param
                    else:
                        raise TypeError("Wrapped parameters must be either "
                                        "torch.cuda.FloatTensor or torch.cuda.HalfTensor. "
                                        "Received {}".format(param.type()))
            self.fp16_groups.append(fp16_params_this_group)
            self.fp32_from_fp16_groups.append(fp32_from_fp16_params_this_group)
            self.fp32_from_fp32_groups.append(fp32_params_this_group)
        # Leverage state_dict() and load_state_dict() to recast preexisting per-param state tensors
        self.optimizer.load_state_dict(self.optimizer.state_dict())
        # alternative way to cast per-param state tensors:
        # self.optimizer.load_state_dict(init_state_dict)
        if dynamic_loss_scale:
            self.dynamic_loss_scale = True
            if dynamic_loss_args is not None:
                self.loss_scaler = DynamicLossScaler(**dynamic_loss_args)
            else:
                self.loss_scaler = DynamicLossScaler()
        else:
            self.dynamic_loss_scale = False
            self.loss_scaler = LossScaler(static_loss_scale)
        self.overflow = False
        self.first_closure_call_this_step = True
        self.clip_grad_norm = clip_grad_norm
    def maybe_print(self, msg):
        # Print only when verbose mode was requested at construction.
        if self.verbose:
            print(msg)
    def __getstate__(self):
        raise RuntimeError("FP16_Optimizer should be serialized using state_dict().")
    def __setstate__(self, state):
        raise RuntimeError("FP16_Optimizer should be deserialized using load_state_dict().")
    def zero_grad(self, set_grads_to_None=False):
        """
        Zero fp32 and fp16 parameter grads.
        """
        # In principle, only the .grad attributes of the model params need to be zeroed,
        # because gradients are copied into the FP32 master params.  However, we zero
        # all gradients owned by the optimizer, just to be safe:
        for group in self.optimizer.param_groups:
            for p in group['params']:
                if set_grads_to_None:
                    p.grad = None
                else:
                    if p.grad is not None:
                        p.grad.detach_()
                        p.grad.zero_()
        # Zero fp16 gradients owned by the model:
        for fp16_group in self.fp16_groups:
            for param in fp16_group:
                if set_grads_to_None:
                    param.grad = None
                else:
                    if param.grad is not None:
                        param.grad.detach_() # as in torch.optim.optimizer.zero_grad()
                        param.grad.zero_()
    def _check_overflow(self):
        # Gather every parameter the loss scaler should inspect for inf/nan.
        params = []
        for group in self.fp16_groups:
            for param in group:
                params.append(param)
        for group in self.fp32_from_fp32_groups:
            for param in group:
                params.append(param)
        self.overflow = self.loss_scaler.has_overflow(params)
    def _update_scale(self, has_overflow=False):
        self.loss_scaler.update_scale(has_overflow)
    def _master_params_to_model_params(self):
        # Copy updated fp32 master weights back into the model's fp16 weights.
        for fp16_group, fp32_from_fp16_group in zip(self.fp16_groups, self.fp32_from_fp16_groups):
            master_params_to_model_params(fp16_group, fp32_from_fp16_group)
    def _model_params_to_master_params(self):
        # Inverse direction: refresh fp32 masters from the model's fp16 weights.
        for fp16_group, fp32_from_fp16_group in zip(self.fp16_groups, self.fp32_from_fp16_groups):
            master_params_to_model_params(fp32_from_fp16_group, fp16_group)
    # To consider:  Integrate distributed with this wrapper by registering a hook on each variable
    # that does the overflow check, gradient copy + downscale, and fp32 allreduce in a different stream.
    def _model_grads_to_master_grads(self):
        for fp16_group, fp32_from_fp16_group in zip(self.fp16_groups, self.fp32_from_fp16_groups):
            model_grads_to_master_grads(fp16_group, fp32_from_fp16_group)
    def _downscale_master(self):
        # Undo the loss scale so master grads are in true gradient units.
        if self.loss_scale != 1.0:
            for group in self.optimizer.param_groups:
                for param in group['params']:
                    if param.grad is not None:
                        param.grad.data.mul_(1./self.loss_scale)
    def clip_master_grads(self, max_norm, norm_type=2):
        """
        Clips fp32 master gradients via ``torch.nn.utils.clip_grad_norm``.
        Args:
            max_norm (float or int): max norm of the gradients
            norm_type (float or int): type of the used p-norm. Can be ``'inf'`` for
                infinity norm.
        Returns:
            Total norm of the current fp32 gradients (viewed as a single vector).
        .. warning::
            Returns -1 if the most recently computed fp16 gradients overflowed (that is, if ``self.overflow`` is ``True``).
        """
        if not self.overflow:
            fp32_params = []
            for param_group in self.optimizer.param_groups:
                for param in param_group['params']:
                    fp32_params.append(param)
            return self.clip_grad_norm(fp32_params, max_norm, norm_type)
        else:
            return -1
    def state_dict(self):
        """
        Returns a dict containing the current state of this :class:`FP16_Optimizer` instance.
        This dict contains attributes of :class:`FP16_Optimizer`, as well as the state_dict
        of the contained Pytorch optimizer.
        Example::
            checkpoint = {}
            checkpoint['model'] = model.state_dict()
            checkpoint['optimizer'] = optimizer.state_dict()
            torch.save(checkpoint, "saved.pth")
        """
        state_dict = {}
        state_dict['loss_scaler'] = self.loss_scaler
        state_dict['dynamic_loss_scale'] = self.dynamic_loss_scale
        state_dict['overflow'] = self.overflow
        state_dict['first_closure_call_this_step'] = self.first_closure_call_this_step
        state_dict['optimizer_state_dict'] = self.optimizer.state_dict()
        state_dict['fp32_from_fp16'] = self.fp32_from_fp16_groups
        return state_dict
    def load_state_dict(self, state_dict):
        """
        Loads a state_dict created by an earlier call to state_dict().
        If ``fp16_optimizer_instance`` was constructed from some ``init_optimizer``,
        whose parameters in turn came from ``model``, it is expected that the user
        will call ``model.load_state_dict()`` before
        ``fp16_optimizer_instance.load_state_dict()`` is called.
        Example::
            model = torch.nn.Linear(D_in, D_out).cuda().half()
            optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
            optimizer = FP16_Optimizer(optimizer, static_loss_scale = 128.0)
            ...
            checkpoint = torch.load("saved.pth")
            model.load_state_dict(checkpoint['model'])
            optimizer.load_state_dict(checkpoint['optimizer'])
        """
        # I think it should actually be ok to reload the optimizer before the model.
        self.loss_scaler = state_dict['loss_scaler']
        self.dynamic_loss_scale = state_dict['dynamic_loss_scale']
        self.overflow = state_dict['overflow']
        self.first_closure_call_this_step = state_dict['first_closure_call_this_step']
        self.optimizer.load_state_dict(state_dict['optimizer_state_dict'])
        # At this point, the optimizer's references to the model's fp32 parameters are up to date.
        # The optimizer's hyperparameters and internal buffers are also up to date.
        # However, the fp32 master copies of the model's fp16 params stored by the optimizer are still
        # out of date.  There are two options.
        # 1:  Refresh the master params from the model's fp16 params.
        # This requires less storage but incurs precision loss.
        # 2:  Save and restore the fp32 master copies separately.
        # We choose option 2.
        #
        # Pytorch Optimizer.load_state_dict casts saved buffers (e.g. momentum) to the type and device
        # of their associated parameters, because it's possible those buffers might not exist yet in
        # the current optimizer instance.  In our case, as long as the current FP16_Optimizer has been
        # constructed in the same way as the one whose state_dict we are loading, the same master params
        # are guaranteed to exist, so we can just copy_() from the saved master params.
        for current_group, saved_group in zip(self.fp32_from_fp16_groups, state_dict['fp32_from_fp16']):
            for current, saved in zip(current_group, saved_group):
                current.data.copy_(saved.data)
    def step(self, closure=None): # could add clip option.
        """
        If no closure is supplied, :attr:`step` should be called after
        ``fp16_optimizer_obj.backward(loss)``.
        :attr:`step` updates the fp32 master copy of parameters using the optimizer supplied to
        :class:`FP16_Optimizer`'s constructor, then copies the updated fp32 params into the fp16 params
        originally referenced by :class:`FP16_Optimizer`'s constructor, so the user may immediately run
        another forward pass using their model.
        If a closure is supplied, :attr:`step` may be called without a prior call to
        :attr:`backward(loss)`.
        This control flow is identical to `ordinary Pytorch optimizer use`_ with closures.
        However, the user should take care that any ``loss.backward()`` call within the closure
        has been replaced by ``fp16_optimizer_obj.backward(loss)``.
        Args:
            closure (optional):  Closure that will be supplied to the underlying optimizer originally passed to :class:`FP16_Optimizer`'s constructor.  closure should call :attr:`zero_grad()` on the :class:`FP16_Optimizer` object, compute the loss, call :attr:`backward(loss)`, and return the loss.
        Example with closure::
            # optimizer is assumed to be an FP16_Optimizer object, previously constructed from an
            # existing pytorch optimizer.
            for input, target in dataset:
                def closure():
                    optimizer.zero_grad()
                    output = model(input)
                    loss = loss_fn(output, target)
                    # loss.backward() becomes:
                    optimizer.backward(loss)
                    return loss
                optimizer.step(closure)
        .. warning::
            Currently, calling :attr:`step` with a closure is not compatible with dynamic loss scaling.
        .. _`ordinary Pytorch optimizer use`:
            http://pytorch.org/docs/master/optim.html#optimizer-step-closure
        """
        scale = self.loss_scaler.loss_scale
        self._update_scale(self.overflow)
        if self.overflow:
            # Skip the whole step; dynamic scaling will retry with a smaller scale.
            self.maybe_print("OVERFLOW! Skipping step. Attempted loss scale: {}, reducing to {}"
                             .format(scale, self.loss_scale))
            return
        if closure is not None:
            retval = self._step_with_closure(closure)
        else:
            retval = self.optimizer.step()
        self._master_params_to_model_params()
        return retval
    def _step_with_closure(self, closure):
        def wrapped_closure():
            # helpful for debugging
            # print("Calling wrapped_closure, first_closure_call_this_step = {}"
            #       .format(self.first_closure_call_this_step))
            if self.first_closure_call_this_step:
                # We expect that the fp16 params are initially fresh on entering self.step(),
                # so _master_params_to_model_params() is unnecessary the first time wrapped_closure()
                # is called within self.optimizer.step().
                self.first_closure_call_this_step = False
            else:
                # If self.optimizer.step() internally calls wrapped_closure more than once,
                # it may update the fp32 params after each call.  However, self.optimizer
                # doesn't know about the fp16 params at all.  If the fp32 params get updated,
                # we can't rely on self.optimizer to refresh the fp16 params.  We need
                # to handle that manually:
                self._master_params_to_model_params()
            # Our API expects the user to give us ownership of the backward() call by
            # replacing all calls to loss.backward() with optimizer.backward(loss).
            # This requirement holds whether or not the call to backward() is made within a closure.
            # If the user is properly calling optimizer.backward(loss) within "closure,"
            # calling closure() here will give the fp32 master params fresh gradients
            # for the optimizer to play with, so all wrapped_closure needs to do is call
            # closure() and return the loss.
            temp_loss = closure()
            while(self.overflow):
                scale = self.loss_scaler.loss_scale
                self._update_scale(self.overflow)
                self.maybe_print("OVERFLOW within closure! Skipping step. Attempted loss scale: {}, "
                                 "reducing to {}".format(scale, self.loss_scale))
                temp_loss = closure()
            return temp_loss
        retval = self.optimizer.step(wrapped_closure)
        self.first_closure_call_this_step = True
        return retval
    def backward(self, loss, update_master_grads=True, retain_graph=False):
        """
        :attr:`backward` performs the following conceptual steps:
        1. fp32_loss = loss.float() (see first Note below)
        2. scaled_loss = fp32_loss*loss_scale
        3. scaled_loss.backward(), which accumulates scaled gradients into the ``.grad`` attributes of the model's leaves (which may be fp16, fp32, or a mixture, depending how your model was defined).
        4. fp16 grads are then copied to the master params' ``.grad`` attributes (see second Note), which are guaranteed to be fp32.
        5. Finally, master grads are divided by loss_scale.
        In this way, after :attr:`backward`, the master params have fresh gradients,
        and :attr:`step` may be called.
        .. note::
            :attr:`backward` internally converts the loss to fp32 before applying the loss scale.
            This provides some additional safety against overflow if the user has supplied an
            fp16 loss value.
            However, for maximum overflow safety, the user should
            compute the loss criterion (MSE, cross entropy, etc) in fp32 before supplying it to
            :attr:`backward`.
        .. warning::
            The gradients found in a model's leaves after the call to
            :attr:`backward` should not be regarded as valid in general,
            because it's possible
            they have been scaled (and in the case of dynamic loss scaling,
            the scale factor may change over time).
            If the user wants to inspect gradients after a call to :attr:`backward`,
            only the master gradients should be regarded as valid.  These can be retrieved via
            :attr:`inspect_master_grad_data()`.
        Args:
            loss:  The loss output by the user's model.  loss may be either float or half (but see first Note above).
            update_master_grads (bool, optional, default=True):  Option to copy fp16 grads to fp32 grads on this call.  By setting this to False, the user can delay the copy, which is useful to eliminate redundant fp16->fp32 grad copies if :attr:`backward` is being called on multiple losses in one iteration.  If set to False, the user becomes responsible for calling :attr:`update_master_grads` before calling :attr:`step`.
            retain_graph (bool, optional, default=False):  Forwards the usual ``retain_graph=True`` option to the internal call to ``loss.backward``.  If ``retain_graph`` is being used to accumulate gradient values from multiple backward passes before calling ``optimizer.step``, passing ``update_master_grads=False`` is also recommended (see Example below).
        Example::
            # Ordinary operation:
            optimizer.backward(loss)
            # Naive operation with multiple losses (technically valid, but less efficient):
            # fp32 grads will be correct after the second call,  but
            # the first call incurs an unnecessary fp16->fp32 grad copy.
            optimizer.backward(loss1)
            optimizer.backward(loss2)
            # More efficient way to handle multiple losses:
            # The fp16->fp32 grad copy is delayed until fp16 grads from all
            # losses have been accumulated.
            optimizer.backward(loss1, update_master_grads=False)
            optimizer.backward(loss2, update_master_grads=False)
            optimizer.update_master_grads()
        """
        # To consider:  try multiple backward passes using retain_grad=True to find
        # a loss scale that works.  After you find a loss scale that works, do a final dummy
        # backward pass with retain_graph=False to tear down the graph.  Doing this would avoid
        # discarding the iteration,  but probably wouldn't improve overall efficiency.
        self.loss_scaler.backward(loss.float(), retain_graph=retain_graph)
        if update_master_grads:
            self.update_master_grads()
    def update_master_grads(self):
        """
        Copy the ``.grad`` attribute from stored references to fp16 parameters to
        the ``.grad`` attribute of the fp32 master parameters that are directly
        updated by the optimizer.  :attr:`update_master_grads` only needs to be called if
        ``fp16_optimizer_obj.backward`` was called with ``update_master_grads=False``.
        """
        if self.dynamic_loss_scale:
            self._check_overflow()
            if self.overflow: return
        self._model_grads_to_master_grads()
        self._downscale_master()
    def inspect_master_grad_data(self):
        """
        When running with :class:`FP16_Optimizer`,
        ``.grad`` attributes of a model's fp16 leaves should not be
        regarded as truthful, because they might be scaled.
        After a call to :attr:`fp16_optimizer_obj.backward(loss)`, if no overflow was encountered,
        the fp32 master params' ``.grad``
        attributes will contain valid gradients properly divided by the loss scale.  However,
        because :class:`FP16_Optimizer` flattens some parameters, accessing them may be
        nonintuitive.  :attr:`inspect_master_grad_data`
        allows those gradients to be viewed with shapes corresponding to their associated model leaves.
        Returns:
            List of lists (one list for each parameter group).  The list for each parameter group
            is a list of the ``.grad.data`` attributes of the fp32 master params belonging to that group.
        """
        if self.overflow:
            print("Warning:  calling FP16_Optimizer.inspect_master_grad_data while in an overflow state.  "
                  "Gradients are currently invalid (may be inf, nan, or stale).  Returning None.")
            return None
        else:
            # The optimizer owns only references to master params.
            master_grads_data = []
            for param_group in self.optimizer.param_groups:
                master_grads_this_group = []
                for param in param_group['params']:
                    if param.grad is not None:
                        master_grads_this_group.append(param.grad.data)
                    else:
                        master_grads_this_group.append(None)
                master_grads_data.append(master_grads_this_group)
            return master_grads_data
    # Promote loss scale so it can be retrieved or set via "fp16_optimizer_instance.loss_scale"
    def _get_loss_scale(self):
        return self.loss_scaler.loss_scale
    def _set_loss_scale(self, value):
        self.loss_scaler.cur_scale = value
    loss_scale = property(_get_loss_scale, _set_loss_scale)
    # Promote state so it can be retrieved or set via "fp16_optimizer_instance.state"
    def _get_state(self):
        return self.optimizer.state
    def _set_state(self, value):
        self.optimizer.state = value
    state = property(_get_state, _set_state)
    # Promote param_groups so it can be retrieved or set via "fp16_optimizer_instance.param_groups"
    # (for example, to adjust the learning rate)
    def _get_param_groups(self):
        return self.optimizer.param_groups
    def _set_param_groups(self, value):
        self.optimizer.param_groups = value
    param_groups = property(_get_param_groups, _set_param_groups)
| 31,831 | 49.28752 | 437 | py |
capacity-approaching-autoencoders | capacity-approaching-autoencoders-master/gammaDIME.py | from keras import backend as K
# gamma-DIME loss
def gamma_dime_loss(args):
    """gamma-DIME mutual-information lower-bound loss.

    args: [t_xy, t_xy_bar] -- statistics-network outputs evaluated on joint
    samples (x, y) and on shuffled/marginal samples (x, y_bar). Returns the
    negated bound so that minimizing this loss maximizes the MI estimate.
    """
    # define the parameter gamma
    gamma = 1
    t_xy = args[0]
    t_xy_bar = args[1]
    # (the trailing dataset residue fused onto the original return line
    # has been removed)
    loss = -(gamma*K.mean(K.log(t_xy)) - K.mean(K.pow(t_xy_bar, gamma))+1)
    return loss
# --- Capacity-Approaching_AE.py ---
from __future__ import absolute_import, division, print_function, unicode_literals
from keras.layers import Input, Dense, GaussianNoise, Concatenate, Lambda, Reshape, Flatten, Dropout
from keras.layers import BatchNormalization, Activation, ZeroPadding2D
from keras.models import Sequential, Model, load_model
from keras.optimizers import Adam
from keras import backend as K
from uniform_noise import UniformNoise
from gammaDIME import gamma_dime_loss
import os
import argparse
K.clear_session()
import scipy.io as sio
import numpy as np
import tensorflow as tf
from numpy.random import seed
seed(1)
from tensorflow import set_random_seed
set_random_seed(2)
# custom cross-entropy to allow gradient separation and label smoothing regularization
def categorical_crossentropy(y_true, y_pred, from_logits=False, label_smoothing=0.2):
    """Categorical cross-entropy with optional label smoothing.

    y_true is cast to y_pred's dtype; when label_smoothing is nonzero the
    one-hot targets are mixed toward the uniform distribution before the
    standard Keras cross-entropy is applied.
    """
    y_pred = K.constant(y_pred) if not K.is_tensor(y_pred) else y_pred
    y_true = K.cast(y_true, y_pred.dtype)
    # BUGFIX: the original used `label_smoothing is not 0`, an identity check
    # against an int literal (CPython-implementation-dependent and a
    # SyntaxWarning on Python 3.8+). Use numeric inequality.
    if label_smoothing != 0:
        smoothing = K.cast_to_floatx(label_smoothing)
        def _smooth_labels():
            num_classes = K.cast(K.shape(y_true)[1], y_pred.dtype)
            return y_true * (1.0 - smoothing) + (smoothing / num_classes)
        y_true = K.switch(K.greater(smoothing, 0), _smooth_labels, lambda: y_true)
    return K.categorical_crossentropy(y_true, y_pred, from_logits=from_logits)
# MINE loss
def mine_loss(args):
    """MINE (Donsker-Varadhan) mutual-information lower-bound loss, negated."""
    t_xy, t_xy_bar = args[0], args[1]
    batch_size = tf.cast(K.shape(t_xy)[0], tf.float32)
    # DV bound: E[T(x,y)] - log E[e^{T(x,y_bar)}]; logsumexp - log(B) is the
    # log of the batch mean.
    bound = K.mean(t_xy) - K.logsumexp(t_xy_bar) + K.log(batch_size)
    return -bound
# mixed loss
def customLoss(MI):
    """Build the autoencoder loss: smoothed cross-entropy minus beta * MI."""
    beta = 0.2  # BETA PARAMETER IN THE PAPER, to choose at the beginning
    def dice(yTrue, yPred):
        # Cross-entropy drives decoding accuracy; the -beta*MI term pushes
        # the encoder toward capacity-achieving codes.
        return categorical_crossentropy(yTrue, yPred) - beta*MI
    return dice
# shuffling for MINE input
def data_generation_mi(data_x, data_y):
    """Build joint and product-of-marginals samples for MI estimation.

    Returns (data_xy, data_x_y): rows of [x, y] with pairing intact, and rows
    of [x, y_shuffled] with the x-y pairing broken by a row permutation of y.

    BUGFIX: the original called ``np.take(data_y, perm, axis=0, out=data_y)``,
    which (a) mutated the caller's ``data_y`` in place as a hidden side effect
    and (b) read and wrote overlapping memory, for which NumPy documents the
    result as undefined. The shuffle is now done into a fresh array.
    """
    data_xy = np.hstack((data_x, data_y))
    permutation = np.random.permutation(data_y.shape[0])
    data_y_shuffle = data_y[permutation]
    data_x_y = np.hstack((data_x, data_y_shuffle))
    return data_xy, data_x_y
# BLER computation
def compute_BER(x, y):
    """Block-error rate: fraction of rows whose argmax decisions disagree."""
    num_blocks = x.shape[0]
    errors = sum(
        1 for row in range(num_blocks)
        if np.argmax(y[row, :]) != np.argmax(x[row, :])
    )
    return errors / num_blocks
# get a "l2 norm of gradients" tensor
def get_gradient_norm(model):
    """Build a tensor holding the global L2 norm of all trainable-weight gradients."""
    with K.name_scope('gradient_norm'):
        gradient_list = K.gradients(model.total_loss, model.trainable_weights)
        squared_total = sum(K.sum(K.square(g)) for g in gradient_list)
        norm = K.sqrt(squared_total)
    return norm
# shuffling tensor
def shuffleColumns(x):
    # NOTE(review): despite the name, this shuffles along axis 0 (batch rows),
    # which is what the MI estimator needs to break the x-y pairing.
    return tf.gather(x, tf.random.shuffle(tf.range(tf.shape(x)[0])))
# for parser
def str2bool(v):
    """Parse a boolean-ish command-line string; raise ArgumentTypeError otherwise."""
    if isinstance(v, bool):
        return v
    lowered = v.lower()
    if lowered in ('yes', 'true', 't', 'y', '1'):
        return True
    if lowered in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')
class CAAE():
    def __init__(self, encoder_path, decoder_path, output_directory, MI_type='MINE'):
        """Build (or load) the capacity-approaching autoencoder.

        Wires up three keras models: transmitter+channel (encoder), receiver
        (decoder), and a statistics network used to estimate the mutual
        information between channel input and output. MI_type selects the
        estimator: 'MINE' (default) or 'gammaDIME'.
        """
        block_size = 5 # k parameter
        num_enc_inputs = pow(2, block_size) # M parameter = 2^k
        num_hidden = 2 # 2*N parameter
        R = block_size / num_hidden # rate
        EbN0dB = 7 # training parameter
        N = pow(10, -0.1 * EbN0dB) / (2 * R) # training variance
        K.set_learning_phase(1) # set flag to allow noise layer during training
        self.encoder_path = encoder_path
        self.decoder_path = decoder_path
        self.output_directory = output_directory
        # if model is already available, load it
        if os.path.exists(self.encoder_path) and os.path.exists(self.decoder_path):
            self.encoder = load_model(self.encoder_path)
            self.decoder = load_model(self.decoder_path)
            print("Loaded models...")
        else:
            # input shape
            self.num_enc_inputs = num_enc_inputs
            self.num_hidden = num_hidden
            self.num_joint = 2*num_hidden
            # set optimizers
            optimizer = Adam(0.01, 0.5)
            optimizer_MI = Adam(0.01, 0.5)
            optimizer_MI_est = Adam(0.01, 0.5)
            # build the transmitter
            self.transmitter = self.build_transmitter()
            # build the receiver
            self.receiver = self.build_receiver()
            # build the mutual information block estimator
            self.discriminator = self.build_discriminator(MI_type)
            # the transmitter encodes the bits in s
            s_in = Input(shape=(self.num_enc_inputs,))
            x = self.transmitter(s_in)
            # build the channel
            #x_n = Lambda(lambda x: np.sqrt(num_hidden)*K.l2_normalize(x,axis=1))(x) # if CONSTANT POWER
            x_n = BatchNormalization(axis=-1, center=False, scale=False)(x) # if AVERAGE POWER, use batch normalization
            ch = Lambda(lambda x: x)(x_n) # delta channel, you can set up different channels in a different function, e.g., Rayleigh
            y = GaussianNoise(np.sqrt(N))(ch) # gaussian layer
            #y = UniformNoise(minval=-np.sqrt(3*N), maxval=np.sqrt(3*N)(x_n) # uniform noise layer
            # receiver connection
            s_out = self.receiver(y)
            # build the keras model
            self.encoder = Model(s_in,x_n)
            # set up the tensors for the mutual information block:
            # T1 sees joint samples (x, y); T2 sees (x, y_bar) with y shuffled
            # across the batch to approximate the product of marginals.
            T1 = Concatenate(name='network/concatenate_layer_1')([x_n, y])
            y_bar_input = Lambda(lambda x:shuffleColumns(x))(y) # shuffle y input as y_bar
            T2 = Concatenate(name='network/concatenate_layer_2')([x_n, y_bar_input])
            # estimation of joint and marginals
            t_xy = self.discriminator(T1)
            t_x_y = self.discriminator(T2)
            if MI_type == 'gammaDIME':
                loss = Lambda(gamma_dime_loss, name='gamma_dime_loss')([t_xy, t_x_y])
            else:
                # use MINE is default
                loss = Lambda(mine_loss, name='mine_loss')([t_xy, t_x_y])
            # the model's "prediction" is the (positive) MI estimate itself
            output_MI = Lambda(lambda x: -x)(loss)
            self.loss_model = Model(s_in, output_MI)
            self.loss_model.add_loss(loss)
            self.loss_model.compile(optimizer=optimizer_MI)
            # decoder model
            y_in = Input(shape=(self.num_hidden,))
            s_dec = self.receiver(y_in)
            self.decoder = Model(y_in,s_dec)
            # combined model: cross-entropy minus beta*MI (see customLoss)
            self.combined = Model(s_in, s_out)
            loss_model_2 = customLoss(output_MI)
            self.combined.compile(loss=loss_model_2, optimizer=optimizer)
            # model for the estimation of MI at different SNRs
            T1_est = Input(shape=(self.num_joint,))
            T2_est = Input(shape=(self.num_joint,))
            t_xy = self.discriminator(T1_est)
            t_x_y = self.discriminator(T2_est)
            if MI_type == 'gammaDIME':
                loss_est = Lambda(gamma_dime_loss, name='gamma_dime_loss')([t_xy, t_x_y])
            else:
                loss_est = Lambda(mine_loss, name='mine_loss')([t_xy, t_x_y])
            output_MI_est = Lambda(lambda x: -x)(loss_est)
            self.loss_model_est = Model([T1_est, T2_est], output_MI_est)
            self.loss_model_est.add_loss(loss_est)
            self.loss_model_est.compile(optimizer=optimizer_MI_est)
def build_transmitter(self):
model = Sequential()
model.add(Dense(self.num_enc_inputs, activation="relu", input_dim=self.num_enc_inputs))
# model.add(Dense(100)) if high rate
model.add(Dense(self.num_hidden))
model.summary()
s_in = Input(shape=(self.num_enc_inputs,))
x = model(s_in)
return Model(s_in, x)
def build_receiver(self):
model = Sequential()
model.add(Dense(self.num_enc_inputs, activation="relu", input_dim=self.num_hidden))
model.add(Dense(self.num_enc_inputs, activation='softmax'))
model.summary()
y = Input(shape=(self.num_hidden,))
s_out = model(y)
return Model(y, s_out)
def build_discriminator(self, MI_type):
if MI_type == 'gammaDIME':
activation_type = 'softplus'
else:
activation_type = 'linear'
model = Sequential()
model.add(Dense(200, activation="relu", input_dim=self.num_joint))
model.add(GaussianNoise(0.3)) # It works only during the training
model.add(Dense(200, activation="sigmoid"))
model.add(Dense(1, activation = activation_type))
model.summary()
T = Input(shape=(self.num_joint,))
MI = model(T)
return Model(T,MI)
    def train(self, epochs, batch_size_AE, batch_size_MI, k , n):
        """Alternately train the MI estimator and the autoencoder.

        Runs three outer stages with increasing epoch budgets
        (``epochs_AE`` / ``epochs_MI``); in each round the MI discriminator
        is refreshed before the autoencoder update.  After every stage the
        BLER curve and the learned code (constellation) are exported to
        .mat files; the encoder/decoder are finally saved as HDF5 models.

        Parameters
        ----------
        epochs : number of alternating rounds per stage.
        batch_size_AE : mini-batch size for the autoencoder updates.
        batch_size_MI : mini-batch size for the MI-estimator updates.
        k : bits per message (M = 2**k one-hot symbols).
        n : number of channel uses (code length).
        """
        block_size = k # k parameter
        num_enc_inputs = pow(2, block_size) # M parameter = 2^k
        p = 1000 # realizations
        alphabet = np.eye(num_enc_inputs, dtype='float32') # one-hot encoded values
        s_in = np.transpose(np.tile(alphabet, p))
        m = n # to avoid confusion with noise
        R = block_size / m # (half) rate
        EbN0dB = 7
        # NOTE(review): this N is never read during training (it is recomputed
        # in the evaluation loop below) — presumably the channel noise is a
        # layer inside self.combined; confirm.
        N = pow(10, -0.1 * EbN0dB) / (2 * R) # noise power
        MI = np.zeros((1,epochs))
        epochs_AE = [10,100,1000] # training epochs for the autoencoder block
        epochs_MI = [10,100,100] # training epochs for the MINE block
        for e in range(3):
            for ij in range(epochs):
                # training the MI estimator
                for epoch in range(epochs_MI[e]):
                    idx = np.random.randint(0, s_in.shape[0], batch_size_MI)
                    s_in_batch = s_in[idx]
                    loss_MI = self.loss_model.train_on_batch(s_in_batch,[])
                    # Plot the progress. if needed
                    # print("%d [MI loss: %f" % (epoch, loss_MI))
                mutual_information = self.loss_model.predict(s_in)
                MI[0,ij] = np.mean(mutual_information)
                # traning auto encoder ON BATCH
                loss_AE = np.zeros((epochs_AE[e],))
                for epoch in range(epochs_AE[e]):
                    idx = np.random.randint(0, s_in.shape[0], batch_size_AE)
                    s_in_batch = s_in[idx]
                    loss_AE[epoch] = self.combined.train_on_batch(s_in_batch, s_in_batch)
                print("%d [CAAE_loss: %f, MI_loss: %f]" % (ij, np.mean(loss_AE), MI[0,ij])) # median for a stable output
            # if you want to visualize the constellation and the BLER during the training progress on Matlab
            EbN0_dB = range(-14, 29)
            ber = np.zeros((43,))
            j = 0
            p_test = 1000
            s_in_t = np.transpose(np.tile(alphabet, p_test))
            for EbN0 in EbN0_dB:
                # AWGN with per-dimension variance derived from Eb/N0 and rate R
                N = pow(10, -0.1 * EbN0) / (2 * R)
                mean_t = np.zeros((m,))
                cov_t = np.dot(N, np.eye(m))
                features_r = self.encoder.predict(s_in_t) # get the code
                noise = np.random.multivariate_normal(mean_t, cov_t, p_test * num_enc_inputs) # add channel influence
                features_n = np.add(features_r, noise) # received code
                s_out_t = self.decoder.predict(features_n) # decoded message
                ber[j] = compute_BER(s_in_t,s_out_t)
                j = j + 1
            sio.savemat('BER_%d.mat'%e,{'ber': ber})
            features_r = self.encoder.predict(s_in_t)
            sio.savemat('code_%d.mat'%e, {'code': features_r})
        # save the model
        save_path = self.output_directory + "/Models_AE"
        if not os.path.exists(save_path):
            os.makedirs(save_path)
        self.encoder.save(save_path + "/encoder.h5")
        self.decoder.save(save_path + "/decoder.h5")
    def train_MI(self, epochs_MI, batch_size_MI, k, n):
        """Estimate the mutual information of the learned code at several SNRs.

        For each Eb/N0 point an AWGN channel is simulated on the encoder
        output, the standalone MI estimator is (re)trained on joint/marginal
        samples, and the averaged estimate is stored.  Results are exported
        to 'MI_estimation.mat' for plotting in Matlab.

        Parameters
        ----------
        epochs_MI : training steps of the MI estimator per SNR point.
        batch_size_MI : mini-batch size for the MI-estimator updates.
        k : bits per message (M = 2**k one-hot symbols).
        n : number of channel uses (code length).
        """
        block_size = k # k parameter
        num_enc_inputs = pow(2, block_size) # M parameter = 2^k
        m = n # N parameter
        R = block_size/m
        p = 10000
        alphabet = np.eye(num_enc_inputs, dtype='float32') # One-hot encoded values
        s_in = np.transpose(np.tile(alphabet, p))
        EbN0_dB = range(-14, 19)
        j = 0
        # 33 entries: one per Eb/N0 value in range(-14, 19)
        MI = np.zeros((1,33))
        for EbN0 in EbN0_dB:
            N = pow(10, -0.1 * EbN0)/ (2 * R)
            mean_t = np.zeros((m,))
            cov_t = np.dot(N, np.eye(m))
            features_r = self.encoder.predict(s_in)
            noise = np.random.multivariate_normal(mean_t,cov_t,p*num_enc_inputs)
            features_n = np.add(features_r,noise)
            # training the MI estimator
            data_xy, data_x_y = data_generation_mi(features_r, features_n)
            for epoch in range(epochs_MI):
                idx = np.random.randint(0, s_in.shape[0], batch_size_MI)
                data_xy_batch = data_xy[idx]
                data_x_y_batch = data_x_y[idx]
                self.loss_model_est.train_on_batch([data_xy_batch, data_x_y_batch],[])
            mutual_information = self.loss_model_est.predict([data_xy, data_x_y])
            MI[0,j] = np.mean(mutual_information)
            print(MI[0,j])
            j = j+1
        # save the estimated mutual information for Matlab
        sio.savemat('MI_estimation.mat', {'Eb':EbN0_dB, 'MI': MI})
    def test(self, batch_size_AE, k, n):
        """Evaluate the trained encoder/decoder over a range of Eb/N0 values.

        For each SNR point an AWGN channel is simulated on the encoder
        output and the error rate of the decoded messages is computed with
        ``compute_BER``; the resulting curve is written to 'BLER_test.mat'
        for plotting in Matlab.

        Parameters
        ----------
        batch_size_AE : number of realizations of each message to simulate.
        k : bits per message (M = 2**k one-hot symbols).
        n : number of channel uses (code length).
        """
        block_size = k # k parameter
        num_enc_inputs = pow(2, block_size) # M parameter = 2^k
        alphabet = np.eye(num_enc_inputs, dtype='float32') # One-hot encoded values
        m = n
        R = block_size / m
        p_test = batch_size_AE
        s_in_t = np.transpose(np.tile(alphabet, p_test))
        EbN0_dB = range(-14, 19)
        # 33 entries: one per Eb/N0 value in range(-14, 19)
        ber = np.zeros((33,))
        j = 0
        for EbN0 in EbN0_dB:
            # AWGN with per-dimension variance derived from Eb/N0 and rate R
            N = pow(10, -0.1 * EbN0) / (2 * R)
            mean_t = np.zeros((m,))
            cov_t = np.dot(N, np.eye(m))
            features_r = self.encoder.predict(s_in_t)
            noise = np.random.multivariate_normal(mean_t, cov_t, p_test * num_enc_inputs)
            features_n = np.add(features_r, noise)
            s_out_t = self.decoder.predict(features_n)
            ber[j] = compute_BER(s_in_t, s_out_t)
            j = j + 1
        sio.savemat('BLER_test.mat', {'ber': ber})
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    # Fixed help texts: the originals referred to "generator1"/"discriminator1"
    # (copy-paste from a GAN script) and misspelled "Directory".
    parser.add_argument('--load_encoder', help='Path to existing encoder weights file',
                        default="Models_AE/encoder.h5")
    parser.add_argument('--load_decoder', help='Path to existing decoder weights file',
                        default="Models_AE/decoder.h5")
    parser.add_argument('--output_directory', help="Directory to save weights and images to.",
                        default="Output")
    parser.add_argument('--train', type=str2bool, help="Start the training process.",
                        default=False)
    parser.add_argument('--MI_type', help="Select the type of estimator to use.",
                        default="MINE")
    args = parser.parse_args()
    # Load the model. Bind the instance to a lowercase name so the CAAE class
    # itself is not shadowed (the original assigned the instance to the name
    # ``CAAE``, making any further instantiation impossible).
    caae = CAAE(args.load_encoder, args.load_decoder, args.output_directory, args.MI_type)
    # Train only when no pre-trained encoder exists and --train was requested.
    if not os.path.exists(args.load_encoder) and args.train:
        print('Training the model')
        caae.train(epochs=10, batch_size_AE=1000, batch_size_MI=1000, k=5, n=2)
        caae.train_MI(epochs_MI=100, batch_size_MI=1000, k=5, n=2)
        caae.test(batch_size_AE=4000, k=5, n=2)
    # Just testing
    elif os.path.exists(args.load_encoder):
        print('Testing using the loaded models in your folder. If you want to train the model, delete the models')
        caae.test(batch_size_AE=4000, k=5, n=2)
    else:
        print('Error in the models loading, please check the correct path')
| 15,913 | 37.626214 | 132 | py |
capacity-approaching-autoencoders | capacity-approaching-autoencoders-master/uniform_noise.py | from keras.engine import Layer
from keras import backend as K
class UniformNoise(Layer):
    """Additive uniform noise layer.

    Noise drawn from ``U(minval, maxval)`` is added to the input during
    training only; at inference time the layer is the identity, so it acts
    purely as a regularizer (analogous to ``GaussianNoise``).

    # Arguments
        minval: Lower bound of the uniform distribution.
        maxval: Upper bound of the uniform distribution.

    # Input shape
        Arbitrary.

    # Output shape
        Same as the input shape.
    """
    def __init__(self, minval=-1.0, maxval=1.0, **kwargs):
        super(UniformNoise, self).__init__(**kwargs)
        self.supports_masking = True
        self.minval = minval
        self.maxval = maxval

    def call(self, inputs, training=None):
        def add_noise():
            perturbation = K.random_uniform(shape=K.shape(inputs),
                                            minval=self.minval,
                                            maxval=self.maxval)
            return inputs + perturbation
        # Noisy in the training phase, identity otherwise.
        return K.in_train_phase(add_noise, inputs, training=training)

    def get_config(self):
        # Merge this layer's settings into the base-layer config.
        merged = dict(super(UniformNoise, self).get_config())
        merged.update({'minval': self.minval, 'maxval': self.maxval})
        return merged
| 1,190 | 30.342105 | 69 | py |
vermouth-martinize | vermouth-martinize-master/doc/source/conf.py | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/stable/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
from pkg_resources import get_distribution
import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# Do not generate APIdocs for members missing docstrings (undoc-members)
os.environ['APIDOC_OPTIONS'] = 'members,show-inheritence,inherited-members'
# Set APIDOC options
#os.environ['SPHINX_APIDOC_OPTIONS'] = 'members,undoc-members,show-inheritance,special-members'
os.environ['SPHINX_APIDOC_OPTIONS'] = 'members'
# -- Project information -----------------------------------------------------
project = 'VerMoUTH'
copyright = '2018, University of Groningen'
author = 'Peter C Kroon, Jonathan Barnoud, Tsjerk A Wassenaar, Siewert-Jan Marrink'
# The full version, including alpha/beta/rc tags
release = get_distribution('vermouth').version
# The short X.Y version
version = '.'.join(release.split('.')[:2])
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'sphinx.ext.napoleon',
'sphinx.ext.autosectionlabel',
'sphinxcontrib.apidoc',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'en'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
nitpick_ignore = [
('py:class', 'networkx.algorithms.isomorphism.isomorphvf2.GraphMatcher'),
('py:class', 'optional'),
]
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'VerMoUTHdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'VerMoUTH.tex', 'VerMoUTH Documentation',
author, 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'vermouth', 'VerMoUTH Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'VerMoUTH', 'VerMoUTH Documentation',
author, 'VerMoUTH', 'One line description of project.',
'Miscellaneous'),
]
# -- Extension configuration -------------------------------------------------
apidoc_module_dir = '../../vermouth'
apidoc_output_dir = 'api'
apidoc_separate_modules = True
apidoc_excluded_paths = ['tests', 'redistributed']
autodoc_inherit_docstrings = False
autoclass_content = 'both'
autodoc_default_options = {'members': None,
'undoc-members': None,
'show-inheritance': None}
napoleon_google_docstring = False
napoleon_numpy_docstring = True
napoleon_preprocess_types = False
napoleon_type_aliases = {
'Molecule': 'vermouth.molecule.Molecule',
}
# -- Options for intersphinx extension ---------------------------------------
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'python': ('https://docs.python.org', None),
'networkx': ('https://networkx.github.io/documentation/latest', None),
'numpy': ('http://docs.scipy.org/doc/numpy', None),
'scipy': ('http://docs.scipy.org/doc/scipy/reference', None),
}
# Borrowed from https://github.com/sphinx-doc/sphinx/issues/5603
# On top of that, networkx.isomorphism.GraphMatcher is not documented, so link
# to the VF2 isomorphism module instead.
# See https://github.com/networkx/networkx/issues/3239
intersphinx_aliases = {
#('py:class', 'networkx.classes.graph.Graph'): ('py:class', 'networkx.Graph'),
#('py:class', 'networkx.algorithms.isomorphism.vf2userfunc.GraphMatcher'): ('py:class', 'networkx.isomorphism.GraphMatcher'),
#('py:class', 'networkx.algorithms.isomorphism.vf2userfunc.GraphMatcher'): ('py:module','networkx.algorithms.isomorphism.isomorphvf2'),
('py:class', 'networkx.isomorphism.GraphMatcher'): ('py:module', 'networkx.algorithms.isomorphism.isomorphvf2')
}
autosectionlabel_prefix_document = True
def add_intersphinx_aliases_to_inv(app):
    """Duplicate intersphinx inventory entries under their aliased names.

    For every ``(alias, target)`` pair in the ``intersphinx_aliases`` config
    value, the inventory entry found for ``target`` is also registered under
    ``alias``.  Pairs whose target cannot be resolved are silently skipped.
    """
    from sphinx.ext.intersphinx import InventoryAdapter
    inventory = InventoryAdapter(app.builder.env).main_inventory
    for alias, target in app.config.intersphinx_aliases.items():
        alias_domain, alias_name = alias
        target_domain, target_name = target
        try:
            # Look up the target entry and register it under the alias; a
            # missing target or alias domain just skips this pair.
            inventory[alias_domain][alias_name] = \
                inventory[target_domain][target_name]
        except KeyError:
            continue
def setup(app):
    """Sphinx extension hook: register the alias config value and resolver."""
    app.add_config_value('intersphinx_aliases', {}, 'env')
    app.connect('builder-inited', add_intersphinx_aliases_to_inv)
| 8,080 | 32.953782 | 139 | py |
Memristive-Seizure-Detection-and-Prediction-by-Parallel-Convolutional-Neural-Networks | Memristive-Seizure-Detection-and-Prediction-by-Parallel-Convolutional-Neural-Networks-master/network_training/SWEC_ETHZ.py | import os
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, Dataset
from torchvision import datasets, transforms
from torchsummary import summary
import torch.nn.functional as F
from sklearn.model_selection import KFold
from sklearn import preprocessing
import matplotlib.pyplot as plt
import math
from sklearn.decomposition import PCA
import sklearn
import brevitas.nn as qnn
import pandas as pd
from Network import ParallelConvolution
from utils import foldretrieve
# -----------------------------------------------------------
# Experiment configuration (SWEC-ETHZ iEEG dataset).
rootPath = '/scratch/jcu/cl/TBioCAS/processed_data/'
device = 'cuda' if torch.cuda.is_available() else 'cpu'
seed = 8
patients = ['01', '02', '03', '05', '06']  # patient IDs to train on
dataset = 'SWEC_ETHZ'
dataType = 'features'
numFold = 5  # 5-fold cross validation
bS = 32  # mini-batch size
num_epoch = 100
network_size = 32  # kernel size of the first parallel conv branch
# -----------------------------------------------------------
print('Using {} device'.format(device),'\n')
### Data Loading (preictal 0, interictal 1)
# Seed every RNG source for reproducible runs.
torch.manual_seed(seed)
np.random.seed(seed)
torch.backends.cudnn.benchmark = False
torch.use_deterministic_algorithms(True)
torch.backends.cudnn.deterministic = True
os.environ['PYTHONHASHSEED'] = str(seed)
# Per-epoch results accumulated here and written to CSV at the end.
df = pd.DataFrame(columns=['Patient', 'Fold', 'Epoch', 'Loss', 'Train Accuracy', 'Test Accuracy'])
# Per-patient pipeline: load features, PCA + 6-bit quantization, shuffle,
# 5-fold cross-validation training of the quantized parallel-conv network.
for patient in patients:
    allData = np.empty((0, 176))
    rawLabel = np.empty((0,), dtype=int)
    # Preictal segments (synthetic + real) get label 0, interictal label 1.
    newData = np.load(rootPath+dataType+'/'+dataset+'_'+'patient'+'_'+patient+'_'+'synthetic_preictal.npy')
    allData = np.append(allData,newData,axis=0)
    rawLabel = np.append(rawLabel,np.zeros((newData.shape[0],),dtype=int),axis=0)
    newData = np.load(rootPath+dataType+'/'+dataset+'_'+'patient'+'_'+patient+'_'+'preictal.npy')
    allData = np.append(allData,newData,axis=0)
    rawLabel = np.append(rawLabel,np.zeros((newData.shape[0],),dtype=int),axis=0)
    newData = np.load(rootPath+dataType+'/'+dataset+'_'+'patient'+'_'+patient+'_'+'interictal.npy')
    allData = np.append(allData,newData,axis=0)
    rawLabel = np.append(rawLabel,np.ones((newData.shape[0],),dtype=int),axis=0)
    # One-hot encode the labels.
    allLabel = np.zeros((rawLabel.size, rawLabel.max()+1))
    allLabel[np.arange(rawLabel.size),rawLabel] = 1
    # NOTE(review): PCA and the quantization grid are fit on the FULL dataset
    # before the fold split, so test-fold statistics leak into the training
    # features — confirm this is intended.
    pca = PCA(n_components=64)
    allData = pca.fit_transform(allData)
    # Quantize the features to a 6-bit uniform grid.
    inputbits=6
    inputstep=(np.amax(allData) - np.amin(allData))/(2**inputbits-1)
    allData = np.round(allData/inputstep)
    allData *= inputstep
    randInd = np.arange(0,len(allData))
    np.random.shuffle(randInd)
    allData = allData[randInd]
    allLabel = allLabel[randInd]
    # Trim to a multiple of 5 so np.split can form equal folds.
    allData = allData[:math.floor(allData.shape[0]/5)*5]
    allLabel = allLabel[:math.floor(allLabel.shape[0]/5)*5]
    foldsData = np.split(allData,numFold)
    foldsLabel = np.split(allLabel,numFold)
    loss_function = nn.BCEWithLogitsLoss()
    print('--------------------------------')
    print(f'Patient {patient}')
    for fold in range(0,numFold):
        trainData, testData, trainLabel, testLabel = foldretrieve(fold, foldsData, foldsLabel)
        print(f'Fold Number {fold}')
        network = ParallelConvolution(size=network_size).to(device)
        optimizer = torch.optim.Adam(network.parameters())
        lossHist = []
        testAccHist = []
        trainAccHist = []
        # Best-so-far metrics; bestEpoch[i] records the epoch of each best
        # (0: acc, 1: sensitivity, 2: specificity, 3: FP count, 4: AUROC).
        bestAcc = 0
        bestSen = 0
        bestSpe = 0
        bestFPC = 1e10
        bestAUR = 0
        bestEpoch = [0] * 5
        for epoch in range(num_epoch):
            trainCorrect = 0
            trainTotal = 0
            testCorrect = 0
            testTotal = 0
            trainOutputs = np.empty((0,1))
            trainLabels = np.empty((0,1))
            trainCM = np.zeros((2, 2))
            testOutputs = np.empty((0,1))
            testLabels = np.empty((0,1))
            testCM = np.zeros((2, 2))
            for i in range(0,len(trainData),bS):
                data = trainData[i:i+bS].to(device,dtype=torch.float)
                label = trainLabel[i:i+bS].to(device,dtype=torch.float)
                # Zero the gradients
                optimizer.zero_grad()
                # NOTE(review): eval() before the forward pass disables
                # dropout for the training update — confirm intended.
                network.eval()
                # Perform forward pass
                outputs = network(data)
                network.train()
                # Compute loss
                loss = loss_function(outputs, label)
                # Perform backward pass
                loss.backward()
                # Perform optimization
                optimizer.step()
                network.eval()
                outputs = outputs.cpu().detach().numpy()
                label = label.cpu().detach().numpy()
                outputs = np.argmax(outputs,axis=1)
                label = np.argmax(label,axis=1)
                trainCM = np.add(trainCM, sklearn.metrics.confusion_matrix(outputs, label, labels=[0, 1]))
                trainCorrect += np.sum(outputs==label)
                trainTotal += len(outputs)
                trainOutputs = np.append(trainOutputs,outputs.reshape((-1,1)),axis=0)
                trainLabels = np.append(trainLabels,label.reshape((-1,1)),axis=0)
            for i in range(0,len(testData),bS):
                data = testData[i:i+bS].to(device,dtype=torch.float)
                label = testLabel[i:i+bS].to(device,dtype=torch.float)
                network.eval()
                # Perform forward pass
                outputs = network(data)
                # Caclulate statistics
                testCorrect += np.sum(np.argmax(outputs.cpu().detach().numpy(),axis=1)==np.argmax(label.cpu().detach().numpy(),axis=1))
                testTotal += len(outputs)
                outputs = outputs.cpu().detach().numpy()
                label = label.cpu().detach().numpy()
                outputs = np.argmax(outputs,axis=1)
                label = np.argmax(label,axis=1)
                testCM = np.add(testCM, sklearn.metrics.confusion_matrix(outputs, label, labels=[0, 1]))
                # NOTE(review): testCorrect/testTotal were already incremented
                # above for this batch, so the raw counts are doubled; the
                # accuracy ratio testCorrect/testTotal is unaffected.
                testCorrect += np.sum(outputs==label)
                testTotal += len(outputs)
                testOutputs = np.append(testOutputs,outputs.reshape((-1,1)),axis=0)
                testLabels = np.append(testLabels,label.reshape((-1,1)),axis=0)
            # Checkpoint whenever this epoch beats the best test accuracy so
            # far (same condition as the bestAcc update further down).
            if testCorrect/testTotal > bestAcc:
                torch.save(network.state_dict(),'SWEC_ETHZ_P{}F{}.pt'.format(patient,fold))
            trainF1 = sklearn.metrics.f1_score(trainLabels,trainOutputs)
            trainAccuracy = trainCorrect/trainTotal
            trainTN, trainFP, trainFN, trainTP = trainCM.ravel()
            trainSensitivity = trainTP / (trainTP+trainFN)
            trainSpecitivity = trainTN / (trainTN+trainFP)
            try:
                trainAUROC = sklearn.metrics.roc_auc_score(trainLabels, trainOutputs)
            except:
                trainAUROC = 0.
            testF1 = sklearn.metrics.f1_score(testLabels,testOutputs)
            testAccuracy = testCorrect/testTotal
            testTN, testFP, testFN, testTP = testCM.ravel()
            testSensitivity = testTP / (testTP+testFN)
            testSpecitivity = testTN / (testTN+testFP)
            try:
                testAUROC = sklearn.metrics.roc_auc_score(testLabels, testOutputs)
            except:
                testAUROC = 0.
            if testAccuracy > bestAcc:
                bestAcc = testAccuracy
                bestEpoch[0] = epoch
            if testSensitivity > bestSen:
                bestSen = testSensitivity
                bestEpoch[1] = epoch
            if testSpecitivity > bestSpe:
                bestSpe = testSpecitivity
                bestEpoch[2] = epoch
            if testFP < bestFPC:
                bestFPC = testFP
                bestEpoch[3] = epoch
            if testAUROC > bestAUR:
                bestAUR = testAUROC
                bestEpoch[4] = epoch
            print('Epoch:', epoch, 'Train Loss:', loss.item())
            print('Train Accuracy:', trainAccuracy, 'Train Sensitivity:', trainSensitivity, 'Train Specitivity:', trainSpecitivity, 'Train FP Count:', trainFP, 'Train AUROC:', trainAUROC)
            print('Test Accuracy:', testAccuracy, 'Test Sensitivity:', testSensitivity, 'Test Specitivity:', testSpecitivity, 'Test FP Count:', testFP, 'Test AUROC:', testAUROC,)
            print('\n')
            # NOTE(review): appends the loss *tensor* (keeps its autograd
            # graph alive); loss.item() would be lighter.
            lossHist.append(loss)
            trainAccHist.append(trainCorrect/trainTotal)
            testAccHist.append(testCorrect/testTotal)
            # NOTE: DataFrame.append is deprecated (removed in pandas 2.0);
            # pd.concat is the modern replacement.
            df = df.append({'Patient': patient, 'Fold': fold, 'Epoch': epoch, 'Loss': loss.cpu().item(), 'Train Accuracy': trainAccuracy * 100., 'Test Accuracy': testAccuracy * 100.}, ignore_index=True)
df.to_csv('SWEC_ETHZ.csv', index=False) | 8,593 | 41.756219 | 202 | py |
Memristive-Seizure-Detection-and-Prediction-by-Parallel-Convolutional-Neural-Networks | Memristive-Seizure-Detection-and-Prediction-by-Parallel-Convolutional-Neural-Networks-master/network_training/utils.py | import torch
import numpy as np
def foldretrieve(fold, foldsData, foldsLabel):
    """Assemble train/test tensors for one cross-validation fold.

    Parameters
    ----------
    fold : int
        Index of the fold held out for testing.
    foldsData : sequence of np.ndarray
        Per-fold feature arrays of shape (samples, features).
    foldsLabel : sequence of np.ndarray
        Matching per-fold label arrays.

    Returns
    -------
    tuple of torch.Tensor
        ``(trainData, testData, trainLabel, testLabel)``; the data tensors
        gain a singleton channel axis -> (samples, 1, features) as expected
        by Conv1d.

    Note
    ----
    The original implementation built the training set as
    ``foldsData[0:fold] + foldsData[fold:-1]``, which leaked the held-out
    test fold into the training set and silently dropped the last fold.
    Fixed here to exclude exactly the held-out fold.
    """
    testData = foldsData[fold]
    testLabel = foldsLabel[fold]
    # All folds except the held-out one.
    allData = foldsData[:fold] + foldsData[fold + 1:]
    allLabel = foldsLabel[:fold] + foldsLabel[fold + 1:]
    try:
        trainData = np.concatenate(allData)
    except ValueError:
        # Nothing to concatenate (e.g. a single fold): keep as-is.
        trainData = allData
    try:
        trainLabel = np.concatenate(allLabel)
    except ValueError:
        trainLabel = allLabel
    # Add the channel axis expected by Conv1d: (samples, 1, features).
    trainData = np.expand_dims(trainData, 1)
    testData = np.expand_dims(testData, 1)
    trainData = torch.tensor(trainData)
    testData = torch.tensor(testData)
    trainLabel = torch.tensor(trainLabel)
    testLabel = torch.tensor(testLabel)
    return trainData, testData, trainLabel, testLabel
Memristive-Seizure-Detection-and-Prediction-by-Parallel-Convolutional-Neural-Networks | Memristive-Seizure-Detection-and-Prediction-by-Parallel-Convolutional-Neural-Networks-master/network_training/Network.py | import torch
from torch import nn
import torch.nn.functional as F
import brevitas.nn as qnn
### Network Definition
class ParallelConvolution(nn.Module):
    """Two parallel quantized 1-D convolutions feeding a small classifier.

    An input of shape (batch, 1, features) is processed by two convolution
    branches with complementary kernel sizes (``size`` and ``62 - size``);
    their ReLU outputs are concatenated along the time axis, average-pooled,
    flattened, and classified into two logits by 4-bit quantized linear
    layers.
    """
    def __init__(self, size=32):
        super(ParallelConvolution, self).__init__()
        # Parallel branches: 32 filters each, 6-bit quantized weights.
        self.conv1 = qnn.QuantConv1d(1, 32, size, weight_bit_width=6)
        self.conv2 = qnn.QuantConv1d(1, 32, 62 - size, weight_bit_width=6)
        self.avgpool = nn.AvgPool1d(2)
        self.flatten = nn.Flatten()
        self.dropout = nn.Dropout(0.5)
        # 4-bit quantized fully-connected head.
        self.linear = qnn.QuantLinear(1088, 8, bias=True, weight_bit_width=4)
        self.classification = qnn.QuantLinear(8, 2, bias=True, weight_bit_width=4)

    def forward(self, x):
        branch_a = F.relu(self.conv1(x))
        branch_b = F.relu(self.conv2(x))
        # Join the two branches along the time dimension.
        merged = torch.cat((branch_a, branch_b), dim=2)
        pooled = self.avgpool(merged)
        features = self.dropout(self.flatten(pooled))
        return self.classification(self.linear(features))
Memristive-Seizure-Detection-and-Prediction-by-Parallel-Convolutional-Neural-Networks | Memristive-Seizure-Detection-and-Prediction-by-Parallel-Convolutional-Neural-Networks-master/network_training/CHBMIT.py | import os
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, Dataset
from torchvision import datasets, transforms
from torchsummary import summary
import torch.nn.functional as F
from sklearn.model_selection import KFold
from sklearn import preprocessing
import matplotlib.pyplot as plt
import math
from sklearn.decomposition import PCA
import sklearn
import brevitas.nn as qnn
import pandas as pd
from Network import ParallelConvolution
from utils import foldretrieve
# -----------------------------------------------------------
# Experiment configuration (CHB-MIT scalp EEG dataset).
rootPath = '/scratch/jcu/cl/TBioCAS/processed_data/'
device = 'cuda' if torch.cuda.is_available() else 'cpu'
seed = 8
patients = ['01', '02', '03', '05', '08']  # patient IDs to train on
dataset = 'CHBMIT'
dataType = 'features'
numFold = 5  # 5-fold cross validation
bS = 32  # mini-batch size
num_epoch = 100
network_size = 32  # kernel size of the first parallel conv branch
# -----------------------------------------------------------
print('Using {} device'.format(device),'\n')
### Data Loading (preictal 0, interictal 1)
# Seed every RNG source for reproducible runs.
torch.manual_seed(seed)
np.random.seed(seed)
torch.backends.cudnn.benchmark = False
torch.use_deterministic_algorithms(True)
torch.backends.cudnn.deterministic = True
os.environ['PYTHONHASHSEED'] = str(seed)
# Per-epoch results accumulated here and written to CSV at the end.
df = pd.DataFrame(columns=['Patient', 'Fold', 'Epoch', 'Loss', 'Train Accuracy', 'Test Accuracy'])
# Per-patient pipeline: load features, PCA + 6-bit quantization, shuffle,
# 5-fold cross-validation training of the quantized parallel-conv network.
for patient in patients:
    allData = np.empty((0, 176))
    rawLabel = np.empty((0,), dtype=int)
    # Preictal segments (synthetic + real) get label 0, interictal label 1.
    newData = np.load(rootPath+dataType+'/'+dataset+'_'+'patient'+'_'+patient+'_'+'synthetic_preictal.npy')
    allData = np.append(allData,newData,axis=0)
    rawLabel = np.append(rawLabel,np.zeros((newData.shape[0],),dtype=int),axis=0)
    newData = np.load(rootPath+dataType+'/'+dataset+'_'+'patient'+'_'+patient+'_'+'preictal.npy')
    allData = np.append(allData,newData,axis=0)
    rawLabel = np.append(rawLabel,np.zeros((newData.shape[0],),dtype=int),axis=0)
    newData = np.load(rootPath+dataType+'/'+dataset+'_'+'patient'+'_'+patient+'_'+'interictal.npy')
    allData = np.append(allData,newData,axis=0)
    rawLabel = np.append(rawLabel,np.ones((newData.shape[0],),dtype=int),axis=0)
    # One-hot encode the labels.
    allLabel = np.zeros((rawLabel.size, rawLabel.max()+1))
    allLabel[np.arange(rawLabel.size),rawLabel] = 1
    # NOTE(review): PCA and the quantization grid are fit on the FULL dataset
    # before the fold split, so test-fold statistics leak into the training
    # features — confirm this is intended.
    pca = PCA(n_components=64)
    allData = pca.fit_transform(allData)
    # Quantize the features to a 6-bit uniform grid.
    inputbits=6
    inputstep=(np.amax(allData) - np.amin(allData))/(2**inputbits-1)
    allData = np.round(allData/inputstep)
    allData *= inputstep
    randInd = np.arange(0,len(allData))
    np.random.shuffle(randInd)
    allData = allData[randInd]
    allLabel = allLabel[randInd]
    # Trim to a multiple of 5 so np.split can form equal folds.
    allData = allData[:math.floor(allData.shape[0]/5)*5]
    allLabel = allLabel[:math.floor(allLabel.shape[0]/5)*5]
    foldsData = np.split(allData,numFold)
    foldsLabel = np.split(allLabel,numFold)
    loss_function = nn.BCEWithLogitsLoss()
    print('--------------------------------')
    print(f'Patient {patient}')
    for fold in range(0,numFold):
        trainData, testData, trainLabel, testLabel = foldretrieve(fold, foldsData, foldsLabel)
        print(f'Fold Number {fold}')
        network = ParallelConvolution(size=network_size).to(device, dtype=torch.float)
        optimizer = torch.optim.Adam(network.parameters())
        lossHist = []
        testAccHist = []
        trainAccHist = []
        # Best-so-far metrics; bestEpoch[i] records the epoch of each best
        # (0: acc, 1: sensitivity, 2: specificity, 3: FP count, 4: AUROC).
        bestAcc = 0
        bestSen = 0
        bestSpe = 0
        bestFPC = 1e10
        bestAUR = 0
        bestEpoch = [0, 0, 0, 0, 0]
        for epoch in range(num_epoch):
            trainCorrect = 0
            trainTotal = 0
            testCorrect = 0
            testTotal = 0
            trainOutputs = np.empty((0,1))
            trainLabels = np.empty((0,1))
            trainCM = np.zeros((2, 2))
            testOutputs = np.empty((0,1))
            testLabels = np.empty((0,1))
            testCM = np.zeros((2, 2))
            for i in range(0,len(trainData),bS):
                data = trainData[i:i+bS].to(device,dtype=torch.float)
                label = trainLabel[i:i+bS].to(device,dtype=torch.float)
                # Zero the gradients
                optimizer.zero_grad()
                # NOTE(review): eval() before the forward pass disables
                # dropout for the training update — confirm intended.
                network.eval()
                # Perform forward pass
                outputs = network(data)
                network.train()
                # Compute loss
                loss = loss_function(outputs, label)
                # Perform backward pass
                loss.backward()
                # Perform optimization
                optimizer.step()
                network.eval()
                outputs = outputs.cpu().detach().numpy()
                label = label.cpu().detach().numpy()
                outputs = np.argmax(outputs,axis=1)
                label = np.argmax(label,axis=1)
                trainCM = np.add(trainCM, sklearn.metrics.confusion_matrix(outputs, label, labels=[0, 1]))
                trainCorrect += np.sum(outputs==label)
                trainTotal += len(outputs)
                trainOutputs = np.append(trainOutputs,outputs.reshape((-1,1)),axis=0)
                trainLabels = np.append(trainLabels,label.reshape((-1,1)),axis=0)
            for i in range(0,len(testData),bS):
                data = testData[i:i+bS].to(device,dtype=torch.float)
                label = testLabel[i:i+bS].to(device,dtype=torch.float)
                network.eval()
                # Perform forward pass
                outputs = network(data)
                # Caclulate statistics
                testCorrect += np.sum(np.argmax(outputs.cpu().detach().numpy(),axis=1)==np.argmax(label.cpu().detach().numpy(),axis=1))
                testTotal += len(outputs)
                outputs = outputs.cpu().detach().numpy()
                label = label.cpu().detach().numpy()
                outputs = np.argmax(outputs,axis=1)
                label = np.argmax(label,axis=1)
                testCM = np.add(testCM, sklearn.metrics.confusion_matrix(outputs, label, labels=[0, 1]))
                # NOTE(review): testCorrect/testTotal were already incremented
                # above for this batch, so the raw counts are doubled; the
                # accuracy ratio testCorrect/testTotal is unaffected.
                testCorrect += np.sum(outputs==label)
                testTotal += len(outputs)
                testOutputs = np.append(testOutputs,outputs.reshape((-1,1)),axis=0)
                testLabels = np.append(testLabels,label.reshape((-1,1)),axis=0)
            # Checkpoint whenever this epoch beats the best test accuracy so
            # far (same condition as the bestAcc update further down).
            if testCorrect/testTotal > bestAcc:
                torch.save(network.state_dict(),'CHBMIT_P{}F{}.pt'.format(patient,fold))
            trainF1 = sklearn.metrics.f1_score(trainLabels,trainOutputs)
            trainAccuracy = trainCorrect/trainTotal
            trainTN, trainFP, trainFN, trainTP = trainCM.ravel()
            trainSensitivity = trainTP / (trainTP+trainFN)
            trainSpecitivity = trainTN / (trainTN+trainFP)
            try:
                trainAUROC = sklearn.metrics.roc_auc_score(trainLabels, trainOutputs)
            except:
                trainAUROC = 0.
            testF1 = sklearn.metrics.f1_score(testLabels,testOutputs)
            testAccuracy = testCorrect/testTotal
            testTN, testFP, testFN, testTP = testCM.ravel()
            testSensitivity = testTP / (testTP+testFN)
            testSpecitivity = testTN / (testTN+testFP)
            try:
                testAUROC = sklearn.metrics.roc_auc_score(testLabels, testOutputs)
            except:
                testAUROC = 0.
            if testAccuracy > bestAcc:
                bestAcc = testAccuracy
                bestEpoch[0] = epoch
            if testSensitivity > bestSen:
                bestSen = testSensitivity
                bestEpoch[1] = epoch
            if testSpecitivity > bestSpe:
                bestSpe = testSpecitivity
                bestEpoch[2] = epoch
            if testFP < bestFPC:
                bestFPC = testFP
                bestEpoch[3] = epoch
            if testAUROC > bestAUR:
                bestAUR = testAUROC
                bestEpoch[4] = epoch
            print('Epoch:', epoch, 'Train Loss:', loss.item())
            print('Train Accuracy:', trainAccuracy, 'Train Sensitivity:', trainSensitivity, 'Train Specitivity:', trainSpecitivity, 'Train FP Count:', trainFP, 'Train AUROC:', trainAUROC)
            print('Test Accuracy:', testAccuracy, 'Test Sensitivity:', testSensitivity, 'Test Specitivity:', testSpecitivity, 'Test FP Count:', testFP, 'Test AUROC:', testAUROC,)
            print('\n')
            # NOTE(review): appends the loss *tensor* (keeps its autograd
            # graph alive); loss.item() would be lighter.
            lossHist.append(loss)
            trainAccHist.append(trainCorrect/trainTotal)
            testAccHist.append(testCorrect/testTotal)
            # NOTE: DataFrame.append is deprecated (removed in pandas 2.0);
            # pd.concat is the modern replacement.
            df = df.append({'Patient': patient, 'Fold': fold, 'Epoch': epoch, 'Loss': loss.cpu().item(), 'Train Accuracy': trainAccuracy, 'Test Accuracy': testAccuracy}, ignore_index=True)
df.to_csv('CHBMIT.csv', index=False) | 8,597 | 41.776119 | 188 | py |
Memristive-Seizure-Detection-and-Prediction-by-Parallel-Convolutional-Neural-Networks | Memristive-Seizure-Detection-and-Prediction-by-Parallel-Convolutional-Neural-Networks-master/network_training/Transfer_CHBMIT_SWEC_ETHZ.py | import os
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, Dataset
from torchvision import datasets, transforms
from torchsummary import summary
import torch.nn.functional as F
from sklearn.model_selection import KFold
from sklearn import preprocessing
import matplotlib.pyplot as plt
import math
from sklearn.decomposition import PCA
import sklearn
import brevitas.nn as qnn
import pandas as pd
from itertools import permutations
from Network import ParallelConvolution
from utils import foldretrieve
# -----------------------------------------------------------
# Experiment configuration.
rootPath = '/scratch/jcu/cl/TBioCAS/processed_data/'  # root of preprocessed data; feature .npy files live in the 'features' subdir
device = 'cuda' if torch.cuda.is_available() else 'cpu'
seed = 8  # global RNG seed for reproducibility
# Patient IDs per dataset; pretrained and evaluation patients are paired by list
# position (patient_idx) in the loop below -- TODO confirm the pairing is intentional.
patients = {'CHBMIT': ['01', '02', '03', '05', '08'], 'SWEC_ETHZ': ['01', '02', '03', '05', '06']}
datasets = ['CHBMIT', 'SWEC_ETHZ']
dataType = 'features'  # which preprocessed representation to load
numFold = 5  # number of cross-validation folds
bS = 32  # mini-batch size
num_epoch_retrain = 10  # fine-tuning epochs per fold
network_size = 32  # 'size' argument passed to ParallelConvolution (see Network.py)
# -----------------------------------------------------------
print('Using {} device'.format(device),'\n')
### Data Loading (preictal 0, interictal 1)
# Seed every RNG source and force deterministic kernels so runs are repeatable.
torch.manual_seed(seed)
np.random.seed(seed)
torch.backends.cudnn.benchmark = False
torch.use_deterministic_algorithms(True)
torch.backends.cudnn.deterministic = True
os.environ['PYTHONHASHSEED'] = str(seed)
# Evaluate transfer learning in both directions: pretrain on one dataset,
# then fine-tune only the last two layers and evaluate on the other dataset.
dataset_permutations = permutations(datasets)
for dataset_permutation in dataset_permutations:
    print('--------------------------------')
    print(dataset_permutation)
    pretrained_dataset = dataset_permutation[0]
    evaluation_dataset = dataset_permutation[1]
    df = pd.DataFrame(columns=['Pretrained Patient', 'Evaluation Patient', 'Fold', 'Epoch', 'Loss', 'Train Accuracy', 'Test Accuracy'])
    for patient_idx, patient in enumerate(patients[evaluation_dataset]):
        # Load synthetic preictal + real preictal (label 0) and interictal
        # (label 1) feature windows for this patient and stack them.
        allData = np.empty((0, 176))
        rawLabel = np.empty((0,), dtype=int)
        newData = np.load(rootPath+dataType+'/'+evaluation_dataset+'_'+'patient'+'_'+patient+'_'+'synthetic_preictal.npy')
        allData = np.append(allData,newData,axis=0)
        rawLabel = np.append(rawLabel,np.zeros((newData.shape[0],),dtype=int),axis=0)
        newData = np.load(rootPath+dataType+'/'+evaluation_dataset+'_'+'patient'+'_'+patient+'_'+'preictal.npy')
        allData = np.append(allData,newData,axis=0)
        rawLabel = np.append(rawLabel,np.zeros((newData.shape[0],),dtype=int),axis=0)
        newData = np.load(rootPath+dataType+'/'+evaluation_dataset+'_'+'patient'+'_'+patient+'_'+'interictal.npy')
        allData = np.append(allData,newData,axis=0)
        rawLabel = np.append(rawLabel,np.ones((newData.shape[0],),dtype=int),axis=0)
        # One-hot encode labels: column 0 = preictal, column 1 = interictal.
        allLabel = np.zeros((rawLabel.size, rawLabel.max()+1))
        allLabel[np.arange(rawLabel.size),rawLabel] = 1
        # Reduce the 176 raw features to 64 principal components.
        pca = PCA(n_components=64)
        allData = pca.fit_transform(allData)
        # Uniformly quantize the features to 6 bits over the global value range
        # (simulating limited input precision of the target hardware).
        inputbits=6
        inputstep=(np.amax(allData) - np.amin(allData))/(2**inputbits-1)
        allData = np.round(allData/inputstep)
        allData *= inputstep
        # Shuffle, truncate to a multiple of numFold, and split into folds.
        randInd = np.arange(0,len(allData))
        np.random.shuffle(randInd)
        allData = allData[randInd]
        allLabel = allLabel[randInd]
        allData = allData[:math.floor(allData.shape[0]/5)*5]
        allLabel = allLabel[:math.floor(allLabel.shape[0]/5)*5]
        foldsData = np.split(allData,numFold)
        foldsLabel = np.split(allLabel,numFold)
        loss_function = nn.BCEWithLogitsLoss()
        print('--------------------------------')
        print(f'Patient {patient}')
        for fold in range(0,numFold):
            trainData, testData, trainLabel, testLabel = foldretrieve(fold, foldsData, foldsLabel)
            print(f'Fold Number {fold}')
            # Load the pretrained model
            network = ParallelConvolution(size=network_size).to(device)
            network.load_state_dict(torch.load('{}_P{}F{}.pt'.format(pretrained_dataset,patients[pretrained_dataset][patient_idx],fold)))
            # Freeze the weights of all layers
            for name, param in network.named_parameters():
                param.requires_grad = False
            # Unfreeze the weights of the last two (linear) layers
            network.linear.weight.requires_grad = True
            network.linear.bias.requires_grad = True
            network.classification.weight.requires_grad = True
            network.classification.bias.requires_grad = True
            optimizer = torch.optim.Adam(network.parameters())
            lossHist = []
            testAccHist = []
            trainAccHist = []
            bestAcc = 0
            bestSen = 0
            bestSpe = 0
            bestFPC = 1e10
            bestAUR = 0
            # Epochs of best [accuracy, sensitivity, specificity, FP count, AUROC].
            bestEpoch = [0] * 5
            for epoch in range(num_epoch_retrain):
                if epoch == 0:
                    # Baseline evaluation of the pretrained network before any
                    # fine-tuning; logged below with Epoch == -1.
                    with torch.no_grad():
                        testCorrect = 0
                        testTotal = 0
                        testOutputs = np.empty((0,1))
                        testLabels = np.empty((0,1))
                        testCM = np.zeros((2, 2))
                        for i in range(0,len(testData),bS):
                            data = testData[i:i+bS].to(device,dtype=torch.float)
                            label = testLabel[i:i+bS].to(device,dtype=torch.float)
                            network.eval()
                            # Perform forward pass
                            outputs = network(data)
                            # Caclulate statistics
                            # NOTE(review): correct/total are accumulated twice per batch
                            # (here and again below), so the accuracy ratio is unchanged
                            # but both counters are doubled.
                            testCorrect += np.sum(np.argmax(outputs.cpu().detach().numpy(),axis=1)==np.argmax(label.cpu().detach().numpy(),axis=1))
                            testTotal += len(outputs)
                            outputs = outputs.cpu().detach().numpy()
                            label = label.cpu().detach().numpy()
                            outputs = np.argmax(outputs,axis=1)
                            label = np.argmax(label,axis=1)
                            # NOTE(review): arguments are (predictions, labels), i.e. swapped
                            # relative to sklearn's (y_true, y_pred) signature, so the FP and
                            # FN counts extracted from ravel() are interchanged -- confirm.
                            testCM = np.add(testCM, sklearn.metrics.confusion_matrix(outputs, label, labels=[0, 1]))
                            testCorrect += np.sum(outputs==label)
                            testTotal += len(outputs)
                            testOutputs = np.append(testOutputs,outputs.reshape((-1,1)),axis=0)
                            testLabels = np.append(testLabels,label.reshape((-1,1)),axis=0)
                        testF1 = sklearn.metrics.f1_score(testLabels,testOutputs)
                        testAccuracy = testCorrect/testTotal
                        testTN, testFP, testFN, testTP = testCM.ravel()
                        testSensitivity = testTP / (testTP+testFN)
                        testSpecitivity = testTN / (testTN+testFP)
                        try:
                            testAUROC = sklearn.metrics.roc_auc_score(testLabels, testOutputs)
                        except:
                            # roc_auc_score raises when only one class is present in the fold.
                            testAUROC = 0.
                        # NOTE(review): DataFrame.append is deprecated (removed in pandas 2.0);
                        # collecting rows and calling pd.concat once would be the modern form.
                        df = df.append({'Pretrained Patient': patients[pretrained_dataset][patient_idx], 'Evaluation Patient': patient, 'Fold': fold, 'Epoch': -1, 'Loss': -1, 'Train Accuracy': -1, 'Test Accuracy': testAccuracy * 100.}, ignore_index=True)
                # Reset per-epoch statistics.
                trainCorrect = 0
                trainTotal = 0
                testCorrect = 0
                testTotal = 0
                trainOutputs = np.empty((0,1))
                trainLabels = np.empty((0,1))
                trainCM = np.zeros((2, 2))
                testOutputs = np.empty((0,1))
                testLabels = np.empty((0,1))
                testCM = np.zeros((2, 2))
                # Fine-tuning pass over the training folds.
                for i in range(0,len(trainData),bS):
                    data = trainData[i:i+bS].to(device,dtype=torch.float)
                    label = trainLabel[i:i+bS].to(device,dtype=torch.float)
                    # Zero the gradients
                    optimizer.zero_grad()
                    # NOTE(review): the forward pass used for the loss runs in eval mode,
                    # and train() is only switched on afterwards -- dropout/batch-norm
                    # (if present) are inactive during training; likely unintended.
                    network.eval()
                    # Perform forward pass
                    outputs = network(data)
                    network.train()
                    # Compute loss
                    loss = loss_function(outputs, label)
                    # Perform backward pass
                    loss.backward()
                    # Perform optimization
                    optimizer.step()
                    network.eval()
                    outputs = outputs.cpu().detach().numpy()
                    label = label.cpu().detach().numpy()
                    outputs = np.argmax(outputs,axis=1)
                    label = np.argmax(label,axis=1)
                    trainCM = np.add(trainCM, sklearn.metrics.confusion_matrix(outputs, label, labels=[0, 1]))
                    trainCorrect += np.sum(outputs==label)
                    trainTotal += len(outputs)
                    trainOutputs = np.append(trainOutputs,outputs.reshape((-1,1)),axis=0)
                    trainLabels = np.append(trainLabels,label.reshape((-1,1)),axis=0)
                # Per-epoch evaluation on the held-out fold.
                # NOTE(review): no torch.no_grad() here, so autograd graphs are built needlessly.
                for i in range(0,len(testData),bS):
                    data = testData[i:i+bS].to(device,dtype=torch.float)
                    label = testLabel[i:i+bS].to(device,dtype=torch.float)
                    network.eval()
                    # Perform forward pass
                    outputs = network(data)
                    # Caclulate statistics
                    testCorrect += np.sum(np.argmax(outputs.cpu().detach().numpy(),axis=1)==np.argmax(label.cpu().detach().numpy(),axis=1))
                    testTotal += len(outputs)
                    outputs = outputs.cpu().detach().numpy()
                    label = label.cpu().detach().numpy()
                    outputs = np.argmax(outputs,axis=1)
                    label = np.argmax(label,axis=1)
                    testCM = np.add(testCM, sklearn.metrics.confusion_matrix(outputs, label, labels=[0, 1]))
                    testCorrect += np.sum(outputs==label)
                    testTotal += len(outputs)
                    testOutputs = np.append(testOutputs,outputs.reshape((-1,1)),axis=0)
                    testLabels = np.append(testLabels,label.reshape((-1,1)),axis=0)
                # Checkpoint the model whenever test accuracy improves on the best so far.
                if testCorrect/testTotal > bestAcc:
                    torch.save(network.state_dict(),'{}_{}_P{}F{}.pt'.format(pretrained_dataset, evaluation_dataset, patient, fold))
                trainF1 = sklearn.metrics.f1_score(trainLabels,trainOutputs)
                trainAccuracy = trainCorrect/trainTotal
                trainTN, trainFP, trainFN, trainTP = trainCM.ravel()
                trainSensitivity = trainTP / (trainTP+trainFN)
                trainSpecitivity = trainTN / (trainTN+trainFP)
                try:
                    trainAUROC = sklearn.metrics.roc_auc_score(trainLabels, trainOutputs)
                except:
                    trainAUROC = 0.
                testF1 = sklearn.metrics.f1_score(testLabels,testOutputs)
                testAccuracy = testCorrect/testTotal
                testTN, testFP, testFN, testTP = testCM.ravel()
                testSensitivity = testTP / (testTP+testFN)
                testSpecitivity = testTN / (testTN+testFP)
                try:
                    testAUROC = sklearn.metrics.roc_auc_score(testLabels, testOutputs)
                except:
                    testAUROC = 0.
                # Track the best value (and epoch) of each metric independently.
                if testAccuracy > bestAcc:
                    bestAcc = testAccuracy
                    bestEpoch[0] = epoch
                if testSensitivity > bestSen:
                    bestSen = testSensitivity
                    bestEpoch[1] = epoch
                if testSpecitivity > bestSpe:
                    bestSpe = testSpecitivity
                    bestEpoch[2] = epoch
                if testFP < bestFPC:
                    bestFPC = testFP
                    bestEpoch[3] = epoch
                if testAUROC > bestAUR:
                    bestAUR = testAUROC
                    bestEpoch[4] = epoch
                print('Epoch:', epoch, 'Train Loss:', loss.item())
                print('Train Accuracy:', trainAccuracy, 'Train Sensitivity:', trainSensitivity, 'Train Specitivity:', trainSpecitivity, 'Train FP Count:', trainFP, 'Train AUROC:', trainAUROC)
                print('Test Accuracy:', testAccuracy, 'Test Sensitivity:', testSensitivity, 'Test Specitivity:', testSpecitivity, 'Test FP Count:', testFP, 'Test AUROC:', testAUROC,)
                print('\n')
                # NOTE(review): this stores the loss *tensor* (keeping its graph alive);
                # loss.item() would be cheaper if only the value is needed.
                lossHist.append(loss)
                trainAccHist.append(trainCorrect/trainTotal)
                testAccHist.append(testCorrect/testTotal)
                df = df.append({'Pretrained Patient': patients[pretrained_dataset][patient_idx], 'Evaluation Patient': patient, 'Fold': fold, 'Epoch': epoch, 'Loss': loss.cpu().item(), 'Train Accuracy': trainAccuracy * 100., 'Test Accuracy': testAccuracy * 100.}, ignore_index=True)
df.to_csv('Transfer_%s_%s.csv' % (pretrained_dataset, evaluation_dataset), index=False) | 12,878 | 48.918605 | 282 | py |
3DSC | 3DSC-main/superconductors_3D/machine_learning/Apply_ML_Models_v1_3.py | import warnings
# warnings.filterwarnings("ignore")
from sklearn.exceptions import ConvergenceWarning
warnings.simplefilter("ignore", category=ConvergenceWarning)
import os
warnings.simplefilter("ignore", category=FutureWarning)
import sys
import numpy as np
from matplotlib import pyplot as plt
import seaborn as sns
import tensorflow as tf
# import gpflow
import pandas as pd
# import torch
# import torch.nn as nn
import sklearn
from sklearn.preprocessing import StandardScaler, FunctionTransformer
from sklearn.model_selection import RepeatedKFold, ShuffleSplit, LeaveOneGroupOut, GroupKFold, GroupShuffleSplit
import sklearn.linear_model
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.neural_network import MLPRegressor
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, ExpSineSquared, WhiteKernel, ConstantKernel
from sklearn.neighbors import KNeighborsRegressor
from xgboost import XGBRegressor
from sklearn.decomposition import PCA
import random
import yaml
import datetime
import argparse
import superconductors_3D.machine_learning.Custom_Machine_Learning_v1_3 as ML
# from superconductors_3D.machine_learning.RGM_own import RGM_sklearn
import csv
from collections import namedtuple
import json
from copy import deepcopy
import time
# import mlflow
from superconductors_3D.machine_learning.own_libraries.analysis.Experiments.Run import MLRun, get_hparams
# from superconductors_3D.machine_learning.own_libraries.models.GNN.MEGNet_tf import MEGNet_tf, read_json_file
# from contextlib import redirect_stdout
# from superconductors_3D.machine_learning.own_libraries.utils import Refactoring
# from superconductors_3D.machine_learning.own_libraries.models.GPflow_GP import GPflow_GP
# from superconductors_3D.machine_learning.own_libraries.own_functions import movecol
from superconductors_3D.utils.projectpaths import projectpath
# from superconductors_3D.machine_learning.own_libraries.models.NN import MLP_Lightning
from superconductors_3D.machine_learning.own_libraries.utils.Scalers import restricted_arcsinh, restricted_sinh, restricted_exp, restricted_log
def none_or_str(value):
    """Argparse type helper: map the literal string 'None' to None, pass anything else through."""
    return None if value == 'None' else value
def parse_arguments(args_from_fn, use_models, experiment, output_note, outdirname, calcdir, save_models, CV, n_folds, n_repeats, CV_keep_groups, n_reps, train_frac, domain_colname, is_optimizing_NN, dataset, hparams_file, n_jobs, sample_weights, metrics_sample_weights, use_data_frac, add_params):
    """Parse arguments from command line and if not given from command line insert argument from function.

    Precedence (lowest to highest):
      1. the keyword values passed to this function,
      2. command-line flags (only applied when this module runs as the script
         entry point, because of the ``__name__`` check below),
      3. the `args_from_fn` dict, which overrides everything.

    Returns an argparse.Namespace with the merged arguments.
    """
    parser = argparse.ArgumentParser(description='Apply different ML models to some data and save output.')
    parser.add_argument('--dataset', '-d', type=str, help='The input dataset.')
    parser.add_argument('--calcdir', '-c', type=str, nargs='?',
                        help='Change directory to this path.')
    parser.add_argument('--outdir', '-u', dest='outdirname', type=str, nargs='?',
                        help='Directory of the output.')
    # parser.add_argument('--trainfracs', '-t', dest='all_trainfracs', type=float, nargs='*', help='List of train fractions.')
    parser.add_argument('--experiment', '-e', type=str, help='Identifier for a specific experiment.')
    parser.add_argument('--note', dest='output_note', type=str, help='Output text to add to main output file.')
    parser.add_argument('--n_reps', dest='n_reps', type=str, help='Number of repetitions of train test split if CV is Random.')
    parser.add_argument('--train-frac', dest='train_frac', type=float, help='Train fraction when random CV splitting is used.')
    parser.add_argument('--use-models', '-a', dest='use_models', type=str, nargs='*', help='Names of models that shall be trained.')
    parser.add_argument('--cv', dest='CV', type=str, help='Cross Validation mode.')
    parser.add_argument('--domain-colname', dest='domain_colname', type=none_or_str, help='Name of column in dataset that indicates groups.')
    parser.add_argument('--n_folds', dest='n_folds', type=str, help='Number of folds if CV is KFold.')
    parser.add_argument('--n_repeats', dest='n_repeats', type=str, help='Number of folds if CV is KFold.')
    parser.add_argument('--save-models', dest='save_models', type=bool, help='If models should be saved.')
    parser.add_argument('--optimizing', '-o', dest='is_optimizing_NN', help='If optimizing Neural Network.')
    parser.add_argument('--hparams-file', dest='hparams_file', help='Path to hyperparameter file.')
    parser.add_argument('--n-jobs', dest='n_jobs', help='Number of jobs to run in parallel when possible.')
    parser.add_argument('--sample-weights', dest='sample_weights', type=none_or_str, help='String indicating the weights column for each data point for training the models.')
    parser.add_argument('--metrics-sample-weights', dest='metrics_sample_weights', type=none_or_str, help='String indicating the weights column for each data point only for the metrics.')
    parser.add_argument('--CV-keep-groups', dest='CV_keep_groups', help='Group column name for a KFold with data points of each group in either test or train.')
    parser.add_argument('--use-data-frac', dest='use_data_frac', help='For debugging, use only this fraction of data.')
    parser.add_argument('--add-params', dest='add_params', type=json.loads, help='Add these parameters to the experiment parameters for easier recognition of results.')
    args = argparse.Namespace()
    # Add manually defined arguments to args.  (Lowest priority; note `dataset`
    # is assigned twice below -- the second assignment is redundant.)
    args.dataset = dataset
    args.experiment = experiment
    args.output_note = output_note
    args.outdirname = outdirname
    args.calcdir = calcdir
    args.n_reps = n_reps
    args.train_frac = train_frac
    args.is_optimizing_NN = is_optimizing_NN
    args.dataset = dataset
    args.use_models = use_models
    args.CV = CV
    args.domain_colname = domain_colname
    args.n_folds = n_folds
    args.n_repeats = n_repeats
    args.save_models = save_models
    args.hparams_file = hparams_file
    args.n_jobs = n_jobs
    args.sample_weights = sample_weights
    args.metrics_sample_weights = metrics_sample_weights
    args.CV_keep_groups = CV_keep_groups
    args.use_data_frac = use_data_frac
    args.add_params = add_params
    # args given as cmd line arguments have the second highest priority.
    # The __name__ check means CLI flags are only honored when this file itself
    # is the entry point, not when it is imported as a library.
    if __name__ == '__main__':
        cmd_args = parser.parse_args()
        if len(sys.argv) > 1:
            print('Parsed arguments:', cmd_args)
        args_dict = vars(args)
        # Only overwrite with flags the user actually supplied (non-None values).
        cmd_args_dict = {key: val for key, val in vars(cmd_args).items() if not val is None}
        args_dict.update(cmd_args_dict)
        args = argparse.Namespace(**args_dict)
    # args given as function arguments of main() have the highest priority.
    if args_from_fn:
        print(f'Updating args with arguments from function call: {args_from_fn}')
        args_dict = vars(args)
        args_dict.update(args_from_fn)
        args = argparse.Namespace(**args_dict)
        print(f'New arguments: {args}')
    return(args)
def print_title(string):
    """Print *string* framed by '=' rules, as a section banner on stdout."""
    banner = "\n============================\n{}\n============================".format(string)
    print(banner)
    return()
def is_round(num, prec=6):
"""Checks if a given number is a round value with given precision."""
is_round = round(num) == round(num, prec)
return(is_round)
def make_output_directory(outdirname, label):
    """Create and return a fresh numbered output directory inside `outdirname`.

    The directory is named ``results_<num>_<label>`` where ``<num>`` is the
    smallest integer such that no existing entry is named ``results_<num>`` or
    starts with ``results_<num>_``.

    Fix: the previous implementation tested ``'results_<num>' in
    '\\t'.join(dir_list)``, a substring match, so an existing ``results_12_x``
    wrongly blocked ``results_1`` (and ``results_2``), leaving gaps in the
    numbering.  The check is now an exact name/prefix match per entry.

    Raises:
        FileExistsError: if the chosen directory appears between the scan and
            the mkdir (e.g. a concurrent run); os.mkdir detects the race.
    """
    os.makedirs(outdirname, exist_ok=True)
    dir_list = os.listdir(outdirname)

    def _taken(n):
        # An index n is occupied iff some entry is exactly 'results_<n>' or
        # begins with 'results_<n>_' (the label suffix).
        prefix = 'results_{}'.format(n)
        return any(d == prefix or d.startswith(prefix + '_') for d in dir_list)

    num = 0
    while _taken(num):
        num += 1
    outdir = os.path.join(outdirname, f'results_{num}_{label}')
    os.mkdir(outdir)
    return(outdir)
def get_train_test_data(df_data, CV, n_folds, domain_colname, trainfrac=None, random_n_reps=1, n_repeats=1, group=None):
    """Gets train and test data doing the specified cross validation.

    Rather than returning index arrays, this adds one column per CV split to
    `df_data` (named via ``ML.All_Data.All_Data.name_CV_col(i)``) whose entries
    are the strings 'train' or 'test', and returns the augmented DataFrame.

    CV modes:
      - 'KFold':  RepeatedKFold, or GroupKFold when `group` is given
                  (then n_repeats must be 1).
      - 'Random': ShuffleSplit / GroupShuffleSplit with train size `trainfrac`.
      - 'LOGO':   LeaveOneGroupOut over the `domain_colname` column.
    """
    data_array = df_data.to_numpy()
    if CV == 'KFold':
        # Convert train fraction and number of total repetititions to KFold input parameters.
        if group is None:
            split = RepeatedKFold(n_splits=n_folds, n_repeats=n_repeats).split(data_array)
        else:
            assert n_repeats == 1, 'This is not yet implemented.'
            groups = df_data[group].to_numpy()
            split = GroupKFold(n_splits=n_folds).split(data_array, groups=groups)
    elif CV == 'Random':
        assert trainfrac not in (0,1), "ShuffleSplit won't understand that this is a fraction."
        assert trainfrac != None
        if group is None:
            split = ShuffleSplit(train_size=trainfrac, n_splits=random_n_reps).split(data_array)
        else:
            groups = df_data[group].to_numpy()
            split = GroupShuffleSplit(train_size=trainfrac, n_splits=random_n_reps).split(data_array, groups=groups)
    elif CV == 'LOGO':
        # Work around: LeaveOneGroupOut.split() doesn't seem to work without specified y, probably a bug.
        y = np.zeros(len(df_data))
        domains = df_data[domain_colname]
        split = LeaveOneGroupOut().split(data_array, y, domains)
    # NOTE(review): an unrecognized `CV` value falls through silently and then
    # raises NameError on `split` in the loop below.
    # Concatenate train and test indices of each repetition.
    # train_indices, test_indices = [], []
    # for train_index, test_index in split:
    #     train_indices.append(list(train_index))
    #     test_indices.append(list(test_index))
    for i, (train_indices, test_indices) in enumerate(split):
        n_samples = len(df_data)
        assert n_samples == len(train_indices) + len(test_indices)
        empty = ''
        # Positional marker series: 'train'/'test' per row of the split.
        test_or_train = pd.Series(np.full(n_samples, empty))
        test_or_train[train_indices] = 'train'
        test_or_train[test_indices] = 'test'
        # So that I can be sure that in case the indices of df and series don't align it still just adds everything in the right order.
        test_or_train = list(test_or_train)
        colname = ML.All_Data.All_Data.name_CV_col(i)
        df_data[colname] = test_or_train
        assert all([df_data[colname].iloc[idx] == 'train' for idx in train_indices])
        assert all([df_data[colname].iloc[idx] == 'test' for idx in test_indices])
        assert not (df_data[colname] == empty).any(), 'Some of the columns are neither test nor train.'
    return(df_data)
# class Sin(nn.Module):
# """Sin activation function."""
# def __init__(self):
# super().__init__()
#
# def forward(self, input):
# result = torch.Sin(input)
# return(result)
# class Multiple_Acts(nn.Module):
# """Layer with multiple different activation functions.
# """
# def __init__(self, acts):
# """`acts` must have as many activation functions (torch functions) as the length of the input will be.
# """
# super().__init__()
# self.acts = acts
# def forward(self, input):
# n_input = len(input)
# assert n_input == len(self.acts)
# result = torch.empty(n_input)
# for i in range(n_input):
# result[i] = self.acts[i](input[i])
# result = torch.Sin(input)
# return(result)
#
# def get_activation_fn(activation: str):
# """Returns torch activation function based on string activation.
# """
# if activation == 'relu':
# activation_fn = nn.ReLU()
# elif activation == 'logistic':
# activation_fn = nn.Sigmoid()
# elif activation == 'tanh':
# activation_fn = nn.Tanh()
# elif activation == 'sin':
# activation_fn = Sin()
# else:
# raise ValueError(f'Activation function {activation} not recognized. Activation functions are lowercase always.')
# return(activation_fn)
#
#
# def get_sequential_NN(input_layer_size: int, hidden_layer_sizes: list, activation: str, output_layers: str):
# """Returns a sequential (Feed Forward) NN. `last_linear` means if the last layer should be linear or with activation function."""
# activation_fn = get_activation_fn(activation)
# out_act_fn = output_layers
# layers = []
# num_layers = len(hidden_layer_sizes)
# for i in range(num_layers):
# if i == 0:
# in_size = input_layer_size
# else:
# in_size = hidden_layer_sizes[i-1]
# out_size = hidden_layer_sizes[i]
# layers.append(nn.Linear(in_size, out_size))
# last_layer = i == num_layers - 1
# if not last_layer:
# layers.append(activation_fn)
# elif out_act_fn != None:
# out_activation_fn = get_activation_fn(out_act_fn)
# layers.append(out_activation_fn)
#
# layers = tuple(layers)
# network = nn.Sequential(*layers)
# return(network)
#
#
# def get_featurizer(input_layer_size, hparams, mode):
# """Returns the first part of the RGM, the featurizer or representation NN.
# """
# hidden_layer_sizes = ML.net_pattern(
# hparams['nn_layers'],
# hparams['nn_base_dim'],
# hparams['nn_end_dim']
# )
# activation = hparams['nn_act']
# # last_linear = False # if last layer linear or with activation fn
# output_layers = hparams['nn_act']
# if mode == 'FeedForward':
# featurizer = get_sequential_NN(input_layer_size, hidden_layer_sizes, activation, output_layers)
# else:
# raise ValueError('mode of Featurizer not recognized.')
# return(featurizer)
#
#
# def get_classifier(output_layer_size, hparams, output_layers=None):
# """Returns a torch sequential NN with specific layers and output_layer_size.
# """
# if hparams['RGM_classifier_layers'] < 1:
# raise ValueError('Invalid "RGM_classifier_layers": {hparams["RGM_classifier_layers"]}')
#
# num_hidden_layers = hparams['RGM_classifier_layers'] - 1
# activation = hparams['nn_act']
# classifier_layers = [hparams['nn_end_dim'] for _ in range(num_hidden_layers)]
# classifier_layers.append(output_layer_size)
#
# classifier = get_sequential_NN(hparams['nn_end_dim'], classifier_layers, activation, output_layers)
# return(classifier)
def get_validation_columns(df_data, args, domain_colname):
    """Adds validation columns to a df based on the current CV columns, so that only the train rows are split again in test and train. The old CV columns will be renamed to 'test_`CV_col`' and the new validation columns will be named from 0 to nfolds*nfolds-1.

    For each outer split column ``CV_i`` (renamed to ``test_CV_i``), the rows
    marked 'train' are re-split with the same CV settings from `args`, yielding
    inner validation columns.  Rows that were in the outer test set get NaN in
    the corresponding validation columns (checked by the assert at the end).
    """
    all_CV_cols = [col for col in df_data.columns if col.startswith('CV_')]
    # Keep old CV columns around for sanity checks.
    df_data = df_data.rename(columns={cv: 'test_'+cv for cv in all_CV_cols})
    counter = 0
    for cv in all_CV_cols:
        old_cv = 'test_' + cv
        # Restrict to the rows that were 'train' in this outer split.
        df = df_data.loc[df_data[old_cv] == 'train']
        # Use same type of CV for validation columns as for test columns.
        df = get_train_test_data(
            df_data=df,
            CV=args.CV,
            n_folds=args.n_folds,
            n_repeats=args.n_repeats,
            domain_colname=domain_colname,
            trainfrac=args.train_frac,
            random_n_reps=args.n_reps,
            group=args.CV_keep_groups
        )
        cv_df = df[all_CV_cols]
        # Name validation columns from 0 to nfold*nfold-1.
        n_cvs = len(all_CV_cols)
        rename_cols = {f'CV_{i}': f'CV_{n_cvs*counter+i}' for i in range(n_cvs)}
        cv_df = cv_df.rename(columns=rename_cols)
        counter +=1
        # join() aligns on the index, so outer-test rows (absent from cv_df) become NaN.
        df_data = df_data.join(cv_df)
        # Sanity check.
        new_cv_cols = list(rename_cols.values())
        assert df_data.loc[df_data[old_cv] == 'test', new_cv_cols].isna().all().all(), 'Some of the old test columns do not have NaN columns in the validation columns!'
    return df_data
def get_all_models(hparams, n_features, n_targets, use_models, n_domains=1, domain_col=None, output_layers=None, outdir=None, scaler=None, args=None):
    """Definitions of models that I regularly use.

    Returns a dict mapping model-name strings to *unfitted* estimator objects,
    instantiating only the entries requested in `use_models`.  Hyperparameters
    come from the `hparams` dict; `n_features` sizes the GP lengthscales.
    NOTE(review): `n_domains`, `domain_col`, `output_layers`, `outdir`, `scaler`
    and `args` are only used by the (currently commented-out) torch/MEGNet
    models below.
    """
    all_models = {}
    ####################
    # 1 NEAREST NEIGHBOR
    ####################
    if '1NN' in use_models:
        Nearest_Neighbors = KNeighborsRegressor(n_neighbors=1)
        all_models['1NN'] = Nearest_Neighbors
    ###################
    # LINEAR REGRESSION
    ###################
    if 'LR' in use_models:
        Linear_Regression = sklearn.linear_model.LinearRegression()
        all_models['LR'] = Linear_Regression
    ################
    # Neural Network
    ################
    # Set some hyperparameter variables for the NN.
    # Layer widths: `nn_layers` layers going from nn_base_dim to nn_end_dim,
    # followed by `RGM_classifier_layers` layers of width nn_end_dim.
    net_dims = ML.net_pattern(
                        hparams['nn_layers'],
                        hparams['nn_base_dim'],
                        hparams['nn_end_dim']
                        )
    net_dims2 = ML.net_pattern(
                        hparams['RGM_classifier_layers'],
                        hparams['nn_end_dim'],
                        hparams['nn_end_dim']
                        )
    net_dims = net_dims + net_dims2
    if 'NNsk' in use_models:
        NNsk = MLPRegressor(
                    hidden_layer_sizes=net_dims,
                    activation=hparams['nn_act'],
                    solver='adam',
                    max_iter=hparams["n_epochs"],
                    early_stopping=True,
                    validation_fraction=0.2,
                    alpha=hparams["nn_l2"],
                    batch_size=hparams['nn_batch_size'],
                    learning_rate_init=hparams["learning_rate"],
                    n_iter_no_change=hparams["nn_patience"]
                    )
        all_models['NNsk'] = NNsk
    # ###############
    # # Lightning MLP
    # ###############
    # if 'NNL' in use_models:
    #     NNL = MLP_Lightning.MLP(
    #                 hidden_layer_sizes=net_dims,
    #                 activation=hparams['nn_act'],
    #                 solver='adam',
    #                 n_epochs=hparams["n_epochs"],
    #                 validation_fraction=0.2,
    #                 alpha=hparams["nn_l2"],
    #                 batch_size=hparams['nn_batch_size'],
    #                 learning_rate=hparams["learning_rate"],
    #                 patience=hparams["nn_patience"]
    #                 )
    #     all_models['NNL'] = NNL
    ###############
    # Random Forest
    ###############
    if 'RF' in use_models:
        n_trees = hparams["RF_n_estimators"]
        Random_Forest = RandomForestRegressor(n_estimators=n_trees)
        all_models['RF'] = Random_Forest
    ############################
    # Gradient Boosting
    ############################
    if 'GB' in use_models:
        n_trees = hparams["GB_n_estimators"]
        Gradient_Boosting = GradientBoostingRegressor(n_estimators=n_trees)
        all_models['GB'] = Gradient_Boosting
    #########
    # XGBoost
    #########
    if 'XGB' in use_models:
        XGBoost = XGBRegressor()
        all_models['XGB'] = XGBoost
    ############################
    # Gaussian Process
    ############################
    # Shared GP settings; batch_size/epochs/learning_rate/n_inducing_points are
    # only consumed by the commented-out gpflow variants below.
    batch_size = 100
    epochs = 1000
    learning_rate = 0.1
    n_inducing_points = 100
    lengthscales = np.full(n_features, hparams['GP_lengthscale'])
    noise = hparams["GP_alpha"]
    if 'GPsk' in use_models:
        kernel = ConstantKernel() * RBF(length_scale=lengthscales)
        Gaussian_Process = GaussianProcessRegressor(kernel=kernel, alpha=noise**2, normalize_y=True)
        all_models['GPsk'] = Gaussian_Process
    # kernel = gpflow.kernels.Constant() * gpflow.kernels.RBF(lengthscales=lengthscales)
    # if 'GPR' in use_models:
    #     model = gpflow.models.GPR
    #     GPR = GPflow_GP(model, kernel, alpha=noise)
    #     all_models['GPR'] = GPR
    #
    # kernel = gpflow.kernels.Constant() * gpflow.kernels.RBF(lengthscales=lengthscales)
    # if 'SGPR' in use_models:
    #     model = gpflow.models.SGPR
    #     SGPR = GPflow_GP(model, kernel, alpha=noise, n_inducing_points=n_inducing_points, standard_scale=True)
    #     all_models['SGPR'] = SGPR
    #
    # kernel = gpflow.kernels.Constant() * gpflow.kernels.RBF(lengthscales=lengthscales)
    # if 'VGP' in use_models:
    #     model = gpflow.models.VGP
    #     VGP = GPflow_GP(model, kernel, alpha=noise, batch_size=batch_size, epochs=epochs, learning_rate=learning_rate)
    #     all_models['VGP'] = VGP
    #
    # kernel = gpflow.kernels.Constant() * gpflow.kernels.RBF(lengthscales=lengthscales)
    # if 'SVGP' in use_models:
    #     model = gpflow.models.SVGP
    #     SVGP = GPflow_GP(model, kernel, alpha=noise, batch_size=batch_size, epochs=epochs, learning_rate=learning_rate, n_inducing_points=n_inducing_points, standard_scale=True, train_noise=True, diff_std_for_sc_and_non_sc=False, natgrad=False, train_noise_scale=False, predict_y=False)
    #     all_models['SVGP'] = SVGP
    #
    # kernel = gpflow.kernels.Constant() * gpflow.kernels.RBF(lengthscales=lengthscales)
    # if 'SVGP_single' in use_models:
    #     model = gpflow.models.SVGP
    #     SVGP = GPflow_GP(model, kernel, alpha=noise, batch_size=batch_size, epochs=epochs, learning_rate=learning_rate, n_inducing_points=n_inducing_points, standard_scale=True, train_noise=True, diff_std_for_sc_and_non_sc=False, natgrad=False, train_noise_scale=False, predict_y=False)
    #     all_models['SVGP_single'] = SVGP
    #
    # kernel = gpflow.kernels.Constant() * gpflow.kernels.RBF(lengthscales=lengthscales)
    # if 'SVGP_sc_non-sc' in use_models:
    #     model = gpflow.models.SVGP
    #     SVGP = GPflow_GP(model, kernel, alpha=noise, batch_size=batch_size, epochs=epochs, learning_rate=learning_rate, n_inducing_points=n_inducing_points, standard_scale=True, train_noise=True, diff_std_for_sc_and_non_sc=True, natgrad=False, train_noise_scale=False, predict_y=False)
    #     all_models['SVGP_sc_non-sc'] = SVGP
    #
    # kernel = gpflow.kernels.Constant() * gpflow.kernels.RBF(lengthscales=lengthscales)
    # if 'SVGP_features' in use_models:
    #     model = gpflow.models.SVGP
    #     SVGP = GPflow_GP(model, kernel, alpha=noise, batch_size=batch_size, epochs=epochs, learning_rate=learning_rate, n_inducing_points=n_inducing_points, standard_scale=True, train_noise=False, diff_std_for_sc_and_non_sc=False, natgrad=False, train_noise_scale=True, predict_y=True)
    #     all_models['SVGP_features'] = SVGP
    #
    # kernel = gpflow.kernels.Constant() * gpflow.kernels.RBF(lengthscales=lengthscales) + gpflow.kernels.WhiteKernel(variance=noise**2)
    # if 'SVGP_white' in use_models:
    #     model = gpflow.models.SVGP
    #     SVGP = GPflow_GP(model, kernel, alpha=0.0011, batch_size=batch_size, epochs=epochs, learning_rate=learning_rate, n_inducing_points=n_inducing_points, standard_scale=True, train_noise=True, diff_std_for_sc_and_non_sc=False, natgrad=False, train_noise_scale=False, predict_y=False)
    #     all_models['SVGP_white'] = SVGP
    #
    # kernel = gpflow.kernels.Constant() * gpflow.kernels.RBF(lengthscales=lengthscales)
    # if 'SVGP_RGM' in use_models:
    #     model = gpflow.models.SVGP
    #     NN_path = 'RGM'
    #     SVGP_RGM = GPflow_GP(model, kernel, alpha=noise, batch_size=batch_size, epochs=epochs, learning_rate=learning_rate, n_inducing_points=n_inducing_points, standard_scale=True, NN_path=NN_path)
    #     all_models['SVGP_RGM'] = SVGP_RGM
    #
    # kernel = gpflow.kernels.Constant() * gpflow.kernels.RBF(lengthscales=lengthscales)
    # if 'SVGP_NN' in use_models:
    #     model = gpflow.models.SVGP
    #     NN_path = 'NN'
    #     SVGP_NN = GPflow_GP(model, kernel, alpha=noise, batch_size=batch_size, epochs=epochs, learning_rate=learning_rate, n_inducing_points=n_inducing_points, standard_scale=True, NN_path=NN_path)
    #     all_models['SVGP_NN'] = SVGP_NN
    #
    #
    # # For all pytorch models:
    # input_layer_size = n_features
    # featurizer = get_featurizer(input_layer_size, hparams, mode='FeedForward')
    # classifier = get_classifier(output_layer_size=n_targets, hparams=hparams, output_layers=output_layers)
    #
    #
    # ############################
    # # Regret Minimization Network
    # ############################
    # if 'RGM' in use_models:
    #     RGM = RGM_sklearn(
    #             solver=hparams['nn_solver'],
    #             max_iter=hparams["n_epochs"],
    #             batch_size=hparams['nn_batch_size'],
    #             learning_rate_init=hparams["learning_rate"],
    #             featurizer=deepcopy(featurizer),
    #             classifier=deepcopy(classifier),
    #             batch_mode=hparams['RGM_batch_mode'],
    #             weight_decay=hparams["nn_l2"],
    #             rgm_e=hparams['RGM_rgm_e'],
    #             erm_e=hparams['RGM_erm_e'],
    #             holdout_e=hparams['RGM_holdout_e'],
    #             detach_classifier=hparams['RGM_detach_classifier'],
    #             oracle=hparams['RGM_oracle'],
    #             ensemble_pred=hparams['RGM_ensemble_pred'],
    #             validation_fraction=0.2,
    #             early_stopping=hparams['RGM_early_stopping'],  # 'False', 'valid', 'extrapol', ...
    #             n_iter_no_change=hparams["nn_patience"],
    #             clip_grad=hparams['NN_clip_grad'],
    #             num_train_domains=hparams['RGM_num_train_domains'],
    #             max_n_classifiers=10,
    #             if_log_metrics=False,
    #             coeff_lr_classifier=hparams['coeff_lr_classifier'],
    #             reduce_lr_factor=hparams['NN_reduce_lr_factor'],
    #             use_tensorboard=False
    #             )
    #     RGM.domain_col = domain_col
    #     all_models['RGM'] = RGM
    #
    #
    # ############################
    # # Regret Minimization Network without domains
    # ############################
    # if 'NN' in use_models:
    #     NN = RGM_sklearn(
    #             solver=hparams['nn_solver'],
    #             max_iter=hparams["n_epochs"],
    #             batch_size=hparams['nn_batch_size'],
    #             learning_rate_init=hparams["learning_rate"],
    #             featurizer=deepcopy(featurizer),
    #             classifier=deepcopy(classifier),
    #             batch_mode='Conserve_ratio',
    #             weight_decay=hparams["nn_l2"],
    #             rgm_e=1,
    #             erm_e=1,
    #             holdout_e=1,
    #             detach_classifier=False,
    #             oracle=False,
    #             ensemble_pred=False,
    #             validation_fraction=0.2,
    #             early_stopping='valid',
    #             n_iter_no_change=hparams["nn_patience"],
    #             clip_grad=hparams['NN_clip_grad'],
    #             num_train_domains=1,
    #             if_log_metrics=False,
    #             reduce_lr_factor=hparams['NN_reduce_lr_factor'],
    #             use_tensorboard=False
    #             )
    #     all_models['NN'] = NN
    #
    # #################
    # # Original MEGNet
    # #################
    # if 'MEGNet' in use_models:
    #     validation_frac = 0.2
    #     # transfer_model = None if args.add_params['prev_model'] is None else projectpath(args.add_params['prev_model'])
    #     MEGNet0 = MEGNet_tf(
    #                 use_learnt_elemental_embedding=False,
    #                 epochs=hparams['n_epochs'],
    #                 lr=args.add_params['lr'],#hparams['learning_rate'],
    #                 batch_size=args.add_params['batch_size'],#hparams['nn_batch_size'],
    #                 patience=hparams['nn_patience'],
    #                 l2_coef=args.add_params['l2'],#None,
    #                 dropout=args.add_params['dropout'],
    #                 r_cutoff=4,
    #                 early_stopping=args.add_params['early_stopping'],
    #                 validation_frac=validation_frac,
    #                 loss='mse',
    #                 domain_col=domain_col,
    #                 optimizer_kwargs={'clipnorm': args.add_params['clipnorm']},
    #                 tensorboard=False,
    #                 nblocks=args.add_params['nblocks'],
    #                 n1=args.add_params['n1'],
    #                 n2=args.add_params['n2'],
    #                 n3=args.add_params['n3'],
    #                 lr_exp_decay=args.add_params['lr_exp_decay'],#0.997,
    #                 prev_model=None,
    #                 act=args.add_params['act'],
    #                 npass=args.add_params['npass'],
    #                 n_feat_bond=args.add_params['n_feat_bond']
    #                 )
    #     all_models['MEGNet'] = MEGNet0
    return(all_models)
def train_with_args(args):
    """Run one complete ML experiment as configured by `args`.

    Steps: read hyperparameters and the dataset, assemble the feature/target
    lists and their scalers, build the cross-validation train/test columns,
    construct the requested models and train them via ML.Machine_Learning.

    Returns:
        tuple: (ml, outdir) -- the trained ML.Machine_Learning instance and
        the path of the output directory created for this run.
    """
    os.chdir(args.calcdir)
    print("JOB STARTED")
    print_title("Experiment: {}".format(args.experiment))
    print("Current working directory: %s"%(args.calcdir))
    # Get hyperparameters.
    args.hparams = get_hparams(args.hparams_file)
    args.random_seed = args.hparams['random_seed']
    if args.random_seed != None:
        print(f"RANDOM SEED FIXED!!!: {args.random_seed}")
    else:
        # NOTE(review): random.randint with a float bound (1e3) is deprecated
        # and raises a TypeError from Python 3.12 on -- should be 1000.
        args.random_seed = random.randint(0, 1e3)
    # Seed all RNGs that are in use (the torch seed is currently disabled).
    np.random.seed(args.random_seed)
    random.seed(args.random_seed)
    # torch.manual_seed(args.random_seed)
    tf.random.set_seed(args.random_seed)
    domain_colname = args.domain_colname
    print(f'Domain: {domain_colname}')
    # Add CV mode to name of output directory
    outdir = make_output_directory(args.outdirname, label=args.experiment)
    print(f'Output directory: {outdir}')
    # TODO
    # assert not 'n_repetitions' in args.add_params
    # wished_CV_cols = []
    # # wished_CV_cols = [f'CV_{i}' for i in range(args.add_params['n_repetitions'])]
    # if args.add_params['features'] == 'graph':
    # usecols = lambda x: (not (x.startswith('SOAP') or x.startswith('PCA') or x.startswith('MAGPIE'))) and ((not x.startswith('CV_')) or (x in wished_CV_cols))
    # else:
    # usecols = lambda x: (not x.startswith('CV_')) or (x in wished_CV_cols)
    # print(f'n_repetitions = {len(wished_CV_cols)}')
    # Get dataset.
    # NOTE(review): header=1 skips the first csv line -- presumably a comment
    # row in the data file; confirm against the dataset format.
    df_data = pd.read_csv(args.dataset, header=1)
    if args.use_data_frac != None:
        warnings.warn(f'Using only a fraction of {args.use_data_frac} of data for debugging purposes.')
        df_data = df_data.sample(frac=args.use_data_frac).reset_index()
    # Add absolute path to graphs.
    if args.add_params['features'] == 'graph':
        df_data['graph'] = df_data['graph'].apply(projectpath)
    # Feature groups available in the dataframe.
    magpie_features = [magpie for magpie in df_data.columns if magpie.startswith('MAGPIE')]
    soap_features = [soap for soap in df_data.columns if soap.startswith('SOAP')]
    pca_soap_features = [f'PCA_SOAP_{i}' for i in range(100)]
    electronic_features = ['band_gap_2', 'energy_2', 'energy_per_atom_2', 'formation_energy_per_atom_2', 'total_magnetization_2', 'num_unique_magnetic_sites_2', 'true_total_magnetization_2']
    lattice_features = ['lata_2', 'latb_2', 'latc_2']
    sym_features = ['cubic', 'hexagonal', 'monoclinic', 'orthorhombic', 'tetragonal', 'triclinic', 'trigonal', 'primitive', 'base-centered', 'body-centered', 'face-centered']
    # Select the feature columns according to the 'features' option string.
    features = []
    if 'MAGPIE' in args.add_params['features']:
        features += magpie_features
    if 'PCA' in args.add_params['features']:
        features += pca_soap_features
    elif 'SOAP' in args.add_params['features']:
        features += soap_features
    if 'electro' in args.add_params['features']:
        features += electronic_features
    if not (args.add_params['features'] == 'MAGPIE' or args.add_params['features'] == 'MAGPIE (all)' or args.add_params['features'] == 'graph'):
        features += ['crystal_temp_2']
        features += sym_features + lattice_features
    if args.add_params['features'] == 'graph':
        features += ['graph']
    targets = ['tc']
    # Define scaling/ transformation of features and targets.
    log_transform = FunctionTransformer(
                        func=restricted_arcsinh,
                        inverse_func=restricted_sinh,
                        check_inverse=False
                        )
    Column_Transformer = {}
    for feat in features:
        Column_Transformer[feat] = StandardScaler() if features != ['graph'] else 'passthrough'
    for target in targets:
        Column_Transformer[target] = log_transform#StandardScaler()
    assert len(Column_Transformer) == len(features + targets)
    output_layers = None # None or any from get_activation_fn()
    # # Train by physical group.
    # group = args.add_params['group']
    # print(f'Training only group {group}')
    # df_data = df_data.loc[df_data['sc_class'] == group]
    # # TODO: remove. For CV experiment: Exclude data points with unclear physical group
    # df_data = df_data[df_data['sc_class_unique_sc']]
    # Get indices for order in which groups shall be displayed.
    if domain_colname != None:
        groupcol = df_data[domain_colname]
        sorted_grouplabels = ML.unique_sorted(groupcol)
        index_group_map = {index: grouplabel for index, grouplabel in enumerate(sorted_grouplabels)}
        print('Group indices:')
        print(index_group_map)
    # Get train and test columns with specified CV algorithm.
    if args.CV != None:
        df_data = get_train_test_data(
                                        df_data=df_data,
                                        CV=args.CV,
                                        n_folds=args.n_folds,
                                        n_repeats=args.n_repeats,
                                        domain_colname=domain_colname,
                                        trainfrac=args.train_frac,
                                        random_n_reps=args.n_reps,
                                        group=args.CV_keep_groups
                                        )
    # # Make some tests which parts of the matching & adaptation algorithm have which effects.
    # # if args.add_params['n_exclude_if_too_many_structures']:
    # #     n_max = 1
    # #     n_structures = df_data.groupby('formula_sc')['formula_sc'].transform(len)
    # #     too_many_structures = n_structures > n_max
    # #     n_exclude_sc = len(df_data[too_many_structures].drop_duplicates("formula_sc"))
    # #     df_data = df_data[~too_many_structures]
    # #     print(f'Excluding {n_exclude_sc} superconductors because they have more than {n_max} structures.')
    # if args.add_params['drop_duplicate_superconductors']:
    #     n_before = len(df_data)
    #     n_sc_before = df_data['formula_sc'].nunique()
    #     df_data = df_data.drop_duplicates('formula_sc')
    #     assert n_sc_before == df_data['formula_sc'].nunique()
    #     print(f'Lost {n_before - len(df_data)} duplicate structures but kept all superconductors.')
    # # if args.add_params['only_totreldiff=0']:
    # #     n_sc_before = df_data['formula_sc'].nunique()
    # #     df_data = df_data[df_data['totreldiff'] == 0]
    # #     n_sc_lost = n_sc_before - df_data['formula_sc'].nunique()
    # #     print(f'Lost {n_sc_lost} superconductors by filtering by totreldiff=0.')
    # # if args.add_params['only_abs_matches']:
    # #     n_sc_before = df_data['formula_sc'].nunique()
    # #     df_data = df_data[df_data['correct_formula_frac']]
    # #     n_sc_lost = n_sc_before - df_data['formula_sc'].nunique()
    # #     print(f'Lost {n_sc_lost} superconductors by keeping only absolute matches.')
    # # if args.add_params['without_lattice_feats']:
    # #     remove_features = sym_features + lattice_features
    # #     features = [feat for feat in features if not feat in remove_features]
    # df_data['weight'] = 1 / df_data.groupby('formula_sc')['formula_sc'].transform(len)
    # Make a train data experiment.
    # if not args.add_params['train_frac'] is None:
    #     fraction = args.add_params['train_frac']
    #     CV_cols = [col for col in df_data.columns if col.startswith('CV_')]
    #     for cv in CV_cols:
    #         is_test = df_data[cv] == 'test'
    #         is_train = df_data[cv] == 'train'
    #         all_train_groups = df_data.loc[is_train, args.CV_keep_groups].unique()
    #         n_train_groups = int(len(all_train_groups) * fraction)
    #         train_groups = np.random.choice(all_train_groups, size=n_train_groups, replace=False)
    #         df_data.loc[is_train, cv] = np.where(df_data.loc[is_train, args.CV_keep_groups].isin(train_groups), 'train', np.nan)
    #         assert all(df_data[is_test] == 'test')
    #         assert all(df_data[is_train].isna() | (df_data[is_train] == 'train'))
    #     mean_n_sc = (df_data.drop_duplicates('formula_sc')[CV_cols] == 'train').sum().mean()
    #     args.add_params['mean_n_train_sc'] = float(mean_n_sc)
    # Rename test columns and make validation columns instead.
    # if args.add_params['HPO'] and CV != None:
    #     df_data = get_validation_columns(df_data, args, domain_colname)
    # Sanity check.
    if args.CV_keep_groups == 'chemical_composition_sc':
        CV_cols = [col for col in df_data.columns if col.startswith('CV_')]
        for CV_col in CV_cols:
            is_test = df_data[CV_col] == 'test'
            is_train = df_data[CV_col] == 'train'
            test_comps = df_data.loc[is_test, 'chemical_composition_sc'].unique()
            train_comps = df_data.loc[is_train, 'chemical_composition_sc'].unique()
            # Train and test folds must not share any chemical composition.
            assert len(np.intersect1d(test_comps, train_comps)) == 0
    # Select data by criteria.
    # if CV != None:
    #     from dataset_preparation import _6_select_best_matches_and_prepare_df
    #     df_data = _6_select_best_matches_and_prepare_df.keep_only_best_matches(df=df_data, criteria=args.add_params['criteria'], n_exclude_if_more_structures=args.add_params['n_exclude_if_more_structures'], output_graph_dir=None)
    args.add_params['n_data_points'] = len(df_data)
    print('Number of data points:', len(df_data))
    # Save values of some variables in the final output file for later convenience.
    save_value_of_variables = {key: val for key, val in vars(args).items()}
    save_value_of_variables['Domain_colname'] = domain_colname
    n_domains = len(df_data[domain_colname].unique()) if domain_colname != None else 1
    all_models = get_all_models(args.hparams, len(features), len(targets), args.use_models, n_domains, domain_colname, output_layers, outdir=outdir, args=args)
    use_models = {modelname: all_models[modelname] for modelname in args.use_models}
    ml = ML.Machine_Learning(
                            data=df_data,
                            features=features,
                            targets=targets,
                            domain=domain_colname,
                            sample_weights=args.sample_weights,
                            metrics_sample_weights=args.metrics_sample_weights,
                            Column_Transformer=Column_Transformer,
                            save_models=args.save_models,
                            save_torch_models=True,
                            is_optimizing_NN=args.is_optimizing_NN,
                            save_value_of_variables=save_value_of_variables,
                            print_features=False,
                            print_targets=True,
                            print_domain_score=True,
                            random_seed=args.random_seed,
                            save_all_values_and_predictions=True,
                            n_jobs=args.n_jobs,
                            copy_files=[args.hparams_file]
                            )
    print('Start training.')
    ml.train(use_models, outdir)
    # # Make lots of plots.
    # plt.ioff()
    # plot_dir = os.path.join(outdir, 'plots')
    # models_without_loss_curve = ['LR', '1NN', 'RF', 'XGB', 'GPsk', 'GPR', 'SGPR']
    # run = MLRun(outdir)
    # plot_models = [modelname for modelname in args.use_models if not modelname in models_without_loss_curve]
    # run.final_plots(plot_dir, plot_models, df_data, domain_colname, features, targets, use_models, outdir)
    # Check if refactoring was successful and everything has stayed the same as in comparison_dir.
    # comparison_dir = '/home/timo/superconductors_3D/analysis/results/testing/results_202_'
    # refactoring = Refactoring.Refactoring(comparison_dir)
    # refactoring.check(outdir)
    return(ml, outdir)
def main(args_from_fn):
    """Define the default experiment configuration and launch a training run.

    `args_from_fn` can override the defaults defined below via
    parse_arguments(). Returns the trained ML.Machine_Learning object.
    """
    # =============================================================================
    # Define options.
    # =============================================================================
    # use_models = ['1NN', 'LR', 'XGB', 'SVGP', 'NNsk', 'NN', 'RGM']
    use_models = ['XGB']
    experiment = ''
    add_params = {
                # 'features': 'graph',
                # 'database': 'MP',
                # "act": "relu",
                # "dropout": 0.8762999083316979,
                # "lr": 0.000008567057310854599,
                # "lr_exp_decay": 0.983512961357719,
                # "n1": 46,
                # "n2": 52,
                # "n3": 73,
                # "n_feat_bond": 18,
                # "nblocks": 2,
                # "npass": 8,
                # 'batch_size': 56,
                # 'clipnorm': 1.063448501785796,
                # 'l2': 2.1555727094418956e-7,
                # 'n_exclude_if_too_many_structures': False,
                # 'drop_duplicate_superconductors': False,
                # 'only_totreldiff=0': False,
                # 'only_abs_matches': False,
                # 'same_n_sc': True,
                # 'without_lattice_feats': True,
                # 'criteria': ['no_crystal_temp_given_2']
                # 'group': 'Oxide',
                # 'train_frac': 0.3,
                # 'early_stopping': True,
                'features': 'MAGPIE+DSOAP',
                # 'CV': 'LOGO',
                'CV_keep_groups': 'chemical_composition_sc',
                # 'domain_colname': None,#'num_elements_sc',
                # 'CV_name': 'LOCO_phys',
                }
    output_note = ''
    outdirname = '/home/timo/superconductors_3D/analysis/results/testing'#'/media/timo/ext42/academic_projects/superconductors_3D/analysis/results/testing'
    calcdir = os.getcwd()
    # Cross validation
    CV = 'Random' # `KFold`, `LOGO`, `Random` or None
    n_folds = 5 # for KFold
    n_repeats = 1 # for KFold
    CV_keep_groups = 'chemical_composition_sc' # for KFold, Random
    n_reps = 3 # for Random
    train_frac = 0.8 # for Random
    domain_colname = None # for LOGO
    # Weights
    sample_weights = 'weight'
    metrics_sample_weights = 'weight'
    # Dataset
    dataset = '/home/timo/superconductors_3D/data_before/final/MP/SC_MP_matches.csv'#projectpath('data', 'final', 'ICSD', 'SC_ICSD_matches.csv')#projectpath('data', 'intermediate', 'MP', '5_features_SC_MP.csv')
    # Hyperparameters
    hparams_file = 'hparams.yml'
    n_jobs = 1
    is_optimizing_NN = False # Only run NN and don't plot anything when optimizing NN
    save_models = True
    # Debugging
    use_data_frac = None # None for using all data.
    # =============================================================================
    # End define options.
    # =============================================================================
    print('Input:\n', sys.argv)
    args = parse_arguments(args_from_fn, use_models, experiment, output_note, outdirname, calcdir, save_models, CV, n_folds, n_repeats, CV_keep_groups, n_reps, train_frac, domain_colname, is_optimizing_NN, dataset, hparams_file, n_jobs, sample_weights, metrics_sample_weights, use_data_frac, add_params)
    print('args.add_params:\n', args.add_params)
    # Run ML and measure time of run.
    starttime = datetime.datetime.now()
    ml, _ = train_with_args(args)
    duration = datetime.datetime.now() - starttime
    print(f'Duration: {duration}')
    return ml
if __name__ == "__main__":
ml = main(args_from_fn={}) | 45,766 | 46.328852 | 303 | py |
3DSC | 3DSC-main/superconductors_3D/machine_learning/RGM_own.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 20 17:24:04 2021
@author: timo
This script contains my wrapper of the RGM of 2020 Jin to go with the standard sklearn API.
"""
import os
import numpy as np
from superconductors_3D.machine_learning.Algorithms.RGM_Jin import RGM as RGM_Jin
import torch
import torch.nn as nn
from torch.utils.tensorboard import SummaryWriter
import random
import warnings
from torch.utils.data import Dataset, DataLoader
from chemprop.nn_utils import initialize_weights
from collections import namedtuple
from sklearn.model_selection import StratifiedShuffleSplit
from copy import deepcopy
import mlflow
import time
# import torchviz
from superconductors_3D.machine_learning.own_libraries.own_functions import enforce_types
def log_metrics(metrics: dict, step: float = None, max_tries: float = 10, if_log_metrics: bool = True) -> None:
    """Send `metrics` to mlflow, retrying on connection problems.

    A failed attempt (ConnectionError) is retried after a short pause, up to
    `max_tries` attempts in total. Nothing is logged when `if_log_metrics`
    is False.
    """
    if not if_log_metrics:
        return()
    attempt = 0
    while attempt < max_tries:
        try:
            mlflow.log_metrics(metrics, step=step)
            break
        except ConnectionError:
            warnings.warn('Connection Error, try again.')
            time.sleep(0.1)
            attempt += 1
    return()
def get_domain_batch(x_data, y_data, w_data, d_data, domain):
    """Select the rows of x_data, y_data, d_data and w_data whose domain
    label in d_data equals `domain`.

    Returns the tuple (x, y, d, w) of the filtered arrays/tensors.
    """
    keep = d_data == domain
    return(x_data[keep], y_data[keep], d_data[keep], w_data[keep])
def get_all_domain_batches(x_data, y_data, w_data, d_data, all_domains):
    """Build one batch per domain label in `all_domains`.

    Each list entry is the tuple (x, y, d, w) restricted to the rows of the
    corresponding domain, in the order given by `all_domains`.
    """
    batches = []
    for domain in all_domains:
        keep = d_data == domain
        batches.append((x_data[keep], y_data[keep], d_data[keep], w_data[keep]))
    return(batches)
class DomainDataset(Dataset):
    """Torch dataset bundling features (x), targets (y), domain labels (d)
    and sample weights (w) for one or several domains."""
    def __init__(self, x, y, d, w, device):
        self.x = x
        self.y = y
        self.d = d
        self.w = w
        self.device = device
        # All four containers must hold one entry per sample.
        n_samples = len(self.x)
        lengths_ok = (len(self.y) == n_samples) and (len(self.d) == n_samples) and (len(self.w) == n_samples)
        assert lengths_ok, 'At least one of features, targets, domains and weights has different length.'
    def __len__(self):
        return len(self.x)
    def __getitem__(self, index):
        return self.x[index], self.y[index], self.d[index], self.w[index]
    def collate_fn(self, batch):
        # Transpose the list of samples into per-field lists, then stack.
        xs, ys, ds, ws = zip(*batch)
        return torch.stack(xs), torch.stack(ys), torch.stack(ds), torch.stack(ws)
class EarlyStopping:
    """Stops early if validation loss doesn't improve after a given patience."""

    def __init__(self, n_iter_no_change, delta=0):
        """
        Args:
            n_iter_no_change (int): How long to wait after last time validation loss improved.
            delta (float): Minimum change in the monitored quantity to qualify as an improvement.
                           Default: 0
        """
        self.n_iter_no_change = n_iter_no_change
        self.counter = 0          # epochs since the last improvement
        self.best_score = None    # negative of the best validation loss seen so far
        self.early_stop = False   # flag read by the training loop
        # Fix: use the builtin infinity instead of np.Inf -- the np.Inf alias
        # was removed in NumPy 2.0 and would raise an AttributeError there.
        self.valid_loss_min = float('inf')
        self.delta = delta

    def check_loss(self, valid_loss, model, epoch):
        """Update the stopping state with the validation loss of `epoch`.

        Saves a checkpoint of `model` whenever the loss improves; sets
        `self.early_stop` after `n_iter_no_change` epochs without improvement.
        """
        score = -valid_loss
        if self.best_score is None:
            self.best_score = score
            self.save_checkpoint(valid_loss, model, epoch)
        elif score < self.best_score + self.delta:
            # No (sufficient) improvement this epoch.
            self.counter += 1
            if self.counter >= self.n_iter_no_change:
                self.early_stop = True
        else:
            self.best_score = score
            self.save_checkpoint(valid_loss, model, epoch)
            self.counter = 0

    def save_checkpoint(self, valid_loss, model, epoch):
        '''Saves model when validation loss decrease.'''
        # Deepcopy is important, otherwise only reference is passed.
        self.best_model_state_dict = deepcopy(model.state_dict())
        self.valid_loss_min = valid_loss
        self.best_epoch = epoch
class RGM_sklearn(nn.Module):
    """A wrapper around the RGM implementation of 2020 Jin to be able to be called like a standard sklearn module.

    Exposes fit()/predict() on numpy arrays; internally trains the RGM_Jin
    torch module per-domain with minibatches, optional early stopping on a
    held-out validation split, gradient clipping and an exponential LR decay.
    """
    @enforce_types
    def __init__(self, solver:str, max_iter: int, batch_size: int, learning_rate_init: (float, int), featurizer, classifier, batch_mode: str, weight_decay: (float, int), rgm_e: (float, int), erm_e: (float, int), holdout_e: (float, int), detach_classifier: bool, oracle: bool, ensemble_pred: bool, validation_fraction: float, early_stopping: str, n_iter_no_change: int, num_train_domains: int, max_n_classifiers: int=999999999, clip_grad: (float, int)=np.inf, if_log_metrics: bool=True, random_seed: (int, type(None))=None, coeff_lr_classifier: (float, int)=1., reduce_lr_factor: float=1., use_tensorboard: bool=True):
        super(RGM_sklearn, self).__init__()
        self.max_epochs = max_iter
        self.batch_size = batch_size
        self.learning_rate_init = learning_rate_init
        self.coeff_lr_classifier = coeff_lr_classifier
        self.batch_mode = batch_mode # Conserve_ratio'
        self.rgm_e = rgm_e
        self.erm_e = erm_e
        self.holdout_e = holdout_e
        self.detach_classifier = detach_classifier
        self.oracle = oracle
        self.seed = random_seed
        self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        print(f'device: {self.device}')
        self.loss_func = nn.MSELoss(reduction='none')
        # Deepcopy so repeated fits don't share (already trained) weights.
        self.featurizer = deepcopy(featurizer)
        self.classifier = deepcopy(classifier)
        self.solver = solver
        self.weight_decay = weight_decay
        self.ensemble_pred = ensemble_pred
        self.validation_fraction = validation_fraction
        self.early_stopping = early_stopping
        self.clip_grad = clip_grad
        self.if_log_metrics = if_log_metrics
        self.num_train_domains = num_train_domains
        self.max_n_classifiers = max_n_classifiers
        self.use_tensorboard = use_tensorboard
        self.reduce_lr_factor = reduce_lr_factor
        # early_stopping is the string name of the monitored loss curve
        # ('valid'/'train') or the literal string 'False'.
        if early_stopping != 'False':
            assert n_iter_no_change != None
        self.n_iter_no_change = n_iter_no_change
        # Record the constructor arguments (minus the unpicklable model
        # objects) so they can be saved alongside the results.
        self.input_args = deepcopy(locals())
        self.input_args['estimator_name'] = type(self).__name__
        del self.input_args['self']
        del self.input_args['featurizer']
        del self.input_args['classifier']
        self.backward_graphs = []
    def get_optimizer(self, solver, parameters, lr, weight_decay):
        """Returns correct optimizer based on `solver`."""
        if solver == 'sgd':
            optimizer = torch.optim.SGD(parameters, lr=lr, weight_decay=weight_decay)
        elif solver == 'adam':
            optimizer = torch.optim.Adam(parameters, lr=lr, weight_decay=weight_decay)
        elif solver == 'adamw':
            optimizer = torch.optim.AdamW(parameters, lr=lr, weight_decay=weight_decay)
        elif solver == 'rmsprop':
            optimizer = torch.optim.RMSprop(parameters, lr=lr, weight_decay=weight_decay)
        else:
            # NOTE(review): the message below is missing its f-string prefix
            # ('f' ended up inside the quotes), so "{solver}" is printed
            # literally instead of being interpolated.
            raise Warning('Solver "f{solver} not known. Solvers are lowercase always.')
        return(optimizer)
    def calc_params_norm(self, parameters, norm_type=2):
        """Calculates parameter norm as in torch.nn.utils.clip_grad. Returns torch.tensor().
        """
        parameters = [p for p in parameters if p.grad is not None]
        if len(parameters) == 0:
            return torch.tensor(0.)
        device = parameters[0].grad.device
        total_norm = torch.norm(torch.stack([torch.norm(p.grad.detach(), norm_type).to(device) for p in parameters]), norm_type)
        return(total_norm)
    # def make_backwards_graph(self, loss, batch_idx):
    #     """"Make graph of backward pass."""
    #     if self.epoch == 0 and batch_idx == 0:
    #         backward_graph = \
    #             torchviz.make_dot(loss, dict(self.trainer.named_parameters()))
    #         self.backward_graphs.append(backward_graph)
    #     return()
    def initialize_model_weights(self, model):
        """Initializes all weights of the model."""
        model.apply(self.weight_initialization)
        return()
    def weight_initialization(self, m):
        """Initializes the layer m with a uniform xavier distribution if it is linear."""
        if isinstance(m, nn.Linear):
            nn.init.xavier_uniform_(m.weight.data)
        return()
    def make_consistent_torch_tensors(self, x_train, y_train, d_train, w_train):
        """Make data to torch tensors for consistency. If weights are not given give it the neutral value 1 and make it a consistent tensor.
        """
        if w_train is None:
            shape = y_train.shape
            w_train = torch.ones(shape, dtype=torch.float, device=self.device)
        else:
            w_train = torch.tensor(w_train, dtype=torch.float, device=self.device)
        x_train = torch.tensor(x_train, dtype=torch.float, device=self.device)
        if y_train.ndim == 1:
            y_train = y_train.reshape(-1, 1) # fix sklearn inconsistency.
        y_train = torch.tensor(y_train, dtype=torch.float, device=self.device)
        try:
            # Empty d_train means "single domain": label every sample 0.
            if len(d_train) == 0:
                num_datapoints = len(x_train)
                d_train = torch.zeros(num_datapoints, dtype=torch.float, device=self.device)
            else:
                d_train = torch.tensor(d_train, dtype=torch.float, device=self.device)
        except TypeError:
            # String labels cannot become a float tensor: map each label to an
            # integer index (ordered by ascending frequency) first.
            print('Internally converting string domain labels to integers.')
            labels, counts = np.unique(d_train, return_counts=True)
            sort_idc = np.argsort(counts)
            sorted_labels = labels[sort_idc]
            domain_dict = {label: idx for idx, label in enumerate(sorted_labels)}
            d_train = np.array([domain_dict[d] for d in d_train])
            d_train = torch.tensor(d_train, dtype=torch.float, device=self.device)
        assert all([length == len(x_train) for length in (len(y_train), len(d_train), len(w_train))])
        return(x_train, y_train, d_train, w_train)
    def get_validation_data(self, x_train, y_train, d_train, w_train):
        """Splits data in train and validation data."""
        # Stratify on the domain labels so every domain appears in both splits.
        assert isinstance(self.validation_fraction, float), 'validation_fraction must be float.'
        train_indices, valid_indices = next(StratifiedShuffleSplit(n_splits=1, test_size=self.validation_fraction).split(x_train.cpu(), d_train.cpu()))
        x_train, x_valid = x_train[train_indices], x_train[valid_indices]
        y_train, y_valid = y_train[train_indices], y_train[valid_indices]
        d_train, d_valid = d_train[train_indices], d_train[valid_indices]
        w_train, w_valid = w_train[train_indices], w_train[valid_indices]
        x_valid = x_valid.to(self.device)
        y_valid = y_valid.to(self.device)
        d_valid = d_valid.to(self.device)
        w_valid = w_valid.to(self.device)
        return(x_train, x_valid, y_train, y_valid, d_train, d_valid, w_train, w_valid)
    def clip_grad_norm(self):
        """Clips the gradient norm, returns gradient norm before and after and logs them.
        """
        grad_norm_before = torch.nn.utils.clip_grad_norm_(self.trainer.parameters(), self.clip_grad).item()
        grad_norm_clipped = self.calc_params_norm(self.trainer.parameters()).item()
        # self.writer.add_scalar('grad_norm_before', grad_norm_before)
        # self.writer.add_scalar('grad_norm_clipped', grad_norm_clipped)
        return(grad_norm_before, grad_norm_clipped)
    def weighted_average(self, y, weights):
        # Weighted mean of y, normalised by the sum of the weights.
        y = y * weights
        norm = weights.sum()
        return y.sum() / norm
    def loss_forward(self, y_pred, y_true, weights):
        """Weighted loss (per-element loss_func averaged with `weights`)."""
        # TODO
        # NOTE(review): asserts non-negative predictions -- presumably because
        # the targets are non-negative (transformed Tc); confirm intent.
        assert (y_pred >= 0).all()
        pred_losses = self.loss_func(y_pred, y_true)
        pred_loss = self.weighted_average(pred_losses, weights)
        return pred_loss
    def get_domain_data(self, x_data, y_data, w_data, d_data, domain):
        """Returns the dataset for a single domain.
        x_data, y_data, w_data, d_data are the datasets with all domains together. domain is the domain label.
        """
        x, y, d, w = get_domain_batch(x_data, y_data, w_data, d_data, domain)
        data = DomainDataset(x, y, d, w, self.device)
        return(data)
    def determine_batch_ratios(self, mode):
        """This function determines the batch sizes per domain ('batch_ratios') based on `mode`.
        """
        if mode == 'Conserve_ratio':
            # Each domain will have a batch size according to its ratio in the total dataset.
            ratios = self.all_domain_counts / sum(self.all_domain_counts)
            batch_ratios = self.batch_size * ratios
            batch_ratios = torch.round(batch_ratios.float()).int()
            drop_last = True
        else:
            raise ValueError(f'Unknown batch_mode "{mode}".')
        if any(batch_ratios == 0):
            warnings.warn('One of the domains has very few examples and should have zero examples per batch acccording to batch_size. Setting minimum number of examples to 1.')
            min_batch_ratios = torch.ones_like(batch_ratios)
            batch_ratios = torch.maximum(batch_ratios, min_batch_ratios)
        elif any(batch_ratios > self.all_domain_counts):
            warnings.warn('For at least one domain the batchsize is greater than the number of samples, unpredictable behaviour.')
        return(batch_ratios, drop_last)
    def get_batches(self, x_data, y_data, d_data, w_data, mode):
        """Generates a list of batches for each domain.
        """
        batch_ratios, drop_last = self.determine_batch_ratios(mode)
        # Get data loaders for each domain that return minibatches scaled like the ratio in the total dataset.
        data_loaders = {}
        for domain, batch_ratio in zip(self.all_domains, batch_ratios):
            batch_ratio = batch_ratio.item()
            domain = domain.item()
            data = self.get_domain_data(x_data, y_data, w_data, d_data, domain)
            data_loaders[domain] = iter(DataLoader(data, batch_size=batch_ratio, drop_last=drop_last, shuffle=True, collate_fn=data.collate_fn))
        # Go through batches of each domain. If a domain has no examples left in the dataset, resample it and continue until all domains are through at least once.
        num_batches = [len(loader) for loader in data_loaders.values()]
        # print('The number of batches per domain is', num_batches)
        num_iters = max(num_batches)
        for batch_idx in range(num_iters):
            batches = []
            for domain, batch_ratio in zip(self.all_domains, batch_ratios):
                batch_ratio = batch_ratio.item()
                domain = domain.item()
                d_loader = data_loaders[domain]
                # next() yields a (x, y, d, w) tuple or None when exhausted.
                d_batch = next(d_loader, None)
                # If one domain has no data points left reload data points.
                if d_batch == None:
                    assert self.num_domains > 1
                    data = self.get_domain_data(x_data, y_data, w_data, d_data, domain)
                    d_loader = iter(DataLoader(data, batch_size=batch_ratio, drop_last=drop_last, shuffle=True, collate_fn=data.collate_fn))
                    d_batch = next(d_loader, None)
                    data_loaders[domain] = d_loader
                assert not d_batch == None
                batches.append(d_batch)
            yield(batches)
    def prepare_trainer(self):
        """Builds up the Neural Network trainer with all it's layers, it's optimizer etc.
        """
        trainer = RGM_Jin(featurizer=self.featurizer,
                            classifier=self.classifier,
                            num_domains=self.num_domains,
                            rgm_e=self.rgm_e,
                            erm_e=self.erm_e,
                            holdout_e=self.holdout_e,
                            detach_classifier=self.detach_classifier,
                            oracle=self.oracle,
                            loss_forward=self.loss_forward,
                            num_train_domains=self.num_train_domains,
                            max_n_classifiers=self.max_n_classifiers
                            )
        # Initialize weights new. Very important because otherwise all classifiers have the same initialization!
        initialize_weights(trainer)
        # self.initialize_model_weights(trainer)
        trainer = trainer.to(self.device)
        # Get parameter lists for classifier and representation separately for different learning rates.
        classifier_params = []
        for classifier in (trainer.classifier, trainer.f_k, trainer.g_k):
            classifier_params.extend(list(classifier.parameters()))
        assert len(list(trainer.parameters())) == len(classifier_params) + len(list(trainer.copy_f_k.parameters())) + len(list(trainer.featurizer.parameters()))
        learning_rate_classifier = self.coeff_lr_classifier * self.learning_rate_init
        parameters = [
            {'params': trainer.featurizer.parameters()},
            {'params': classifier_params, 'lr': learning_rate_classifier}
        ]
        trainer.optimizer = self.get_optimizer(solver=self.solver,
                                               parameters=parameters,
                                               lr=self.learning_rate_init,
                                               weight_decay=self.weight_decay
                                               )
        trainer.scheduler = torch.optim.lr_scheduler.ExponentialLR(trainer.optimizer, gamma=self.reduce_lr_factor)
        return(trainer)
    def validate(self, x_train, x_valid, y_train, y_valid, w_train, w_valid):
        """Calculates train and validation loss."""
        y_train_pred = self.evaluate(x_train)
        y_valid_pred = self.evaluate(x_valid)
        train_loss = self.loss_forward(y_pred=y_train_pred, y_true=y_train, weights=w_train).item()
        valid_loss = self.loss_forward(y_pred=y_valid_pred, y_true=y_valid, weights=w_valid).item()
        return(train_loss, valid_loss)
    def train_epoch(self, x_train, y_train, d_train, w_train): # d_train: domain labels
        "Train one epoch."
        self.trainer.train()
        all_batches = self.get_batches(x_train, y_train, d_train, w_train, mode=self.batch_mode)
        # Record all losses and the gradient.
        losses = {sub_loss: 0 for sub_loss in self.trainer.loss_curve_iter.keys()}
        losses['grad_norm_before'] = 0
        losses['grad_norm_clipped'] = 0
        for batch_idx, d_batches in enumerate(all_batches):
            self.trainer.optimizer.zero_grad()
            loss = self.trainer(d_batches)
            # self.make_backwards_graph(loss, batch_idx)
            loss.backward()
            grad_norm_before, grad_norm_clipped = self.clip_grad_norm()
            self.trainer.optimizer.step()
            # Accumulate the per-iteration sub-losses over the epoch.
            for sub_loss in self.trainer.loss_curve_iter.keys():
                losses[sub_loss] += self.trainer.loss_curve_iter[sub_loss][-1]
            losses['grad_norm_before'] += grad_norm_before
            losses['grad_norm_clipped'] += grad_norm_clipped
        for sub_loss, loss_value in losses.items():
            self.loss_curve_[sub_loss].append(loss_value)
    def train_until_stop(self, x_train, y_train, d_train, w_train, x_valid, y_valid, w_valid):
        """Trains the model until either all epochs are trained or early_stopping criterion is reached. Label is a string to differentiate the run to train the representation and the run to train the classifier if these are seperated.
        """
        if self.early_stopping != 'False':
            stop = EarlyStopping(self.n_iter_no_change)
        self.loss_curve_['train'] = []
        self.loss_curve_['valid'] = []
        for epoch in range(self.max_epochs):
            self.epoch = epoch
            self.train_epoch(x_train, y_train, d_train, w_train)
            # Validation on validation set.
            train_loss, valid_loss = self.validate(x_train, x_valid, y_train, y_valid, w_train, w_valid)
            self.loss_curve_['train'].append(train_loss)
            self.loss_curve_['valid'].append(valid_loss)
            # metrics = {'train_loss': train_loss, 'valid_loss': valid_loss}
            # log_metrics(metrics, step=epoch, if_log_metrics=self.if_log_metrics)
            # self.writer.add_scalars('loss', {'train': train_loss, 'valid': valid_loss}, global_step=epoch)
            self.trainer.scheduler.step()
            # Early stopping, but only after min 20% of epochs.
            if self.early_stopping != 'False' and epoch > self.max_epochs / 5:
                valid_score = self.loss_curve_[self.early_stopping][-1]
                stop.check_loss(valid_score, self.trainer, epoch)
                if stop.early_stop == True:
                    print(f'Stopped early in epoch {epoch}.')
                    break
        # Restore the best checkpoint seen during the monitored phase.
        if self.early_stopping != 'False' and epoch > self.max_epochs / 5:
            self.trainer.load_state_dict(stop.best_model_state_dict)
            self.best_epoch = stop.best_epoch
            print(f'Best model loaded from epoch {self.best_epoch}.')
        return()
    def fit(self, x_train, y_train, d_train=[], sample_weight=[]):
        """Train the model for all epochs.
        """
        # NOTE(review): mutable default arguments -- harmless here because
        # both are only reassigned, never mutated in place.
        if self.seed != None:
            warnings.warn('torch.manual_seed and random.seed behave a bit dangerous, different than np.random_seed. If the torch seed is set it not only is set for afterwards in this run, but it is set even if the variables are cleaned! Only a kernel restart also unsets the torch.manual_seed. I don\'t know if random.seed also persists after cleaning variables but I think so and it definitely persists across modules.')
            torch.manual_seed(self.seed)
            np.random.seed(self.seed)
            random.seed(self.seed)
        x_train, y_train, d_train, w_train = self.make_consistent_torch_tensors(x_train, y_train, d_train, sample_weight)
        # Get validation data.
        if not self.validation_fraction == None:
            x_train, x_valid, y_train, y_valid, d_train, d_valid, w_train, w_valid = \
                self.get_validation_data(x_train, y_train, d_train, w_train)
        else:
            raise Warning('Code currently asserts validation set.')
        # Initialize variables.
        self.num_domains = len(torch.unique(d_train))
        self.output_size = y_train.shape[1] if len(y_train.shape) == 2 else 1
        self.n_train_samples = len(y_train)
        if self.batch_size > len(x_train):
            warnings.warn('Got batch size larger than sample size. It is going to be clipped.')
            self.batch_size = len(x_train)
        self.all_domains, self.all_domain_counts = torch.unique(d_train, return_counts=True)
        self.use_classifier = False if self.ensemble_pred else True
        self.trainer = self.prepare_trainer()
        self.loss_curve_ = {sub_loss: [] for sub_loss in self.trainer.loss_curve_iter.keys()}
        self.loss_curve_['grad_norm_before'] = []
        self.loss_curve_['grad_norm_clipped'] = []
        self.train_until_stop(x_train, y_train, d_train, w_train, x_valid, y_valid, w_valid)
        # self.trainer.log_losses = False
        # batches_data = get_all_domain_batches(x_train, y_train, w_train, d_train, self.all_domains)
        # self.writer.add_graph(self.trainer, [batches_data])
        # self.writer.add_embedding(x_train, y_train, tag='data')
        # self.writer.flush()
        # torch.onnx.export(self.trainer, (batches_data,), self.outpath, export_params=True, opset_version=11, input_names=['input'], output_names=['output'], example_outputs=torch.tensor(1))
        # torch.save(self.trainer, self.outpath)
        # for loss_name, loss_curve in self.trainer.loss_curve_iter.items():
        #     self.loss_curve_[loss_name] = loss_curve
        # Push model to cpu after training. Could be useful sometimes but not really needed.
        self = self.to('cpu')
        self.device = 'cpu'
    def evaluate(self, x_test):
        """Forward evaluates the network. Can either use the classifier or an ensemble of all extrapolator classifiers.
        """
        if self.use_classifier:
            model = self.trainer.network
            model.eval()
            with torch.no_grad():
                y_pred = model(x_test)
        else:
            # Ensemble mode: predict with every extrapolator classifier and
            # take the element-wise median over the ensemble axis.
            shape = (self.trainer.n_classifiers, len(x_test), self.output_size)
            all_y_pred = torch.zeros(shape, device=self.device)
            for k, model in enumerate(self.trainer.ensemble):
                model.eval()
                with torch.no_grad():
                    all_y_pred[k, :, :] = model(x_test)
            y_pred = torch.quantile(all_y_pred, 0.5, dim=0) # median
        return(y_pred)
    def predict(self, x_test):
        """Infers data points and outputs a numpy array with the same dtype as the input array.
        """
        input_dtype = x_test.dtype
        x_test = torch.from_numpy(x_test).float().to(self.device)
        self.use_classifier = True if not self.ensemble_pred else False
        y_pred = self.evaluate(x_test)
        y_pred = np.array(y_pred.cpu(), dtype=input_dtype)
        return(y_pred)
| 26,342 | 45.054196 | 617 | py |
3DSC | 3DSC-main/superconductors_3D/machine_learning/Custom_Machine_Learning_v1_3.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 11 10:14:31 2020
@author: timo
This module provides the class Machine_Learning, which automatically executes a lot of different models and prints and saves all the output.
"""
import warnings
# warnings.filterwarnings("ignore")
import os
# os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
import numpy as np
import tensorflow as tf
from matplotlib import pyplot as plt
# import torch
import pandas as pd
from sklearn.preprocessing import StandardScaler
from sklearn.utils.validation import check_is_fitted
from sklearn.pipeline import Pipeline
from sklearn.compose import TransformedTargetRegressor, ColumnTransformer
import sklearn.metrics
import sklearn.linear_model
import random
import copy
import yaml
import datetime
from shutil import copyfile
from superconductors_3D.machine_learning.own_libraries.own_functions import movecol, isfloat
# import torch.optim
import itertools
import superconductors_3D.machine_learning.own_libraries.data.All_Data as All_Data
import superconductors_3D.machine_learning.own_libraries.data.All_scores as All_Scores
from superconductors_3D.machine_learning.own_libraries.data.Domain_statistics import save_Domain_statistics
from superconductors_3D.machine_learning.own_libraries.data import Feature_Importances as FI
from contextlib import redirect_stdout
import json
from superconductors_3D.machine_learning.own_libraries.utils.Models import Models, get_modelpath
def load_df_and_metadata(path):
    """Load a csv file into a DataFrame together with its metadata.

    The metadata is expected as a single JSON line at the top of the file;
    when the first line is not valid JSON, the whole file is read as csv and
    an empty metadata dict is returned.
    """
    try:
        with open(path, 'r') as f:
            metadata = json.loads(f.readline())
        # First line is the JSON header, so the real csv header is line 2.
        df = pd.read_csv(path, header=1)
        # print(f'Loaded df and metadata from {path}.')
    except json.JSONDecodeError:
        metadata = {}
        df = pd.read_csv(path)
        print(f'Metadata not found. Loaded df from {path}.')
    return(df, metadata)
def save_df_and_metadata(df, metadata, outpath):
    """Write `df` as csv to `outpath`, prefixed by one JSON metadata line.

    An existing file at `outpath` is overwritten.
    """
    if os.path.exists(outpath):
        os.remove(outpath)
    with open(outpath, 'a') as f:
        json.dump(metadata, f)
        f.write('\n')
        df.to_csv(f, index=False)
    return
def regressor_from_pipeline(pipe):
    """Return the 'model' step of a fitted sklearn Pipeline wrapped in a
    TransformedTargetRegressor."""
    model = pipe.regressor_['model']
    return model
def get_saved_model(modelname: str, repetition: int, run_dir: str, regressor=False):
    """Load the single saved model with the given name and repetition from
    `run_dir`/models and return it."""
    loaded = Models().load(modelname=modelname, repetition=repetition, rundir=run_dir, regressor=regressor)
    return(loaded)
def assert_allclose(x1, x2, atol=1e-5, rtol=1e-3):
    """Assert that x1 and x2 are numerically close; non-numeric arrays
    (e.g. strings) fall back to an exact element-wise equality check."""
    try:
        close = np.allclose(x1, x2, rtol, atol)
    except TypeError:
        # np.allclose cannot handle e.g. string arrays.
        close = all(x1 == x2)
    assert close
    return
def net_pattern(n_layers, base_size, end_size):
    """Return a list of `n_layers` layer sizes that interpolate
    geometrically from `base_size` to `end_size`."""
    if n_layers == 1:
        ratio = 1
    else:
        ratio = (end_size / base_size) ** (1 / (n_layers - 1))
    return([int(round(base_size * ratio ** depth)) for depth in range(n_layers)])
def unique_sorted(groupcol):
    """Return the unique labels of the pandas Series `groupcol`, sorted by
    decreasing frequency.

    NOTE(review): relies on `value_counts().reset_index()` exposing the
    labels under the column name 'index' (true for an unnamed Series).
    """
    counts = groupcol.value_counts().reset_index()
    return(counts['index'].tolist())
def print_row(values, name, uncertainties=None, width=7, num_width=3, dec=2, delim=' | '):
    """Prints a nicely formatted row. Name is the left-most entry. Values and
    uncertainties need to be iterables.

    Fixes: `uncertainties != None` raised an ambiguous-truth-value error for
    numpy arrays (now `is not None`), and unsupported value types previously
    crashed with an unbound-variable NameError (now a clear ValueError).
    """
    values = np.asanyarray(values)
    if uncertainties is not None:
        uncertainties = np.asanyarray(uncertainties)
        assert isinstance(values[0], float) or isinstance(values[0], int)
        num_width = 2
        print_vals = [f'{val:^.{num_width}g}±{unc:^.{num_width}g}' for val, unc in zip(values, uncertainties)]
        print_vals = [f'{string:^{width}.{width}}' for string in print_vals]
    else:
        if isinstance(values[0], str):
            print_vals = [f'{val:^{width}.{width}}' for val in values]
        elif isfloat(values[0]):
            print_vals = [f'{val:^{width}.{num_width}g}' for val in values]
        else:
            # Previously fell through and crashed later with NameError.
            raise ValueError(f'Unsupported value type: {type(values[0])}')
    cells = [f'{name:5.5}']
    cells = cells + print_vals
    print(delim.join(cells))
    print('-'*75)
    return()
def Sc_classification(*Tc_arrays):
    """Map each given array of continuous Tc values to superconductor
    classes: 1 where Tc > 0 (sc), 0 where Tc == 0 (non-sc).

    Returns a list with one class array per input array.
    """
    classified = []
    for Tc in Tc_arrays:
        Tc = np.asarray(Tc)
        assert all(Tc >= 0), 'We found a negative Tc!'
        classified.append(np.where(Tc > 0, 1, 0))
    return classified
# Globally silence sklearn's UndefinedMetricWarning (raised e.g. by precision/recall when a class has no predicted samples).
warnings.simplefilter("ignore", category=sklearn.exceptions.UndefinedMetricWarning)
def logr2(y_true, y_pred, sample_weight):
    """r2 score computed on arcsinh-transformed values (log-like scaling
    that is also defined at 0)."""
    score = sklearn.metrics.r2_score(
        y_true=np.arcsinh(y_true),
        y_pred=np.arcsinh(y_pred),
        sample_weight=sample_weight,
    )
    return score
def Sc_F1(Tc_true, Tc_pred, sample_weight):
    """F1 score after binarizing the Tc regression values
    (class 1 iff Tc > 0, class 0 iff Tc == 0)."""
    true_cls, pred_cls = Sc_classification(Tc_true, Tc_pred)
    return sklearn.metrics.f1_score(true_cls, pred_cls, sample_weight=sample_weight)
def Sc_accuracy(Tc_true, Tc_pred, sample_weight):
    """Accuracy score after binarizing the Tc regression values
    (class 1 iff Tc > 0, class 0 iff Tc == 0)."""
    true_cls, pred_cls = Sc_classification(Tc_true, Tc_pred)
    return sklearn.metrics.accuracy_score(true_cls, pred_cls, sample_weight=sample_weight)
def Sc_precision(Tc_true, Tc_pred, sample_weight):
    """Precision score after binarizing the Tc regression values
    (class 1 iff Tc > 0, class 0 iff Tc == 0)."""
    true_cls, pred_cls = Sc_classification(Tc_true, Tc_pred)
    return sklearn.metrics.precision_score(true_cls, pred_cls, sample_weight=sample_weight)
def Sc_recall(Tc_true, Tc_pred, sample_weight):
    """Recall score after binarizing the Tc regression values
    (class 1 iff Tc > 0, class 0 iff Tc == 0)."""
    true_cls, pred_cls = Sc_classification(Tc_true, Tc_pred)
    return sklearn.metrics.recall_score(true_cls, pred_cls, sample_weight=sample_weight)
def specificity(y_true, y_pred, sample_weight):
    """Specificity, i.e. the recall of the negative class (label 0).

    Computed twice (via sklearn's pos_label and via label inversion) as a
    deliberate cross-check; both values must agree.
    """
    via_pos_label = sklearn.metrics.recall_score(y_true, y_pred, pos_label=0, sample_weight=sample_weight)
    # Own implementation: recall of the inverted labels.
    y_true = np.abs(y_true - 1)
    y_pred = np.abs(y_pred - 1)
    score = sklearn.metrics.recall_score(y_true, y_pred, sample_weight=sample_weight)
    # Both implementations must give the same result.
    assert score == via_pos_label
    return score
def Sc_specificity(Tc_true, Tc_pred, sample_weight):
    """Specificity score after binarizing the Tc regression values
    (class 1 iff Tc > 0, class 0 iff Tc == 0)."""
    true_cls, pred_cls = Sc_classification(Tc_true, Tc_pred)
    return specificity(true_cls, pred_cls, sample_weight=sample_weight)
def Sc_G_means(Tc_true, Tc_pred, sample_weight):
    """G-means score (geometric mean of recall and specificity) after
    binarizing the Tc regression values (class 1 iff Tc > 0)."""
    sens = Sc_recall(Tc_true, Tc_pred, sample_weight=sample_weight)
    spec = Sc_specificity(Tc_true, Tc_pred, sample_weight=sample_weight)
    return np.sqrt(sens * spec)
def Sc_OoB(Tc_true, Tc_pred, sample_weight, bound_max=200):
    """Fraction of predictions sitting at the upper bound `bound_max`
    (within atol=1), i.e. Out-of-Bounds outliers.

    `Tc_true` is unused; it is accepted only so all score functions share
    the same signature.
    """
    at_bound = np.isclose(Tc_pred, bound_max, atol=1).astype(int)
    if sample_weight is not None:
        at_bound = sample_weight * at_bound
    return sum(at_bound) / len(at_bound)
def Sc_MARE(Tc_true, Tc_pred, sample_weight, min_Tc=0):
    """Mean absolute relative error adapted to superconductors.

    Only points where both true and predicted Tc exceed `min_Tc` are used,
    and the error is taken relative to the larger of the two values. Returns
    0 (with a warning) when no such points exist.
    """
    Tc_true, Tc_pred = np.asarray(Tc_true), np.asarray(Tc_pred)
    both_sc = (Tc_true > min_Tc) & (Tc_pred > min_Tc)
    Tc_true, Tc_pred = Tc_true[both_sc], Tc_pred[both_sc]
    if sample_weight is not None:
        sample_weight = sample_weight[both_sc]
    rel_errors = np.abs(Tc_true - Tc_pred) / np.maximum(Tc_true, Tc_pred)
    if len(rel_errors) == 0:
        warnings.warn('The MARE can not be calculated, setting to 0.')
        return 0
    return np.average(rel_errors, weights=sample_weight)
def Sc_SMAPE(Tc_true, Tc_pred, sample_weight, min_Tc=0):
    """Symmetric mean absolute percentage error adapted to superconductors.

    Only points where both true and predicted Tc exceed `min_Tc` are used;
    the error is normalized by the sum of true and predicted value. Returns
    0 (with a warning) when no such points exist.
    """
    Tc_true, Tc_pred = np.asarray(Tc_true), np.asarray(Tc_pred)
    both_sc = (Tc_true > min_Tc) & (Tc_pred > min_Tc)
    Tc_true, Tc_pred = Tc_true[both_sc], Tc_pred[both_sc]
    if sample_weight is not None:
        sample_weight = sample_weight[both_sc]
    rel_errors = np.abs(Tc_true - Tc_pred) / (Tc_true + Tc_pred)
    if len(rel_errors) == 0:
        warnings.warn('The SMAPE can not be calculated, setting to 0.')
        return 0
    return np.average(rel_errors, weights=sample_weight)
def out_of_sigma(y_true, y_pred, sigma_lower_bound, sigma_upper_bound):
    """Fraction of data points whose true value lies outside the
    prediction's uncertainty interval [lower, upper]."""
    y_true = np.asarray(y_true)
    y_pred = np.asarray(y_pred)
    lower = np.asarray(sigma_lower_bound)
    upper = np.asarray(sigma_upper_bound)
    assert all(y_pred >= lower) and all(y_pred <= upper), 'Predicions don\'t lie between sigma bounds.'
    # If the model overestimates, the lower bound is the relevant one;
    # if it underestimates, the upper bound is.
    signed_error = y_pred - y_true
    relevant_bound = np.where(signed_error > 0, lower, upper)
    uncertainty = np.abs(y_pred - relevant_bound)
    outside = np.abs(signed_error) > uncertainty
    return sum(outside) / len(outside)
def name_score_column(target, scorename, CV):
    """Column name used in `All_scores.csv` for a (target, score, CV) triple."""
    colname = f'{target}_{scorename}_{CV}'
    return colname
def get_scores(score, model, target, CV, all_scores_path):
    """Return the requested score column for `model` and `target` from an
    `All_scores.csv` file at `all_scores_path`."""
    df, _ = load_df_and_metadata(all_scores_path)
    model_rows = df[df['Model'] == model]
    return(model_rows[name_score_column(target, score, CV)])
def tolist(*variables):
    """Wrap scalar str/int/float arguments into one-element lists; every
    other argument is passed through unchanged. Returns a list of results."""
    wrapped = []
    for item in variables:
        if isinstance(item, (str, int, float)):
            wrapped.append([item])
        else:
            wrapped.append(item)
    return(wrapped)
def inverse_transform_std(mu, std, scaler):
    """Inverse-transform an uncertainty band by applying the scaler's
    inverse transform to mu-std and mu+std.

    Returns the (lower, upper) bounds in the original target space.
    """
    lower_trans = scaler.inverse_transform(mu - std)
    upper_trans = scaler.inverse_transform(mu + std)
    return lower_trans, upper_trans
# Column names used in the file Domain_statistics.csv.
N_REPETITION = 'rand_instance'   # index of the CV repetition
MODEL = 'Model'                  # model name
TEST_OR_TRAIN = 'test_or_train'  # which CV split a row describes
SIZE = 'Size'                    # number of data points in a group
# How many sigma uncertainty should be used for the uncertainty of the targets of data points returned from models (e.g. a Gaussian Process). This does NOT apply to the displayed uncertainty of metrics of models. Throughout all scripts one should refer to this variable.
SIGMA = 2
class Machine_Learning():
""" This class contains functions to train with data on several models and automatically write the output to several files.
"""
    def __init__(self, data, features, targets, domain=None, sample_weights=None, metrics_sample_weights=None, Column_Transformer=None, save_models=False, is_optimizing_NN=False, save_value_of_variables=[], NN_valfrac=0.2, print_features=True, print_targets=True, print_domain_score=False, random_seed=None, average_func=np.mean, save_all_values_and_predictions=True, save_torch_models=False, n_jobs=1, copy_files=[]):
        """Set up the training run.

        Parameters (most important ones):
            data: DataFrame with features, targets and 'CV_<i>' columns
                holding 'train'/'test' labels per repetition.
            features, targets: column names to use as inputs and outputs.
            domain: optional column with domain labels.
            sample_weights / metrics_sample_weights: optional weight columns
                used for fitting resp. metric evaluation.
            Column_Transformer: dict column -> sklearn transformer; defaults
                to a StandardScaler per feature/target column.
            save_value_of_variables: NOTE(review): despite the list default,
                this is accessed as a dict (['experiment'], .items()) below —
                callers apparently must pass a dict; confirm.

        Side effect: seeds numpy/random/tensorflow globally with random_seed.
        """
        # For debugging, to be able to always get the same output.
        self.random_seed = random_seed # "None" for randomization.
        np.random.seed(self.random_seed)
        random.seed(self.random_seed)
        # torch.manual_seed(self.random_seed)
        tf.random.set_seed(self.random_seed)
        df_data = copy.deepcopy(data.reset_index(drop=True)) # Important for make_result_df()
        self.targets = targets
        self.df_data = df_data
        self.n_datapoints = len(self.df_data)
        self.features = features
        self.n_features = len(features)
        self.domain = domain
        self.sample_weights = sample_weights
        self.metrics_sample_weights = metrics_sample_weights
        assert all([feat in self.df_data.columns for feat in self.features]), "One or more of the given features does not exists in the data."
        assert all([target in self.df_data.columns for target in self.targets]), "One or more of the targets does not exist in the data."
        self.n_targets = len(targets)
        # Default: one StandardScaler per feature/target column.
        self.Column_Transformer = Column_Transformer if Column_Transformer != None else {col: StandardScaler() for col in features + targets}
        # One 'CV_<i>' column per repetition with train/test labels.
        self.CV_cols = [col for col in self.df_data.columns if col.startswith('CV_')]
        self.n_repetitions = len(self.CV_cols)
        self.all_n_repetitions = list(range(self.n_repetitions))
        assert self.CV_cols == [All_Data.All_Data.name_CV_col(i) for i in self.all_n_repetitions], 'Could not recognize CV columns with train and test indices in df_data.'
        # Collapse the per-repetition sizes to a scalar when they all agree.
        self.train_lengths = [sum(self.df_data[col] == 'train') for col in self.CV_cols]
        if len(set(self.train_lengths)) == 1:
            self.train_lengths = self.train_lengths[0]
        self.test_lengths = [sum(self.df_data[col] == 'test') for col in self.CV_cols]
        if len(set(self.test_lengths)) == 1:
            self.test_lengths = self.test_lengths[0]
        # Check for NaN features
        nan_feature_cols = [col for col in self.features if self.df_data[col].isna().any()]
        if nan_feature_cols:
            raise ValueError(f'Error: Some of the values of the following features are NaN: {",".join(nan_feature_cols)}')
        self.save_models = save_models
        self.average_func = average_func
        self.NN_n_reps = 2
        self.NN_valfrac = NN_valfrac
        self.is_optimizing_NN = is_optimizing_NN
        self.save_all_values_and_predictions = save_all_values_and_predictions
        self.save_torch_models = save_torch_models
        self.experiment = save_value_of_variables['experiment']
        self.n_jobs = n_jobs
        self.copy_files = copy_files
        # Score functions evaluated for every model/target; disabled ones kept for reference.
        self.all_scores = {"r2": sklearn.metrics.r2_score,
                           "logr2": logr2,
                           "MAE": sklearn.metrics.mean_absolute_error,
                           # "MdAE": sklearn.metrics.median_absolute_error,
                           "MSLE": sklearn.metrics.mean_squared_log_error,
                           # "MARE": Sc_MARE,
                           "SMAPE": Sc_SMAPE,
                           # "OoB": Sc_OoB,
                           # "ScSpec": Sc_specificity,
                           # "ScRecall": Sc_recall,
                           # "ScG": Sc_G_means,
                           # "ScAcc": Sc_accuracy,
                           # "ScPrec": Sc_precision
                           }
        self.all_scorenames = list(self.all_scores.keys())
        if self.is_optimizing_NN == True:
            self.save_models = False
        # Write these variables to the final output file for later convenience.
        self.store_variables = {}
        self.store_variables["Length of train/ test set"] = (self.train_lengths, self.test_lengths)
        self.store_variables["Features"] = self.features
        self.store_variables["Targets"] = self.targets
        self.store_variables["Save models"] = self.save_models
        self.store_variables["Random seed"] = self.random_seed
        self.store_variables["Average function"] = self.average_func
        self.store_variables["Repetitions per model"] = self.n_repetitions
        self.store_variables["Repetitions for NN"] = self.NN_n_reps
        self.store_variables["Sigma (Uncertainty)"] = SIGMA
        for varname, value in save_value_of_variables.items():
            self.store_variables[varname] = value
        # Initiate variables for later data storage.
        self.df_all_score_results = pd.DataFrame()
        self.all_values_and_predictions = self.df_data
        self.all_domain_stats = pd.DataFrame()
        self.all_loss_curves = {}
        # Set internal variables.
        self.num_features = len(self.features)
        self.print_features = print_features
        self.print_targets = print_targets
        self.print_domain_score = print_domain_score
def get_train_and_test_data(self, CV_col, domain_col=None, sample_weights_col=None, metrics_sample_weights_col=None):
"""Return all train and test data."""
df = self.df_data
x = df[self.features].to_numpy()
y = df[self.targets].to_numpy()
test = df[CV_col] == 'test'
train = df[CV_col] == 'train'
x_train, x_test = x[train], x[test]
y_train, y_test = y[train], y[test]
if domain_col != None:
d_train = df[train][domain_col].to_numpy()
d_test = df[test][domain_col].to_numpy()
else:
d_train, d_test = np.zeros(sum(train)), np.zeros(sum(test))
if sample_weights_col != None:
w_train = df[train][sample_weights_col].to_numpy()
w_test = df[test][sample_weights_col].to_numpy()
else:
w_train, w_test = None, None
if metrics_sample_weights_col != None:
mw_train = df[train][metrics_sample_weights_col].to_numpy()
mw_test = df[test][metrics_sample_weights_col].to_numpy()
else:
mw_train, mw_test = None, None
return(x, y, x_train, x_test, y_train, y_test, d_train, d_test, w_train, w_test, mw_train, mw_test)
def all_score_results(self, all_train_scores, all_test_scores, modelname):
""" Write all scores to dataframe.
"""
df_scores = pd.DataFrame()
df_scores['Repetition'] = list(range(self.n_repetitions))
for target_idx, target_name in enumerate(self.targets):
for score_idx, score_name in enumerate(self.all_scores.keys()):
train_scorename = name_score_column(target_name, score_name, 'train')
test_scorename = name_score_column(target_name, score_name, 'test')
df_scores[train_scorename] = all_train_scores[:,score_idx, target_idx]
df_scores[test_scorename] = all_test_scores[:,score_idx, target_idx]
df_scores["Model"] = modelname
return(df_scores)
def unique_transformer(self):
"""Returns a unique transformer for all targets because sklearn doesn\'t suppourt multiple target scalers.
"""
all_target_scalers = [self.Column_Transformer[target] for target in self.targets]
# Check for uniqueness
for t1, t2 in itertools.product(all_target_scalers, all_target_scalers):
if not type(t1) == type(t2):
raise Warning('Sklearn doesn\'t support different scaling of differet targets.')
# Get unique target scaler.
target_scaler = all_target_scalers[0]
return target_scaler
    def train_regressor(self, model, x_train, y_train, d_train, w_train):
        """Train a regressor of a model with data and return the regressor.

        Wraps `model` in a sklearn Pipeline (per-column feature transformers
        from self.Column_Transformer) inside a CustomTransformedTargetRegressor
        (the unique target scaler), shuffles the training data, and fits.
        Sample weights and domain labels are forwarded to the model when it
        supports them.
        """
        # Shuffle all training arrays in unison before fitting.
        # NOTE(review): `self.sample_weights != None` compares the column
        # *name* (str or None), which works, but `is not None` would be safer.
        if self.sample_weights != None:
            x_train, y_train, d_train, w_train = sklearn.utils.shuffle(x_train, y_train, d_train, w_train)
        else:
            x_train, y_train, d_train = sklearn.utils.shuffle(x_train, y_train, d_train)
        # Transform and scale features and targets in a pipeline.
        transform_columns = []
        for idx, colname in enumerate(self.features):
            transformer = self.Column_Transformer[colname]
            # ColumnTransformer entry: (name, transformer, column indices).
            entry = (colname, transformer, [idx])
            transform_columns.append(entry)
        feature_transformer = ColumnTransformer(transform_columns)
        pipe = Pipeline([
                ('ColumnTransformer', feature_transformer),
                ('model', model)
                ])
        target_transformer = self.unique_transformer()
        regr = CustomTransformedTargetRegressor(regressor=pipe, transformer=target_transformer)
        # Fit with domain and sample weights if possible.
        train_kwargs = {}
        if hasattr(model, 'domain_col'):
            print('Fitting model with specified domains.')
            # Route the domain labels to the 'model' pipeline step.
            train_kwargs['model__d_train'] = d_train
        try:
            # First try fitting with sample weights; models that don't accept
            # them raise TypeError and are fitted without.
            regr.fit(x_train, y_train, model__sample_weight=w_train, **train_kwargs)
            if self.sample_weights != None:
                print('Fitted model with sample weights.')
        except TypeError:
            if self.sample_weights != None:
                print('Model doesn\'t support sample weights.')
            regr.fit(x_train, y_train, **train_kwargs)
        return(regr)
    def apply_model(self, modelname, init_model, outdir):
        """Applies the model to the data to get the r² and MAE score and writes them into the numerical output file.

        For every CV repetition: deep-copy `init_model`, fit it via
        train_regressor, score train/test predictions with all configured
        metrics, optionally persist the fitted model and the per-point
        predictions (including uncertainty bounds when the model provides
        them), then print/save the scores aggregated over repetitions.
        """
        # For debugging, to be able to always get the same output.
        # TODO: Remove this shit.
        # np.random.seed(self.random_seed)
        # random.seed(self.random_seed)
        # torch.manual_seed(self.random_seed)
        print(f"\n ### {modelname}:")
        n_scores = len(self.all_scores)
        # Shape: (n_repetitions, n_scores, n_targets).
        all_train_scores = np.zeros((self.n_repetitions, n_scores, self.n_targets))
        all_test_scores = np.zeros((self.n_repetitions, n_scores, self.n_targets))
        self.all_loss_curves[modelname] = []
        for i in range(self.n_repetitions):
            # Very important to have a new model for each run! Otherwise they might influence each other.
            model = copy.deepcopy(init_model)
            # (Tensorboard logging code removed; see repository history.)
            # Add NN_path to GP to train on transformed features of model `NN_path`.
            if hasattr(model, 'NN_path') and model.NN_path is not None:
                NN_path = model.NN_path
                if not os.path.exists(NN_path) and NN_path in self.all_models:
                    # Resolve a model *name* to the pickle saved in this run.
                    NN_path = get_modelpath(outdir, NN_path, i) + '.pkl'
                model.NN_path = NN_path
            # Get train and test data.
            try:
                domain_col = model.domain_col
                assert domain_col == self.domain
            except AttributeError:
                # Model does not use domains.
                domain_col = None
            CV_col = All_Data.All_Data.name_CV_col(i)
            x, y, x_train, x_test, y_train, y_test, d_train, d_test, w_train, w_test, mw_train, mw_test = self.get_train_and_test_data(CV_col=CV_col, domain_col=domain_col, sample_weights_col=self.sample_weights, metrics_sample_weights_col=self.metrics_sample_weights)
            # Train regressor in pipeline.
            regr = self.train_regressor(model, x_train, y_train, d_train, w_train)
            # Get predictions for scoring.
            y_pred_train = regr.predict(x_train)
            y_pred_test = regr.predict(x_test)
            assert y_train.shape == y_pred_train.shape
            assert y_test.shape == y_pred_test.shape
            # train/test_scores have shape (n_scores, n_targets) where n_scores is the number of score functions.
            train_scores = self.scores(y_train, y_pred_train, weights=mw_train)
            test_scores = self.scores(y_test, y_pred_test, weights=mw_test)
            all_train_scores[i,:,:] = train_scores
            all_test_scores[i,:,:] = test_scores
            # TODO: Make logging with tensorboard or something else.
            if self.save_models == True:
                Models().save(regr=regr, rundir=outdir, modelname=modelname, repetition=i)
            # Construct dataframe to save all true and predicted data together.
            if not self.is_optimizing_NN:
                # Get predictions and uncertainty if possible.
                # The std is a tuple of upper and lower bound because it will usually not be symmetrical due to the scaling.
                # The here called output `y_pred_std` is actually SIGMA * std (SIGMA is a global variable). This is so that one can dynamically change which degreee of uncertainty one wants. This is implemented in CustomTransformedTargetRegressor.
                try:
                    y_pred, y_pred_std = regr.predict(x, return_std=True)
                    scaled_unc, y_pred_std_lower, y_pred_std_upper = y_pred_std
                    assert (y_pred >= y_pred_std_lower).all() and (y_pred <= y_pred_std_upper).all(), 'Prediction not between uncertainty bounds.'
                except TypeError:
                    # Model does not support return_std.
                    y_pred = regr.predict(x)
                for idx in range(self.n_targets):
                    target = self.targets[idx]
                    preds = y_pred[:,idx]
                    colname = All_Data.All_Data.name_preds_col(modelname, i, target)
                    self.all_values_and_predictions[colname] = preds
                    # Add uncertainty to df if it exists.
                    try:
                        scaled_unc = scaled_unc[:,idx]
                        std_lower = y_pred_std_lower[:,idx]
                        std_upper = y_pred_std_upper[:,idx]
                        std_lower_colname = All_Data.All_Data.name_unc_col(modelname, i, target, kind='lower')
                        std_upper_colname = All_Data.All_Data.name_unc_col(modelname, i, target, kind='upper')
                        scaled_unc_colname = All_Data.All_Data.name_unc_col(modelname, i, target, kind='scaled_unc')
                        self.all_values_and_predictions[std_lower_colname] = std_lower
                        self.all_values_and_predictions[std_upper_colname] = std_upper
                        self.all_values_and_predictions[scaled_unc_colname] = scaled_unc
                    except UnboundLocalError:
                        # No uncertainty available (return_std unsupported above).
                        pass
                # Some sanity tests due to paranoia.
                assert_allclose(x, self.all_values_and_predictions[self.features].to_numpy())
                assert_allclose(x_train, x[self.all_values_and_predictions[CV_col] == 'train'])
                assert_allclose(y_pred_test, y_pred[self.all_values_and_predictions[CV_col] == 'test'])
        if not self.is_optimizing_NN:
            df_all_scores = self.all_score_results(all_train_scores, all_test_scores, modelname)
            self.df_all_score_results = self.df_all_score_results.append(df_all_scores)
        # Calculate averages and standard deviations over all repetitions.
        train_score = self.average_func(all_train_scores, axis=0)
        test_score = self.average_func(all_test_scores, axis=0)
        train_score_std = np.std(all_train_scores, axis=0)
        test_score_std = np.std(all_test_scores, axis=0)
        self.print_overall_performance_scores(train_score, train_score_std, test_score, test_score_std)
        if self.domain != None and self.print_domain_score:
            # Get scores and statistics for each domain
            df_domain_stats = self.get_domain_stats(modelname, self.domain)
            self.all_domain_stats = self.all_domain_stats.append(df_domain_stats)
            self.print_domain_stats(df_domain_stats, self.domain)
        # Write score output to file
        self.save_numerical_output(outdir, modelname, train_score, test_score, train_score_std, test_score_std)
        return
    def train(self, all_models, outdir):
        """Train and evaluate every model in `all_models` (dict name -> model)
        and save all result files to `outdir`.

        Writes: the run arguments (yaml), All_values_and_predictions.csv,
        All_scores.csv, optionally Domain_statistics.csv and feature
        importances, plus copies of the files in self.copy_files.
        """
        print(" ### Start training")
        self.all_models = all_models
        # Make directories for the output.
        outdir = self.prepare_directories(outdir)
        # Copy specified files to outdir.
        for file in self.copy_files:
            file_in_new_dir = os.path.join(outdir, file)
            copyfile(file, file_in_new_dir)
        # Store all arguments.
        with open(outdir + "/arguments", 'w') as f:
            yaml.dump(self.store_variables, f)
        # Print input information.
        print(" --- Data:")
        if self.print_features:
            print(" Features: {}".format(self.features))
        if self.print_targets:
            print(" Targets: {}".format(self.targets))
        print(" Num features: {}".format(len(self.features)))
        print(" Train data size: {}".format(self.train_lengths))
        print(" Test data size: {}".format(self.test_lengths))
        print(" Num repetitions: {}".format(self.n_repetitions))
        # =============================================================================
        # Train and evaluate all given models.
        # =============================================================================
        for modelname, model in all_models.items():
            starttime = datetime.datetime.now()
            self.apply_model(modelname, model, outdir)
            duration = datetime.datetime.now() - starttime
            print(f'Training time of {self.n_repetitions} instances of {modelname}: {duration}')
        # Save data
        if not self.is_optimizing_NN:
            all_models_names = list(all_models.keys())
            if self.save_all_values_and_predictions:
                All_Data_file = os.path.join(outdir, "All_values_and_predictions.csv")
                All_Data.save_All_Data(df=self.all_values_and_predictions,
                                       outpath=All_Data_file,
                                       targets=self.targets,
                                       n_repetitions=self.n_repetitions,
                                       features=self.features,
                                       domains=self.domain,
                                       models=all_models_names,
                                       sample_weights=self.sample_weights,
                                       SIGMA=SIGMA,
                                       )
            All_scores_file = os.path.join(outdir, "All_scores.csv")
            All_Scores.save_All_scores(df=self.df_all_score_results,
                                       outpath=All_scores_file,
                                       targets=self.targets,
                                       scorenames=self.all_scorenames,
                                       CVs=['train', 'test'],
                                       models=all_models_names
                                       )
            if len(self.all_domain_stats) > 0:
                Domain_statistics_file = os.path.join(outdir, "Domain_statistics.csv")
                save_Domain_statistics(df=self.all_domain_stats,
                                       outpath=Domain_statistics_file,
                                       domains=self.domain,
                                       targets=self.targets,
                                       scorenames=self.all_scorenames,
                                       models=all_models_names
                                       )
            try:
                # Try to get all feature importances
                if self.save_models:
                    importances = FI.feature_importances_from_models(
                        rundir=outdir,
                        features=self.features,
                        modelnames=all_models_names,
                        repetitions=self.all_n_repetitions
                    )
                    # Save feature importances
                    feat_imps_file = os.path.join(outdir, FI.data_name)
                    FI.save_Feature_Importances(
                        df=importances,
                        outpath=feat_imps_file,
                        features=self.features,
                        modelnames=all_models_names,
                        repetitions=self.all_n_repetitions
                    )
                else:
                    print('Could not try to get feature importances because no model saved.')
            except AttributeError:
                pass # No models with feature importances found.
            print("Successfully saved all data.")
        return()
def prepare_directories(self, outdir):
"""Make directories for the output."""
if not os.path.exists(outdir):
raise ValueError('outdir doesn\'t exist!')
if (not os.path.exists("%s/models"%(outdir))) and self.save_models:
os.makedirs("%s/models"%(outdir))
return(outdir)
def print_overall_performance_scores(self, train_scores, train_scores_std, test_scores, test_scores_std):
"""Prints overall_scores in a nice format. The uncertainty is noted so that it is ± half of the std.
"""
for target_idx, target_name in enumerate(self.targets):
print(f' --- Target: {target_name}')
for score_idx, score_name in enumerate(self.all_scores.keys()):
train_score = train_scores[score_idx][target_idx]
train_score_std = train_scores_std[score_idx][target_idx]
test_score = test_scores[score_idx][target_idx]
test_score_std = test_scores_std[score_idx][target_idx]
print(f' --- {score_name}:\tTraining: {train_score:.3f} ± {train_score_std:.3f}\t\tTesting: {test_score:.3f} ± {test_score_std:.3f}')
return
def scores(self, all_y_true, all_y_pred, weights=None):
"""Returns an array with shape (num_scores, num_targets) with the scores calculated with the functions in all_scores for every target in the second dimension of all_y_true.
"""
assert all_y_true.ndim == 2
all_y_true = np.array(all_y_true)
all_y_pred = np.array(all_y_pred)
num_targets = all_y_true.shape[1]
num_data = all_y_true.shape[0]
if not (weights is None):
assert len(weights) == num_data
num_scores = len(self.all_scores)
# Calculate scores
scores = np.zeros((num_scores, num_targets))
for score_idx, name in enumerate(self.all_scores.keys()):
func = self.all_scores[name]
for target_idx, (y_true, y_pred) in enumerate(zip(all_y_true.T, all_y_pred.T)):
score = func(y_true, y_pred, sample_weight=weights)
scores[score_idx, target_idx] = score
return np.array(scores)
def result_statistics(self, y_true, y_pred, targetname, weights=None):
"""Calculates several statistical properties of y_true and y_pred.
"""
y_true, y_pred = np.asanyarray(y_true), np.asanyarray(y_pred)
if not (weights is None):
assert len(y_true) == len(y_pred) and len(y_pred) == len(weights)
stats = {}
for score_name, score_fn in self.all_scores.items():
score = score_fn(y_true, y_pred, sample_weight=weights)
stats[f'{score_name}_{targetname}'] = score
stats[f'mean_{targetname}_true'] = y_true.mean()
stats[f'std_{targetname}_true'] = y_true.std()
stats[f'mean_{targetname}_pred'] = y_pred.mean()
stats[f'std_{targetname}_pred'] = y_pred.std()
return(stats)
    def get_domain_stats(self, modelname, domain_col):
        """Returns a df with the scores for each domain.

        For every repetition in self.all_n_repetitions and every unique value
        of `domain_col` in self.all_values_and_predictions, the rows are split
        by their CV label (the column named by All_Data.name_CV_col) and
        scored per target with result_statistics(). The result has one row per
        (repetition, domain, CV split), carrying the descriptive columns
        (repetition, model name, split, domain) plus all per-target statistics
        and the split size.
        """
        domains = pd.unique(self.all_values_and_predictions[domain_col])
        stats = {}
        counter = 0  # running row index of the output dataframe
        for n, domain in itertools.product(self.all_n_repetitions, domains):
            is_domain = self.all_values_and_predictions[domain_col] == domain
            df_domain = self.all_values_and_predictions[is_domain]
            CV_col = All_Data.All_Data.name_CV_col(n)
            CVs = sorted(pd.unique(df_domain[CV_col]))
            for CV in CVs:
                df = df_domain[df_domain[CV_col] == CV]
                # Append descriptive data for this row.
                stats[counter] = {
                    N_REPETITION: n,
                    MODEL: modelname,
                    TEST_OR_TRAIN: CV,
                    domain_col: domain
                }
                # Calculate statistics for each target.
                for target in self.targets:
                    col = All_Data.All_Data.name_preds_col(modelname, n, target)
                    y_pred = df[col]
                    y_true = df[target]
                    # Optional per-sample weights taken from a dataframe column.
                    weights = None if self.metrics_sample_weights is None else df[self.metrics_sample_weights]
                    interesting_stats = self.result_statistics(y_true, y_pred, target, weights=weights)
                    stats[counter].update(interesting_stats)
                stats[counter].update({SIZE: len(df)})
                counter += 1
        df_domain_stats = pd.DataFrame.from_dict(data=stats, orient='index')
        return(df_domain_stats)
    def print_domain_stats(self, df_domain_stats, domain_col):
        """Prints test scores and other stats per domain.

        Keeps only the test rows, averages them over repetitions per
        (model, domain), sorts domains by size descending and prints one
        aligned row per score and per target statistic via print_row.
        """
        # Print only test results otherwise it would be too much.
        df_domain_stats = df_domain_stats[df_domain_stats['test_or_train'] == 'test']
        # Average over multiple runs to have the final test score of each domain.
        df_domain_stats = df_domain_stats.groupby([MODEL, domain_col]).mean().reset_index()
        # Print domains with many datapoints first.
        df_domain_stats = df_domain_stats.sort_values(by='Size', ascending=False)
        domains = df_domain_stats[domain_col].tolist()
        print('\n')
        print_row(domains, 'Domain')
        # Print scores of targets.
        for score in self.all_scores.keys():
            for target in self.targets:
                score_name = f'{score}_{target}'
                vals = df_domain_stats[score_name]
                print_row(vals, score_name)
        # Print statistics of targets.
        for target in self.targets:
            for end in ['true', 'pred']:
                name = f'{target}_{end}'
                vals = df_domain_stats[f'mean_{target}_{end}'].tolist()
                stds = df_domain_stats[f'std_{target}_{end}'].tolist()
                # print_row is given the stds so each mean is shown with its spread.
                print_row(vals, name, stds)
def save_numerical_output(self, outdir, modelname, train_scores, test_scores, train_scores_std, test_scores_std):
"""Write formatted output scores to file.
"""
with open(outdir + "/Numerical results", "a+") as f:
f.write("\n ### {}:\n".format(modelname))
with redirect_stdout(f):
self.print_overall_performance_scores(train_scores, train_scores_std, test_scores, test_scores_std)
f.close()
return()
def plot_feature_correlations(self, outdir, x, y, x_scaler, y_scaler):
"""Plot some feature correlations."""
print("Plotting feature correlations...")
if not os.path.exists("%s/feature_correlation"%(outdir)):
os.makedirs("%s/feature_correlation"%(outdir))
for idx in range(len(self.features)):
#print("do best feature %i"%(idx))
plt.figure()
feature_name=self.features[idx]
if "pH" not in feature_name and "cross" not in feature_name:
plt.scatter(x_scaler.inverse_transform(x)[:,idx],y_scaler.inverse_transform(y))
else:
xs_unscaled_here=x_scaler.inverse_transform(x)[:,idx]
xs_set=[xs_unscaled_here[0]]
for value in xs_unscaled_here:
distances=[abs(value-v2) for v2 in xs_set]
if max(distances)>0.01:
xs_set.append(value)
indeces=[]
for v2 in xs_set:
indeces.append([])
for v_index, value in enumerate(xs_unscaled_here):
d=abs(value-v2)
if d<0.01:
indeces[-1].append(v_index)
#x_min=np.min(x[:,idx])
#x_max=np.max(x[:,idx])
#x_mean=0.5*(x_min+x_max)
#indeces_low=np.where(x[:,idx]<x_mean)[0]
#indeces_high=np.where(x[:,idx]>x_mean)[0]
for counter,indeces_here in enumerate(indeces):
parts=plt.violinplot([y_scaler.inverse_transform(y)[indeces_here].T[0]], positions=[float(xs_set[counter])], vert=True, widths=0.18, showmeans=False, showextrema=True, showmedians=False)
for pcidx, pc in enumerate(parts['bodies']):
pc.set_facecolor("C%i"%(counter))
pc.set_edgecolor('k')
pc.set_alpha(0.7)
parts['cbars'].set_color("k")
parts['cmaxes'].set_color("k")
parts['cmins'].set_color("k")
#parts=plt.violinplot([y_scaler.inverse_transform(y)[indeces_high].T[0]], positions=[1.0], vert=True, widths=0.18, showmeans=False, showextrema=True, showmedians=False)
#for pcidx, pc in enumerate(parts['bodies']):
# pc.set_facecolor("C2")
# pc.set_edgecolor('k')
# pc.set_alpha(0.7)
#parts['cbars'].set_color("k")
#parts['cmaxes'].set_color("k")
#parts['cmins'].set_color("k")
#if "pH" in feature_name:
# plt.xticks(xs_set, ["pH=1","pH=7"])
# #plt.xlim([-0.7,1.7])
#num_off=len(indeces_low)
#num_on=len(indeces_high)
#plt.text(0.2,14.0,"%i"%(num_off))
#plt.text(1.2,14.0,"%i"%(num_on))
# r2 = sklearn.metrics.r2_score(y,x[:,idx])
#plt.ylim([7.0,13.0])
#plt.xlabel("%s (r$^2$ = %.3f)"%(feature_name, r2), fontname=fontname)
plt.xlabel("%s"%(feature_name))
plt.ylabel("Radius difference [px]")
plt.savefig("%s/feature_correlation/best_20_%s.png"%(outdir, feature_name), dpi=300)
plt.close()
return()
def save_scatter_plots(self, model_short, y_train, y_test, y_pred_train, y_pred_test, r2_train, r2_test, outdir, colors, index=None):
"""Make scatter plots of measured vs predicted data."""
plt.figure()
plt.scatter(y_train, y_pred_train, marker="o", c=colors[0], label="Training: r$^2$ = %.3f"%(r2_train))
plt.scatter(y_test, y_pred_test, marker="o", c=colors[1], label="Testing: r$^2$ = %.3f"%(r2_test))
plt.xlabel("Measured normalized radius (%)")
plt.ylabel("Predicted normalized radius (%)")
plt.legend(loc="upper left")
if index != None:
savepath = "%s/scatter_plots/full_data_%s_%s.png"%(outdir, model_short, index)
else:
savepath = "%s/scatter_plots/full_data_%s.png"%(outdir, model_short)
plt.savefig(savepath,dpi=300)
plt.close()
return()
    def make_result_df(self, train, test, y_pred_train, y_pred_test, modelname, i, x_train, y_train, x_test, y_test):
        """Returns a dataframe with all the initial data points and the predictions on them.

        `train`/`test` are positional index arrays into self.df_data; the
        asserts verify that the supplied x/y arrays really correspond to those
        rows before predictions are attached. True target columns are renamed
        to '<target>_true' and predictions added as '<target>_pred', together
        with bookkeeping columns for split, model name and random instance `i`.
        """
        # Names of additional columns.
        test_train_colname = 'test_or_train'
        model_colname = 'Model'
        rand_instance_colname = 'rand_instance'
        # Get data and assert that it is in the correct order.
        df = self.df_data
        train_index = df.index[train]
        test_index = df.index[test]
        train_entries = df.index.isin(train_index)
        test_entries = df.index.isin(test_index)
        assert np.allclose(df[train_entries][self.features].to_numpy(), x_train)
        assert np.allclose(df[test_entries][self.features].to_numpy(), x_test)
        assert np.allclose(df[train_entries][self.targets].to_numpy(), y_train)
        assert np.allclose(df[test_entries][self.targets].to_numpy(), y_test)
        # Get true and predicted target names and rename df accordingly.
        pred_target_names = []
        true_target_names = []
        for target in self.targets:
            df = df.rename(columns={target: target+'_true'})
            true_target_names.append(target+'_true')
            pred_target_names.append(target+'_pred')
        # Build dataframes of predictions of test and train data.
        train_pred = pd.DataFrame(
            data=y_pred_train,
            index=train_index,
            columns=pred_target_names
        )
        train_pred[test_train_colname] = 'train'
        test_pred = pd.DataFrame(
            data=y_pred_test,
            index=test_index,
            columns=pred_target_names
        )
        test_pred[test_train_colname] = 'test'
        # Merge predictions and initial data (join is by index, hence the
        # requirement that df has a default RangeIndex).
        assert df.equals(df.reset_index(drop=True))
        # NOTE(review): DataFrame.append is deprecated and removed in
        # pandas >= 2.0; pd.concat([train_pred, test_pred]) is the replacement.
        predictions = train_pred.append(test_pred)
        df = df.join(predictions)
        df = movecol(df, cols=pred_target_names, to=true_target_names[-1])
        # Add name of used model and random instance.
        df[model_colname] = modelname
        df[rand_instance_colname] = i
        new_cols = [rand_instance_colname, model_colname, test_train_colname]
        column_order = new_cols + [col for col in df.columns.tolist() if not col in new_cols]
        df = df[column_order]
        return(df)
def custom_median(self, array):
"""Calculates the median of an array and make sure to return an element of the array even if it has even length.
"""
sorted_array = np.sort(array)
length = len(array)
if length%2 == 1:
idx = int(length/2 - 0.5)
else:
average = array.mean()
possible_indices = [int(length/2), int(length/2-1)]
if abs(sorted_array[possible_indices[0]] - average) <= abs(sorted_array[possible_indices[1]] - average):
idx = possible_indices[0]
else:
idx = possible_indices[1]
median = sorted_array[idx]
return(median)
    def standard_models(self, hparams, n_features, n_targets, n_domains):
        """Definitions of models that I regularly use. Rather used as a library to quickly look up and copy paste models than as a function.

        Intentionally a no-op: the model definitions (linear regression, MLP,
        random forest, gradient boosting, Gaussian process and RGM variants)
        live in the commented-out reference code directly below this method.
        """
        pass
# =============================================================================
# ###################
# # LINEAR REGRESSION
# ###################
# Linear_Regression = sklearn.linear_model.LinearRegression()
#
# ################
# # Neural Network
# ################
# # Set some hyperparameter variables for the NN.
# net_dims = net_pattern(
# hparams['nn_layers'],
# hparams['nn_base_dim'],
# hparams['nn_end_size']
# )
# Neural_Network = MLPRegressor(
# hidden_layer_sizes=net_dims,
# activation=hparams['nn_act'],
# solver="adam",
# max_iter=hparams["n_epochs"],
# early_stopping=False,
# # validation_fraction=0.1,
# alpha=0,#hparams["nn_l2"],
# # momentum=0,
# # learning_rate="invscaling",
# learning_rate="constant",
# batch_size=hparams['nn_batch_size'],
# learning_rate_init=hparams["learning_rate"],
# n_iter_no_change=9999#hparams["nn_patience"]
# )
#
#
# ###############
# # Random Forest
# ###############
# n_trees = hparams["RF_n_estimators"]
# Random_Forest = RandomForestRegressor(n_estimators=n_trees)
#
#
# ############################
# # Gradient Boosting
# ############################
# n_trees = hparams["GB_n_estimators"]
# Gradient_Boosting = GradientBoostingRegressor(n_estimators=n_trees)
#
#
# ############################
# # Gaussian Process
# ############################
# kernel_scale = np.ones(n_features)
# noise = hparams["GP_alpha"]
# Gaussian_Process = GaussianProcessRegressor(kernel=RBF_sk(length_scale=kernel_scale), alpha=noise)
#
#
# ############################
# # Regret Minimization Network with sklearn
# ############################
# input_layer_size = n_features
# net_dims = net_pattern(
# hparams['nn_layers'],
# hparams['nn_base_dim'],
# hparams['nn_end_size']
# )
# module = RGM_sk(input_layer_size, net_dims, n_targets)
# RGM_sk_model = NeuralNetRegressor(
# module,
# optimizer=torch.optim.Adam,
# lr=hparams["learning_rate"],
# max_epochs=hparams["n_epochs"],
# batch_size=hparams['nn_batch_size'],
# train_split=None,
# callbacks='disable'
# )
#
#
# def get_featurizer(input_layer_size, hidden_layer_sizes, activation):
# """Returns the first part of the RGM, the featurizer or representation NN.
# """
# layers = []
# for i in range(len(hidden_layer_sizes)):
# if i == 0:
# in_size = input_layer_size
# else:
# in_size = hidden_layer_sizes[i-1]
# out_size = hidden_layer_sizes[i]
# layers.append(nn.Linear(in_size, out_size))
# if activation == 'relu':
# activation_fn = nn.ReLU()
# else:
# raise ValueError('Activation function not known.')
# layers.append(activation_fn)
# layers = tuple(layers)
# featurizer = nn.Sequential(*layers)
# return(featurizer)
#
# ############################
# # Regret Minimization Network
# ############################
# input_layer_size = n_features
# net_dims = net_pattern(
# hparams['nn_layers'],
# hparams['nn_base_dim'],
# hparams['nn_end_size']
# )
# featurizer = get_featurizer(input_layer_size, net_dims, 'relu')
#
# # Set options for RGM.
# RGM_args = namedtuple('RGM_args', ['linear', 'hidden_size', 'output_size', 'num_domains', 'rgm_e', 'erm_e', 'holdout_e', 'detach_classifier', 'oracle'])
# RGM_args.linear = True
# RGM_args.hidden_size = net_dims[-1]
# RGM_args.output_size = n_targets
# RGM_args.num_domains = n_domains
# RGM_args.rgm_e = 1
# RGM_args.erm_e = 1
# RGM_args.holdout_e = 1
# RGM_args.detach_classifier = True
# RGM_args.oracle = False
#
# RGM = RGM_sklearn(hidden_layer_sizes=net_dims,
# activation='relu',
# solver='adam',
# max_iter=hparams["n_epochs"],
# batch_size=hparams['nn_batch_size'],
# learning_rate_init=hparams["learning_rate"],
# featurizer=featurizer,
# batch_mode='Conserve_ratio',
# RGM_args=RGM_args
# )
#
# all_models = {
# "Linear_Regression": Linear_Regression,
# "Neural_Network": Neural_Network,
# "Random_Forest": Random_Forest,
# "Gradient_Boosting": Gradient_Boosting,
# "Gaussian_Process": Gaussian_Process,
# "RGM_sk": RGM_sk_model,
# "RGM": RGM
# }
# return(all_models)
# =============================================================================
class CustomTransformedTargetRegressor(TransformedTargetRegressor):
    # Extends sklearn's TransformedTargetRegressor so that `return_std=True`
    # (as offered e.g. by GaussianProcessRegressor) is forwarded to the base
    # regressor and the returned uncertainty is mapped back through the target
    # transformer as well.
    def predict(self, X, **predict_params):
        """Predict using the base regressor, applying inverse.
        The regressor is used to predict and the ``inverse_func`` or
        ``inverse_transform`` is applied before returning the prediction.
        Parameters
        """
        check_is_fitted(self)
        pred = self.regressor_.predict(X, **predict_params)
        returns_std = 'return_std' in predict_params.keys() and predict_params['return_std'] == True
        if returns_std:
            # With return_std=True the base regressor returns (mean, std).
            pred, std = pred
            # Use the specified degree of uncertainty throughout all scripts.
            std = std * SIGMA
            assert pred.shape == std.shape, 'Maybe you are trying to fit a GP to several targets or so? This doesn\'t work. Anyway, there\'s a shape mismatch.'
        if pred.ndim == 1:
            pred = pred.reshape(-1, 1)
        pred_trans = self.transformer_.inverse_transform(pred)
        if returns_std:
            if std.ndim == 1:
                std = std.reshape(-1, 1)
            # Map the +-std interval through the (possibly nonlinear) target
            # transformer to obtain lower/upper confidence bounds.
            lower_conf_trans, upper_conf_trans = inverse_transform_std(pred, std, self.transformer_)
            # Squeeze if prediction will be squeezed as well.
            if (
                self._training_dim == 1
                and lower_conf_trans.ndim == 2
                and lower_conf_trans.shape[1] == 1
            ):
                lower_conf_trans = lower_conf_trans.squeeze(axis=1)
                upper_conf_trans = upper_conf_trans.squeeze(axis=1)
        # Mirror sklearn's behavior of squeezing 1D-trained targets.
        if (
            self._training_dim == 1
            and pred_trans.ndim == 2
            and pred_trans.shape[1] == 1
        ):
            pred_trans = pred_trans.squeeze(axis=1)
        if returns_std:
            assert len(set([pred_trans.shape, std.shape, lower_conf_trans.shape, upper_conf_trans.shape])) == 1, 'Prediction and uncertainty have different shapes.'
            return pred_trans, (std, lower_conf_trans, upper_conf_trans)
        else:
            return pred_trans
# Ideas for refactoring:
# Instead of giving n_repetitions, directly give a list with the CV column so that for repeatability we can have CV columns for several things in the dataset. Write the columns to metadata of All_Data.
# Make it possible to have different features/targets per model?
# Make it possible to calculate scores more flexible and calculate only some scores for each target (and maybe model?).
# Train loop over all given models:
# Function: Train all repetitions, save models.
# Next functions: Either take passed on models (in case model can't be saved like tensorflow model) or read in saved models (for later analysis or if memory is not big enough)
# Function: Calculate df All_Data.
# Function: Calculate all train and test scores from All_Data.
# For plotting function also either pass on all models or read in saved models.
# cd into the correct directory at the beginning of Apply_ML_models and then cd into the previous directory again.
# In Apply_ML_models add single flag to debug and then set settings accordingly. | 61,052 | 46.697656 | 418 | py |
3DSC | 3DSC-main/superconductors_3D/machine_learning/Algorithms/RGM_Jin_original.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import math
import numpy as np
import random
class GradientReversal(torch.autograd.Function):
    """Identity in the forward pass; scales the gradient by -beta on the way back.

    Used to train the per-domain 'oracle' heads adversarially against the
    shared representation.
    """
    beta = 1.

    @staticmethod
    def forward(ctx, x):
        # Identity; view_as keeps the autograd bookkeeping intact.
        return x.view_as(x)

    @staticmethod
    def backward(ctx, grad_output):
        # Reverse (and scale) the gradient flowing into the representation.
        return -GradientReversal.beta * grad_output
class RGM(nn.Module):
    """Regret Minimization module (original two-domain variant).

    Holds a shared featurizer phi with a global classifier f, plus per-domain
    extrapolator heads f_k (trained only through the holdout term on detached
    features), frozen copies copy_f_k (used so the regret term only trains
    phi), and oracle heads g_k (trained adversarially via gradient reversal).
    forward() returns the combined ERM + holdout + rgm_e * regret loss.
    """
    def __init__(self, featurizer, linear, hidden_size, output_size, num_domains, rgm_e, erm_e, holdout_e, detach_classifier, oracle, loss_forward, num_train_domains):
        # Custom: Replaced args.(...) by the regarding variable.
        # NOTE(review): erm_e, holdout_e, detach_classifier, oracle and
        # num_train_domains are accepted but never used in this variant.
        super(RGM, self).__init__()
        self.featurizer = featurizer
        if linear:
            # Linear heads: one shared classifier plus one
            # (frozen copy, extrapolator, oracle) triple per domain.
            self.classifier = nn.Linear(hidden_size, output_size)
            self.copy_f_k = nn.ModuleList(
                [nn.Linear(hidden_size, output_size).requires_grad_(False) for _ in range(num_domains)]
            )
            self.f_k = nn.ModuleList(
                [nn.Linear(hidden_size, output_size) for _ in range(num_domains)]
            )
            self.g_k = nn.ModuleList(
                [nn.Linear(hidden_size, output_size) for _ in range(num_domains)]
            )
        else:
            # Two-layer MLP heads with the same roles as in the linear branch.
            self.classifier = nn.Sequential(
                nn.Linear(hidden_size, hidden_size),
                nn.ReLU(),
                nn.Linear(hidden_size, output_size),
            )
            self.copy_f_k = nn.ModuleList([
                nn.Sequential(
                    nn.Linear(hidden_size, hidden_size),
                    nn.ReLU(),
                    nn.Linear(hidden_size, output_size),
                ).requires_grad_(False) for _ in range(num_domains)
            ])
            self.f_k = nn.ModuleList([
                nn.Sequential(
                    nn.Linear(hidden_size, hidden_size),
                    nn.ReLU(),
                    nn.Linear(hidden_size, output_size),
                ) for _ in range(num_domains)
            ])
            self.g_k = nn.ModuleList([
                nn.Sequential(
                    nn.Linear(hidden_size, hidden_size),
                    nn.ReLU(),
                    nn.Linear(hidden_size, output_size),
                ) for _ in range(num_domains)
            ])
        self.network = nn.Sequential(self.featurizer, self.classifier)
        self.loss_forward = loss_forward
        self.rgm_e = rgm_e
        self.num_domains = num_domains
        self.register_buffer('update_count', torch.tensor([0]))
        self.loss_curve_iter = {} # Custom
        self.train_representation = True # Custom
        self.train_classifier = True # Custom
    # def loss_forward(self, preds, batch_y, mask): # Custom
    #     pred_loss = self.loss_func(preds, batch_y) * mask
    #     return pred_loss.sum() / mask.sum()
    def forward(self, batches):
        """Compute the RGM training loss from per-domain batches.

        Each element of `batches` is a (batch_x, batch_y, batch_d, mask)
        tuple for one domain; this variant is hardcoded to two domains.
        """
        assert len(batches) == 2
        # Sync the frozen copies so the regret term does not train f_k,
        # only the representation.
        for k in range(self.num_domains):
            self.copy_f_k[k].load_state_dict(self.f_k[k].state_dict())
        erm_loss = 0
        all_phis = []
        for batch_x, batch_y, batch_d, mask in batches:
            phi_x = self.featurizer(batch_x)
            all_phis.append(phi_x)
            preds = self.classifier(phi_x)
            erm_loss = erm_loss + self.loss_forward(preds, batch_y, mask)
        regret = 0
        for k in range(self.num_domains):
            _, batch_y, _, mask = batches[k]
            phi_k = all_phis[k]
            preds = self.copy_f_k[k](phi_k)
            # Gradient reversal makes the oracle term adversarial w.r.t. phi.
            oracle_preds = self.g_k[k](GradientReversal.apply(phi_k))
            regret = regret + self.loss_forward(preds, batch_y, mask) + self.loss_forward(oracle_preds, batch_y, mask)
        holdout_loss = 0
        for k in range(self.num_domains):
            _, batch_y, _, mask = batches[1 - k] # hardcode: 2 domains
            phi_x = all_phis[1 - k].detach() # phi does not help f_{-e}
            preds = self.f_k[k](phi_x)
            holdout_loss = holdout_loss + self.loss_forward(preds, batch_y, mask)
        loss = erm_loss + holdout_loss + self.rgm_e * regret
        return loss / self.num_domains
| 4,194 | 37.842593 | 167 | py |
3DSC | 3DSC-main/superconductors_3D/machine_learning/Algorithms/RGM_Jin_210519_with_train_seperately_and_all_train_separately.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.jit import TracerWarning
import math
import numpy as np
import random
from copy import deepcopy
import warnings
class GradientReversal(torch.autograd.Function):
    """Identity in the forward pass; scales the gradient by -beta on the way back.

    Used to train the per-domain 'oracle' heads adversarially against the
    shared representation.
    """
    beta = 1.

    @staticmethod
    def forward(ctx, x):
        # Identity; view_as keeps the autograd bookkeeping intact.
        return x.view_as(x)

    @staticmethod
    def backward(ctx, grad_output):
        # Reverse (and scale) the gradient flowing into the representation.
        return -GradientReversal.beta * grad_output
class RGM(nn.Module):
    """Regret Minimization module (variant with separate train/eval switches).

    Deep-copies the supplied featurizer and classifier, builds per-domain
    extrapolator heads f_k, oracle heads g_k and frozen weight copies, and
    exposes boolean switches (train_representation, train_classifier,
    train_only_classifiers, train_only_representation) that control which
    parts receive gradients. forward() returns the weighted
    ERM + holdout + regret loss and logs per-iteration loss components.
    """
    def __init__(self, featurizer, classifier, num_domains, rgm_e, erm_e, holdout_e, detach_classifier, oracle, loss_forward, num_train_domains):
        super(RGM, self).__init__()
        # Deepcopy to not influence the original featurizer and classifier.
        self.featurizer = deepcopy(featurizer)
        self.classifier = deepcopy(classifier)
        # extrapolator f_{-e}
        self.f_k = nn.ModuleList(deepcopy(classifier) for _ in range(num_domains))
        # oracle f_{e}
        self.g_k = nn.ModuleList(deepcopy(classifier) for _ in range(num_domains))
        # Frozen copies used so the regret term trains only the representation.
        self.copy_f_k = nn.ModuleList(deepcopy(classifier) for _ in range(num_domains))
        for copy_f_k in self.copy_f_k:
            copy_f_k.requires_grad_(False)
        self.copy_g_k = nn.ModuleList(deepcopy(classifier) for _ in range(num_domains))
        for copy_g_k in self.copy_g_k:
            copy_g_k.requires_grad_(False)
        self.copy_classifier = deepcopy(self.classifier)
        self.copy_classifier.requires_grad_(False)
        self.network = nn.Sequential(self.featurizer, self.classifier)
        self.init_network = deepcopy(self.network)
        self.ensemble = nn.ModuleList([nn.Sequential(self.featurizer, self.f_k[k]) for k in range(num_domains)])
        self.rgm_e = rgm_e
        self.erm_e = erm_e
        self.holdout_e = holdout_e
        self.detach_classifier = detach_classifier
        self.oracle = oracle
        self.num_domains = num_domains
        # Default: each auxiliary classifier trains on all but one domain.
        if num_train_domains == None:
            self.num_train_domains = num_domains - 1
        else:
            self.num_train_domains = num_train_domains
        if self.num_train_domains > num_domains:
            raise ValueError('num_train_domains must be <= num_domains.')
        self.train_representation = True
        self.train_classifier = True
        self.train_only_classifiers = True
        self.train_only_representation = True
        self.loss_forward = loss_forward
        self.register_buffer('update_count', torch.tensor([0]))
        self.log_losses = True
        self.loss_curve_iter = {}
        self.loss_curve_iter[f'extrapol'] = []
        self.loss_curve_iter[f'oracle'] = []
        self.loss_curve_iter['erm'] = []
        self.loss_curve_iter['holdout'] = []
        self.loss_curve_iter['regret'] = []
    def get_train_and_extrapolation_domains(self):
        """Returns all train and extrapolation domain indices (second dimension) for all auxiliary classifiers (first dimension).
        """
        if self.num_train_domains == self.num_domains - 1:
            # Leave-one-out: classifier k extrapolates to domain k.
            all_train_domains = [[i for i in range(self.num_domains) if i != k] for k in range(self.num_domains)]
            all_extra_domains = [[k] for k in range(self.num_domains)]
        elif self.num_train_domains == 1:
            # Single-domain training: classifier k extrapolates to all others.
            all_train_domains = [[k] for k in range(self.num_domains)]
            all_extra_domains = [[i for i in range(self.num_domains) if i != k] for k in range(self.num_domains)]
        else:
            raise Warning('num_train_domains other than 1 or num_domains not implemented yet.')
        return(all_train_domains, all_extra_domains)
    def forward(self, batches):
        """Compute the RGM loss; `batches` holds one (x, y, d, mask) tuple per domain."""
        warnings.filterwarnings("ignore", category=TracerWarning)
        # Check that each batch in batches is from one domain.
        for _, _, batch_d, _ in batches:
            assert len(torch.unique(batch_d)) == 1
        assert len(np.unique([torch.unique(batch_d).item() for _, _, batch_d, _ in batches])) == self.num_domains
        # Sync frozen copies with the current trainable head weights.
        for k in range(self.num_domains):
            self.copy_f_k[k].load_state_dict(self.f_k[k].state_dict())
            self.copy_g_k[k].load_state_dict(self.g_k[k].state_dict())
        self.copy_classifier.load_state_dict(self.classifier.state_dict())
        # Evaluate representation
        all_phis = []
        for batch_x, _, _, _ in batches:
            phi_x = self.featurizer(batch_x)
            phi_x = phi_x if self.train_only_representation else phi_x.detach()
            all_phis.append(phi_x)
            if self.train_only_representation:
                assert phi_x.grad_fn != None
            else:
                assert phi_x.grad_fn == None
        # Compute L(f◦phi)
        erm_loss = torch.tensor(0)
        if self.train_classifier:
            for k, (_, batch_y, batch_d, mask) in enumerate(batches):
                phi_x = all_phis[k]
                if self.detach_classifier:
                    phi_x = phi_x.detach()
                if self.train_only_representation and not self.detach_classifier:
                    assert phi_x.grad_fn != None
                else:
                    assert phi_x.grad_fn == None
                if self.train_only_classifiers:
                    preds = self.classifier(phi_x)
                    assert preds.grad_fn != None
                else:
                    preds = self.copy_classifier(phi_x)
                    if self.train_only_representation and not self.detach_classifier:
                        assert preds.grad_fn != None
                    else:
                        assert preds.grad_fn == None
                norm = self.num_domains
                erm_loss = erm_loss + self.loss_forward(preds, batch_y, mask) / norm
        # Set train and extrapolation domain indices for each of the auxiliary classifiers.
        all_train_domains, all_extra_domains = self.get_train_and_extrapolation_domains()
        # Compute regret R^e(phi).
        extra_loss = torch.tensor(0)
        oracle_loss = torch.tensor(0)
        if self.num_domains > 1 and self.train_representation:
            for k, extra_domains in enumerate(all_extra_domains): # Loop classifiers
                for j in extra_domains: # Loop domains
                    _, batch_y, batch_d, mask = batches[j]
                    phi_k = all_phis[j]
                    preds = self.copy_f_k[k](phi_k) # f_{-e}◦phi (extrapolator)
                    if self.train_only_representation:
                        assert preds.grad_fn != None
                    else:
                        assert preds.grad_fn == None
                    norm = len(extra_domains)
                    extra_loss = extra_loss + self.loss_forward(preds, batch_y, mask) / norm
                    # The minus in the loss function in the paper 2020 Jin here is introduced by the GradientReversal Layer.
                    if self.oracle:
                        if self.train_only_classifiers:
                            oracle_preds = self.g_k[k](GradientReversal.apply(phi_k)) # f_{e}◦phi
                            assert oracle_preds.grad_fn != None
                        else:
                            oracle_preds = self.copy_g_k[k](GradientReversal.apply(phi_k)) # f_{e}◦phi
                            if self.train_only_representation:
                                assert oracle_preds.grad_fn != None
                            else:
                                assert oracle_preds.grad_fn == None
                        oracle_loss = \
                            oracle_loss + self.loss_forward(oracle_preds, batch_y, mask) / norm
            if self.train_only_representation:
                assert extra_loss.grad_fn != None
            else:
                assert extra_loss.grad_fn == None
            if self.oracle:
                assert oracle_loss.grad_fn != None
            else:
                assert oracle_loss.grad_fn == None
        regret = extra_loss + oracle_loss
        # Compute L^{-e}(f_{-e}◦phi). Only for training f_{-e}, gradient is not backpropagated on phi (detached).
        holdout_loss = torch.tensor(0)
        if self.num_domains > 1 and self.train_representation and self.train_only_classifiers:
            for k, train_domains in enumerate(all_train_domains): # Loop classifiers
                # Train the kth classifier on all train_domains.
                for j in train_domains: # Loop domains
                    _, batch_y, batch_d, mask = batches[j]
                    phi_x = all_phis[j].detach() # phi does not help f_{-e}
                    preds = self.f_k[k](phi_x)
                    assert preds.grad_fn != None
                    norm = len(train_domains)
                    holdout_loss = holdout_loss + self.loss_forward(preds, batch_y, mask) / norm
            assert holdout_loss.grad_fn != None
        loss = (self.erm_e * erm_loss + self.rgm_e * (self.holdout_e * holdout_loss + regret)) / self.num_domains
        self.regret = regret.item()
        # Log the individual loss components for later inspection.
        if self.train_representation and self.log_losses:
            self.loss_curve_iter[f'extrapol'].append(extra_loss.item())
            self.loss_curve_iter[f'oracle'].append(oracle_loss.item())
            self.loss_curve_iter['regret'].append(regret.item())
            self.loss_curve_iter['holdout'].append(holdout_loss.item())
        if self.train_classifier and self.log_losses:
            self.loss_curve_iter['erm'].append(erm_loss.item())
        return loss
| 9,738 | 44.723005 | 145 | py |
3DSC | 3DSC-main/superconductors_3D/machine_learning/Algorithms/RGM_Jin.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.jit import TracerWarning
import math
import numpy as np
import random
from copy import deepcopy
import warnings
from itertools import combinations
class GradientReversal(torch.autograd.Function):
    """Identity in the forward pass; scales the gradient by -beta on the way back.

    Used to train the per-domain 'oracle' heads adversarially against the
    shared representation.
    """
    beta = 1.

    @staticmethod
    def forward(ctx, x):
        # Identity; view_as keeps the autograd bookkeeping intact.
        return x.view_as(x)

    @staticmethod
    def backward(ctx, grad_output):
        # Reverse (and scale) the gradient flowing into the representation.
        return -GradientReversal.beta * grad_output
class RGM(nn.Module):
    """Regret Minimization module (combinatorial train/extrapolation variant).

    Each auxiliary classifier trains on a `num_train_domains`-sized subset of
    the domains and extrapolates to the complement; at most
    `max_n_classifiers` subsets are used (randomly sampled when there are
    more). forward() returns erm + holdout + regret (weighted by erm_e,
    holdout_e, rgm_e) and logs all loss components per iteration.
    """
    def __init__(self, featurizer, classifier, num_domains, rgm_e, erm_e, holdout_e, detach_classifier, oracle, loss_forward, num_train_domains, max_n_classifiers):
        super(RGM, self).__init__()
        self.num_domains = num_domains
        # Negative num_train_domains counts backwards from num_domains.
        if num_train_domains < 0:
            num_train_domains = self.num_domains + num_train_domains
        if self.num_domains != 1 and (num_train_domains > self.num_domains - 1 or num_train_domains < 0):
            raise ValueError(f'Invalid value for num_train_domains: {num_train_domains}')
        # Set train and extrapolation domain indices for each of the auxiliary classifiers.
        self.all_train_domains, self.all_extra_domains = self.get_train_and_extrapolation_domains(num_train_domains, max_n_classifiers)
        self.n_classifiers = len(self.all_train_domains)
        # Deepcopy to not influence the original featurizer and classifier.
        self.featurizer = deepcopy(featurizer)
        self.classifier = deepcopy(classifier)
        # extrapolator f_{-e}
        self.f_k = nn.ModuleList(deepcopy(classifier) for _ in range(self.n_classifiers))
        # oracle f_{e}
        self.g_k = nn.ModuleList(deepcopy(classifier) for _ in range(self.n_classifiers))
        # Frozen copies used so the regret term trains only the representation.
        self.copy_f_k = nn.ModuleList(deepcopy(classifier) for _ in range(self.n_classifiers))
        for copy_f_k in self.copy_f_k:
            copy_f_k.requires_grad_(False)
        self.network = nn.Sequential(self.featurizer, self.classifier)
        self.ensemble = nn.ModuleList([nn.Sequential(self.featurizer, self.f_k[k]) for k in range(self.n_classifiers)])
        self.rgm_e = rgm_e
        self.erm_e = erm_e
        self.holdout_e = holdout_e
        self.detach_classifier = detach_classifier
        self.oracle = oracle
        self.loss_forward = loss_forward
        self.register_buffer('update_count', torch.tensor([0]))
        self.log_losses = True
        self.loss_curve_iter = {}
        self.loss_curve_iter['extrapol'] = []
        self.loss_curve_iter['oracle'] = []
        self.loss_curve_iter['erm'] = []
        self.loss_curve_iter['holdout'] = []
        self.loss_curve_iter['regret'] = []
        self.loss_curve_iter['eff_regret'] = []
        self.loss_curve_iter['total'] = []
        self.loss_curve_iter['eff_loss'] = []
        self.loss_curve_iter['rep_loss'] = []
    def get_train_and_extrapolation_domains(self, num_train_domains, max_n_classifiers):
        """Returns all train and extrapolation domain indices (second dimension) for all auxiliary classifiers (first dimension).
        """
        if self.num_domains > 1:
            domain_idc = range(self.num_domains)
            # One candidate classifier per num_train_domains-sized subset.
            all_train_domains = tuple(combinations(domain_idc, num_train_domains))
            n_train_domains = len(all_train_domains)
            if n_train_domains > max_n_classifiers:
                # Too many subsets: sample max_n_classifiers of them at random.
                all_train_d_idc = range(0, n_train_domains)
                choose_random_idc = np.random.choice(all_train_d_idc, size=max_n_classifiers, replace=False)
                all_train_domains = tuple([domains for i, domains in enumerate(all_train_domains) if i in choose_random_idc])
            # Extrapolation domains are all domains that are not train domains.
            all_extra_domains = []
            for train_domains in all_train_domains:
                extra_domains = []
                for i in range(self.num_domains):
                    if not i in train_domains:
                        extra_domains.append(i)
                all_extra_domains.append(tuple(extra_domains))
            all_extra_domains = tuple(all_extra_domains)
        else:
            all_train_domains = ()
            all_extra_domains = ()
        return(all_train_domains, all_extra_domains)
    def forward(self, batches):
        """Compute the RGM loss; `batches` holds one (x, y, d, mask) tuple per domain."""
        warnings.filterwarnings("ignore", category=TracerWarning)
        # Check that each batch in batches is from one domain.
        for _, _, batch_d, _ in batches:
            assert len(torch.unique(batch_d)) == 1
        assert len(np.unique([torch.unique(batch_d).item() for _, _, batch_d, _ in batches])) == self.num_domains
        assert len(batches) == self.num_domains
        # Get copy of f_k without gradient backprop that emulates frozen weights of f_k.
        for k in range(self.n_classifiers):
            self.copy_f_k[k].load_state_dict(self.f_k[k].state_dict())
        # Evaluate representation
        all_phis = []
        for batch_x, _, _, _ in batches:
            phi_x = self.featurizer(batch_x)
            all_phis.append(phi_x)
        # Compute L(f◦phi)
        erm_loss = torch.tensor(0)
        for k, (_, batch_y, batch_d, mask) in enumerate(batches):
            phi_x = all_phis[k]
            if self.detach_classifier:
                phi_x = phi_x.detach()
            preds = self.classifier(phi_x)
            erm_loss = erm_loss + self.loss_forward(preds, batch_y, mask)
        norm = self.num_domains
        erm_loss = erm_loss / norm
        # Compute regret R^e(phi).
        extra_loss = torch.tensor(0)
        oracle_loss = torch.tensor(0)
        for k, extra_domains in enumerate(self.all_extra_domains): # Loop classifiers
            for j in extra_domains: # Loop domains
                _, batch_y, batch_d, mask = batches[j]
                phi_k = all_phis[j]
                preds = self.copy_f_k[k](phi_k) # f_{-e}◦phi (extrapolator)
                norm = len(extra_domains) * len(self.all_extra_domains)
                extra_loss = extra_loss + self.loss_forward(preds, batch_y, mask) / norm
                # The minus in the loss function in the paper 2020 Jin is introduced by the GradientReversal Layer.
                if self.oracle:
                    oracle_preds = self.g_k[k](GradientReversal.apply(phi_k)) # f_{e}◦phi
                    oracle_loss = \
                        oracle_loss + self.loss_forward(oracle_preds, batch_y, mask) / norm
        regret = extra_loss + oracle_loss
        # Effective regret: extrapolation minus oracle loss (a plain float).
        eff_regret = extra_loss.item() - oracle_loss.item()
        # Compute L^{-e}(f_{-e}◦phi). Only for training f_{-e}, gradient is not backpropagated on phi (detached).
        holdout_loss = torch.tensor(0)
        for k, train_domains in enumerate(self.all_train_domains): # Loop classifiers
            # Train the kth classifier on all train_domains.
            for j in train_domains: # Loop domains
                _, batch_y, batch_d, mask = batches[j]
                phi_x = all_phis[j].detach() # phi does not help f_{-e}
                preds = self.f_k[k](phi_x)
                norm = len(train_domains) * len(self.all_train_domains)
                holdout_loss = holdout_loss + self.loss_forward(preds, batch_y, mask) / norm
        # Apply the configured weights to every component.
        erm_loss = self.erm_e * erm_loss
        holdout_loss = self.rgm_e * self.holdout_e * holdout_loss
        regret = self.rgm_e * regret
        eff_regret = self.rgm_e * eff_regret
        oracle_loss = self.rgm_e * oracle_loss
        extra_loss = self.rgm_e * extra_loss
        eff_loss = (erm_loss + holdout_loss + extra_loss - oracle_loss).item()
        loss = erm_loss + holdout_loss + regret
        # Log all loss components for later inspection.
        if self.log_losses:
            if self.detach_classifier:
                rep_loss = eff_regret
            else:
                rep_loss = erm_loss.item() + eff_regret
            self.loss_curve_iter['extrapol'].append(extra_loss.item())
            self.loss_curve_iter['oracle'].append(oracle_loss.item())
            self.loss_curve_iter['regret'].append(regret.item())
            self.loss_curve_iter['eff_regret'].append(eff_regret)
            self.loss_curve_iter['holdout'].append(holdout_loss.item())
            self.loss_curve_iter['erm'].append(erm_loss.item())
            self.loss_curve_iter['total'].append(loss.item())
            self.loss_curve_iter['rep_loss'].append(rep_loss)
            self.loss_curve_iter['eff_loss'].append(eff_loss)
        return loss
| 8,734 | 44.494792 | 164 | py |
3DSC | 3DSC-main/superconductors_3D/machine_learning/Algorithms/RGM_Jin_210424_only_one_class.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import math
import numpy as np
import random
class GradientReversal(torch.autograd.Function):
    """Identity in the forward pass; multiplies gradients by -beta on the way back.

    Used to train adversarial ("oracle") heads: minimizing their loss pushes
    the shared representation in the opposite direction.
    """
    beta = 1.

    @staticmethod
    def forward(ctx, x):
        # Behave as the identity; view_as keeps autograd bookkeeping intact.
        return x.view_as(x)

    @staticmethod
    def backward(ctx, grad_output):
        # Flip (and scale) the incoming gradient.
        return grad_output * (-GradientReversal.beta)
class RGM(nn.Module):
    """Regret-minimization (RGM) model for domain generalization.

    Holds a shared featurizer phi, a main classifier f trained on all
    environments, per-domain extrapolators f_{-e} (plus frozen copies used
    when computing the regret) and per-domain oracle predictors f_{e} that
    receive gradient-reversed features. Comments reference "paper 2020 Jin" —
    presumably the RGM paper by Jin et al. (2020); TODO confirm.

    NOTE(review): in this version the regret and holdout terms in forward()
    are commented out, so only the ERM loss is effectively trained.
    """
    def __init__(self, featurizer, loss_func, args):
        """Build the networks.

        featurizer: module mapping inputs to a hidden representation of size
            args.hidden_size.
        loss_func: element-wise loss (reduced by loss_forward via a mask).
        args: namespace with linear, hidden_size, output_size, num_domains,
            rgm_e attributes.
        """
        super(RGM, self).__init__()
        self.featurizer = featurizer
        if args.linear:
            # f,trained on all environments
            self.classifier = nn.Linear(args.hidden_size, args.output_size)
            # copy of f_{-e}
            self.copy_f_k = nn.ModuleList(
                [nn.Linear(args.hidden_size, args.output_size).requires_grad_(False) for _ in range(args.num_domains)]
            )
            # extrapolator f_{-e}
            self.f_k = nn.ModuleList(
                [nn.Linear(args.hidden_size, args.output_size) for _ in range(args.num_domains)]
            )
            # oracle predictor f_{e}
            self.g_k = nn.ModuleList(
                [nn.Linear(args.hidden_size, args.output_size) for _ in range(args.num_domains)]
            )
        else:
            # f,trained on all environments
            self.classifier = nn.Sequential(
                nn.Linear(args.hidden_size, args.hidden_size),
                nn.ReLU(),
                nn.Linear(args.hidden_size, args.output_size),
            )
            # copy of f_{-e}
            self.copy_f_k = nn.ModuleList([
                nn.Sequential(
                    nn.Linear(args.hidden_size, args.hidden_size),
                    nn.ReLU(),
                    nn.Linear(args.hidden_size, args.output_size),
                ).requires_grad_(False) for _ in range(args.num_domains)
            ])
            # extrapolator f_{-e}
            self.f_k = nn.ModuleList([
                nn.Sequential(
                    nn.Linear(args.hidden_size, args.hidden_size),
                    nn.ReLU(),
                    nn.Linear(args.hidden_size, args.output_size),
                ) for _ in range(args.num_domains)
            ])
            # oracle predictor f_{e}
            self.g_k = nn.ModuleList([
                nn.Sequential(
                    nn.Linear(args.hidden_size, args.hidden_size),
                    nn.ReLU(),
                    nn.Linear(args.hidden_size, args.output_size),
                ) for _ in range(args.num_domains)
            ])
        self.network = nn.Sequential(self.featurizer, self.classifier)
        self.loss_func = loss_func
        self.rgm_e = args.rgm_e
        self.num_domains = args.num_domains
        self.register_buffer('update_count', torch.tensor([0]))
    def loss_forward(self, preds, batch_y, mask):
        """Masked mean of the element-wise loss (sum of masked loss / mask sum)."""
        pred_loss = self.loss_func(preds, batch_y) * mask
        return pred_loss.sum() / mask.sum()
    def forward(self, batches):
        """Return the combined RGM loss, averaged over domains.

        batches: one (batch_x, batch_y, mask) tuple per domain.
        Currently only the ERM term contributes; the regret and holdout
        computations are commented out below.
        """
        # assert len(batches) == 2 # TODO
        # Sync the frozen copies with the current extrapolators so that the
        # regret term (when enabled) does not backpropagate into f_{-e}.
        for k in range(self.num_domains):
            self.copy_f_k[k].load_state_dict(self.f_k[k].state_dict())
        # Compute L(f◦phi)
        erm_loss = 0
        all_phis = []
        for batch_x, batch_y, mask in batches:
            phi_x = self.featurizer(batch_x)
            all_phis.append(phi_x)
            preds = self.classifier(phi_x)
            erm_loss = erm_loss + self.loss_forward(preds, batch_y, mask)
        # Compute regret R^e(phi).
        regret = 0
        # =============================================================================
        #         for k in range(self.num_domains):
        #             _, batch_y, mask = batches[k]
        #             phi_k = all_phis[k]
        #             preds = self.copy_f_k[k](phi_k) # f_{-e}◦phi
        #             oracle_preds = self.g_k[k](GradientReversal.apply(phi_k)) # f_{e}◦phi
        #             # The minus in the loss function in the paper 2020 Jin here is introduced by the GradientReversal Layer.
        #             regret = regret + self.loss_forward(preds, batch_y, mask) + self.loss_forward(oracle_preds, batch_y, mask)
        # =============================================================================
        # TODO
        # Compute L^{-e}(f_{-e}◦phi). Only for training f_{-e}, gradient is not backpropagated on phi (detached).
        holdout_loss = 0
        # =============================================================================
        #         for k in range(self.num_domains):
        #             _, batch_y, mask = batches[1 - k] # hardcode: 2 domains
        #             phi_x = all_phis[1 - k].detach() # phi does not help f_{-e}
        #             preds = self.f_k[k](phi_x)
        #             holdout_loss = holdout_loss + self.loss_forward(preds, batch_y, mask)
        # =============================================================================
        loss = erm_loss + holdout_loss + self.rgm_e * regret
        return loss / self.num_domains
| 5,006 | 40.040984 | 122 | py |
3DSC | 3DSC-main/superconductors_3D/machine_learning/Algorithms/RGM_Jin_210515_old_train_seperetely.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.jit import TracerWarning
import math
import numpy as np
import random
from copy import deepcopy
import warnings
class GradientReversal(torch.autograd.Function):
    """Gradient-reversal layer: the forward pass is the identity, while the
    backward pass negates (and scales by ``beta``) the incoming gradient."""
    beta = 1.

    @staticmethod
    def forward(ctx, inputs):
        return inputs.view_as(inputs)

    @staticmethod
    def backward(ctx, upstream_grad):
        scale = -GradientReversal.beta
        return scale * upstream_grad
class RGM(nn.Module):
    """Regret-minimization (RGM) model with configurable train/extrapolation
    domain assignments for the auxiliary classifiers.

    Components: a shared featurizer phi, the main classifier f, per-domain
    extrapolators f_{-e} (self.f_k) with frozen copies (self.copy_f_k) used
    in the regret term, and optional oracle heads f_{e} (self.g_k) fed
    gradient-reversed features. Comments reference "paper 2020 Jin" —
    presumably the RGM paper by Jin et al. (2020); TODO confirm.
    """
    def __init__(self, featurizer, classifier, num_domains, rgm_e, erm_e, holdout_e, detach_classifier, oracle, loss_forward, num_train_domains):
        """Build all sub-networks as deep copies of the given templates.

        rgm_e / erm_e / holdout_e: weights of the regret, ERM and holdout
            loss terms. detach_classifier: if True, the main classifier's
            loss does not update phi. oracle: whether to use the
            gradient-reversed oracle heads. loss_forward: callable
            (preds, batch_y, mask) -> scalar loss.
        """
        super(RGM, self).__init__()
        # Deepcopy to not influence the original featurizer and classifier.
        self.featurizer = deepcopy(featurizer)
        self.classifier = deepcopy(classifier)
        self.copy_f_k = nn.ModuleList(deepcopy(classifier) for _ in range(num_domains))
        for copy_f_k in self.copy_f_k:
            copy_f_k.requires_grad_(False)
        # extrapolator f_{-e}
        self.f_k = nn.ModuleList(deepcopy(classifier) for _ in range(num_domains))
        # oracle f_{e}
        self.g_k = nn.ModuleList(deepcopy(classifier) for _ in range(num_domains))
        self.network = nn.Sequential(self.featurizer, self.classifier)
        self.init_network = deepcopy(self.network)
        self.ensemble = nn.ModuleList([nn.Sequential(self.featurizer, self.f_k[k]) for k in range(num_domains)])
        self.rgm_e = rgm_e
        self.erm_e = erm_e
        self.holdout_e = holdout_e
        self.detach_classifier = detach_classifier
        self.oracle = oracle
        self.num_domains = num_domains
        # Default: each auxiliary classifier trains on all but one domain.
        if num_train_domains == None:
            self.num_train_domains = num_domains - 1
        else:
            self.num_train_domains = num_train_domains
        if self.num_train_domains > num_domains:
            raise ValueError('num_train_domains must be <= num_domains.')
        self.train_representation = True
        self.train_classifier = True
        self.loss_forward = loss_forward
        self.register_buffer('update_count', torch.tensor([0]))
        # Per-iteration loss logging (filled in forward()).
        self.log_losses = True
        self.loss_curve_iter = {}
        self.loss_curve_iter[f'extrapol'] = []
        self.loss_curve_iter[f'oracle'] = []
        self.loss_curve_iter['erm'] = []
        self.loss_curve_iter['holdout'] = []
        self.loss_curve_iter['regret'] = []
    def get_train_and_extrapolation_domains(self):
        """Returns all train and extrapolation domain indices (second dimension) for all auxiliary classifiers (first dimension).

        Only the leave-one-out case (num_train_domains == num_domains - 1)
        and the single-domain case (num_train_domains == 1) are implemented.
        """
        if self.num_train_domains == self.num_domains - 1:
            all_train_domains = [[i for i in range(self.num_domains) if i != k] for k in range(self.num_domains)]
            all_extra_domains = [[k] for k in range(self.num_domains)]
        elif self.num_train_domains == 1:
            all_train_domains = [[k] for k in range(self.num_domains)]
            all_extra_domains = [[i for i in range(self.num_domains) if i != k] for k in range(self.num_domains)]
        else:
            # NOTE(review): raising a Warning class as an exception is unusual;
            # NotImplementedError seems intended — confirm before changing.
            raise Warning('num_train_domains other than 1 or num_domains not implemented yet.')
        return(all_train_domains, all_extra_domains)
    def forward(self, batches):
        """Return the weighted RGM loss for one batch per domain.

        batches: one (batch_x, batch_y, batch_d, mask) tuple per domain,
        where batch_d holds a single domain id per batch (asserted below).
        """
        warnings.filterwarnings("ignore", category=TracerWarning)
        # Check that each batch in batches is from one domain.
        for _, _, batch_d, _ in batches:
            assert len(torch.unique(batch_d)) == 1
        assert len(np.unique([torch.unique(batch_d).item() for _, _, batch_d, _ in batches])) == self.num_domains
        # Sync frozen copies so the regret term cannot update f_{-e}.
        for k in range(self.num_domains):
            self.copy_f_k[k].load_state_dict(self.f_k[k].state_dict())
        # Evaluate representation
        all_phis = []
        for batch_x, _, _, _ in batches:
            phi_x = self.featurizer(batch_x)
            all_phis.append(phi_x)
        # Compute L(f◦phi)
        erm_loss = torch.tensor(0)
        if self.train_classifier:
            for k, (_, batch_y, batch_d, mask) in enumerate(batches):
                phi_x = all_phis[k]
                if self.detach_classifier:
                    phi_x = phi_x.detach()
                preds = self.classifier(phi_x)
                erm_loss = erm_loss + self.loss_forward(preds, batch_y, mask)
        # Set train and extrapolation domain indices for each of the auxiliary classifiers.
        all_train_domains, all_extra_domains = self.get_train_and_extrapolation_domains()
        # Compute regret R^e(phi).
        extra_loss = torch.tensor(0)
        oracle_loss = torch.tensor(0)
        if self.num_domains > 1 and self.train_representation:
            for k, extra_domains in enumerate(all_extra_domains): # Loop classifiers
                for j in extra_domains: # Loop domains
                    _, batch_y, batch_d, mask = batches[j]
                    phi_k = all_phis[j]
                    preds = self.copy_f_k[k](phi_k) # f_{-e}◦phi (extrapolator)
                    extra_loss = extra_loss + self.loss_forward(preds, batch_y, mask)
                    # The minus in the loss function in the paper 2020 Jin here is introduced by the GradientReversal Layer.
                    if self.oracle:
                        oracle_preds = self.g_k[k](GradientReversal.apply(phi_k)) # f_{e}◦phi
                        oracle_loss = \
                            oracle_loss + self.loss_forward(oracle_preds, batch_y, mask)
        regret = extra_loss + oracle_loss
        # Compute L^{-e}(f_{-e}◦phi). Only for training f_{-e}, gradient is not backpropagated on phi (detached).
        holdout_loss = torch.tensor(0)
        if self.num_domains > 1 and self.train_representation:
            for k, train_domains in enumerate(all_train_domains): # Loop classifiers
                # Train the kth classifier on all train_domains.
                for j in train_domains: # Loop domains
                    _, batch_y, batch_d, mask = batches[j]
                    phi_x = all_phis[j].detach() # phi does not help f_{-e}
                    preds = self.f_k[k](phi_x)
                    norm = len(train_domains)
                    holdout_loss = holdout_loss + self.loss_forward(preds, batch_y, mask) / norm
        loss = (self.erm_e * erm_loss + self.rgm_e * (self.holdout_e * holdout_loss + regret)) / self.num_domains
        if self.train_representation and self.log_losses:
            self.loss_curve_iter[f'extrapol'].append(extra_loss.item())
            self.loss_curve_iter[f'oracle'].append(oracle_loss.item())
            self.loss_curve_iter['regret'].append(regret.item())
            self.loss_curve_iter['holdout'].append(holdout_loss.item())
        if self.train_classifier and self.log_losses:
            self.loss_curve_iter['erm'].append(erm_loss.item())
        return loss
| 7,049 | 43.620253 | 145 | py |
3DSC | 3DSC-main/superconductors_3D/machine_learning/own_libraries/models/GPflow_GP.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 3 21:13:25 2021
@author: Timo Sommer
This script contains an implementation of a Gaussian Process that works with the GPflow models.
"""
import torch
from sklearn.base import RegressorMixin, BaseEstimator
import tensorflow as tf
import tensorflow_probability as tfp
import numpy as np
import gpflow
import pickle
from copy import deepcopy
from sklearn.preprocessing import StandardScaler
import os
import warnings
from gpflow.utilities import print_summary, set_trainable
from gpflow.optimizers import NaturalGradient
class GPflow_GP(RegressorMixin, BaseEstimator):
    """Sklearn-compatible wrapper around GPflow Gaussian Process models.

    Supports exact (GPR), sparse (SGPR) and variational (VGP/SVGP) models,
    several noise specifications via `alpha` (constant, per-point array,
    callable, or a learned heteroskedastic scale), optional standard scaling
    of inputs/targets, and optionally featurizing the inputs with a
    previously-trained pytorch NN (`NN_path`).
    """
    def __init__(self, model, kernel, alpha, mean_function=None, batch_size=100, epochs=1000, learning_rate=0.1, n_inducing_points=100, NN_path=None, standard_scale=False, natgrad=False, diff_std_for_sc_and_non_sc=False, train_noise=True, train_noise_scale=False, predict_y=False):
        """alpha: Estimated std of data. float for constant noise, array of floats for these noises as absolute std. Pass function to compute noise std from input y.
        `natgrad`: If the variational parameters should be trained using Natural Gradients instead of Adam.
        """
        self.model = model
        self.kernel = kernel
        self.alpha = alpha
        self.mean_function = mean_function
        self.batch_size = batch_size
        self.epochs = epochs
        self.learning_rate = learning_rate
        self.n_inducing_points = n_inducing_points
        self.NN_path = NN_path
        self.standard_scale = standard_scale
        self.natgrad = natgrad
        self.diff_std_for_sc_and_non_sc = diff_std_for_sc_and_non_sc
        self.train_noise = train_noise
        self.train_noise_scale = train_noise_scale
        self.predict_y = predict_y
    def get_array_noise_var(self, alpha, y):
        """Return a per-data-point noise *variance* array shaped like `y`.

        `alpha` may be a callable of y, a single float, or an array of stds;
        the cases are distinguished by the TypeErrors they raise (EAFP).
        """
        try:
            # If alpha is function.
            noise_std = alpha(y)
            print('Using specified function for noise.')
        except TypeError:
            try:
                # If alpha is float.
                alpha = float(alpha)
                noise_std = np.full(y.shape, alpha)
                print('Using single float for noise, but in likelihood.')
            except TypeError:
                # If alpha is array.
                noise_std = np.asarray(alpha)
                print('Using specified array for alpha')
        noise_std = noise_std.reshape(y.shape)
        noise_var = noise_std**2
        return noise_var
    def get_noise_var(self, y):
        """Implement variance per data point if model allows it, otherwise set the same variance per data point.

        Returns (kwargs-dict to pass into the model constructor, possibly
        augmented y). For heteroskedastic likelihoods the noise variance or
        group index is appended to y as an extra column.
        """
        if self.model in self.homogeneous_noise_models:
            # GPflow model only takes a single float for noise as `noise_var`.
            try:
                noise_variance = {'noise_variance': float(self.alpha)**2}
                print('Using single float as noise.')
            except TypeError:
                raise ValueError('Parameter `alpha` must be float for this model.')
        else:
            if y.shape[1] == 1:
                if self.diff_std_for_sc_and_non_sc:
                    # Allow different variances for all data points with Tc=0 and all other data points.
                    print('Using different noise for Tc=0 and Tc>0.')
                    likelihood = gpflow.likelihoods.SwitchedLikelihood(
                        [gpflow.likelihoods.Gaussian(variance=self.alpha**2),
                        gpflow.likelihoods.Gaussian(variance=self.alpha**2)]
                        )
                    non_sc_tc = min(y)
                    groups = np.where(y > non_sc_tc, 0, 1)
                    y = np.hstack([y, groups])
                elif self.train_noise_scale:
                    # Train a separarate GP to learn the scale of the noise.
                    likelihood = gpflow.likelihoods.HeteroskedasticTFPConditional(
                        distribution_class=tfp.distributions.Normal,
                        scale_transform=tfp.bijectors.Softplus()
                        )
                    self.kernel = gpflow.kernels.SeparateIndependent([
                                    self.kernel, # This is k1, the kernel of f1
                                    self.kernel, # this is k2, the kernel of f2
                                    ])
                else:
                    try:
                        # If alpha is float.
                        print('Using single float as noise.')
                        likelihood = gpflow.likelihoods.Gaussian(variance=float(self.alpha)**2)
                    except TypeError:
                        # If alpha is iterable or function.
                        print('Alpha is iterable or function.')
                        noise_var = self.get_array_noise_var(self.alpha, y)
                        likelihood = HeteroskedasticGaussian()
                        y = np.hstack([y, noise_var])
                noise_variance = {'likelihood': likelihood}
            else:
                raise NotImplementedError('Expression above not yet working with multiple y (see num_latent_GPs of HeteroskedasticGaussian()).')
        return noise_variance, y
    def get_inducing_variable(self, X):
        """Choose `M` inducing points for sparse model from data `X`.

        NOTE(review): simply takes the first M rows of X — assumes X is
        shuffled; confirm upstream shuffling.
        """
        if self.model in self.sparse_models:
            assert not (self.n_inducing_points is None)
            M = self.n_inducing_points
            Z = X[:M, :].copy()
            if self.train_noise_scale:
                Z = gpflow.inducing_variables.SeparateIndependentInducingVariables(
                    [
                        gpflow.inducing_variables.InducingPoints(Z),
                        gpflow.inducing_variables.InducingPoints(Z),
                    ]
                )
            inducing_variable = {'inducing_variable': Z}
        else:
            inducing_variable = {}
        return inducing_variable
    def run_adam(self, X, y):
        """Utility function running the Adam optimizer.

        Minibatch training loop for variational models; the per-step training
        loss is recorded in `self.loss_curve_`.
        """
        assert not (self.batch_size is None) and not (self.epochs is None)
        # Commented out because seems wrong though it was in tutorial.
        # try:
        #     gpflow.set_trainable(self.gp.inducing_variable, False)
        # except AttributeError:
        #     pass
        # Train the variational parameters using natural gradients.
        if self.natgrad:
            set_trainable(self.gp.q_mu, False)
            set_trainable(self.gp.q_sqrt, False)
            natgrad = NaturalGradient(gamma=1)
            variational_params = [(self.gp.q_mu, self.gp.q_sqrt)]
        train_dataset = tf.data.Dataset.from_tensor_slices((X, y)).repeat().shuffle(len(X))
        train_iter = iter(train_dataset.batch(self.batch_size))
        # Create an Adam Optimizer action
        training_loss = self.gp.training_loss_closure(train_iter, compile=True)
        adam = tf.optimizers.Adam(self.learning_rate)
        @tf.function
        def optimization_step():
            adam.minimize(training_loss, self.gp.trainable_variables)
            if self.natgrad:
                natgrad.minimize(training_loss, var_list=variational_params)
        self.loss_curve_ = []
        for step in range(self.epochs):
            optimization_step()
            self.loss_curve_.append(training_loss().numpy())
        return
    def transform_X_with_NN(self, X):
        """Load pytorch NN and use it to transform X.

        The NN's featurizer is cached on first use; if the model file is
        missing, X is returned unchanged.
        """
        if not hasattr(self, 'NN_featurizer'):
            if os.path.exists(self.NN_path):
                with open(self.NN_path, 'rb') as f:
                    NN = pickle.load(f)
                self.NN_featurizer = NN.regressor['model'].featurizer
            else:
                # NOTE(review): string lacks an f-prefix, so '{self.NN_path}'
                # is printed literally — confirm and fix.
                warnings.warn('Model {self.NN_path} not found. Continue training on original input features.')
                return(X)
            print(f'Training GP on output of {self.NN_path}.')
        else:
            print(f'Evaluating GP on output of {self.NN_path}.')
        X = deepcopy(X)
        X = torch.tensor(X, dtype=torch.float)
        X = self.NN_featurizer(X)
        X = X.cpu().detach().numpy()
        X = np.float64(X) # As it was before
        return X
    def inverse_transform_std(self, mu, std, scaler):
        """Makes the inverse transform of the std by transforming upper and lower bound. Returns upper and lower bound after the inverse transform.
        """
        lower_conf = mu - std
        upper_conf = mu + std
        lower_conf_trans = scaler.inverse_transform(lower_conf)
        upper_conf_trans = scaler.inverse_transform(upper_conf)
        mu_trans = scaler.inverse_transform(mu)
        assert np.allclose(np.mean([lower_conf_trans, upper_conf_trans], axis=0), mu_trans), 'Std bounds not symmetrical.'
        std_trans = (upper_conf_trans - lower_conf_trans) / 2
        return std_trans
    def fit(self, X, y):
        """Fit the GPflow model on X, y (sklearn API). Returns self.

        Non-variational models use scipy's L-BFGS; variational models are
        trained with minibatch Adam (see run_adam).
        """
        self.homogeneous_noise_models = [gpflow.models.GPR, gpflow.models.SGPR]
        self.sparse_models = [gpflow.models.SGPR, gpflow.models.SVGP]
        self.variational_models = [gpflow.models.VGP, gpflow.models.SVGP]
        # GPflow needs 2D input for y, otherwise bugs happen.
        X = deepcopy(np.asarray(X))
        y = deepcopy(np.asarray(y))
        if y.ndim == 1:
            y = y.reshape(-1, 1)
        n_targets = y.shape[1]
        X, y = self._validate_data(X, y, multi_output=True, y_numeric=True)
        if self.NN_path != None:
            X = self.transform_X_with_NN(X)
            self.fitted_on_NN = True
        else:
            self.fitted_on_NN = False
        # Standard scale X and y to have mean 0 and std 1.
        if self.standard_scale:
            self.X_scaler, self.y_scaler = StandardScaler(), StandardScaler()
            X = self.X_scaler.fit_transform(X)
            y = self.y_scaler.fit_transform(y)
        noise_variance, y = self.get_noise_var(y)
        if self.model != gpflow.models.SVGP:
            data = {'data': (X, y)}
        else:
            data = {'num_data': len(X)}
        inducing_variable = self.get_inducing_variable(X)
        if self.train_noise_scale:
            num_latent_GPs = noise_variance['likelihood'].latent_dim
        else:
            num_latent_GPs = n_targets
        print(f'Number of latent GPs: {num_latent_GPs}')
        self.gp = self.model(**data,
                        kernel=self.kernel,
                        mean_function=self.mean_function,
                        num_latent_gps=num_latent_GPs,
                        **noise_variance,
                        **inducing_variable
                        )
        if not self.train_noise:
            # Keep specified noise fixed.
            try:
                set_trainable(self.gp.likelihood.variance, False)
                # set_trainable(self.gp.kernel.kernels[1].variance, False)
            except AttributeError:
                print('Noise can not be trained.')
        # print_summary(self.gp)
        # Make fit.
        if not self.model in self.variational_models:
            opt = gpflow.optimizers.Scipy()
            opt.minimize(self.gp.training_loss, self.gp.trainable_variables)
        else:
            self.run_adam(X, y)
        # print_summary(self.gp)
        return(self)
    def predict(self, X, return_std=False):
        """Predict mean (and optionally std) for X.

        Uses predict_y (observation space, includes noise) when
        self.predict_y is set, otherwise predict_f (latent function).
        Returned arrays are 2D with one column per target.
        """
        X = np.asarray(X)
        if self.fitted_on_NN:
            X = self.transform_X_with_NN(X)
        if self.standard_scale:
            X = self.X_scaler.transform(X)
        if self.predict_y:
            y, var = self.gp.predict_y(X)
        else:
            y, var = self.gp.predict_f(X)
        y, var = np.asarray(y), np.asarray(var)
        std = np.sqrt(var)
        # if self.diff_std_for_sc_and_non_sc:
        #     y = y[:,:-1]    # Remove group column
        #     max_uncertain_tc = std[0,1]
        #     std = np.where(y.squeeze() > max_uncertain_tc, std[:,0], std[:,1])
        #     std = std.reshape(y.shape)
        # Transform mean and std back.
        if self.standard_scale:
            std = self.inverse_transform_std(y, std, self.y_scaler)
            y = self.y_scaler.inverse_transform(y)
        if return_std:
            return(y, std)
        else:
            return(y)
class HeteroskedasticGaussian(gpflow.likelihoods.Likelihood):
    """Gaussian likelihood with a fixed per-data-point noise variance.

    Expects the data matrix Y to carry the observed target in column 0 and
    its (known) noise variance in column 1.
    """
    def __init__(self, **kwargs):
        # this likelihood expects a single latent function F, and two columns in the data matrix Y:
        super().__init__(latent_dim=1, observation_dim=2, **kwargs)
    def _log_prob(self, F, Y):
        # log_prob is used by the quadrature fallback of variational_expectations and predict_log_density.
        # Because variational_expectations is implemented analytically below, this is not actually needed,
        # but is included for pedagogical purposes.
        # Note that currently relying on the quadrature would fail due to https://github.com/GPflow/GPflow/issues/966
        Y, NoiseVar = Y[:, 0], Y[:, 1]
        return gpflow.logdensities.gaussian(Y, F, NoiseVar)
    def _variational_expectations(self, Fmu, Fvar, Y):
        # Closed-form E_{q(F)=N(Fmu, Fvar)}[log N(Y | F, NoiseVar)].
        Y, NoiseVar = Y[:, 0], Y[:, 1]
        Fmu, Fvar = Fmu[:, 0], Fvar[:, 0]
        return (
            -0.5 * np.log(2 * np.pi)
            - 0.5 * tf.math.log(NoiseVar)
            - 0.5 * (tf.math.square(Y - Fmu) + Fvar) / NoiseVar
        )
    # The following two methods are abstract in the base class.
    # They need to be implemented even if not used.
    def _predict_log_density(self, Fmu, Fvar, Y):
        raise NotImplementedError
    def _predict_mean_and_var(self, Fmu, Fvar):
        raise NotImplementedError
3DSC | 3DSC-main/superconductors_3D/machine_learning/own_libraries/models/NN/MLP_Lightning.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 4 10:36:50 2021
@author: Timo Sommer
This script includes a standard Neural Network based on pytorch lightning.
"""
import torch
from torch.nn import functional as F
from torch import nn
from torch.utils.data import DataLoader
from pytorch_lightning.core.lightning import LightningModule
from pytorch_lightning.core.datamodule import LightningDataModule
import numpy as np
import pytorch_lightning as pl
from pytorch_lightning.callbacks.early_stopping import EarlyStopping
from pytorch_lightning.callbacks.model_checkpoint import ModelCheckpoint
from sklearn.model_selection import train_test_split
import warnings
warnings.filterwarnings("ignore", ".*does not have many workers.*")
def get_activation_fn(activation: str):
    """Returns torch activation function based on string activation.
    """
    factories = {
        'relu': nn.ReLU,
        'logistic': nn.Sigmoid,
        'tanh': nn.Tanh,
    }
    if activation in factories:
        return factories[activation]()
    raise ValueError(f'Activation function {activation} not recognized. Activation functions are lowercase always.')
def get_sequential_NN(input_layer_size: int, hidden_layer_sizes: list, activation: str):
    """Returns a sequential (Feed Forward) NN.

    Builds `len(hidden_layer_sizes)` linear layers mapping
    `input_layer_size -> hidden_layer_sizes[0] -> ... -> hidden_layer_sizes[-1]`,
    with `activation` between consecutive layers. The last layer is always
    linear (no activation), so the final entry of `hidden_layer_sizes` acts
    as the output size. (The previous docstring referred to a non-existent
    `last_linear` parameter.)
    """
    activation_fn = get_activation_fn(activation)
    sizes = [input_layer_size] + list(hidden_layer_sizes)
    n_layers = len(hidden_layer_sizes)
    layers = []
    for i, (in_size, out_size) in enumerate(zip(sizes[:-1], sizes[1:])):
        layers.append(nn.Linear(in_size, out_size))
        if i < n_layers - 1:
            # The same (stateless) activation instance is reused between
            # layers, matching the original behavior.
            layers.append(activation_fn)
    return nn.Sequential(*layers)
class DataModule(LightningDataModule):
    """LightningDataModule wrapping in-memory numpy arrays X/y and splitting
    them into a train and a validation part of size `validation_fraction`."""
    def __init__(self, X, y, batch_size, validation_fraction):
        super().__init__()
        self.X = X
        self.y = y
        self.batch_size = batch_size
        self.validation_fraction = validation_fraction
    def prepare_data(self):
        # called only on 1 GPU
        return
    def setup(self, stage):
        # called on every GPU
        # NOTE(review): no random_state is passed, so every call to setup()
        # produces a different random split — confirm this is intended.
        X_train, X_val, y_train, y_val = train_test_split(
            self.X,
            self.y,
            test_size=self.validation_fraction
        )
        self.train_data = TrainDataset(X_train, y_train)
        self.val_data = TrainDataset(X_val, y_val)
    def train_dataloader(self):
        # Shuffle training data each epoch; validation data keeps its order.
        train_dataloader = DataLoader(
            self.train_data,
            batch_size=self.batch_size,
            shuffle=True
        )
        return train_dataloader
    def val_dataloader(self):
        val_dataloader = DataLoader(
            self.val_data,
            batch_size=self.batch_size
        )
        return val_dataloader
class TrainDataset(torch.utils.data.Dataset):
    """Holds paired features and targets as float32 tensors for DataLoader use."""

    def __init__(self, X, y):
        self.X = torch.from_numpy(X).float()
        self.y = torch.from_numpy(y).float()

    def __len__(self):
        return self.y.shape[0]

    def __getitem__(self, idx):
        return (self.X[idx], self.y[idx])
class TestDataset(torch.utils.data.Dataset):
    """Feature-only dataset for inference; converts X to float32 tensors."""

    def __init__(self, X):
        self.X = torch.from_numpy(X).float()

    def __len__(self):
        return self.X.shape[0]

    def __getitem__(self, idx):
        return self.X[idx]
class LightningMLP(LightningModule):
    """Feed-forward MLP regressor as a pytorch-lightning module.

    Trains with MSE loss; per-step train and validation losses are appended
    to `self.loss_curve_` for later inspection by the wrapping `MLP` class.
    """
    def __init__(self,
                 solver,
                 learning_rate,
                 alpha,
                 n_features,
                 n_targets,
                 hidden_layer_sizes,
                 activation
                 ):
        super().__init__()
        self.solver = solver
        self.learning_rate = learning_rate
        # `alpha` is the L2 regularization strength (Adam weight_decay).
        self.alpha = alpha
        self.n_features = n_features
        self.n_targets = n_targets
        self.hidden_layer_sizes = hidden_layer_sizes
        self.activation = activation
        self.loss_func = self.get_loss_func()
        self.loss_curve_ = {'train': [], 'valid': []}
        self.make_architecture()
        return
    def configure_optimizers(self):
        """Return the optimizer; currently only Adam ('adam') is supported."""
        if self.solver == 'adam':
            optimizer = torch.optim.Adam(
                self.parameters(),
                lr=self.learning_rate,
                weight_decay=self.alpha
            )
        else:
            raise NotImplementedError(f'Optimizer {self.solver} not found.')
        return optimizer
    def make_architecture(self):
        """Generates `self.network`.

        Appends the output size to the hidden sizes, so the final
        `n_targets`-wide layer is the (linear) output layer.
        """
        layer_sizes = list(self.hidden_layer_sizes) + [self.n_targets]
        self.network = get_sequential_NN(
            input_layer_size=self.n_features,
            hidden_layer_sizes=layer_sizes,
            activation=self.activation
        )
        return
    def get_loss_func(self):
        """Return the training criterion (MSE for regression)."""
        loss_func = nn.MSELoss()
        return loss_func
    def get_loss(self, y_pred, y_true):
        loss = self.loss_func(y_pred, y_true)
        return loss
    def forward(self, X):
        """Used for inference.
        """
        y_pred = self.network(X)
        return y_pred
    def training_step(self, batch, batch_idx):
        """Used for training.
        """
        X, y_true = batch
        y_pred = self(X)
        loss = self.get_loss(y_pred, y_true)
        # log
        self.log('train_loss', loss, on_epoch=True)
        self.loss_curve_['train'].append(loss.item())
        return loss
    def validation_step(self, batch, batch_idx):
        """Used for validation.
        """
        X, y_true = batch
        y_pred = self(X)
        loss = self.get_loss(y_pred, y_true)
        # log: "val_loss" is also what the EarlyStopping callback monitors.
        self.log("val_loss", loss)
        self.loss_curve_['valid'].append(loss.item())
        return loss
class MLP():
    """Sklearn-like interface (fit/predict) around the pytorch-lightning
    `LightningMLP` regressor defined in this module.

    Parameters mirror sklearn's MLPRegressor where possible. If
    `validation_fraction` is given, a validation split is held out and early
    stopping (with `patience`) is enabled; otherwise the model trains for the
    full `n_epochs`. Extra `**kwargs` are passed through to
    `pytorch_lightning.Trainer`.
    """

    def __init__(self,
                 hidden_layer_sizes: tuple = (100,),
                 activation: str = 'relu',
                 solver: str = 'adam',
                 n_epochs: int = 200,
                 batch_size: int = 200,
                 learning_rate: float = 1e-3,
                 alpha: float = 1e-4,
                 validation_fraction=None,
                 patience: int = 10,
                 **kwargs
                 ):
        # Bug fix: the former default `(100)` is just the int 100 (no comma),
        # which breaks `list(self.hidden_layer_sizes)` in
        # LightningMLP.make_architecture; `(100,)` is the intended
        # single-hidden-layer default.
        self.hidden_layer_sizes = hidden_layer_sizes
        self.activation = activation
        self.solver = solver
        self.n_epochs = n_epochs
        self.batch_size = batch_size
        self.learning_rate = learning_rate
        self.alpha = alpha  # L2 regularization strength
        self.validation_fraction = validation_fraction
        self.patience = patience
        self.kwargs = kwargs

    def standardize_input(self, X, y):
        """Standardizes input parameters.

        Coerces hyperparameters to their expected types, decides whether
        early stopping is used, reshapes 1D targets to 2D (remembering the
        original shape for `predict`) and records n_features/n_targets.
        """
        # Module parameters
        self.n_epochs = int(self.n_epochs)
        self.batch_size = int(self.batch_size)
        self.learning_rate = float(self.learning_rate)
        self.alpha = float(self.alpha)
        if self.validation_fraction is not None:
            self.early_stopping = True
            self.validation_fraction = float(self.validation_fraction)
            assert (0 < self.validation_fraction < 1)
        else:
            self.early_stopping = False
            self.validation_fraction = 0
        self.patience = int(self.patience)
        # Input arrays: the network always sees 2D targets.
        self.n_features = X.shape[1]
        if y.ndim == 1:
            y = y.reshape(-1, 1)
            self.targets_1D = True
        else:
            self.targets_1D = False
        self.n_targets = y.shape[1]
        return X, y

    def configure_callbacks(self):
        """Configure all callbacks here (currently only early stopping)."""
        callbacks = []
        if self.early_stopping:
            callbacks.append(
                EarlyStopping(monitor="val_loss", patience=self.patience)
            )
        return callbacks

    def fit(self, X, y):
        """Train the network on X/y. Returns self (sklearn convention)."""
        # TODO
        # Add repeatibility (https://pytorch-lightning.readthedocs.io/en/latest/common/trainer.html)
        X, y = self.standardize_input(X, y)
        self.callbacks = self.configure_callbacks()
        data = DataModule(X, y, self.batch_size, self.validation_fraction)
        model = LightningMLP(
            solver=self.solver,
            learning_rate=self.learning_rate,
            alpha=self.alpha,
            n_features=self.n_features,
            n_targets=self.n_targets,
            hidden_layer_sizes=self.hidden_layer_sizes,
            activation=self.activation
        )
        self.trainer = pl.Trainer(
            max_epochs=self.n_epochs,
            callbacks=self.callbacks,
            enable_progress_bar=False,
            logger=False,
            **self.kwargs
        )
        self.trainer.fit(model=model, datamodule=data)
        self.loss_curve_ = self.trainer.model.loss_curve_
        return self

    def convert_predictions_to_numpy(self, y_pred):
        """Sanitizes the output of `self.trainer.predict` to make a numpy array with the same number of dimensions as the input targets.
        """
        y_pred = np.array(y_pred[0].tolist()).reshape(-1, self.n_targets)
        if self.targets_1D:
            y_pred = y_pred.ravel()
        return y_pred

    def predict(self, X):
        """Predict targets for X with the best checkpoint of the trainer."""
        dataset = TestDataset(X)
        dataloader = DataLoader(dataset, batch_size=len(X))
        y_pred = self.trainer.predict(
            model=self.trainer.model,
            dataloaders=dataloader,
            ckpt_path='best'
        )
        return self.convert_predictions_to_numpy(y_pred)
| 11,648 | 30.569106 | 136 | py |
3DSC | 3DSC-main/superconductors_3D/machine_learning/own_libraries/models/GNN/MEGNet_tf.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 14 09:12:44 2021
@author: Timo Sommer
This script contains an implementation of MEGNet from the original authors based on tensorflow with an sklearn API.
"""
from sklearn.base import BaseEstimator, RegressorMixin
from megnet.models import MEGNetModel
from megnet.data.crystal import CrystalGraph, CrystalGraphDisordered, _AtomEmbeddingMap
import json
import numpy as np
import os
import sklearn
import megnet
import tensorflow as tf
import keras.backend as K
import networkx as nx
import matplotlib.pyplot as plt
from superconductors_3D.machine_learning.own_libraries.utils.Models import Models, regressor_from_pipeline, Pickle_tf
import warnings
warnings.filterwarnings('ignore', '.*The `lr` argument is deprecated, use `learning_rate` instead.*')
def read_json_file(path_to_file):
    """Parse the JSON file at `path_to_file` and return its content.

    On failure the offending path is printed before re-raising, so it shows
    up in logs even when the caller handles the exception.
    """
    try:
        with open(path_to_file) as f:
            return json.load(f)
    except Exception:
        print(f'File {path_to_file} throws an error:')
        # Bug fix: the original `raise Exception(e)` wrapped the error,
        # discarding its type and traceback. A bare `raise` preserves both;
        # callers catching Exception still work.
        raise
# TODO
# Early Stopping with grouped validation set.
# Loss function with only positive values and with Huber Loss.
def sc_huber(ytrue, ypred, min_value=0):
    """Computes the huber loss and also sets all negative predicted y values to 0 beforehand. Useful for superconductors.
    """
    # Clamp predictions from below at `min_value` (default 0) so that
    # negative Tc predictions are treated as Tc = 0 before the Huber loss.
    ypred_min = tf.maximum(ypred, min_value)
    loss = tf.keras.losses.huber(ytrue, ypred_min)
    return loss
# def train_test_split(X, y, sample_weight, test_size, groups):
# """Splits data into train and test. If groups is given, a GroupShuffleSplit is used instead of a usual ShuffleSplit.
# """
# if groups is None:
# CV = sklearn.model_selection.ShuffleSplit(test_size=test_size, random_state=1) # TODO
# train_inds, test_inds = next(CV.split(X, y, sample_weight))
# else:
# CV = sklearn.model_selection.GroupShuffleSplit(test_size=test_size)
# train_inds, test_inds = next(CV.split(X, y, groups))
# X_train, X_test, y_train, y_test = list(np.array(X)[train_inds]), list(np.array(X)[test_inds]), y[train_inds], y[test_inds]
# return X_train, X_test, y_train, y_test
class MEGNet_tf(BaseEstimator, RegressorMixin):
def __init__(self, use_learnt_elemental_embedding, epochs, batch_size, patience, lr, l2_coef, dropout, optimizer_kwargs, nblocks, n1, n2, n3, r_cutoff=4, n_feat_bond=100, early_stopping=False, validation_frac=None, save_checkpoint=False, prev_model=None, loss='mse', domain_col=None, tensorboard=False, lr_exp_decay=None, act='softplus2', npass=3):
"""
Args:
graphs_path: (str) path to a json file of all graphs
kwargs:
nfeat_edge: (int) number of bond features
nfeat_global: (int) number of state features
nfeat_node: (int) number of atom features
nblocks: (int) number of MEGNetLayer blocks
lr: (float) learning rate
n1: (int) number of hidden units in layer 1 in MEGNetLayer
n2: (int) number of hidden units in layer 2 in MEGNetLayer
n3: (int) number of hidden units in layer 3 in MEGNetLayer
nvocal: (int) number of total element
embedding_dim: (int) number of embedding dimension
nbvocal: (int) number of bond types if bond attributes are types
bond_embedding_dim: (int) number of bond embedding dimension
ngvocal: (int) number of global types if global attributes are types
global_embedding_dim: (int) number of global embedding dimension
npass: (int) number of recurrent steps in Set2Set layer
ntarget: (int) number of output targets
act: (object) activation function
l2_coef: (float or None) l2 regularization parameter
is_classification: (bool) whether it is a classification task
loss: (object or str) loss function
metrics: (list or dict) List or dictionary of Keras metrics to be evaluated by the model during training
and testing
dropout: (float) dropout rate
graph_converter: (object) object that exposes a "convert" method for structure to graph conversion
target_scaler: (object) object that exposes a "transform" and "inverse_transform" methods for transforming
the target values
optimizer_kwargs (dict): extra keywords for optimizer, for example clipnorm and clipvalue
sample_weight_mode (str): sample weight mode for compilation
kwargs (dict): in the case where bond inputs are pure distances (not the expanded distances nor integers
for embedding, i.e., nfeat_edge=None and bond_embedding_dim=None),
kwargs can take additional inputs for expand the distance using Gaussian basis.
centers (np.ndarray): array for defining the Gaussian expansion centers
width (float): width for the Gaussian basis
"""
self.use_learnt_elemental_embedding = use_learnt_elemental_embedding
self.epochs = epochs
self.batch_size = batch_size
self.patience = patience
self.r_cutoff = r_cutoff
self.n_feat_bond = n_feat_bond
self.early_stopping = early_stopping
self.validation_frac = validation_frac
self.save_checkpoint = save_checkpoint
self.prev_model = prev_model
self.loss = loss
self.domain_col = domain_col
self.tensorboard = tensorboard
self.lr = lr
self.lr_exp_decay = lr_exp_decay
self.l2_coef = l2_coef
self.dropout = dropout
self.optimizer_kwargs = optimizer_kwargs
self.nblocks = nblocks
self.n1 = n1
self.n2 = n2
self.n3 = n3
self.act = act
self.npass = npass
def get_loss_fn(self):
"""Returns the loss function.
"""
if self.loss == 'mse':
loss_function = 'mse'
elif self.loss == 'huber':
loss_function = 'huber'
elif self.loss == 'sc_huber':
loss_function = sc_huber
else:
raise ValueError(f'Loss function {self.loss} of MEGNet model not recognized.')
return loss_function
def get_act_fn(self):
"""Returns the activation function.
"""
if self.act == 'softplus2' or self.act == 'softplus':
act_function = megnet.activations.softplus2
elif self.act == 'swish':
act_function = megnet.activations.swish
elif self.act == 'relu':
act_function = tf.keras.activations.relu
else:
raise ValueError(f'Activation function {self.act} of MEGNet model not recognized.')
return act_function
def get_paths(self, X):
assert X.shape[1] == 1
paths = [os.path.expanduser(path) for path in X[:,0]]
return paths
def get_graphs(self, X):
"""Returns the graphs of each input. X must be a list of paths to these graphs.
"""
paths = self.get_paths(X)
graphs = [read_json_file(path) for path in paths]
# Get already learnt elemental embedding features.
if self.use_learnt_elemental_embedding:
print('Convert atom features to learnt embedding.')
embedding = _AtomEmbeddingMap()
for i in range(len(graphs)):
graphs[i]['atom'] = embedding.convert(graphs[i]['atom'])
return graphs
def get_prev_model_path(self):
"""Saves only the MEGNet part of a previously trained model in a temporary file and returns this filename.
"""
# megnet_path = Pickle_tf.get_tf_filepath(self.prev_model)
return self.prev_model
def get_feature_dimensions(self, example_graph):
"""Returns the input dimensions of the features in the correct `encoding`.
"""
# No encoding here, just use the plain provided features.
nfeat_state = len(example_graph['state'][0])
# Get `encoding` of correct input of atom.
Z_with_embedding = all([isinstance(feat, int) for feat in example_graph['atom']])
embedding_from_disordered_dict = all([isinstance(feat, dict) for feat in example_graph['atom']])
if Z_with_embedding:
print('Use atom number Z with embedding as atom features.')
nfeat_node = None
elif embedding_from_disordered_dict:
print('Use already learnt embedding of 16 features per element.')
nfeat_node = 16
else:
print('Use plain features as provided for the atoms.')
nfeat_node = len(example_graph['atom'][0])
# Expand distance in gaussian basis
nfeat_edge = None
return nfeat_node, nfeat_edge, nfeat_state
def exp_decay_scheduler(self, epoch, lr):
"""This learning rate scheduler reduces the learning rate exponentially.
"""
return lr * self.lr_exp_decay
def init_callbacks(self):
"""Add callbacks.
"""
callbacks = []
if self.early_stopping:
assert not self.validation_frac is None, 'Early stopping needs validation data!'
callbacks.append(
tf.keras.callbacks.EarlyStopping(
monitor='val_loss',
min_delta=0,
patience=self.patience,
verbose=0,
mode='min',
restore_best_weights=True
))
# Early stopping with monitoring anything else than val_mae only works if not saving checkpoints, MEGNet is a bit weird there.
self.save_checkpoint = False
if self.tensorboard:
callbacks.append(
tf.keras.callbacks.TensorBoard(
log_dir='tensorboard',
write_graph=False,
))
if not self.lr_exp_decay is None:
callbacks.append(
tf.keras.callbacks.LearningRateScheduler(
schedule=self.exp_decay_scheduler
))
return callbacks
def train_val_split(self, graphs, y, sample_weight, groups):
"""Splits data into train and validation data. If groups is given, this gives the groups that will be kept together in either train or validation set. This is useful e.g. for superconductors, where all datapoints with the same chemical composition should be kept together.
"""
if not groups is None:
raise NotImplementedError('groups is not implemented because the train_test_split with the groups can not deal with sample_weight yet.')
if self.validation_frac == None:
train_graphs = graphs
val_graphs = None
train_y = y
val_y = None
train_w = sample_weight
val_w = None
else:
assert 0 < self.validation_frac < 1
if sample_weight is None:
train_graphs, val_graphs, train_y, val_y = sklearn.model_selection.train_test_split(graphs, y, test_size=self.validation_frac)#, groups=groups)
train_w = None
val_w = None
else:
train_graphs, val_graphs, train_y, val_y, train_w, val_w = sklearn.model_selection.train_test_split(graphs, y, sample_weight, test_size=self.validation_frac)#, groups=groups)
return train_graphs, val_graphs, train_y, val_y, train_w, val_w
def sanitize_input(self, d_train):
"""Sanitize input.
"""
# We want either None or several groups, therefore set to None if there is only one group. This is the encoding of the ML script which is a bit unfortunate.
if len(np.unique(d_train)) == 1:
d_train = None
if self.validation_frac == 0:
self.validation_frac = None
if self.lr_exp_decay == 1:
self.lr_exp_decay = None
if not self.lr_exp_decay is None:
if not 0 < self.lr_exp_decay < 1:
raise ValueError('`lr_exp_decay` must be a float between 0 and 1.')
if self.lr_exp_decay < 0.95:
warnings.warn('`lr_exp_decay` is very small, should usually be very close to 1.')
return d_train
    def fit(self, X, y, d_train=None, sample_weight=None):
        """Fit the MEGNet model on graphs stored as json files.

        Args:
            X: 2D array of shape (n_samples, 1) with paths to json files
                of graph structures.
            y: target values, shape (n_samples,) or (n_samples, n_targets).
            d_train: optional per-sample domain labels; a single unique
                domain is treated as "no domains" (see `sanitize_input`).
            sample_weight: optional per-sample weights.

        Returns:
            self, with `self.model` trained and `self.loss_curve_` filled.
        """
        d_train = self.sanitize_input(d_train)
        # NOTE(review): metrics are hard-coded here and override any value
        # set elsewhere — confirm this is intended.
        self.metrics = ['mse']
        # Load graphs
        graphs = self.get_graphs(X)
        # Define model
        # Dimensions are inferred from the first graph.
        example_graph = graphs[0]
        self.nfeat_node, self.nfeat_edge, self.nfeat_state = self.get_feature_dimensions(example_graph)
        self.n_target = y.shape[1] if y.ndim == 2 else 1
        # Expansion of bond features for continous representation:
        # distances are expanded in a Gaussian basis reaching up to r_cutoff + 1.
        gaussian_centers = np.linspace(0, self.r_cutoff + 1, self.n_feat_bond)
        gaussian_width = 0.5
        self.graph_converter = CrystalGraph(cutoff=self.r_cutoff)
        self.model = FixedMEGNetModel(
                        nfeat_node=self.nfeat_node,
                        nfeat_global=self.nfeat_state,
                        nfeat_edge=self.nfeat_edge,
                        graph_converter=self.graph_converter,
                        centers=gaussian_centers,
                        width=gaussian_width,
                        ntarget=self.n_target,
                        loss=self.get_loss_fn(),
                        metrics=self.metrics,
                        lr=self.lr,
                        l2_coef=self.l2_coef,
                        dropout=self.dropout,
                        optimizer_kwargs=self.optimizer_kwargs,
                        nblocks=self.nblocks,
                        n1=self.n1,
                        n2=self.n2,
                        n3=self.n3,
                        act=self.get_act_fn(),
                        npass=self.npass,
                        )
        train_graphs, val_graphs, train_y, val_y, train_w, val_w = self.train_val_split(graphs, y, sample_weight, d_train)
        # if not self.prev_model is None:
        #     prev_model = self.get_prev_model_path()
        self.callbacks = self.init_callbacks()
        model = self.model.train_from_graphs(train_graphs=train_graphs,
                            train_targets=train_y,
                            validation_graphs=val_graphs,
                            validation_targets=val_y,
                            epochs=self.epochs,
                            batch_size=self.batch_size,
                            patience=self.patience,
                            callbacks=self.callbacks,
                            save_checkpoint=self.save_checkpoint,
                            prev_model=self.prev_model,
                            sample_weights=train_w,
                            val_sample_weights=val_w,
                            )
        # Get loss curve for later inspection/plotting.
        self.loss_curve_ = {}
        self.loss_curve_['train'] = model.history.history['loss']
        if not self.validation_frac is None:
            self.loss_curve_['valid'] = model.history.history['val_loss']
        for metric in self.metrics:
            self.loss_curve_[f'{metric} (train)'] = model.history.history[metric]
            if not self.validation_frac is None:
                self.loss_curve_[f'{metric} (valid)'] = model.history.history[f'val_{metric}']
        return self
def predict(self, X):
"""The input X must be a list with paths to json files of graph structures.
"""
graphs = self.get_graphs(X)
y_pred = self.model.predict_graphs(graphs)
return y_pred
from typing import Dict, List, Union
from tensorflow.keras.callbacks import Callback
from megnet.callbacks import ModelCheckpointMAE, ManualStop, ReduceLRUponNan
class FixedMEGNetModel(MEGNetModel):
    """MEGNetModel subclass that additionally supports sample weights for
    the validation data (`val_sample_weights` in `train_from_graphs`).
    """
    def train_from_graphs(
        self,
        train_graphs: List[Dict],
        train_targets: List[float],
        validation_graphs: List[Dict] = None,
        validation_targets: List[float] = None,
        sample_weights: List[float] = None,
        epochs: int = 1000,
        batch_size: int = 128,
        verbose: int = 1,
        callbacks: List[Callback] = None,
        prev_model: str = None,
        lr_scaling_factor: float = 0.5,
        patience: int = 500,
        save_checkpoint: bool = True,
        automatic_correction: bool = False,
        dirname: str = "callback",
        val_sample_weights: List[float] = None, # Added this line
        **kwargs,
    ):
        """
        Args:
            train_graphs: (list) list of graph dictionaries
            train_targets: (list) list of target values
            validation_graphs: (list) list of graphs as validation
            validation_targets: (list) list of validation targets
            sample_weights: (list) list of sample weights
            epochs: (int) number of epochs
            batch_size: (int) training batch size
            verbose: (int) keras fit verbose, 0 no progress bar, 1 only at the epoch end and 2 every batch
            callbacks: (list) megnet or keras callback functions for training
            prev_model: (str) file name for previously saved model
            lr_scaling_factor: (float, less than 1) scale the learning rate down when nan loss encountered
            patience: (int) patience for early stopping
            save_checkpoint: (bool) whether to save checkpoint
            automatic_correction: (bool) correct nan errors
            dirname: (str) the directory in which to save checkpoints, if `save_checkpoint=True`
            val_sample_weights: (list) sample weights for the validation data
            **kwargs: forwarded to `keras.Model.fit`
        """
        # load from saved model
        if prev_model:
            self.load_weights(prev_model)
        # Classification is detected from the loss name; it decides which
        # quantity is monitored for checkpointing.
        is_classification = "entropy" in str(self.model.loss)
        monitor = "val_acc" if is_classification else "val_mae"
        mode = "max" if is_classification else "min"
        has_sample_weights = sample_weights is not None
        if not os.path.isdir(dirname):
            os.makedirs(dirname)
        if callbacks is None:
            # with this call back you can stop the model training by `touch STOP`
            callbacks = [ManualStop()]
        # Targets are scaled per structure; the scaler may depend on the atom count.
        train_nb_atoms = [len(i["atom"]) for i in train_graphs]
        train_targets = [self.target_scaler.transform(i, j) for i, j in zip(train_targets, train_nb_atoms)]
        if (validation_graphs is not None) and (validation_targets is not None):
            filepath = os.path.join(dirname, "%s_{epoch:05d}_{%s:.6f}.hdf5" % (monitor, monitor))
            val_nb_atoms = [len(i["atom"]) for i in validation_graphs]
            validation_targets = [self.target_scaler.transform(i, j) for i, j in zip(validation_targets, val_nb_atoms)]
            val_inputs = self.graph_converter.get_flat_data(validation_graphs, validation_targets)
            # Pass the validation sample weights into the generator — this is
            # the fix relative to the upstream MEGNetModel implementation.
            val_generator = self._create_generator(*val_inputs, sample_weights=val_sample_weights, batch_size=batch_size) # Fixed this line.
            steps_per_val = int(np.ceil(len(validation_graphs) / batch_size))
            if save_checkpoint:
                callbacks.extend(
                    [
                        ModelCheckpointMAE(
                            filepath=filepath,
                            monitor=monitor,
                            mode=mode,
                            save_best_only=True,
                            save_weights_only=False,
                            val_gen=val_generator,
                            steps_per_val=steps_per_val,
                            target_scaler=self.target_scaler,
                        )
                    ]
                )
                # avoid running validation twice in an epoch: the checkpoint
                # callback already evaluates on the validation generator.
                val_generator = None  # type: ignore
                steps_per_val = None  # type: ignore
            if automatic_correction:
                callbacks.extend(
                    [
                        ReduceLRUponNan(
                            filepath=filepath,
                            monitor=monitor,
                            mode=mode,
                            factor=lr_scaling_factor,
                            patience=patience,
                            has_sample_weights=has_sample_weights,
                        )
                    ]
                )
        else:
            val_generator = None  # type: ignore
            steps_per_val = None  # type: ignore
        train_inputs = self.graph_converter.get_flat_data(train_graphs, train_targets)
        # check dimension match
        self.check_dimension(train_graphs[0])
        train_generator = self._create_generator(*train_inputs, sample_weights=sample_weights, batch_size=batch_size)
        steps_per_train = int(np.ceil(len(train_graphs) / batch_size))
        self.fit(
            train_generator,
            steps_per_epoch=steps_per_train,
            validation_data=val_generator,
            validation_steps=steps_per_val,
            epochs=epochs,
            verbose=verbose,
            callbacks=callbacks,
            **kwargs,
        )
        return self
| 21,677 | 40.688462 | 352 | py |
3DSC | 3DSC-main/superconductors_3D/machine_learning/own_libraries/analysis/Experiments/Run.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 18 14:23:49 2021
@author: Timo Sommer
This script contains a class to plot stuff for ML runs.
"""
import os
import pandas as pd
from pandas.api.types import is_string_dtype
from pandas.api.types import is_numeric_dtype
import numpy as np
import matplotlib.pyplot as plt
from superconductors_3D.machine_learning.own_libraries.utils.Scalers import Arcsinh_Scaler
import matplotlib.colors
import sklearn
from scipy import stats
from matplotlib.lines import Line2D
from matplotlib.ticker import ScalarFormatter, LogLocator, NullFormatter, FormatStrFormatter
import seaborn as sns
import superconductors_3D.machine_learning.Custom_Machine_Learning_v1_3 as ML
from superconductors_3D.machine_learning.own_libraries.data import All_Data
from itertools import product
import warnings
import yaml
import copy
from superconductors_3D.machine_learning.own_libraries.data.All_scores import All_scores
from superconductors_3D.machine_learning.own_libraries.data.Domain_statistics import Domain_statistics
from typing import Union
from superconductors_3D.machine_learning.own_libraries.data import Feature_Importances
from superconductors_3D.dataset_preparation.utils.check_dataset import get_chem_dict
sns.set_theme()
from superconductors_3D.machine_learning.own_libraries.utils.Scores import SMAPE
def unroll_dictionaries(dictionary, sep='__'):
    """Flatten one level of nested dicts into the top level.

    Every key of a dict-valued entry is merged in as
    '<outer><sep><inner>' and the dict-valued entry itself is removed.
    The input dictionary is not modified.
    """
    flattened = copy.deepcopy(dictionary)
    for outer_key, outer_val in dictionary.items():
        if not isinstance(outer_val, dict):
            continue
        for inner_key, inner_val in outer_val.items():
            flat_key = outer_key + sep + inner_key
            # Guard against a flattened key colliding with an existing one.
            assert not flat_key in dictionary
            flattened[flat_key] = inner_val
        del flattened[outer_key]
    return flattened
def disassemble_dict(value):
    """Split a value into a (key, value) pair.

    A dict yields its first key and first value (used to override a
    plotted name: {csv_name: plot_name}); anything else is returned as
    both key and value.
    """
    if isinstance(value, dict):
        key = next(iter(value.keys()))
        val = next(iter(value.values()))
        return key, val
    return value, value
def _fade_colors_by_std(base_color, y_std):
    """Return RGBA rows of `base_color`, more transparent for larger std."""
    rgba = np.array(matplotlib.colors.to_rgba(base_color))
    colors = np.outer(np.ones(len(y_std)), rgba)
    alpha = 1.0 - y_std / (np.max(y_std) + 0.1)
    # Shift/scale alpha into a sensible (0, 1) range.
    if np.min(alpha) < 0.01:
        alpha -= np.min(alpha)
    if np.max(alpha) >= 1.0:
        alpha /= (np.max(alpha) + 0.01)
    colors[:, -1] = alpha.reshape(-1)
    return colors


def color_for_gaussian_datapoints(y_std_train, y_std_test):
    """Set colors for datapoints so that datapoints with higher std have a
    higher transparency. Only useful for the Gaussian Process.

    Args:
        y_std_train: array of predictive std devs for the training points.
        y_std_test: array of predictive std devs for the test points.

    Returns:
        (colors_train, colors_test): RGBA arrays ("C1" for train, "C2"
        for test), one row per datapoint.
    """
    # Identical fading logic for both splits (was duplicated inline before).
    colors_train = _fade_colors_by_std("C1", y_std_train)
    colors_test = _fade_colors_by_std("C2", y_std_test)
    return (colors_train, colors_test)
def is_run_dir(absdir, accept_deprecated=False):
    """Decide whether `absdir` is an ML run directory.

    A valid run contains `arguments`, `hparams.yml` and `All_scores.csv`.
    With `accept_deprecated=True`, old-style runs that only contain the
    scores file are accepted as well.
    """
    def _contains(fname):
        return os.path.exists(os.path.join(absdir, fname))

    has_scores = _contains('All_scores.csv')
    has_arguments = _contains('arguments')
    has_hparams = _contains('hparams.yml')
    if has_arguments and has_hparams:
        if has_scores:
            return True
        warnings.warn(f'Failed ML run in directory {absdir}!')
    # Old versions of the run layout only wrote the scores file.
    if accept_deprecated and has_scores and not (has_arguments or has_hparams):
        warnings.warn(f'Old run with deprecated structure found! Accepting old run.')
        return True
    return False
def get_hparams(hparams_file):
    """Load the hyperparameters stored in the given yaml file.

    Args:
        hparams_file: path to a yaml file of hyperparameters.

    Returns:
        dict of hyperparameters.

    Raises:
        ValueError: if the file does not exist.
    """
    if not os.path.exists(hparams_file):
        raise ValueError(f'No hyperparameter file "{hparams_file}" found.')
    # Context manager so the file handle is closed deterministically
    # (the previous `yaml.load(open(...))` leaked the handle).
    with open(hparams_file, 'r') as f:
        hparams = yaml.load(f, Loader=yaml.FullLoader)
    return hparams
def hparams_file(run_dir):
    """Return the path of the hyperparameter yaml inside `run_dir`."""
    path = os.path.join(run_dir, 'hparams.yml')
    return path
def analysis_dir(run_dir):
    """Return the directory where analysis plots of a run are stored."""
    path = os.path.join(run_dir, 'plots')
    return path
def all_scores_file(run_dir):
    """Return the path of the scores csv inside `run_dir`."""
    path = os.path.join(run_dir, 'All_scores.csv')
    return path
def params_file(run_dir):
    """Return the path of the saved run arguments inside `run_dir`."""
    path = os.path.join(run_dir, 'arguments')
    return path
class MLRun():
"""This class contains functions to plot ML runs.
"""
def __init__(self, run_dir, get_arguments=True):
self.run_dir = run_dir
self.analysis_dir = analysis_dir(self.run_dir)
self.all_scores_file = all_scores_file(self.run_dir)
self.hparams_file = hparams_file(self.run_dir)
self.params_file = params_file(self.run_dir)
# self.all_scores = All_scores(self.all_scores_file)
self.all_Data_path = os.path.join(self.run_dir, 'All_values_and_predictions.csv')
if get_arguments:
self.arguments = self.get_params()
def get_hparams(self):
"""Returns a dictionary of the hyperparameters of this run.
"""
return get_hparams(self.hparams_file)
def get_params(self, unroll=False):
"""Returns a dictionary of the parameters of this run (saved under `arguments`).
"""
if os.path.exists(self.params_file):
params = yaml.load(open(self.params_file,"r"), Loader=yaml.Loader)
else:
raise ValueError(f'No hyperparameter file "{self.params_file}" found.')
if unroll:
params = unroll_dictionaries(params)
return params
def get_all_params(self, unroll=False):
"""Returns all parameters and hyperparameters as a dictionary.
"""
hparams = self.get_hparams()
params = self.get_params()
all_params = params.update(hparams)
assert len(all_params) == len(hparams) + len(params), 'There is overlap between the names of parameters and hyperparameters!'
if unroll:
all_params = unroll_dictionaries(all_params)
return all_params
def get_scores(self):
"""Returns the df of all scores.
"""
all_scores = All_scores(self.all_scores_file)
return all_scores
def savefig(self, filename, **kwargs):
directory = os.path.dirname(filename)
if not os.path.exists(directory):
os.mkdir(directory)
plt.savefig(filename, **kwargs)
return
    def plot_score_of_models(self, models: list, score_name: Union[str,dict], target: str, CVs: list=['test'], save_file=None):
        """Makes bar plots of all given scores of all models.

        `score_name` can be either a string or a dictionary where the key
        is the score_name in the csv file and the value is the name shown
        in the plot. The figure is saved under `<analysis_dir>/scores/`.

        NOTE(review): `CVs` has a mutable default list; it is not mutated
        here, but consider `CVs=None` + in-body default.
        """
        # Allow to override plotted name of score by passing a dict.
        score_name, plot_score_name = disassemble_dict(score_name)
        all_scores = All_scores(self.all_scores_file)
        # Collect one row per (model, CV, individual score value).
        data = []
        for model, CV in product(models, CVs):
            scores = all_scores.get_scores(target, score_name, model, CV)
            for score in scores:
                data.append(
                    {'model': model,
                    score_name: score,
                    'CV': CV,
                    'target': target
                    })
        data = pd.DataFrame(data)
        plt.figure()
        # Only color by CV split if there is more than one.
        hue = 'CV' if len(CVs) > 1 else None
        sns.catplot(data=data, x='model', y=score_name, hue=hue, kind='bar', legend=False)
        plt.ylabel(plot_score_name)
        plt.legend(loc='best')
        plt.tight_layout()
        if save_file == None:
            save_file = f'scores_{target}_{score_name}_{"+".join(models)}_{"+".join(CVs)}.png'
        filename = os.path.join(self.analysis_dir, 'scores', save_file)
        self.savefig(filename, dpi=300)
        plt.show()
        plt.clf()
        return
    def plot_domain_score_of_model(self, domain_col: str, models: list, score_name: Union[str,dict], target: str, CVs: list=['test'], save_file=None, domain_order=None, rotate_xlabels=None, yscale: str='linear', ylim: tuple=None):
        """Makes a barplot of the domain scores of some models.

        `score_name` and each entry of `models` may be a dict mapping the
        csv name to the name shown in the plot. The figure is saved under
        `<analysis_dir>/domain_scores/`.

        NOTE(review): `CVs` has a mutable default list (unmutated here).
        """
        # Allow to override plotted name of score by passing a dict.
        score_name, plot_score_name = disassemble_dict(score_name)
        # Get domain data.
        domain_statistics_file = os.path.join(self.run_dir, 'Domain_statistics.csv')
        domain_stats = Domain_statistics(domain_statistics_file, domain_col)
        domains = domain_stats.all_domains
        # Build up df: one row per (model, CV, domain).
        data = []
        for model, CV, domain in product(models, CVs, domains):
            model, plot_model_name = disassemble_dict(model)
            score = domain_stats.get_score(model, domain, score_name, target, CV)
            data.append({
                'model': plot_model_name,
                'group': domain,
                'target': target,
                'CV': CV,
                plot_score_name: score
            })
        data = pd.DataFrame(data)
        plot_models = list(data['model'].unique())
        # Plot.
        plt.figure()
        hue = 'group' if len(domains) > 1 else None
        if domain_order == None:
            domain_order = domains
        sns.catplot(data=data, x='model', y=plot_score_name, hue=hue, hue_order=domain_order, kind='bar', legend=False)
        # Scale
        plt.yscale(yscale)
        ax = plt.gca()
        # Keep plain (non-scientific) tick labels even on log scales.
        ax.yaxis.set_major_formatter(ScalarFormatter())
        plt.ylim(ylim)
        plt.legend(loc='best')
        if rotate_xlabels != None:
            # NOTE(review): set_xticklabels is called without a labels
            # argument here — confirm this works on the matplotlib version
            # in use.
            ax.set_xticklabels(rotation=rotate_xlabels, ha="right")
        plt.tight_layout()
        if save_file == None:
            save_file = f'domain_scores_{target}_{score_name}_{"+".join(plot_models)}_{"+".join(CVs)}.png'
        filename = os.path.join(self.analysis_dir, 'domain_scores', save_file)
        self.savefig(filename, dpi=300)
        plt.show()
        plt.clf()
        return
    def plot_2D_preds(self, model, feature_dict, outpath, x_true=[], y_true=[], target_true=[], scatter_kwargs={}, target_name='target', target_idx=0, res=30):
        """Plots the prediction surface of the target of a model dependent
        on two features in given ranges.

        `feature_dict` must have exactly two feature names as keys and as
        value an iterable of [feature_min, feature_max]. If `x_true`,
        `y_true` and `target_true` are all non-empty, the true datapoints
        are scattered on top of the surface. The 3D figure is saved to
        `outpath`.

        NOTE(review): `x_true`, `y_true`, `target_true` and
        `scatter_kwargs` have mutable defaults; they are only read here,
        but consider `None` defaults.
        """
        features = list(feature_dict.keys())
        x = features[0]
        y = features[1]
        xmin = feature_dict[x][0]
        xmax = feature_dict[x][1]
        ymin = feature_dict[y][0]
        ymax = feature_dict[y][1]
        # Build a res x res evaluation grid over the feature ranges.
        x_grid = np.outer(np.linspace(xmin, xmax, res), np.ones(res))
        y_grid = np.outer(np.linspace(ymin, ymax, res), np.ones(res)).T
        x_flat = x_grid.flatten()
        y_flat = y_grid.flatten()
        feats = np.array([x_flat, y_flat]).T
        # feats = model.x_scaler.transform(feats)
        z_flat = model.predict(feats)
        # z_flat = model.y_scaler.inverse_transform(z_flat)
        # For multi-target models pick the requested output column.
        if len(z_flat.shape) > 1:
            z_flat = z_flat[:, target_idx]
        z_grid = z_flat.reshape(res, res)
        fig = plt.figure()
        ax = plt.axes(projection='3d')
        ax.plot_surface(x_grid, y_grid, z_grid, cmap='plasma', edgecolor='none', alpha=0.3)
        if len(x_true) > 0 and len(y_true) > 0 and len(target_true) > 0:
            ax.scatter(x_true, y_true, target_true, **scatter_kwargs)
        ax.set_title('Prediction plot')
        ax.set_xlabel(x)
        ax.set_ylabel(y)
        ax.set_zlabel(target_name)
        plt.savefig(outpath, dpi=300)
        plt.close()
    def plot_1D_preds(self, model_dict, data_dict, outpath, scatter_kwargs={}, y_idx=0, res=300, xlabel='x', ylabel='target', x_limits=[], y_limits=[], add_fn={}):
        """Plots the prediction curves of some models over one feature.

        Args:
            model_dict: {model_name: model with .predict}.
            data_dict: {label: (x_values, y_values)} of true datapoints.
            outpath: where the figure is saved.
            scatter_kwargs: extra kwargs for the true-data markers.
            y_idx: output column to plot for multi-target models.
            res: number of prediction points along x.
            x_limits, y_limits: optional [min, max] plot ranges; if
                `x_limits` is empty, the range of the true data is used.
            add_fn: {name: callable} of extra functions to plot.

        NOTE(review): `scatter_kwargs`, `x_limits`, `y_limits` and
        `add_fn` have mutable defaults; they are only read here.
        """
        plt.figure()
        sns.set_theme()
        # Get x for the prediction.
        if x_limits == []:
            # Span the union of all provided datasets.
            xmax = -np.inf
            xmin = np.inf
            for _, (x, _) in data_dict.items():
                if max(x) > xmax:
                    xmax = max(x)
                if min(x) < xmin:
                    xmin = min(x)
        else:
            xmin = x_limits[0]
            xmax = x_limits[1]
        x = np.linspace(xmin, xmax, res)
        x = x.reshape((-1, 1))
        # Plot true data points.
        for label, (x_, y_) in data_dict.items():
            plt.plot(x_, y_, label=label, marker='.', linestyle='None', **scatter_kwargs)
        # Plot prediction of model.
        for modelname, model in model_dict.items():
            x_pred = x
            y_pred = model.predict(x_pred)
            if len(y_pred.shape) > 1:
                y_pred = y_pred[:, y_idx]
            plt.plot(x_pred, y_pred, label=modelname)
        # Plot additional functions.
        for fn_name, f in add_fn.items():
            y = f(x)
            plt.plot(x, y, ':', label=fn_name)
        plt.legend()
        # plt.title('Predictions')
        plt.xlabel(xlabel)
        plt.ylabel(ylabel)
        if not y_limits == []:
            plt.ylim(y_limits)
        plt.savefig(outpath, dpi=300, transparent=False)
        plt.close()
def reduce_duplicates(self, df, duplicate_col: str, mean_cols: list):
"""Reduces the df so that of duplicates in `duplicate_col` will be taken the mean if all the values are numeric and otherwise it is asserted that it is the same for all the duplicates. Useful for making a df with crystals to a df based on individual superconductors.
"""
all_columns = df.columns
n_unique = sum(~ df.duplicated([duplicate_col, 'repetition']))
groupby_cols = [col for col in df.columns if not col in mean_cols]
assert duplicate_col in groupby_cols
df = df.groupby(groupby_cols).mean().reset_index()
assert len(df) == n_unique
assert sorted(df.columns) == sorted(all_columns)
return df
    def parity_plot_all_test_data(self, target, model, repetitions, domain_col, duplicate_col=None, log_log=False):
        """This makes a parity plot (true vs predicted target) with all
        test data from all CV runs of all repetitions.

        Args:
            target: name of the target column (e.g. 'tc').
            model: name of the model whose predictions are plotted.
            repetitions: which CV repetitions to include.
            domain_col: optional domain column; if given, points are
                colored by group (unless uncertainties are available).
            duplicate_col: optional column to deduplicate on (e.g. one
                entry per superconductor instead of per crystal).
            log_log: if True, use symlog axes.
        """
        true_target = f'true {target}'
        pred_target = f'pred {target}'
        pred_lower_bound = f'${ML.SIGMA} \sigma$ lower bound'
        pred_upper_bound = f'${ML.SIGMA} \sigma$ upper bound'
        rel_pred_lower_bound = f'${ML.SIGMA} \sigma$ lower bound (relative)'
        pred_scaled_unc = f'scaled ${ML.SIGMA} \sigma$'
        uncertainty = f'relative uncertainty'
        # When All_Data is finished this should be written to use it.
        data_path = os.path.join(self.run_dir, 'All_values_and_predictions.csv')
        df, _ = ML.load_df_and_metadata(data_path)
        other_cols = [duplicate_col] if not duplicate_col == None else []
        data = All_Data.All_Data.get_test_data(df,
                                               target,
                                               model,
                                               repetitions,
                                               domain_col,
                                               other_cols=other_cols,
                                               true_target=true_target,
                                               pred_target=pred_target,
                                               pred_lower_bound=pred_lower_bound,
                                               pred_upper_bound=pred_upper_bound,
                                               pred_scaled_unc=pred_scaled_unc
                                               )
        # Reduce data so that we have only one entry per superconductor instead of one entry per crystal and take mean of true/ pred/ unc tc columns.
        if duplicate_col != None:
            data = self.reduce_duplicates(data, duplicate_col, mean_cols=[true_target, pred_target, pred_lower_bound, pred_upper_bound, pred_scaled_unc])
        # Sort so that domains with few data points are plotted on top.
        if domain_col != None:
            data['count'] = data.groupby('group')['group'].transform(pd.Series.count)
            data.sort_values('count', inplace=True, ascending=False)
            data = data.drop(columns='count')
        # Start plotting. A wider figure leaves room for the group legend.
        if domain_col != None:
            plt.figure(figsize=(8, 4.8))
        else:
            plt.figure()
        # To remove title of legend.
        if domain_col != None:
            data = data.rename(columns={'group': ''})
            hue = ''
        else:
            hue = None
        # If uncertainty given, use color for uncertainty, not for group anymore.
        has_unc = pred_lower_bound in data.columns
        if has_unc:
            # rel_unc = data[pred_lower_bound] / data[pred_target]
            # data[rel_pred_lower_bound] = rel_unc
            # Normalize uncertainties to [0, 1] for the colormap.
            unc = data[pred_scaled_unc] - min(data[pred_scaled_unc])
            unc /= max(unc)
            data[uncertainty] = unc
            norm = plt.Normalize(min(unc), max(unc))
            cmap = sns.cubehelix_palette(as_cmap=True, reverse=True)
            sm = plt.cm.ScalarMappable(cmap=cmap, norm=norm)
            sm.set_array([])
            hue = uncertainty
        else:
            cmap = None
        # Plot scatter plot.
        marker_size = 15
        ax = sns.scatterplot(data=data, x=true_target, y=pred_target, hue=hue, alpha=1, palette=cmap, s=marker_size)
        # Plot dotted line of perfect fit.
        x_min = min(data[true_target])
        x_max = 200#max(data[true_target])
        line = np.linspace(x_min, x_max, 300)
        ax.plot(line, line, '--k', label='perfect fit')
        # Plot title.
        plt.title(model)
        # Add legend or colorbar.
        if has_unc:
            ax.get_legend().remove()
            cbar = ax.figure.colorbar(sm)
            cbar.set_label(hue, labelpad=10)
        elif domain_col != None:
            ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
        # Ticks.
        if log_log:
            ax.set(xscale='symlog', yscale='symlog')
            ax.xaxis.set_major_formatter(FormatStrFormatter('%.12g'))
            ax.yaxis.set_major_formatter(FormatStrFormatter('%.12g'))
        # Make labels nicer for tc.
        if target == 'tc':
            plt.ylabel('pred $T_c$ (K)')
            plt.xlabel('true $T_c$ (K)')
            ticks = [0, 0.3, 1, 3, 10, 30, 100, 200]
            ax.set_xticks(ticks)
            ax.set_yticks(ticks)
            ticklabels = ['0', '0.3', '1', '3', '10', '30', '100', '200']
            ax.set_xticklabels(ticklabels)
            ax.set_yticklabels(ticklabels)
        # Save plot
        plt.tight_layout()
        save_dir = os.path.join(self.run_dir, 'plots/parity_plots')
        if not os.path.exists(save_dir):
            os.mkdir(save_dir)
        save_name = os.path.join(save_dir, f'all_together_parity_plot_{target}_{model}.png')
        plt.savefig(save_name, dpi=300)
        plt.show()
        return()
def score_over_elemental_prevalence(self, model, target, score, repetitions, domains, chem_formula, duplicate_col, log=False):
"""This makes a parity plot of the uncertainty and the error with all test data from all CV runs of all repetitions.
"""
true_target = f'true {target}'
pred_target = f'pred {target}'
pred_lower_bound = f'${ML.SIGMA} \sigma$ lower bound'
pred_upper_bound = f'${ML.SIGMA} \sigma$ upper bound'
pred_scaled_unc = f'${ML.SIGMA} \sigma$ (scaled)'
# Rename x and y axis to make them nicer for Tc.
if target == 'tc':
error = f'{score} of $T_c$'
else:
error = f'{score} of {target}'
# When All_Data is finished this should be written to use it.
data_path = os.path.join(self.run_dir, 'All_values_and_predictions.csv')
df, _ = ML.load_df_and_metadata(data_path)
other_cols = [duplicate_col] if duplicate_col != None else []
data = All_Data.All_Data.get_test_data(df,
target,
model,
repetitions=repetitions,
domain=domains,
other_cols=other_cols,
true_target=true_target,
pred_target=pred_target,
pred_lower_bound=pred_lower_bound,
pred_upper_bound=pred_upper_bound,
pred_scaled_unc=pred_scaled_unc
)
# Reduce data so that we have only one entry per superconductor instead of one entry per crystal and take mean of true/ pred/ unc tc columns.
if duplicate_col != None:
mean_cols=[true_target, pred_target, pred_lower_bound, pred_upper_bound, pred_scaled_unc]
data = self.reduce_duplicates(data, duplicate_col, mean_cols=mean_cols)
assert score == 'SMAPE'
data[score] = SMAPE(data[true_target], data[pred_target])
elemental_data = {'element': [], score: []}
for formula, value in zip(data[chem_formula], data[score]):
elements = list(get_chem_dict(formula).keys())
for el in elements:
elemental_data['element'].append(el)
elemental_data[score].append(value)
elemental_data = pd.DataFrame(elemental_data)
# Take mean per element.
els = elemental_data.groupby('element')
df = els.size().reset_index().rename(columns={0: 'occurrences of element'})
df[score] = list(els[score].mean())
df['std'] = list(els[score].std())
# Start plotting.
plt.figure()
# Plot scatter plot.
ax = plt.errorbar(x=df['occurrences of element'], y=df[score], yerr=df['std'], fmt='.')
# Add axis labels.
plt.xlabel('occurrences of element')
plt.ylabel(score)
# Add title.
plt.title(model)
if log:
plt.xscale('log')
# # Add legend for quantile bars
# label = '25%/ 75% quantiles'
# handles, labels = plt.gca().get_legend_handles_labels()
# line = Line2D([0], [0], label=label, color='k')
# handles.extend([line])
# plt.legend(handles=handles)
# Save plot
plt.tight_layout()
save_dir = os.path.join(self.run_dir, 'plots')
if not os.path.exists(save_dir):
os.mkdir(save_dir)
save_name = os.path.join(save_dir, f'score_elemental_prevalence_{target}_{model}_{score}.png')
plt.savefig(save_name, dpi=300)
plt.show()
return()
return()
def hist_error_over_x(self, x, target, model, repetitions, domain_col, duplicate_col=None, ylim=None, errortype='MdAE', uncertainty='quantiles', log_bars=False):
"""This makes a parity plot of the uncertainty and the error with all test data from all CV runs of all repetitions.
`uncertainty´: 'quantiles' or 'sem'
"""
true_target = f'true {target}'
pred_target = f'pred {target}'
pred_lower_bound = f'${ML.SIGMA} \sigma$ lower bound'
pred_upper_bound = f'${ML.SIGMA} \sigma$ upper bound'
pred_scaled_unc = f'${ML.SIGMA} \sigma$ (scaled)'
# Rename x and y axis to make them nicer for Tc.
if target == 'tc':
error = f'{errortype} of $T_c$'
else:
error = f'{errortype} of {target}'
# When All_Data is finished this should be written to use it.
data_path = os.path.join(self.run_dir, 'All_values_and_predictions.csv')
df, _ = ML.load_df_and_metadata(data_path)
other_cols = [duplicate_col, x] if duplicate_col != None else [x]
data = All_Data.All_Data.get_test_data(df,
target,
model,
repetitions,
domain_col,
other_cols=other_cols,
true_target=true_target,
pred_target=pred_target,
pred_lower_bound=pred_lower_bound,
pred_upper_bound=pred_upper_bound,
pred_scaled_unc=pred_scaled_unc
)
data = data[(data[true_target] > 0) & (data[pred_target] > 0)]
# Reduce data so that we have only one entry per superconductor instead of one entry per crystal and take mean of true/ pred/ unc tc columns.
if duplicate_col != None:
mean_cols=[true_target, pred_target, pred_lower_bound, pred_upper_bound, pred_scaled_unc, x]
data = self.reduce_duplicates(data, duplicate_col, mean_cols=mean_cols)
# Sort so that domains with few data points are plotted on top.
if domain_col != None:
data['count'] = data.groupby('group')['group'].transform(pd.Series.count)
data.sort_values('count', inplace=True, ascending=False)
data = data.drop(columns='count')
# Start plotting.
if domain_col != None:
plt.figure(figsize=(8, 4.8))
else:
plt.figure()
if log_bars:
scaler = np.arcsinh
inv_scaler = np.sinh
else:
scaler = lambda x: x
inv_scaler = lambda x: x
# Get errors to plot.
if errortype == 'MdAE':
data[error] = np.abs(data[pred_target] - data[true_target])
reduce_fn = 'median'
elif errortype == 'SMAPE':
data[error] = SMAPE(data[true_target], data[pred_target], multioutput='raw_values')
reduce_fn = 'mean'
# Bin data because there are too many points for a scatterplot.
n_bins = 17
bin_space = scaler(data[x])
bin_width = (bin_space.max() - bin_space.min()) / (n_bins - 1)
bins = np.linspace(bin_space.min(), bin_space.max(), n_bins)
bin_width = inv_scaler(bins + bin_width) - inv_scaler(bins)
bins = inv_scaler(bins)
# if target == 'tc':
# # Bin to the left for tc==0.
# bins = np.insert(bins, 0, - bins[1], axis=0)
# bin_width = np.insert(bin_width, 0, bin_width[0], axis=0)
data['binned'] = pd.cut(data[x], bins=bins, include_lowest=True)
if reduce_fn == 'median':
data_binned = data.groupby('binned').median()
elif reduce_fn == 'mean':
data_binned = data.groupby('binned').mean()
bars = bins[:-1]
bin_width = bin_width[:-1]
# Get uncertainties.
if uncertainty == 'quantiles':
data_binned['25_quantile'] = data.groupby('binned')[error].quantile(0.25)
data_binned['75_quantile'] = data.groupby('binned')[error].quantile(0.75)
yerr = (data_binned[error] - data_binned['25_quantile'], data_binned['75_quantile'] - data_binned[error])
elif uncertainty == 'sem':
yerr = data.groupby('binned')[error].apply(stats.sem)
# Plot bar plot.
plt.bar(bars, data_binned[error], yerr=yerr, width=bin_width, align='edge', capsize=5)
# Add axis labels.
convert_label = {'totreldiff': '$\Delta_{totrel}$', 'tc': '$T_c$ (K)'}
xlabel = convert_label[x] if x in convert_label else x
if log_bars:
# plt.xscale('symlog')
plt.xscale(value='function', functions=(scaler, inv_scaler))
# Change log-scaled x-axis back into interpretable numbers.
if target == 'tc' and log_bars:
ax = plt.gca()
ticks = np.array([0, 1, 3, 10, 30, 100])
ax.set_xticks(ticks)
ticklabels = ['0', '1', '3', '10', '30', '100']
ax.set_xticklabels(ticklabels)
plt.xlabel(xlabel)
plt.ylabel(error)
# Add title.
plt.title(model)
# Add legend for quantile bars
labeldict = {'quantiles': '25-75% quantiles', 'sem': 'SEM'}
label = labeldict[uncertainty]
handles, labels = plt.gca().get_legend_handles_labels()
line = Line2D([0], [0], label=label, color='k')
handles.extend([line])
plt.legend(handles=handles)
# Add plot limits.
plt.ylim(ylim)
# Save plot
plt.tight_layout()
save_dir = os.path.join(self.run_dir, 'plots')
if not os.path.exists(save_dir):
os.mkdir(save_dir)
save_name = os.path.join(save_dir, f'quality_plot_all_test_data_{target}_{model}_{x}.png')
plt.savefig(save_name, dpi=300)
plt.show()
return()
def parity_plot_uncertainty_all_test_data(self, target, model, repetitions, domain_col, duplicate_col=None, log_log=False):
"""This makes a parity plot of the uncertainty and the error with all test data from all CV runs of all repetitions.
"""
true_target = f'true {target}'
pred_target = f'pred {target}'
pred_lower_bound = f'${ML.SIGMA} \sigma$ lower bound'
pred_upper_bound = f'${ML.SIGMA} \sigma$ upper bound'
pred_scaled_unc = f'${ML.SIGMA} \sigma$ (scaled)'
uncertainty = f'relative uncertainty'
# Rename x and y axis to make them nicer for Tc.
if target == 'tc':
error = 'error of $T_c$'
uncertainty = f'${ML.SIGMA} \sigma$ bound of $T_c$'
else:
error = f'error of {target}'
uncertainty = f'${ML.SIGMA} \sigma$ bound of {target}'
# When All_Data is finished this should be written to use it.
data_path = os.path.join(self.run_dir, 'All_values_and_predictions.csv')
df, _ = ML.load_df_and_metadata(data_path)
other_cols = [duplicate_col] if duplicate_col != None else []
data = All_Data.All_Data.get_test_data(df,
target,
model,
repetitions,
domain_col,
other_cols=other_cols,
true_target=true_target,
pred_target=pred_target,
pred_lower_bound=pred_lower_bound,
pred_upper_bound=pred_upper_bound,
pred_scaled_unc=pred_scaled_unc
)
# If this model has no uncertainty given leave.
has_uncertainty = pred_lower_bound in data.columns and pred_upper_bound in data.columns
if not has_uncertainty:
return
# Reduce data so that we have only one entry per superconductor instead of one entry per crystal and take mean of true/ pred/ unc tc columns.
if duplicate_col != None:
data = self.reduce_duplicates(data, duplicate_col, mean_cols=[true_target, pred_target, pred_lower_bound, pred_upper_bound, pred_scaled_unc])
# Sort so that domains with few data points are plotted on top.
data['count'] = data.groupby('group')['group'].transform(pd.Series.count)
data.sort_values('count', inplace=True, ascending=False)
data = data.drop(columns='count')
# Start plotting.
_ = plt.figure(figsize=(8, 4.8))
# To remove title of legend.
data = data.rename(columns={'group': ''})
hue = ''
# Get errors to plot.
data[error] = data[pred_target] - data[true_target]
# Get uncertainties to plot.
overestimating = data[error] > 0
underestimating = data[error] <= 0
upper_uncertainties = data.loc[underestimating, pred_upper_bound] - data.loc[underestimating, pred_target]
lower_uncertainties = data.loc[overestimating, pred_target] - data.loc[overestimating, pred_lower_bound]
data.loc[underestimating, uncertainty] = upper_uncertainties
data.loc[overestimating, uncertainty] = lower_uncertainties
# Get percentage of data points out of sigma bound.
out_of_sigma = ML.out_of_sigma(data[true_target], data[pred_target], data[pred_lower_bound], data[pred_upper_bound])
own_oos = data[error].abs() > data[uncertainty]
assert sum(own_oos) / len(own_oos) == out_of_sigma, 'Implementations of out_of_sigma don\'t match.'
# For debugging, can be deleted.
# unc = data[pred_scaled_unc] - min(data[pred_scaled_unc])
# unc /= max(unc)
# data['uncertainty'] = unc
# Plot scatter plot.
ax = sns.scatterplot(data=data, x=uncertainty, y=error, hue=hue, alpha=1)
# Plot dotted line where the error is exactly at the boundary of the uncertainty.
x_min = 0
x_max = max(data[uncertainty])
line = np.linspace(x_min, x_max, 300)
ax.plot(line, line, '--k', label=f'${ML.SIGMA} \sigma$ boundary')
ax.plot(line, -line, '--k')
# Plot title.
out_of_sigma_str = f'Out of ${ML.SIGMA} \sigma$: {out_of_sigma:.2f}'
title = f'{model} ({out_of_sigma_str})'
plt.title(title)
# Add legend.
ax.legend(loc='best')#, bbox_to_anchor=(1, 0.5))
# Ticks.
if log_log:
ax.set(xscale='symlog', yscale='symlog')
ax.xaxis.set_major_formatter(FormatStrFormatter('%.12g'))
ax.yaxis.set_major_formatter(FormatStrFormatter('%.12g'))
# Make labels nicer for tc.
if target == 'tc' and log_log:
yticks = [-100, -30, -10, -3, -1, 0, 1, 3, 10, 30, 100]
xticks = [0, 1, 3, 10, 30, 100, 200]
ax.set_xticks(xticks)
ax.set_yticks(yticks)
yticklabels = ['-100', '-30', '-10', '-3', '-1', '0', '1', '3', '10', '30', '100']
xticklabels = ['0', '1', '3', '10', '30', '100', '200']
ax.set_xticklabels(xticklabels)
ax.set_yticklabels(yticklabels)
# Save plot
plt.tight_layout()
save_dir = os.path.join(self.run_dir, 'plots/parity_plots')
if not os.path.exists(save_dir):
os.mkdir(save_dir)
save_name = os.path.join(save_dir, f'all_test_data_uncertainty_parity_plot_{target}_{model}.png')
plt.savefig(save_name, dpi=300)
plt.show()
return()
def raw_parity_plot(self, df, true_target, pred_target, hue, style, ax=None, log_log=False):
"""Plots a parity plot of predicted vs true target.
"""
true_target = df[true_target] if not isinstance(true_target, pd.Series) else true_target
pred_target = df[pred_target] if not isinstance(pred_target, pd.Series) else pred_target
if hue != None:
hue = df[hue] if not isinstance(hue, pd.Series) else hue
# Setup hue colors to be categorical.
hue = hue.convert_dtypes().astype(str)
palette = sns.color_palette(n_colors=hue.nunique())
hue_order = sorted(hue.unique())
# Remove legend title because it's annoying.
hue = hue.rename('')
else:
palette = None
hue_order = None
style = df[style] if not isinstance(style, pd.Series) else style
style_order = ['train', 'test']
# Plot
ax = sns.scatterplot(data=df, x=true_target, y=pred_target, hue=hue, style=style, style_order=style_order, palette=palette, hue_order=hue_order, ax=ax, alpha=0.8)
if log_log:
ax.set(xscale='symlog', yscale='symlog')
ax.xaxis.set_major_formatter(FormatStrFormatter('%.12g'))
ax.yaxis.set_major_formatter(FormatStrFormatter('%.12g'))
# Make labels nicer for tc.
if true_target.name == 'true tc':
plt.ylabel('pred $T_c$ (K)')
plt.xlabel('true $T_c$ (K)')
ticks = [0.1, 0.5, 1, 5, 10, 50, 100]
ax.set_xticks(ticks)
ax.set_yticks(ticks)
ticklabels = ['0.1', '0.5', '1', '5', '10', '50', '100']
ax.set_xticklabels(ticklabels)
ax.set_yticklabels(ticklabels)
# Add vertical line of perfect fit.
x_min = min(true_target)
x_max = max(true_target)
line = np.linspace(x_min, x_max, 300)
ax.plot(line, line, '--k', label='perfect fit')
ax.legend(loc='upper right')
return ax
def parity_plot(self, target, model, repetition, hue, duplicate_col=None, log_log=False):
"""Plot parity plots for all given targets, models and repetitions in run_dir.
"""
# When All_Data is finished this should be written to use it.
data_path = os.path.join(self.run_dir, 'All_values_and_predictions.csv')
df, _ = ML.load_df_and_metadata(data_path)
pred_target_name = All_Data.All_Data.name_preds_col(model, repetition, target)
CV = All_Data.name_CV_col(repetition)
# Make df of crystals to df of superconductors.
used_cols = [target, pred_target_name, CV, duplicate_col, hue]
df = df[used_cols]
if duplicate_col != None:
df = self.reduce_duplicates(df, duplicate_col, mean_cols=[target, pred_target_name])
# Make that test data points are plotted on top of train data points.
df = df.sort_values(by=CV, ascending=False)
plt.figure()
style = df[CV].rename('')
true_target = df[target].rename(f'true {target}')
pred_target = df[pred_target_name].rename(f'pred {target}')
self.raw_parity_plot(df, true_target, pred_target, hue, style=style, log_log=log_log)
# Save plot
save_dir = os.path.join(self.run_dir, 'plots/parity_plots')
if not os.path.exists(save_dir):
os.mkdir(save_dir)
save_name = os.path.join(save_dir, f'parity_plot_{target}_{model}_{repetition}.png')
plt.savefig(save_name, dpi=300)
plt.clf()
return
    def get_loss_dfs(self, plot_models, repetitions, run_dir, smoothen=False):
        """Returns dataframes in long and wide format with the losses of the models and repetitions in run_dir. Smoothen means summing over one epoch for the curves with higher resolution than that.

        :param plot_models: Model names whose saved loss curves are collected.
        :param repetitions: Repetition indices to include.
        :param run_dir: Directory of the run with the saved models.
        :param smoothen: If True, sum curves over one epoch when they were
            logged more often than once per epoch.
        :return: (df, df_save) — long-format df with columns
            'epoch'/'value'/'curve'/'mode'/'repetition'/'metric' for
            sns.lineplot, and the wide-format df for saving.
        """
        # Loop through all saved models and get their loss curves.
        loss_curves = {}
        for modelname in plot_models:
            for i in repetitions:
                regr = ML.get_saved_model(modelname, i, run_dir)
                model = regr.regressor_['model']
                # Standardize loss curve to dict.
                # sklearn MLPRegressor stores a plain list (train loss only);
                # torch models store a dict of named curves.
                if isinstance(model.loss_curve_, list):
                    loss_curves[f'train_{i}'] = model.loss_curve_
                elif isinstance(model.loss_curve_, dict):
                    for key, vals in model.loss_curve_.items():
                        loss_curves[f'{key}_{i}'] = vals
                else:
                    raise ValueError('model.loss_curve_ is neither list nor dict.')
        # Get number of epochs for each repetition.
        # Assumes every repetition has a 'train' curve with one entry per epoch.
        num_epochs = {}
        for i in repetitions:
            num_epochs[i] = len(loss_curves[f'train_{i}'])
        df = pd.DataFrame(columns=['epoch'])
        for curve_name, loss_curve in loss_curves.items():
            df_curve = pd.DataFrame(data=loss_curve, columns=[curve_name])
            df_curve = df_curve.reset_index().rename(columns={'index': 'epoch'})
            # Epochs are 1-based.
            df_curve['epoch'] = df_curve['epoch'] + 1
            # Rescale the epoch axis so that curves logged more often than
            # once per epoch line up with the per-repetition epoch count.
            rep = int(curve_name.split('_')[-1])
            norm = num_epochs[rep] / len(df_curve)
            df_curve['epoch'] = norm * df_curve['epoch']
            # Outer merge keeps all epoch values of all curves (NaN elsewhere).
            df = df.merge(df_curve, on='epoch', how='outer')
        df = df.sort_values(by='epoch')
        if smoothen:
            # Sum curves over one epoch if resolution is higher than that.
            df['epoch'] = df['epoch'].apply(np.ceil)
            df = df.groupby(by=['epoch']).sum().reset_index()
        df_save = copy.deepcopy(df)
        # Prepare df in long format for sns.lineplot.
        df = pd.melt(df, ['epoch'], value_name='loss', var_name='curve')
        df = df[df['loss'].notna()]
        # Curve names have the form '<mode>_<repetition>'.
        pattern = r'^(.*)_(\d+)$'
        df[['mode', 'repetition']] = df['curve'].str.extract(pattern)
        # Extract different metrics
        # Bare 'train'/'valid' modes are normalized to the
        # '<metric> (<mode>)' naming scheme before splitting.
        df.loc[df['mode'] == 'train', 'mode'] = 'loss (train)'
        df.loc[df['mode'] == 'valid', 'mode'] = 'loss (valid)'
        metrics_pattern = r'^(.*) \((.*)\)$'
        df[['metric', 'mode']] = df['mode'].str.extract(metrics_pattern)
        df = df.rename(columns={'loss': 'value'})
        return(df, df_save)
def plot_loss_curves(self, plot_models, repetitions, run_dir, outpath, losses, ax=None, save=True, smoothen=False, scale=False, mean=False):
"""Plot and save loss curves of MLPregressor and torch models. Smoothen means summing over one epoch for the curves with higher resolution than that.
"""
df, df_save = self.get_loss_dfs(plot_models, repetitions, run_dir)
wanted_metrics = df['metric'].isin(losses)
assert len(wanted_metrics) > 0, f'Attribute {losses[0]} not found.'
df = df[wanted_metrics]
# Define style of plot.
if ax == None:
ax = plt.figure().gca()
modes = df['mode'].unique().tolist()
dashes = {mode: (2, 2) if mode.startswith('train') else '' for mode in modes}
if mean or len(repetitions) == 1:
if len(wanted_metrics) > 1:
hue = 'metric'
style = 'mode'
else:
hue = 'mode'
style = None
ax.set_ylabel(wanted_metrics[0])
else:
assert len(wanted_metrics) == 1
hue = 'repetition'
style = 'mode'
# Plot and save loss curves.
sns.lineplot(x='epoch', y='value', hue=hue, style=style, dashes=dashes, data=df, ax=ax)
ax.set_title('+'.join(plot_models))
plt.yscale('log')
if scale:
max_losses = df.groupby(by=['mode', 'repetition'])['value'].max()
max_plot_loss = 2*max_losses.median()
plt.ylim(0, max_plot_loss)
if save:
plt.savefig(outpath + '.png', dpi=300)
df_save.to_csv(outpath + '.csv')
plt.show()
plt.close()
return()
def plot_grid_loss_curves(self, plot_models, repetitions, run_dir, outpath, losses):
"""Plots a grid plot of all loss curves.
"""
# Get number of images in rows and columns (height and width).
num_models = len(plot_models)
if num_models <= 2:
height = num_models
width = 1
else:
height = int(np.ceil(num_models / 2))
width = 2
fig, axes = plt.subplots(height, width, gridspec_kw=dict(hspace=0.3), figsize=(12,9), sharex=True, squeeze=False)
# Plot figures in a grid.
idx = -1
for w in range(width):
for h in range(height):
idx += 1
if idx >= num_models:
continue
plot_model = plot_models[idx]
ax = axes[h][w]
self.plot_loss_curves([plot_model], repetitions, run_dir, outpath='', losses=losses, ax=ax, save=False)
# Add ticks to shared axes.
ax.xaxis.set_tick_params(labelbottom=True)
# Save plot.
plt.savefig(outpath, dpi=300)
plt.close('all')
return()
def final_plots(self, plot_dir, plot_models, df_data, domain_colname, features, targets, use_models, outdir):
"""Do some final plots of your models.
"""
self.plot_dir = plot_dir
n_repetitions = len([col for col in df_data.columns if col.startswith('CV_')])
repetitions = list(range(n_repetitions))
# repetitions = list(range(len(train_indices)))
print(f'Plot some stuff in {self.plot_dir}...')
if not os.path.exists(self.plot_dir):
os.mkdir(self.plot_dir)
# # Plot grid figure for loss curve.
# outpath = os.path.join(self.plot_dir, 'loss_all_models.png')
# losses = ['train', 'valid', 'train2', 'valid2']
# if plot_models:
# try:
# self.plot_grid_loss_curves(plot_models, repetitions, self.run_dir, outpath, losses)
# except AttributeError:
# warnings.warn('Could not make a grid plot of loss curves because one of the specified models did not have the attribute `loss_curve_`!')
# losses = ['loss', 'mse', 'mae']
# for plot_model in plot_models:
# # Plot individual figures for curve.
# outpath = os.path.join(self.plot_dir, f'loss_{"+".join(plot_models)}')
# self.plot_loss_curves([plot_model], repetitions, self.run_dir, outpath, losses)
# Plot individual loss curves with mean and standard deviation.
losses = ['loss', 'mse']
for plot_model in plot_models:
outpath = os.path.join(self.plot_dir, f'mean_{"+".join(losses)}_{"+".join(plot_models)}')
self.plot_loss_curves([plot_model], repetitions, self.run_dir, outpath, losses, mean=True)
# # Plot figures for each of the minor loss curves.
# losses = ['extrapol', 'oracle', 'erm', 'holdout', 'eff_regret', 'regret', 'total', 'eff_loss', 'rep_loss']
# for plot_model in ['RGM']:
# if not plot_model in plot_models:
# continue # if model doesn't exist
# for i in repetitions:
# outpath = os.path.join(self.plot_dir, f'minor_loss_RGM_{i}')
# self.plot_loss_curves([plot_model], [i], self.run_dir, outpath, losses)
# # Plot figures for the norm of the gradient.
# losses = ['grad_norm_before', 'grad_norm_clipped']
# for plot_model in ['RGM']:
# if not plot_model in plot_models:
# continue # if model doesn't exist
# for i in repetitions:
# outpath = os.path.join(self.plot_dir, f'norm_grad_RGM_{i}')
# self.plot_loss_curves([plot_model], [i], self.run_dir, outpath, losses)
# Plot backwards graphs.
for modelname in use_models.keys():
repetition = 0
model = ML.get_saved_model(modelname, repetition, self.run_dir)
try:
for i, graph in enumerate(model.backward_graphs):
filename = f'Backward_{modelname}_{repetition}_{i}'
graph.render(filename, self.plot_dir, cleanup=True)
except AttributeError:
pass
# Plot prediction surface if features are 2D.
try:
for modelname in use_models.keys():
for repetition in repetitions:
model = ML.get_saved_model(modelname, repetition, self.run_dir)
feature_dict = {'x': (-1.5, 1.5), 'y': (-1.5, 1.5)}
x_true = df_data['x_0'].to_numpy()
y_true = df_data['x_1'].to_numpy()
target_true = df_data['target'].to_numpy()
outpath = os.path.join(self.plot_dir, f'Preds_surface_{modelname}_{repetition}.png')
# Get color per domain.
domains = df_data[domain_colname].to_numpy()
if len(domains) > 0:
cycle = plt.rcParams['axes.prop_cycle'].by_key()['color']
color_dict = {d: cycle[i] for i, d in enumerate(np.unique(domains))}
colors = [color_dict[d] for d in domains]
else:
colors = None
scatter_kwargs = {'c': colors}
self.plot_2D_preds(model, feature_dict, outpath, x_true, y_true, target_true, scatter_kwargs)
except KeyError:
pass
# Plot prediction line if features are 1D.
models = [model for model in use_models.keys() if not (model == 'LR')]
plot_1D_features = len(features) == 1 and is_numeric_dtype(df_data[features])
if plot_1D_features:
try:
x_true = df_data[features].to_numpy()
target_true = df_data[targets[0]].to_numpy()
for repetition in repetitions:
model_dict = {}
for modelname in models:
model_dict[modelname] = ML.get_saved_model(modelname, repetition, self.run_dir)
# Get test and train feature and target for this repetition.
CV_col = All_Data.All_Data.name_CV_col(repetition)
train_indices = df_data[CV_col] == 'train'
test_indices = df_data[CV_col] == 'test'
x_train = x_true[train_indices]
target_train = target_true[train_indices]
x_test = x_true[test_indices]
target_test = target_true[test_indices]
data_dict = {
'train data': (x_train, target_train),
'test data': (x_test, target_test),
}
scatter_kwargs = {'markersize': 5}
outpath = os.path.join(self.plot_dir, f'Preds_1D_line_{modelname}_{repetition}.png')
# x_limits = [-2, 2]
# y_limits = [-1.5, 1.5]
# add_fn={'Cbrt': np.cbrt}
x_limits = []
y_limits = []
add_fn = {}
self.plot_1D_preds(model_dict, data_dict, outpath, scatter_kwargs, x_limits=x_limits, y_limits=y_limits, add_fn=add_fn)
except KeyError:
pass
# Parity plot for all test data.
plot_models = use_models.keys()
duplicate_col = 'formula_sc' if 'formula_sc' in df_data.columns else None
log_log = True
for target, model in product(targets, plot_models):
self.parity_plot_all_test_data(target, model, repetitions, domain_colname, duplicate_col=duplicate_col, log_log=log_log)
# Plot uncertainty parity plot.
plot_models = use_models.keys()
duplicate_col = 'formula_sc' if 'formula_sc' in df_data.columns else None
log_log = True
# The function itself checks whether the model has uncertainty and if not it returns.
for target, model in product(targets, plot_models):
self.parity_plot_uncertainty_all_test_data(target, model, repetitions, domain_colname, duplicate_col=duplicate_col, log_log=log_log)
# Plot error over target distribution.
ylim = (0, 1)
duplicate_col = 'formula_sc' if 'formula_sc' in df_data.columns else None
for target, model in product(targets, plot_models):
x = target
log_bars = True if target == 'tc' else False
self.hist_error_over_x(x, target, model, repetitions, domain_colname, duplicate_col=duplicate_col, log_bars=log_bars, ylim=ylim, errortype='SMAPE', uncertainty='sem')
# Plot quality plot with totreldiff.
x = 'totreldiff'
ylim = (0, None)
duplicate_col = 'formula_sc' if 'formula_sc' in df_data.columns else None
if x in df_data:
varying_quality = sum(df_data[x].unique()) > 1
if varying_quality:
for target, model in product(targets, plot_models):
self.hist_error_over_x(x, target, model, repetitions, domain_colname, duplicate_col=duplicate_col, ylim=ylim, errortype='SMAPE', uncertainty='sem')
else:
print('Can\'t plot dataset quality plot.')
# Plot prediction error over elemental prevalence.
score = 'SMAPE'
chem_formula = 'formula_sc'
duplicate_col = 'formula_sc' if 'formula_sc' in df_data.columns else None
log = True
if chem_formula in df_data.columns:
for target, modelname in product(targets, plot_models):
self.score_over_elemental_prevalence(model, target, score, repetitions, domain_colname, chem_formula, duplicate_col, log)
# Plot feature importances.
for target, modelname in product(targets, plot_models):
all_importances = []
for repetition in repetitions:
model = ML.get_saved_model(modelname, repetition, self.run_dir).regressor_['model']
try:
importances = Feature_Importances.get_feature_importances(model)
except AttributeError:
continue
all_importances.append(importances)
if len(all_importances) > 0:
importances = np.mean(all_importances, axis=0)
feat_dir = os.path.join(self.run_dir, 'plots', 'feature_importances')
if not os.path.exists(feat_dir):
os.mkdir(feat_dir)
outpath = os.path.join(feat_dir, f'{modelname}_{target}_feature_importances')
Feature_Importances.plot_feature_importances(importances, features, outpath)
print('Finished with plotting.')
return()
| 55,547 | 43.688656 | 274 | py |
3DSC | 3DSC-main/superconductors_3D/machine_learning/own_libraries/utils/Refactoring.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 22 10:59:22 2021
@author: Timo Sommer
This script contains a class for checking if the output of the Machine_Learning() class is the same as in one reference directory. It is particularly useful for automatically checking if the code still does the same after refactoring.
"""
import os
import superconductors_3D.machine_learning.Custom_Machine_Learning_v1_3 as ML
import pandas as pd
import joblib
import pickle
import torch
import filecmp
import warnings
import tempfile
class Refactoring():
    """This class is used for refactoring. It checks if the files in one directory match the files in another comparison directory, except for some things that must change like clocktime.
    """
    def __init__(self, cmp_dir):
        # Reference directory with the known-good output of a previous run.
        self.cmp_dir = os.path.abspath(cmp_dir)
        # Files and directories a run directory is expected to contain.
        self.expected_files = ['All_scores.csv', 'All_values_and_predictions.csv', 'Domain_statistics.csv', 'Numerical results', 'hparams.yml', 'arguments']
        self.expected_dirs = ['models', 'plots']
        # Report file written into the new directory; excluded from comparison.
        self.out_filename = 'Refacturing_result.txt'
        self.skip_files = [self.out_filename]
    def check_if_content_as_expected(self):
        """Checks if all of the expected files and directories in the comparison directory are there and if there are no unexpected files. Otherwise just prints messages.
        """
        contents = sorted(os.listdir(self.cmp_dir))
        expected_contents = sorted(self.expected_files + self.expected_dirs)
        # Expected entries that are missing / present entries not expected.
        too_few = [name for name in expected_contents if not name in contents]
        too_much = [name for name in contents if not name in expected_contents]
        for content in too_few:
            if content in self.skip_files:
                continue
            self.print(f'Expected but not found: {content}')
        for content in too_much:
            if content in self.skip_files:
                continue
            self.print(f'Unexpectedly found: {content}')
        return()
    def indent(self, text):
        """Indent every line of `text` by one space."""
        space = ' '
        text = space + text.replace('\n', '\n' + space)
        return(text)
    def print(self, text, to_console=True):
        """Prints text to output file. If to_console=True, also prints to console.
        """
        # Appends to the report file inside the new (checked) directory.
        outpath = os.path.join(self.new_dir, self.out_filename)
        with open(outpath, 'a') as f:
            f.write('\n' + text)
        if to_console:
            print(text)
        return()
    def check_if_df_is_close(self, filename, d1, d2):
        """Checks if a df is at least close to another df.

        :return: (is_close, error) — error is the assertion message if the
            dataframes differ, '' otherwise.
        """
        f1 = os.path.join(d1, filename)
        f2 = os.path.join(d2, filename)
        df1, _ = ML.load_df_and_metadata(f1)
        df2, _ = ML.load_df_and_metadata(f2)
        try:
            # Uses pandas' tolerance-based comparison (close, not identical).
            pd.testing.assert_frame_equal(df1, df2)
            is_close = True
            error = ''
        except AssertionError as e:
            is_close = False
            error = str(e)
        return(is_close, error)
    def cmp_Numerical_results(self, filename1, filename2):
        """Compares the files 'Numerical results' in the two directories and returns True if they are equal except for the time.
        """
        with open(filename1) as f1:
            with open(filename2) as f2:
                # Exclude the first line that includes the time of the run.
                relevant1 = f1.readlines()[1:]
                relevant2 = f2.readlines()[1:]
                equal = relevant1 == relevant2
        return(equal)
    def cmp_pytorch_NN(self, NN1, NN2):
        """If the model is a pytorch model, there will be non deterministic behaviour. Therefore only compare the state_dicts in this case.
        """
        params1 = list(NN1.state_dict().values())
        params2 = list(NN2.state_dict().values())
        # Equal only if both nets have the same number of tensors and every
        # tensor matches element-wise.
        same_n_weights = len(params1) == len(params2)
        equal = same_n_weights and all([torch.equal(w1, w2) for w1, w2 in zip(params1, params2)])
        return equal
    def cmp_GP_on_NN_featurizer(self, regr1, regr2):
        """If the model is a GP model that loads a pytorch NN, the attribute that is a path that points to the pytorch model changes between runs. Therefore before comparing these models overwrite these attributes.
        """
        # Check NNs of GP_on_NN for equality.
        NN1 = regr1.regressor_['model'].NN_featurizer
        NN2 = regr2.regressor_['model'].NN_featurizer
        equal_NNs = self.cmp_pytorch_NN(NN1, NN2)
        # Overwrite NN_stuff of GP_on_NN.
        regr1.regressor_['model'].NN_path = ''
        regr2.regressor_['model'].NN_path = ''
        regr1.regressor_['model'].NN_featurizer = ''
        regr2.regressor_['model'].NN_featurizer = ''
        # NOTE(review): `regressor` (without underscore) here vs `regressor_`
        # above — confirm both attributes really need to be cleared.
        regr1.regressor['model'].NN_path = ''
        regr2.regressor['model'].NN_path = ''
        # Check GP with overriden NN stuff for equality.
        with tempfile.NamedTemporaryFile() as tmp_file1:
            with tempfile.NamedTemporaryFile() as tmp_file2:
                pickle.dump(regr1, open(tmp_file1.name, 'wb'))
                pickle.dump(regr2, open(tmp_file2.name, 'wb'))
                # NOTE(review): shallow=True compares only os.stat signatures
                # (size/mtime), not file contents — confirm this is intended.
                equal_GPs = filecmp.cmp(tmp_file1.name, tmp_file2.name, shallow=True)
        equal = equal_NNs and equal_GPs
        return equal
    def cmp_joblib(self, filename1, filename2):
        """Compare joblib files.
        """
        regr1 = joblib.load(filename1)
        regr2 = joblib.load(filename2)
        # The saved regressors are heterogeneous; try the comparison methods
        # from most specific (torch NN) to most generic (raw file compare).
        try:
            NN1 = regr1.regressor_['model'].trainer
            NN2 = regr2.regressor_['model'].trainer
            try:
                # pytorch model
                equal = self.cmp_pytorch_NN(NN1, NN2)
            except AttributeError:
                # pytorch lightning model
                NN1 = NN1.model
                NN2 = NN2.model
                equal = self.cmp_pytorch_NN(NN1, NN2)
        except AttributeError:
            try:
                equal = self.cmp_GP_on_NN_featurizer(regr1, regr2)
            except AttributeError:
                # NOTE(review): shallow=True compares only os.stat signatures
                # (size/mtime), not byte content — confirm this is intended.
                equal = filecmp.cmp(filename1, filename2, shallow=True)
        return(equal)
    def cmp_files(self, filename, d1, d2):
        """Compares if two files are the same. In some cases, it is clear that files cannot be the same after refacturing, in this case this function compares only what can be compared.
        """
        pkl_files = ['.pkl', '.joblib']
        f1 = os.path.join(d1, filename)
        f2 = os.path.join(d2, filename)
        if filename == 'Numerical results':
            # Skips the timestamp line.
            equal = self.cmp_Numerical_results(f1, f2)
        elif any([filename.endswith(pkl_file) for pkl_file in pkl_files]):
            equal = self.cmp_joblib(f1, f2)
        else:
            equal = filecmp.cmp(f1, f2, shallow=False)
        return(equal)
    def not_equal(self, filename, new_path, cmp_path):
        """Executed if files are not equal. Prints the issues.
        """
        # NOTE(review): this f-string has no placeholder — presumably it was
        # meant to interpolate {filename}; confirm.
        self.print(f'\nFile not equal: (unknown)')
        # If file is df, check if dfs are at least close.
        if filename.endswith('.csv'):
            is_close, error = self.check_if_df_is_close(filename, new_path, cmp_path)
            if is_close:
                self.print('--> But dfs are close.')
            else:
                self.print('--> Dataframes are not even close.')
                print(f'--> See file {self.out_filename} for further information.')
                error = self.indent(error)
                # Full pandas diff only goes to the report file, not console.
                self.print(f'--> Error:\n{error}', to_console=False)
    def both_exist(self, filename, d1, d2):
        """Return True if `filename` exists in both directories; print a
        message for each missing counterpart."""
        f1 = os.path.join(d1, filename)
        f2 = os.path.join(d2, filename)
        exist = True
        for f in (f1, f2):
            if not os.path.exists(f):
                self.print(f'File {f} doesn\'t exist.')
                exist = False
        return(exist)
    def check(self, new_dir):
        """Checks if the original `cmp_dir` matches the new directory `new_dir` in all important files. If yes, refacturing was successfull.
        """
        self.new_dir = os.path.abspath(new_dir)
        self.print(f'\nCheck refactoring against comparison directory {self.cmp_dir}.')
        if not os.path.exists(self.cmp_dir):
            warnings.warn(f'Comparison directory {self.cmp_dir} doesn\'t exist!\n Exiting without check.')
            return(False)
        self.check_if_content_as_expected()
        # Check every file if they are equal and otherwise print message.
        dirs_equal = True
        for new_path, _, new_files in os.walk(self.new_dir):
            # New path including subdirectories of the cmp directory.
            cmp_path = new_path.replace(self.new_dir, self.cmp_dir)
            for filename in new_files:
                if filename in self.skip_files or not self.both_exist(filename, new_path, cmp_path):
                    continue
                equal = self.cmp_files(filename, new_path, cmp_path)
                if equal and filename in self.expected_files:
                    # NOTE(review): f-string has no placeholder — presumably
                    # meant to include {filename}; confirm.
                    print(f'File equal: (unknown)')
                if not equal:
                    dirs_equal = False
                    self.not_equal(filename, new_path, cmp_path)
        if dirs_equal:
            self.print('\nAll relevant files of the directories are equal.\nRefactoring successfull!\n')
        else:
            self.print('\nThere are some relevant files that are not equal.\nRefactoring not successfull!\n')
        return(dirs_equal)
3DSC | 3DSC-main/superconductors_3D/machine_learning/own_libraries/utils/Models.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 22 14:12:31 2021
@author: Timo Sommer
This script is a collection of classes for saving and loading models of the ML script.
"""
from megnet.models import MEGNetModel
import os
import pickle
import io
def get_modelpath(outdir, modelname, repetition):
    """Return the path (without extension) under which a model would be saved.

    `outdir` is the directory of the whole run, which contains a 'models'
    subdirectory; the file name combines the model name and the repetition
    index. Used both internally and externally.
    """
    return os.path.join(outdir, 'models', f'{modelname}_{repetition}')
def regressor_from_pipeline(pipe):
    """Return the ML model contained in a fitted sklearn Pipeline or
    TransformedTargetRegressor (the step registered under 'model')."""
    fitted_pipeline = pipe.regressor_
    return fitted_pipeline['model']
class Pickle_tf():
    """Persist tensorflow-backed models that cannot be pickled directly
    (pickling them raises a ThreadLock error).

    The picklable sklearn part is stored via pickle while the non-picklable
    tensorflow part is written separately as an HDF5 file.
    """
    def __init__(self):
        pass
    def model_is_MEGNet(self, modelpath):
        """Return True if the model stored under `modelpath` is a MEGNet model.

        Detection works by trying to load the companion .hdf5 file.
        """
        hdf5_path = self.get_tf_filepath(modelpath)
        try:
            MEGNetModel.from_file(hdf5_path)
        except FileNotFoundError:
            return False
        return True
    def get_tf_filepath(self, modelpath):
        """Derive the tf model path (.hdf5) from the pickle path."""
        root, _ = os.path.splitext(modelpath)
        return root + '.hdf5'
    def save(self, regr, modelpath):
        """Save a tf/MEGNet model.

        The picklable part of the sklearn pipeline goes into `modelpath`,
        the tf part into a sibling .hdf5 file.
        """
        hdf5_path = self.get_tf_filepath(modelpath)
        try:
            # MEGNet: the non-picklable keras model lives on the regressor.
            tf_model = regressor_from_pipeline(regr).model
            tf_model.save_model(filename=hdf5_path)
        except AttributeError as e:
            # Other tf models are not supported.
            raise NotImplementedError(f'Not implemented for other model than MEGNet. Error message: {e}')
        # Strip the non-picklable part, pickle the rest ...
        regressor_from_pipeline(regr).model = None
        with open(modelpath, 'wb') as f:
            pickle.dump(regr, f)
        # ... then restore it so `regr` is unchanged for the caller
        # (deepcopy does not work here for some reason).
        regressor_from_pipeline(regr).model = tf_model
    def load(self, modelpath):
        """Load a model stored as pickled pipeline plus a separate HDF5 tf part."""
        regr = Models().load_pickle(modelpath)
        try:
            # MEGNet
            restored_model = MEGNetModel.from_file(self.get_tf_filepath(modelpath))
            regressor_from_pipeline(regr).model = restored_model
        except AttributeError as e:
            # Other tf models are not supported.
            raise NotImplementedError(f'Not implemented for other model than MEGNet. Error message: {e}')
        return regr
class Models():
    """Saving and loading of ML models."""
    def __init__(self):
        pass
    def save(self, regr, rundir, modelname, repetition, save_torch_statedict=False):
        """Save `regr` as pickle and, on request, additionally as a pytorch state dict.

        :param regr: Fitted sklearn pipeline/regressor to persist.
        :param rundir: Run directory containing the 'models' subdirectory.
        :param modelname: Name of the model.
        :param repetition: Repetition index (part of the file name).
        :param save_torch_statedict: If True, also write `<name>.pt` when the
            model exposes a pytorch trainer.
        """
        base_path = get_modelpath(rundir, modelname, repetition)
        pkl_path = base_path + '.pkl'
        try:
            with open(pkl_path, 'wb') as f:
                pickle.dump(regr, f)
        except (TypeError, AttributeError) as e:
            # tf/keras models cannot be pickled as a whole and need the
            # dedicated two-file storage.
            if 'pickle' in str(e):
                Pickle_tf().save(regr=regr, modelpath=pkl_path)
            else:
                raise TypeError(e)
        if save_torch_statedict:
            pt_path = base_path + '.pt'
            try:
                model = regressor_from_pipeline(regr)
                import torch
                torch.save(model.trainer.state_dict(), pt_path)
            except AttributeError:
                # Model has no pytorch trainer -- nothing to save.
                pass
        return ()
    def load_pickle(self, modelpath):
        """Unpickle a model; works even if it was pickled on a GPU machine."""
        with open(modelpath, 'rb') as f:
            return CPU_Unpickler(f).load()
    def load_from_path(self, modelpath):
        """Load a model from a .pkl path, transparently handling models
        (e.g. MEGNet) that were stored as pickle + HDF5."""
        if Pickle_tf().model_is_MEGNet(modelpath):
            return Pickle_tf().load(modelpath)
        return self.load_pickle(modelpath)
    def load(self, modelname: str, repetition: int, rundir: str, regressor: bool=False):
        """Load the model `modelname`/`repetition` from `rundir`.

        With `regressor=True` only the fitted ML model is returned instead of
        the whole sklearn pipeline/regressor.
        """
        modelpath = get_modelpath(rundir, modelname, repetition) + '.pkl'
        model = self.load_from_path(modelpath)
        if regressor:
            model = regressor_from_pipeline(model)
        return model
class CPU_Unpickler(pickle.Unpickler):
    """Unpickler that maps torch storages onto the CPU.

    Use this instead of `pickle.load(f)` so that a model pickled on a GPU
    machine can be loaded on a CPU-only machine.
    """
    def find_class(self, module, name):
        """Intercept torch storage loading and force `map_location='cpu'`."""
        if module == 'torch.storage' and name == '_load_from_bytes':
            # Import locally: torch is not imported at module level in this
            # file (it was only imported inside Models.save), so without this
            # the lambda below raised NameError at unpickling time. The local
            # import also keeps torch optional for non-torch models.
            import torch
            return lambda b: torch.load(io.BytesIO(b), map_location='cpu')
        else:
            return super().find_class(module, name)
import io
import io
import os

from setuptools import setup, find_packages

here = os.path.abspath(os.path.dirname(__file__))

# Avoids IDE errors, but actual version is read from version.py.
__version__ = None
# Use a context manager: the previous `exec(open(...).read())` left the
# file handle unclosed.
with open('matchzoo/version.py') as version_file:
    exec(version_file.read())

short_description = 'Facilitating the design, comparison and sharing of deep text matching models.'

# Get the long description from the README file.
with io.open(os.path.join(here, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()

install_requires = [
    'keras >= 2.3.0',
    'nltk >= 3.2.3',
    'numpy >= 1.14',
    'tqdm >= 4.19.4',
    'dill >= 0.2.7.1',
    'pandas >= 0.23.1',
    'networkx >= 2.1',
    'h5py >= 2.8.0',
    'hyperopt >= 0.1.1'
]

extras_requires = {
    'tests': [
        'coverage >= 4.3.4',
        'codecov >= 2.0.15',
        'pytest >= 3.0.3',
        'pytest-cov >= 2.4.0',
        'flake8 >= 3.6.0',
        'flake8_docstrings >= 1.0.2'],
}

setup(
    name="MatchZoo",
    version=__version__,
    author="Yixing Fan, Bo Wang, Zeyi Wang, Liang Pang, Liu Yang, Qinghua Wang, etc.",
    author_email="fanyixing@ict.ac.cn",
    description=(short_description),
    license="Apache 2.0",
    keywords="text matching models",
    url="https://github.com/NTMC-Community/MatchZoo",
    packages=find_packages(),
    long_description=long_description,
    long_description_content_type='text/markdown',
    classifiers=[
        "Development Status :: 3 - Alpha",
        'Environment :: Console',
        'Operating System :: POSIX :: Linux',
        'Topic :: Scientific/Engineering :: Artificial Intelligence',
        "License :: OSI Approved :: Apache Software License",
        'Programming Language :: Python :: 3.6'
    ],
    install_requires=install_requires,
    extras_require=extras_requires
)
from pathlib import Path
# Per-user MatchZoo data directory (downloaded datasets, tuned models, ...).
USER_DIR = Path.expanduser(Path('~')).joinpath('.matchzoo')
# `exist_ok=True` replaces the previous check-then-create pattern, which was
# racy and failed if e.g. ~/.matchzoo was created concurrently.
USER_DIR.mkdir(parents=True, exist_ok=True)
USER_DATA_DIR = USER_DIR.joinpath('datasets')
USER_DATA_DIR.mkdir(exist_ok=True)
USER_TUNED_MODELS_DIR = USER_DIR.joinpath('tuned_models')
from .version import __version__
from .data_pack import DataPack
from .data_pack import pack
from .data_pack import load_data_pack
from . import metrics
from . import tasks
from . import preprocessors
from . import data_generator
from .data_generator import DataGenerator
from .data_generator import DataGeneratorBuilder
from .preprocessors.chain_transform import chain_transform
from . import metrics
from . import losses
from . import engine
from . import models
from . import embedding
from . import datasets
from . import layers
from . import auto
from . import contrib
from .engine import hyper_spaces
from .engine.base_model import load_model
from .engine.base_preprocessor import load_preprocessor
from .engine import callbacks
from .engine.param import Param
from .engine.param_table import ParamTable
from .embedding.embedding import Embedding
from .utils import one_hot, make_keras_optimizer_picklable
from .preprocessors.build_unit_from_data_pack import build_unit_from_data_pack
from .preprocessors.build_vocab_unit import build_vocab_unit
# deprecated, should be removed in v2.2
from .contrib.legacy_data_generator import DPoolDataGenerator
from .contrib.legacy_data_generator import DPoolPairDataGenerator
from .contrib.legacy_data_generator import HistogramDataGenerator
from .contrib.legacy_data_generator import HistogramPairDataGenerator
from .contrib.legacy_data_generator import DynamicDataGenerator
from .contrib.legacy_data_generator import PairDataGenerator
| 1,785 | 30.333333 | 78 | py |
"""An implementation of CDSSM (CLSM) model."""
import typing
import keras
from keras.models import Model
from matchzoo.engine.base_model import BaseModel
from matchzoo.engine.param import Param
from matchzoo.engine.param_table import ParamTable
from matchzoo import preprocessors
from matchzoo.utils import TensorType
class CDSSM(BaseModel):
    """
    CDSSM Model implementation.
    Learning Semantic Representations Using Convolutional Neural Networks
    for Web Search. (2014a)
    A Latent Semantic Model with Convolutional-Pooling Structure for
    Information Retrieval. (2014b)
    Examples:
        >>> model = CDSSM()
        >>> model.params['optimizer'] = 'adam'
        >>> model.params['filters'] = 32
        >>> model.params['kernel_size'] = 3
        >>> model.params['conv_activation_func'] = 'relu'
        >>> model.guess_and_fill_missing_params(verbose=0)
        >>> model.build()
    """
    @classmethod
    def get_default_params(cls) -> ParamTable:
        """:return: model default parameters."""
        # `with_multi_layer_perceptron=True` adds the shared MLP
        # hyper-parameters (layer count, units, activation) to the table;
        # the MLP itself is instantiated in `_create_base_network`.
        # (The previous comment here claimed the flag was set to False,
        # which contradicted the code.)
        params = super().get_default_params(with_multi_layer_perceptron=True)
        params.add(Param(name='filters', value=32,
                         desc="Number of filters in the 1D convolution "
                              "layer."))
        params.add(Param(name='kernel_size', value=3,
                         desc="Number of kernel size in the 1D "
                              "convolution layer."))
        params.add(Param(name='strides', value=1,
                         desc="Strides in the 1D convolution layer."))
        params.add(Param(name='padding', value='same',
                         desc="The padding mode in the convolution "
                              "layer. It should be one of `same`, "
                              "`valid`, ""and `causal`."))
        params.add(Param(name='conv_activation_func', value='relu',
                         desc="Activation function in the convolution"
                              " layer."))
        params.add(Param(name='w_initializer', value='glorot_normal'))
        params.add(Param(name='b_initializer', value='zeros'))
        params.add(Param(name='dropout_rate', value=0.3,
                         desc="The dropout rate."))
        return params
    def _create_base_network(self) -> typing.Callable:
        """
        Apply conv and maxpooling operation towards to each letter-ngram.
        The input shape is `fixed_text_length`*`number of letter-ngram`,
        as described in the paper, `n` is 3, `number of letter-trigram`
        is about 30,000 according to their observation.
        :return: Wrapped Keras `Layer` as CDSSM network, tensor in tensor out.
        """
        def _wrapper(x: TensorType):
            # Apply 1d convolutional on each word_ngram (lt).
            # Input shape: (batch_size, num_tri_letters, 90000)
            # Sequence of num_tri_letters vectors of 90000d vectors.
            x = keras.layers.Conv1D(
                filters=self._params['filters'],
                kernel_size=self._params['kernel_size'],
                strides=self._params['strides'],
                padding=self._params['padding'],
                activation=self._params['conv_activation_func'],
                kernel_initializer=self._params['w_initializer'],
                bias_initializer=self._params['b_initializer'])(x)
            # Apply max pooling by take max at each dimension across
            # all word_trigram features.
            x = keras.layers.Dropout(self._params['dropout_rate'])(x)
            x = keras.layers.GlobalMaxPool1D()(x)
            # Apply a none-linear transformation use a tanh layer.
            x = self._make_multi_layer_perceptron_layer()(x)
            return x
        return _wrapper
    def build(self):
        """
        Build model structure.
        CDSSM use Siamese architecture: both sides share one convolutional
        base network.
        """
        base_network = self._create_base_network()
        # Left input and right input.
        input_left, input_right = self._make_inputs()
        # Process left & right input through the shared network.
        x = [base_network(input_left),
             base_network(input_right)]
        # Dot product with cosine similarity.
        x = keras.layers.Dot(axes=[1, 1], normalize=True)(x)
        x_out = self._make_output_layer()(x)
        self._backend = Model(inputs=[input_left, input_right],
                              outputs=x_out)
    @classmethod
    def get_default_preprocessor(cls):
        """:return: Default preprocessor."""
        return preprocessors.CDSSMPreprocessor()
    def guess_and_fill_missing_params(self, verbose: int = 1):
        """
        Guess and fill missing parameters in :attr:`params`.
        Use this method to automatically fill-in hyper parameters.
        This involves some guessing so the parameter it fills could be
        wrong. For example, the default task is `Ranking`, and if we do not
        set it to `Classification` manually for data packs prepared for
        classification, then the shape of the model output and the data will
        mismatch.
        :param verbose: Verbosity.
        """
        # Default shape per text side: 10 positions of 30-d tri-letter groups.
        self._params.get('input_shapes').set_default([(10, 30),
                                                      (10, 30)], verbose)
        super().guess_and_fill_missing_params(verbose)
| 5,450 | 39.984962 | 78 | py |
"""An implementation of DRMM Model."""
import typing
import keras
import keras.backend as K
import tensorflow as tf
from matchzoo.engine.base_model import BaseModel
from matchzoo.engine.param import Param
from matchzoo.engine.param_table import ParamTable
class DRMM(BaseModel):
    """
    DRMM Model.
    Examples:
        >>> model = DRMM()
        >>> model.params['mlp_num_layers'] = 1
        >>> model.params['mlp_num_units'] = 5
        >>> model.params['mlp_num_fan_out'] = 1
        >>> model.params['mlp_activation_func'] = 'tanh'
        >>> model.guess_and_fill_missing_params(verbose=0)
        >>> model.build()
        >>> model.compile()
    """
    @classmethod
    def get_default_params(cls) -> ParamTable:
        """:return: model default parameters."""
        params = super().get_default_params(with_embedding=True,
                                            with_multi_layer_perceptron=True)
        params.add(Param(name='mask_value', value=-1,
                         desc="The value to be masked from inputs."))
        params['optimizer'] = 'adam'
        # Defaults: query token ids of shape (5,) and one 30-bin matching
        # histogram per query term, shape (5, 30).
        params['input_shapes'] = [(5,), (5, 30,)]
        return params
    def build(self):
        """Build model structure."""
        # Scalar dimensions referenced here:
        # B = batch size (number of sequences)
        # D = embedding size
        # L = `input_left` sequence length
        # R = `input_right` sequence length
        # H = histogram size
        # K = size of top-k
        # Left input and right input.
        # query: shape = [B, L]
        # doc: shape = [B, L, H]
        # Note here, the doc is the matching histogram between original query
        # and original document.
        query = keras.layers.Input(
            name='text_left',
            shape=self._params['input_shapes'][0]
        )
        match_hist = keras.layers.Input(
            name='match_histogram',
            shape=self._params['input_shapes'][1]
        )
        embedding = self._make_embedding_layer()
        # Process left input.
        # shape = [B, L, D]
        embed_query = embedding(query)
        # shape = [B, L]: 1.0 where the query token is valid, 0.0 where it
        # equals `mask_value`.
        atten_mask = tf.not_equal(query, self._params['mask_value'])
        # shape = [B, L]
        atten_mask = tf.cast(atten_mask, K.floatx())
        # shape = [B, L, 1] (broadcasts over the embedding dimension D;
        # the original comment incorrectly said [B, L, D])
        atten_mask = tf.expand_dims(atten_mask, axis=2)
        # shape = [B, L, 1]
        attention_probs = self.attention_layer(embed_query, atten_mask)
        # Process right input.
        # shape = [B, L, 1]
        dense_output = self._make_multi_layer_perceptron_layer()(match_hist)
        # shape = [B, 1, 1]: attention-weighted sum of per-term scores.
        dot_score = keras.layers.Dot(axes=[1, 1])(
            [attention_probs, dense_output])
        flatten_score = keras.layers.Flatten()(dot_score)
        x_out = self._make_output_layer()(flatten_score)
        self._backend = keras.Model(inputs=[query, match_hist], outputs=x_out)
    @classmethod
    def attention_layer(cls, attention_input: typing.Any,
                        attention_mask: typing.Any = None
                        ) -> keras.layers.Layer:
        """
        Performs attention on the input.
        :param attention_input: The input tensor for attention layer.
        :param attention_mask: A tensor to mask the invalid values.
        :return: The masked output tensor.
        """
        # shape = [B, L, 1]
        dense_input = keras.layers.Dense(1, use_bias=False)(attention_input)
        if attention_mask is not None:
            # Since attention_mask is 1.0 for positions we want to attend and
            # 0.0 for masked positions, this operation will create a tensor
            # which is 0.0 for positions we want to attend and -10000.0 for
            # masked positions, driving their softmax weight to ~0.
            # shape = [B, L, 1]
            dense_input = keras.layers.Lambda(
                lambda x: x + (1.0 - attention_mask) * -10000.0,
                name="attention_mask"
            )(dense_input)
        # shape = [B, L, 1]: softmax over the sequence axis.
        attention_probs = keras.layers.Lambda(
            lambda x: tf.nn.softmax(x, axis=1),
            output_shape=lambda s: (s[0], s[1], s[2]),
            name="attention_probs"
        )(dense_input)
        return attention_probs
| 4,248 | 33.827869 | 78 | py |
"""DUET Model."""
import keras
import tensorflow as tf
from matchzoo.engine import hyper_spaces
from matchzoo.engine.base_model import BaseModel
from matchzoo.engine.param import Param
class DUET(BaseModel):
    """
    DUET Model.
    Examples:
        >>> model = DUET()
        >>> model.params['embedding_input_dim'] = 1000
        >>> model.params['embedding_output_dim'] = 300
        >>> model.params['lm_filters'] = 32
        >>> model.params['lm_hidden_sizes'] = [64, 32]
        >>> model.params['dropout_rate'] = 0.5
        >>> model.params['dm_filters'] = 32
        >>> model.params['dm_kernel_size'] = 3
        >>> model.params['dm_d_mpool'] = 4
        >>> model.params['dm_hidden_sizes'] = [64, 32]
        >>> model.guess_and_fill_missing_params(verbose=0)
        >>> model.build()
    """
    @classmethod
    def get_default_params(cls):
        """Get default parameters."""
        params = super().get_default_params(with_embedding=True)
        params.add(Param(name='lm_filters', value=32,
                         desc="Filter size of 1D convolution layer in "
                              "the local model."))
        params.add(Param(name='lm_hidden_sizes', value=[32],
                         desc="A list of hidden size of the MLP layer "
                              "in the local model."))
        params.add(Param(name='dm_filters', value=32,
                         desc="Filter size of 1D convolution layer in "
                              "the distributed model."))
        params.add(Param(name='dm_kernel_size', value=3,
                         desc="Kernel size of 1D convolution layer in "
                              "the distributed model."))
        params.add(Param(name='dm_q_hidden_size', value=32,
                         desc="Hidden size of the MLP layer for the "
                              "left text in the distributed model."))
        params.add(Param(name='dm_d_mpool', value=3,
                         desc="Max pooling size for the right text in "
                              "the distributed model."))
        params.add(Param(name='dm_hidden_sizes', value=[32],
                         desc="A list of hidden size of the MLP layer "
                              "in the distributed model."))
        params.add(Param(name='padding', value='same',
                         desc="The padding mode in the convolution "
                              "layer. It should be one of `same`, "
                              "`valid`, ""and `causal`."))
        params.add(Param(name='activation_func', value='relu',
                         desc="Activation function in the convolution"
                              " layer."))
        params.add(Param(
            name='dropout_rate', value=0.5,
            hyper_space=hyper_spaces.quniform(low=0.0, high=0.8,
                                              q=0.02),
            desc="The dropout rate."))
        return params
    def build(self):
        """Build model."""
        query, doc = self._make_inputs()
        embedding = self._make_embedding_layer()
        q_embed = embedding(query)
        d_embed = embedding(doc)
        # Local model: exact-match (xor) interaction between raw token ids.
        lm_xor = keras.layers.Lambda(self._xor_match)([query, doc])
        lm_conv = keras.layers.Conv1D(
            self._params['lm_filters'],
            self._params['input_shapes'][1][0],
            padding=self._params['padding'],
            activation=self._params['activation_func']
        )(lm_xor)
        lm_conv = keras.layers.Dropout(self._params['dropout_rate'])(
            lm_conv)
        lm_feat = keras.layers.Reshape((-1,))(lm_conv)
        for hidden_size in self._params['lm_hidden_sizes']:
            lm_feat = keras.layers.Dense(
                hidden_size,
                activation=self._params['activation_func']
            )(lm_feat)
        lm_drop = keras.layers.Dropout(self._params['dropout_rate'])(
            lm_feat)
        lm_score = keras.layers.Dense(1)(lm_drop)
        # Distributed model, query side: conv over embeddings, pool over
        # the whole query length, then project to `dm_q_hidden_size`.
        dm_q_conv = keras.layers.Conv1D(
            self._params['dm_filters'],
            self._params['dm_kernel_size'],
            padding=self._params['padding'],
            activation=self._params['activation_func']
        )(q_embed)
        dm_q_conv = keras.layers.Dropout(self._params['dropout_rate'])(
            dm_q_conv)
        dm_q_mp = keras.layers.MaxPooling1D(
            pool_size=self._params['input_shapes'][0][0])(dm_q_conv)
        dm_q_rep = keras.layers.Reshape((-1,))(dm_q_mp)
        dm_q_rep = keras.layers.Dense(self._params['dm_q_hidden_size'])(
            dm_q_rep)
        dm_q_rep = keras.layers.Lambda(lambda x: tf.expand_dims(x, 1))(
            dm_q_rep)
        # Distributed model, document side: two conv layers with an
        # intermediate max pooling of size `dm_d_mpool`.
        dm_d_conv1 = keras.layers.Conv1D(
            self._params['dm_filters'],
            self._params['dm_kernel_size'],
            padding=self._params['padding'],
            activation=self._params['activation_func']
        )(d_embed)
        dm_d_conv1 = keras.layers.Dropout(self._params['dropout_rate'])(
            dm_d_conv1)
        dm_d_mp = keras.layers.MaxPooling1D(
            pool_size=self._params['dm_d_mpool'])(dm_d_conv1)
        dm_d_conv2 = keras.layers.Conv1D(
            self._params['dm_filters'], 1,
            padding=self._params['padding'],
            activation=self._params['activation_func']
        )(dm_d_mp)
        dm_d_conv2 = keras.layers.Dropout(self._params['dropout_rate'])(
            dm_d_conv2)
        # Element-wise (hadamard) product of query and document
        # representations, then an MLP down to a single score.
        h_dot = keras.layers.Lambda(self._hadamard_dot)([dm_q_rep, dm_d_conv2])
        dm_feat = keras.layers.Reshape((-1,))(h_dot)
        for hidden_size in self._params['dm_hidden_sizes']:
            dm_feat = keras.layers.Dense(hidden_size)(dm_feat)
        dm_feat_drop = keras.layers.Dropout(self._params['dropout_rate'])(
            dm_feat)
        dm_score = keras.layers.Dense(1)(dm_feat_drop)
        # Final score is the sum of the local and distributed scores.
        add = keras.layers.Add()([lm_score, dm_score])
        out = self._make_output_layer()(add)
        self._backend = keras.Model(inputs=[query, doc], outputs=out)
    @classmethod
    def _xor_match(cls, x):
        """Binary exact-match matrix: out[b, i, j] = 1.0 iff t1[b, i] == t2[b, j]."""
        t1 = x[0]
        t2 = x[1]
        t1_shape = t1.get_shape()
        t2_shape = t2.get_shape()
        t1_expand = tf.stack([t1] * t2_shape[1], 2)
        t2_expand = tf.stack([t2] * t1_shape[1], 1)
        out_bool = tf.equal(t1_expand, t2_expand)
        out = tf.cast(out_bool, tf.float32)
        return out
    @classmethod
    def _hadamard_dot(cls, x):
        """Element-wise product of the two input tensors."""
        x1 = x[0]
        x2 = x[1]
        out = x1 * x2
        return out
| 6,502 | 39.141975 | 79 | py |
"""ConvKNRM model."""
import keras
import tensorflow as tf
from .knrm import KNRM
from matchzoo.engine.param import Param
class ConvKNRM(KNRM):
    """
    ConvKNRM model.
    Examples:
        >>> model = ConvKNRM()
        >>> model.params['embedding_input_dim'] = 10000
        >>> model.params['embedding_output_dim'] = 300
        >>> model.params['embedding_trainable'] = True
        >>> model.params['filters'] = 128
        >>> model.params['conv_activation_func'] = 'tanh'
        >>> model.params['max_ngram'] = 3
        >>> model.params['use_crossmatch'] = True
        >>> model.params['kernel_num'] = 11
        >>> model.params['sigma'] = 0.1
        >>> model.params['exact_sigma'] = 0.001
        >>> model.guess_and_fill_missing_params(verbose=0)
        >>> model.build()
    """
    # Fix: the decorator was missing, so `ConvKNRM.get_default_params()`
    # failed at class level (`cls` unbound); all sibling models declare this
    # method as a classmethod.
    @classmethod
    def get_default_params(cls):
        """Get default parameters."""
        params = super().get_default_params()
        params.add(Param(name='filters', value=128,
                         desc="The filter size in the convolution"
                              " layer."))
        params.add(Param(name='conv_activation_func', value='relu',
                         desc="The activation function in the "
                              "convolution layer."))
        params.add(Param(name='max_ngram', value=3,
                         desc="The maximum length of n-grams for the "
                              "convolution layer."))
        params.add(Param(name='use_crossmatch', value=True,
                         desc="Whether to match left n-grams and right "
                              "n-grams of different lengths"))
        return params
    def build(self):
        """Build model."""
        query, doc = self._make_inputs()
        embedding = self._make_embedding_layer()
        q_embed = embedding(query)
        d_embed = embedding(doc)
        # One shared Conv1D per n-gram length, applied to both sides.
        q_convs = []
        d_convs = []
        for i in range(self._params['max_ngram']):
            c = keras.layers.Conv1D(
                self._params['filters'], i + 1,
                activation=self._params['conv_activation_func'],
                padding='same'
            )
            q_convs.append(c(q_embed))
            d_convs.append(c(d_embed))
        KM = []
        for qi in range(self._params['max_ngram']):
            for di in range(self._params['max_ngram']):
                # Skip matching n-grams of different lengths unless
                # crossmatch is enabled. (The original comment stated the
                # condition inverted.)
                if not self._params['use_crossmatch'] and qi != di:
                    continue
                q_ngram = q_convs[qi]
                d_ngram = d_convs[di]
                # Cosine similarity matrix between n-gram representations.
                mm = keras.layers.Dot(axes=[2, 2],
                                      normalize=True)([q_ngram, d_ngram])
                # RBF kernel pooling: kernel means are spread evenly over
                # [-1, 1]; the last kernel (mu=1.0) uses `exact_sigma` to
                # capture exact matches.
                for i in range(self._params['kernel_num']):
                    mu = 1. / (self._params['kernel_num'] - 1) + (2. * i) / (
                        self._params['kernel_num'] - 1) - 1.0
                    sigma = self._params['sigma']
                    if mu > 1.0:
                        sigma = self._params['exact_sigma']
                        mu = 1.0
                    mm_exp = self._kernel_layer(mu, sigma)(mm)
                    mm_doc_sum = keras.layers.Lambda(
                        lambda x: tf.reduce_sum(x, 2))(
                        mm_exp)
                    mm_log = keras.layers.Activation(tf.math.log1p)(mm_doc_sum)
                    mm_sum = keras.layers.Lambda(
                        lambda x: tf.reduce_sum(x, 1))(mm_log)
                    KM.append(mm_sum)
        phi = keras.layers.Lambda(lambda x: tf.stack(x, 1))(KM)
        out = self._make_output_layer()(phi)
        self._backend = keras.Model(inputs=[query, doc], outputs=[out])
"""An implementation of DSSM, Deep Structured Semantic Model."""
from keras.models import Model
from keras.layers import Input, Dot
from matchzoo.engine.param_table import ParamTable
from matchzoo.engine.base_model import BaseModel
from matchzoo import preprocessors
class DSSM(BaseModel):
    """
    Deep structured semantic model.
    Examples:
        >>> model = DSSM()
        >>> model.params['mlp_num_layers'] = 3
        >>> model.params['mlp_num_units'] = 300
        >>> model.params['mlp_num_fan_out'] = 128
        >>> model.params['mlp_activation_func'] = 'relu'
        >>> model.guess_and_fill_missing_params(verbose=0)
        >>> model.build()
    """
    @classmethod
    def get_default_params(cls) -> ParamTable:
        """:return: model default parameters."""
        return super().get_default_params(with_multi_layer_perceptron=True)
    def build(self):
        """
        Build model structure.
        DSSM uses a Siamese architecture: one MLP tower with shared weights
        maps each tri-letter-hashed text side into the semantic space, and
        the two representations are compared via cosine similarity.
        """
        tri_letter_dim = self._params['input_shapes'][0][0]
        shared_tower = self._make_multi_layer_perceptron_layer()
        left_input = Input(name='text_left', shape=(tri_letter_dim,))
        right_input = Input(name='text_right', shape=(tri_letter_dim,))
        left_vec = shared_tower(left_input)
        right_vec = shared_tower(right_input)
        # Normalized dot product == cosine similarity.
        similarity = Dot(axes=[1, 1], normalize=True)([left_vec, right_vec])
        prediction = self._make_output_layer()(similarity)
        self._backend = Model(
            inputs=[left_input, right_input],
            outputs=prediction)
    @classmethod
    def get_default_preprocessor(cls):
        """:return: Default preprocessor."""
        return preprocessors.DSSMPreprocessor()
"""An implementation of MatchPyramid Model."""
import typing
import keras
import matchzoo
from matchzoo.engine.base_model import BaseModel
from matchzoo.engine.param import Param
from matchzoo.engine.param_table import ParamTable
from matchzoo.engine import hyper_spaces
class MatchPyramid(BaseModel):
    """
    MatchPyramid Model.
    Examples:
        >>> model = MatchPyramid()
        >>> model.params['embedding_output_dim'] = 300
        >>> model.params['num_blocks'] = 2
        >>> model.params['kernel_count'] = [16, 32]
        >>> model.params['kernel_size'] = [[3, 3], [3, 3]]
        >>> model.params['dpool_size'] = [3, 10]
        >>> model.guess_and_fill_missing_params(verbose=0)
        >>> model.build()
    """
    @classmethod
    def get_default_params(cls) -> ParamTable:
        """:return: model default parameters."""
        params = super().get_default_params(with_embedding=True)
        params.add(Param(name='num_blocks', value=1,
                         desc="Number of convolution blocks."))
        # `kernel_count` and `kernel_size` are per-block lists; their length
        # must match `num_blocks` (see the class doctest).
        params.add(Param(name='kernel_count', value=[32],
                         desc="The kernel count of the 2D convolution "
                              "of each block."))
        params.add(Param(name='kernel_size', value=[[3, 3]],
                         desc="The kernel size of the 2D convolution "
                              "of each block."))
        params.add(Param(name='activation', value='relu',
                         desc="The activation function."))
        params.add(Param(name='dpool_size', value=[3, 10],
                         desc="The max-pooling size of each block."))
        params.add(Param(
            name='padding', value='same',
            desc="The padding mode in the convolution layer."
        ))
        params.add(Param(
            name='dropout_rate', value=0.0,
            hyper_space=hyper_spaces.quniform(low=0.0, high=0.8,
                                              q=0.01),
            desc="The dropout rate."
        ))
        return params
    def build(self):
        """
        Build model structure.
        MatchPyramid text matching as image recognition.
        """
        input_left, input_right = self._make_inputs()
        # Extra input carrying the precomputed dynamic-pooling index,
        # shape (left_len, right_len, 2).
        input_dpool_index = keras.layers.Input(
            name='dpool_index',
            shape=[self._params['input_shapes'][0][0],
                   self._params['input_shapes'][1][0],
                   2],
            dtype='int32')
        embedding = self._make_embedding_layer()
        embed_left = embedding(input_left)
        embed_right = embedding(input_right)
        # Interaction: word-by-word dot-product matching matrix.
        matching_layer = matchzoo.layers.MatchingLayer(matching_type='dot')
        embed_cross = matching_layer([embed_left, embed_right])
        # Stacked 2D convolution blocks over the matching "image".
        for i in range(self._params['num_blocks']):
            embed_cross = self._conv_block(
                embed_cross,
                self._params['kernel_count'][i],
                self._params['kernel_size'][i],
                self._params['padding'],
                self._params['activation']
            )
        # Dynamic Pooling
        dpool_layer = matchzoo.layers.DynamicPoolingLayer(
            *self._params['dpool_size'])
        embed_pool = dpool_layer([embed_cross, input_dpool_index])
        embed_flat = keras.layers.Flatten()(embed_pool)
        x = keras.layers.Dropout(rate=self._params['dropout_rate'])(embed_flat)
        inputs = [input_left, input_right, input_dpool_index]
        x_out = self._make_output_layer()(x)
        self._backend = keras.Model(inputs=inputs, outputs=x_out)
    @classmethod
    def _conv_block(
            cls, x,
            kernel_count: int,
            kernel_size: int,
            padding: str,
            activation: str
    ) -> typing.Any:
        """Apply one 2D convolution with the given hyper-parameters.

        NOTE(review): despite the `int` annotation, the default params pass a
        `[h, w]` list as `kernel_size` (Conv2D accepts both).
        """
        output = keras.layers.Conv2D(kernel_count,
                                     kernel_size,
                                     padding=padding,
                                     activation=activation)(x)
        return output
| 4,014 | 34.530973 | 79 | py |
"""An implementation of ArcI Model."""
import typing
import keras
from matchzoo.engine.base_model import BaseModel
from matchzoo.engine.param import Param
from matchzoo.engine.param_table import ParamTable
from matchzoo.engine import hyper_spaces
class ArcI(BaseModel):
    """
    ArcI Model.
    Examples:
        >>> model = ArcI()
        >>> model.params['num_blocks'] = 1
        >>> model.params['left_filters'] = [32]
        >>> model.params['right_filters'] = [32]
        >>> model.params['left_kernel_sizes'] = [3]
        >>> model.params['right_kernel_sizes'] = [3]
        >>> model.params['left_pool_sizes'] = [2]
        >>> model.params['right_pool_sizes'] = [4]
        >>> model.params['conv_activation_func'] = 'relu'
        >>> model.params['mlp_num_layers'] = 1
        >>> model.params['mlp_num_units'] = 64
        >>> model.params['mlp_num_fan_out'] = 32
        >>> model.params['mlp_activation_func'] = 'relu'
        >>> model.params['dropout_rate'] = 0.5
        >>> model.guess_and_fill_missing_params(verbose=0)
        >>> model.build()
    """
    @classmethod
    def get_default_params(cls) -> ParamTable:
        """:return: model default parameters."""
        params = super().get_default_params(
            with_embedding=True,
            with_multi_layer_perceptron=True
        )
        params['optimizer'] = 'adam'
        # The `*_filters`, `*_kernel_sizes` and `*_pool_sizes` lists are
        # per-block; their length must match `num_blocks`.
        params.add(Param(name='num_blocks', value=1,
                         desc="Number of convolution blocks."))
        params.add(Param(name='left_filters', value=[32],
                         desc="The filter size of each convolution "
                              "blocks for the left input."))
        params.add(Param(name='left_kernel_sizes', value=[3],
                         desc="The kernel size of each convolution "
                              "blocks for the left input."))
        params.add(Param(name='right_filters', value=[32],
                         desc="The filter size of each convolution "
                              "blocks for the right input."))
        params.add(Param(name='right_kernel_sizes', value=[3],
                         desc="The kernel size of each convolution "
                              "blocks for the right input."))
        params.add(Param(name='conv_activation_func', value='relu',
                         desc="The activation function in the "
                              "convolution layer."))
        params.add(Param(name='left_pool_sizes', value=[2],
                         desc="The pooling size of each convolution "
                              "blocks for the left input."))
        params.add(Param(name='right_pool_sizes', value=[2],
                         desc="The pooling size of each convolution "
                              "blocks for the right input."))
        params.add(Param(
            name='padding',
            value='same',
            hyper_space=hyper_spaces.choice(
                ['same', 'valid', 'causal']),
            desc="The padding mode in the convolution layer. It should be one"
                 "of `same`, `valid`, and `causal`."
        ))
        params.add(Param(
            'dropout_rate', 0.0,
            hyper_space=hyper_spaces.quniform(
                low=0.0, high=0.8, q=0.01),
            desc="The dropout rate."
        ))
        return params
    def build(self):
        """
        Build model structure.
        ArcI use Siamese arthitecture: each side runs through its own stack
        of conv+pool blocks, the flattened representations are concatenated
        and scored by an MLP.
        """
        input_left, input_right = self._make_inputs()
        embedding = self._make_embedding_layer()
        embed_left = embedding(input_left)
        embed_right = embedding(input_right)
        # Per-side convolution/pooling stacks (separate hyper-parameters
        # for left and right).
        for i in range(self._params['num_blocks']):
            embed_left = self._conv_pool_block(
                embed_left,
                self._params['left_filters'][i],
                self._params['left_kernel_sizes'][i],
                self._params['padding'],
                self._params['conv_activation_func'],
                self._params['left_pool_sizes'][i]
            )
            embed_right = self._conv_pool_block(
                embed_right,
                self._params['right_filters'][i],
                self._params['right_kernel_sizes'][i],
                self._params['padding'],
                self._params['conv_activation_func'],
                self._params['right_pool_sizes'][i]
            )
        rep_left = keras.layers.Flatten()(embed_left)
        rep_right = keras.layers.Flatten()(embed_right)
        concat = keras.layers.Concatenate(axis=1)([rep_left, rep_right])
        dropout = keras.layers.Dropout(
            rate=self._params['dropout_rate'])(concat)
        mlp = self._make_multi_layer_perceptron_layer()(dropout)
        inputs = [input_left, input_right]
        x_out = self._make_output_layer()(mlp)
        self._backend = keras.Model(inputs=inputs, outputs=x_out)
    def _conv_pool_block(
        self,
        input_: typing.Any,
        filters: int,
        kernel_size: int,
        padding: str,
        conv_activation_func: str,
        pool_size: int
    ) -> typing.Any:
        """Stack one 1D convolution and one max-pooling layer on `input_`."""
        output = keras.layers.Conv1D(
            filters,
            kernel_size,
            padding=padding,
            activation=conv_activation_func
        )(input_)
        output = keras.layers.MaxPooling1D(pool_size=pool_size)(output)
        return output
| 5,386 | 37.205674 | 78 | py |
MatchZoo | MatchZoo-master/matchzoo/models/mvlstm.py | """An implementation of MVLSTM Model."""
import keras
import tensorflow as tf
from matchzoo.engine import hyper_spaces
from matchzoo.engine.base_model import BaseModel
from matchzoo.engine.param import Param
from matchzoo.engine.param_table import ParamTable
class MVLSTM(BaseModel):
    """
    MVLSTM Model.

    Matches two texts by interacting the positional sentence
    representations produced by bi-directional LSTMs and aggregating the
    strongest interaction signals with a top-k pooling layer.

    Examples:
        >>> model = MVLSTM()
        >>> model.params['lstm_units'] = 32
        >>> model.params['top_k'] = 50
        >>> model.params['mlp_num_layers'] = 2
        >>> model.params['mlp_num_units'] = 20
        >>> model.params['mlp_num_fan_out'] = 10
        >>> model.params['mlp_activation_func'] = 'relu'
        >>> model.params['dropout_rate'] = 0.5
        >>> model.guess_and_fill_missing_params(verbose=0)
        >>> model.build()
    """
    @classmethod
    def get_default_params(cls) -> ParamTable:
        """:return: model default parameters."""
        params = super().get_default_params(
            with_embedding=True, with_multi_layer_perceptron=True)
        params.add(Param(name='lstm_units', value=32,
                         desc="Integer, the hidden size in the "
                              "bi-directional LSTM layer."))
        params.add(Param(name='dropout_rate', value=0.0,
                         desc="Float, the dropout rate."))
        params.add(Param(
            'top_k', value=10,
            hyper_space=hyper_spaces.quniform(low=2, high=100),
            desc="Integer, the size of top-k pooling layer."
        ))
        params['optimizer'] = 'adam'
        return params
    def build(self):
        """Build model structure."""
        query, doc = self._make_inputs()
        # Embedding layer
        # `mask_zero=True` lets the LSTMs skip padded (zero) positions.
        embedding = self._make_embedding_layer(mask_zero=True)
        embed_query = embedding(query)
        embed_doc = embedding(doc)
        # Bi-directional LSTM layer
        # `return_sequences=True` keeps one representation per position.
        rep_query = keras.layers.Bidirectional(keras.layers.LSTM(
            self._params['lstm_units'],
            return_sequences=True,
            dropout=self._params['dropout_rate']
        ))(embed_query)
        rep_doc = keras.layers.Bidirectional(keras.layers.LSTM(
            self._params['lstm_units'],
            return_sequences=True,
            dropout=self._params['dropout_rate']
        ))(embed_doc)
        # Top-k matching layer
        # Pairwise dot products of positional representations, flattened,
        # then the k strongest interaction signals are kept.
        matching_matrix = keras.layers.Dot(
            axes=[2, 2], normalize=False)([rep_query, rep_doc])
        matching_signals = keras.layers.Reshape((-1,))(matching_matrix)
        matching_topk = keras.layers.Lambda(
            lambda x: tf.nn.top_k(x, k=self._params['top_k'], sorted=True)[0]
        )(matching_signals)
        # Multilayer perceptron layer.
        mlp = self._make_multi_layer_perceptron_layer()(matching_topk)
        mlp = keras.layers.Dropout(
            rate=self._params['dropout_rate'])(mlp)
        x_out = self._make_output_layer()(mlp)
        self._backend = keras.Model(inputs=[query, doc], outputs=x_out)
| 2,963 | 34.285714 | 77 | py |
MatchZoo | MatchZoo-master/matchzoo/models/anmm.py | """An implementation of aNMM Model."""
import keras
from keras.activations import softmax
from keras.initializers import RandomUniform
from matchzoo.engine.base_model import BaseModel
from matchzoo.engine.param import Param
from matchzoo.engine.param_table import ParamTable
from matchzoo.engine import hyper_spaces
class ANMM(BaseModel):
    """
    ANMM Model.

    Combines a query-term attention network with an MLP over per-term
    matching-bin features ("bin weighting").

    Examples:
        >>> model = ANMM()
        >>> model.guess_and_fill_missing_params(verbose=0)
        >>> model.build()
    """
    @classmethod
    def get_default_params(cls) -> ParamTable:
        """:return: model default parameters."""
        params = super().get_default_params(with_embedding=True)
        params.add(Param(
            name='dropout_rate', value=0.1,
            desc="The dropout rate.",
            hyper_space=hyper_spaces.quniform(0, 1, 0.05)
        ))
        params.add(Param(
            name='num_layers', value=2,
            desc="Number of hidden layers in the MLP "
                 "layer."
        ))
        params.add(Param(
            name='hidden_sizes', value=[30, 30],
            desc="Number of hidden size for each hidden"
                 " layer"
        ))
        return params
    def build(self):
        """
        Build model structure.

        aNMM model based on bin weighting and query term attentions
        """
        # query is [batch_size, left_text_len]
        # doc is [batch_size, right_text_len, bin_num]
        query, doc = self._make_inputs()
        embedding = self._make_embedding_layer()
        q_embed = embedding(query)
        # One attention weight per query term, softmax-normalised over the
        # query-length axis.
        q_attention = keras.layers.Dense(
            1, kernel_initializer=RandomUniform(), use_bias=False)(q_embed)
        q_text_len = self._params['input_shapes'][0][0]
        q_attention = keras.layers.Lambda(
            lambda x: softmax(x, axis=1),
            output_shape=(q_text_len,)
        )(q_attention)
        # MLP over the per-term bin features of the document input.
        d_bin = keras.layers.Dropout(
            rate=self._params['dropout_rate'])(doc)
        for layer_id in range(self._params['num_layers'] - 1):
            d_bin = keras.layers.Dense(
                self._params['hidden_sizes'][layer_id],
                kernel_initializer=RandomUniform())(d_bin)
            d_bin = keras.layers.Activation('tanh')(d_bin)
        # Final (linear) layer of the bin-weighting MLP.
        d_bin = keras.layers.Dense(
            self._params['hidden_sizes'][self._params['num_layers'] - 1])(
            d_bin)
        d_bin = keras.layers.Reshape((q_text_len,))(d_bin)
        q_attention = keras.layers.Reshape((q_text_len,))(q_attention)
        # Attention-weighted sum of the per-term scores.
        score = keras.layers.Dot(axes=[1, 1])([d_bin, q_attention])
        x_out = self._make_output_layer()(score)
        self._backend = keras.Model(inputs=[query, doc], outputs=x_out)
| 2,720 | 33.0125 | 75 | py |
MatchZoo | MatchZoo-master/matchzoo/models/drmmtks.py | """An implementation of DRMMTKS Model."""
import typing
import keras
import tensorflow as tf
from matchzoo.engine.base_model import BaseModel
from matchzoo.engine.param import Param
from matchzoo.engine.param_table import ParamTable
from matchzoo.engine import hyper_spaces
class DRMMTKS(BaseModel):
    """
    DRMMTKS Model.

    A top-k variant of DRMM: instead of matching histograms it keeps the
    top-k cosine-similarity values per query term, feeds them through an
    MLP, and combines the per-term scores with query-term attention.

    Examples:
        >>> model = DRMMTKS()
        >>> model.params['embedding_input_dim'] = 10000
        >>> model.params['embedding_output_dim'] = 100
        >>> model.params['top_k'] = 20
        >>> model.params['mlp_num_layers'] = 1
        >>> model.params['mlp_num_units'] = 5
        >>> model.params['mlp_num_fan_out'] = 1
        >>> model.params['mlp_activation_func'] = 'tanh'
        >>> model.guess_and_fill_missing_params(verbose=0)
        >>> model.build()
    """

    @classmethod
    def get_default_params(cls) -> ParamTable:
        """:return: model default parameters."""
        params = super().get_default_params(
            with_embedding=True,
            with_multi_layer_perceptron=True
        )
        params.add(Param(name='mask_value', value=-1,
                         desc="The value to be masked from inputs."))
        params['input_shapes'] = [(5,), (300,)]
        params.add(Param(
            'top_k', value=10,
            hyper_space=hyper_spaces.quniform(low=2, high=100),
            desc="Size of top-k pooling layer."
        ))
        return params

    def build(self):
        """Build model structure."""
        # Scalar dimensions referenced here:
        # B = batch size (number of sequences)
        # D = embedding size
        # L = `input_left` sequence length
        # R = `input_right` sequence length
        # K = size of top-k
        # Left input and right input.
        # shape = [B, L]
        # shape = [B, R]
        query, doc = self._make_inputs()
        embedding = self._make_embedding_layer()
        # Process left input.
        # shape = [B, L, D]
        embed_query = embedding(query)
        # shape = [B, R, D]
        embed_doc = embedding(doc)
        # Build an attention mask from padded query positions.
        # shape = [B, L]
        atten_mask = tf.not_equal(query, self._params['mask_value'])
        # shape = [B, L]
        atten_mask = tf.cast(atten_mask, keras.backend.floatx())
        # shape = [B, L, 1]
        atten_mask = tf.expand_dims(atten_mask, axis=2)
        # shape = [B, L, 1]
        attention_probs = self.attention_layer(embed_query, atten_mask)
        # Matching histogram of top-k
        # shape = [B, L, R]
        matching_matrix = keras.layers.Dot(axes=[2, 2], normalize=True)(
            [embed_query,
             embed_doc])
        # shape = [B, L, K]
        # `top_k` cannot exceed either text length.
        # Consistency fix: use `self._params`, as everywhere else in this
        # method, instead of going through the `self.params` property.
        effective_top_k = min(self._params['top_k'],
                              self._params['input_shapes'][0][0],
                              self._params['input_shapes'][1][0])
        matching_topk = keras.layers.Lambda(
            lambda x: tf.nn.top_k(x, k=effective_top_k, sorted=True)[0]
        )(matching_matrix)
        # Process right input.
        # shape = [B, L, 1]
        dense_output = self._make_multi_layer_perceptron_layer()(matching_topk)
        # Attention-weighted combination of per-term scores.
        # shape = [B, 1, 1]
        dot_score = keras.layers.Dot(axes=[1, 1])(
            [attention_probs, dense_output])
        flatten_score = keras.layers.Flatten()(dot_score)
        x_out = self._make_output_layer()(flatten_score)
        self._backend = keras.Model(inputs=[query, doc], outputs=x_out)

    @classmethod
    def attention_layer(cls, attention_input: typing.Any,
                        attention_mask: typing.Any = None
                        ) -> keras.layers.Layer:
        """
        Performs attention on the input.

        :param attention_input: The input tensor for attention layer.
        :param attention_mask: A tensor to mask the invalid values.
        :return: The masked output tensor.
        """
        # shape = [B, L, 1]
        dense_input = keras.layers.Dense(1, use_bias=False)(attention_input)
        if attention_mask is not None:
            # Since attention_mask is 1.0 for positions we want to attend and
            # 0.0 for masked positions, this operation will create a tensor
            # which is 0.0 for positions we want to attend and -10000.0 for
            # masked positions.
            # shape = [B, L, 1]
            dense_input = keras.layers.Lambda(
                lambda x: x + (1.0 - attention_mask) * -10000.0,
                name="attention_mask"
            )(dense_input)
        # shape = [B, L, 1]
        attention_probs = keras.layers.Lambda(
            lambda x: tf.nn.softmax(x, axis=1),
            output_shape=lambda s: (s[0], s[1], s[2]),
            name="attention_probs"
        )(dense_input)
        return attention_probs
| 4,766 | 34.311111 | 79 | py |
MatchZoo | MatchZoo-master/matchzoo/models/arcii.py | """An implementation of ArcII Model."""
import typing
import keras
import matchzoo
from matchzoo.engine.base_model import BaseModel
from matchzoo.engine.param import Param
from matchzoo.engine.param_table import ParamTable
from matchzoo.engine import hyper_spaces
class ArcII(BaseModel):
    """
    ArcII Model.

    Examples:
        >>> model = ArcII()
        >>> model.params['embedding_output_dim'] = 300
        >>> model.params['num_blocks'] = 2
        >>> model.params['kernel_1d_count'] = 32
        >>> model.params['kernel_1d_size'] = 3
        >>> model.params['kernel_2d_count'] = [16, 32]
        >>> model.params['kernel_2d_size'] = [[3, 3], [3, 3]]
        >>> model.params['pool_2d_size'] = [[2, 2], [2, 2]]
        >>> model.guess_and_fill_missing_params(verbose=0)
        >>> model.build()
    """
    @classmethod
    def get_default_params(cls) -> ParamTable:
        """:return: model default parameters."""
        params = super().get_default_params(with_embedding=True)
        params['optimizer'] = 'adam'
        opt_space = hyper_spaces.choice(['adam', 'rmsprop', 'adagrad'])
        params.get('optimizer').hyper_space = opt_space
        params.add(Param(name='num_blocks', value=1,
                         desc="Number of 2D convolution blocks."))
        params.add(Param(name='kernel_1d_count', value=32,
                         desc="Kernel count of 1D convolution layer."))
        params.add(Param(name='kernel_1d_size', value=3,
                         desc="Kernel size of 1D convolution layer."))
        params.add(Param(name='kernel_2d_count', value=[32],
                         desc="Kernel count of 2D convolution layer in"
                              "each block"))
        params.add(Param(name='kernel_2d_size', value=[[3, 3]],
                         desc="Kernel size of 2D convolution layer in"
                              " each block."))
        params.add(Param(name='activation', value='relu',
                         desc="Activation function."))
        params.add(Param(name='pool_2d_size', value=[[2, 2]],
                         desc="Size of pooling layer in each block."))
        params.add(Param(
            name='padding', value='same',
            hyper_space=hyper_spaces.choice(
                ['same', 'valid']),
            desc="The padding mode in the convolution layer. It should be one"
                 "of `same`, `valid`."
        ))
        params.add(Param(
            name='dropout_rate', value=0.0,
            hyper_space=hyper_spaces.quniform(low=0.0, high=0.8,
                                              q=0.01),
            desc="The dropout rate."
        ))
        return params
    def build(self):
        """
        Build model structure.

        ArcII has the desirable property of letting two sentences meet before
        their own high-level representations mature.
        """
        input_left, input_right = self._make_inputs()
        embedding = self._make_embedding_layer()
        embed_left = embedding(input_left)
        embed_right = embedding(input_right)
        # Phrase level representations
        conv_1d_left = keras.layers.Conv1D(
            self._params['kernel_1d_count'],
            self._params['kernel_1d_size'],
            padding=self._params['padding']
        )(embed_left)
        conv_1d_right = keras.layers.Conv1D(
            self._params['kernel_1d_count'],
            self._params['kernel_1d_size'],
            padding=self._params['padding']
        )(embed_right)
        # Interaction
        # 'plus' matching combines the two phrase representations early.
        matching_layer = matchzoo.layers.MatchingLayer(matching_type='plus')
        embed_cross = matching_layer([conv_1d_left, conv_1d_right])
        # `num_blocks` stacked 2D conv + pooling blocks on the interaction.
        for i in range(self._params['num_blocks']):
            embed_cross = self._conv_pool_block(
                embed_cross,
                self._params['kernel_2d_count'][i],
                self._params['kernel_2d_size'][i],
                self._params['padding'],
                self._params['activation'],
                self._params['pool_2d_size'][i]
            )
        embed_flat = keras.layers.Flatten()(embed_cross)
        x = keras.layers.Dropout(rate=self._params['dropout_rate'])(embed_flat)
        inputs = [input_left, input_right]
        x_out = self._make_output_layer()(x)
        self._backend = keras.Model(inputs=inputs, outputs=x_out)
    @classmethod
    def _conv_pool_block(
        cls, x,
        kernel_count: int,
        kernel_size: int,
        padding: str,
        activation: str,
        pool_size: int
    ) -> typing.Any:
        """Apply one Conv2D + MaxPooling2D block to ``x``."""
        output = keras.layers.Conv2D(kernel_count,
                                     kernel_size,
                                     padding=padding,
                                     activation=activation)(x)
        output = keras.layers.MaxPooling2D(pool_size=pool_size)(output)
        return output
| 4,891 | 36.630769 | 79 | py |
MatchZoo | MatchZoo-master/matchzoo/models/knrm.py | """KNRM model."""
import keras
import tensorflow as tf
from matchzoo.engine.base_model import BaseModel
from matchzoo.engine.param import Param
from matchzoo.engine import hyper_spaces
class KNRM(BaseModel):
    """
    KNRM model.

    Kernel-based Neural Ranking Model: pools a cosine interaction matrix
    through a bank of Gaussian (RBF) kernels.

    Examples:
        >>> model = KNRM()
        >>> model.params['embedding_input_dim'] = 10000
        >>> model.params['embedding_output_dim'] = 10
        >>> model.params['embedding_trainable'] = True
        >>> model.params['kernel_num'] = 11
        >>> model.params['sigma'] = 0.1
        >>> model.params['exact_sigma'] = 0.001
        >>> model.guess_and_fill_missing_params(verbose=0)
        >>> model.build()
    """
    @classmethod
    def get_default_params(cls):
        """Get default parameters."""
        params = super().get_default_params(with_embedding=True)
        params.add(Param(
            name='kernel_num',
            value=11,
            hyper_space=hyper_spaces.quniform(low=5, high=20),
            desc="The number of RBF kernels."
        ))
        params.add(Param(
            name='sigma',
            value=0.1,
            hyper_space=hyper_spaces.quniform(
                low=0.01, high=0.2, q=0.01),
            desc="The `sigma` defines the kernel width."
        ))
        params.add(Param(
            name='exact_sigma', value=0.001,
            desc="The `exact_sigma` denotes the `sigma` "
                 "for exact match."
        ))
        return params
    def build(self):
        """Build model."""
        query, doc = self._make_inputs()
        embedding = self._make_embedding_layer()
        q_embed = embedding(query)
        d_embed = embedding(doc)
        # Cosine-similarity interaction matrix (normalize=True).
        mm = keras.layers.Dot(axes=[2, 2], normalize=True)([q_embed, d_embed])
        KM = []
        for i in range(self._params['kernel_num']):
            # Kernel means are evenly spaced; a mean that would exceed 1.0
            # is clamped to 1.0 and uses the narrow `exact_sigma`, which
            # makes the last kernel an exact-match kernel.
            mu = 1. / (self._params['kernel_num'] - 1) + (2. * i) / (
                self._params['kernel_num'] - 1) - 1.0
            sigma = self._params['sigma']
            if mu > 1.0:
                sigma = self._params['exact_sigma']
                mu = 1.0
            mm_exp = self._kernel_layer(mu, sigma)(mm)
            # Sum kernel responses over the document axis, take log1p,
            # then sum over the query axis (soft-TF pooling).
            mm_doc_sum = keras.layers.Lambda(
                lambda x: tf.reduce_sum(x, 2))(mm_exp)
            mm_log = keras.layers.Activation(tf.math.log1p)(mm_doc_sum)
            mm_sum = keras.layers.Lambda(
                lambda x: tf.reduce_sum(x, 1))(mm_log)
            KM.append(mm_sum)
        # Stack the per-kernel features into one feature vector.
        phi = keras.layers.Lambda(lambda x: tf.stack(x, 1))(KM)
        out = self._make_output_layer()(phi)
        self._backend = keras.Model(inputs=[query, doc], outputs=[out])
    @classmethod
    def _kernel_layer(cls, mu: float, sigma: float) -> keras.layers.Layer:
        """
        Gaussian kernel layer in KNRM.

        :param mu: Float, mean of the kernel.
        :param sigma: Float, sigma of the kernel.
        :return: `keras.layers.Layer`.
        """
        def kernel(x):
            return tf.math.exp(-0.5 * (x - mu) * (x - mu) / sigma / sigma)
        return keras.layers.Activation(kernel)
| 3,057 | 31.189474 | 78 | py |
MatchZoo | MatchZoo-master/matchzoo/models/naive.py | """Naive model with a simplest structure for testing purposes."""
import keras
from matchzoo.engine.base_model import BaseModel
from matchzoo.engine import hyper_spaces
class Naive(BaseModel):
    """
    Minimal working model intended for testing purposes.

    It wires the raw inputs straight into the output layer, which makes
    it ideal for smoke-testing a pipeline and useless for any serious
    fitting or evaluation.
    """
    @classmethod
    def get_default_params(cls):
        """Default parameters."""
        params = super().get_default_params()
        optimizer_space = hyper_spaces.choice(['adam', 'adagrad', 'rmsprop'])
        params.get('optimizer').hyper_space = optimizer_space
        return params
    def build(self):
        """Build."""
        model_inputs = self._make_inputs()
        merged = keras.layers.concatenate(model_inputs)
        prediction = self._make_output_layer()(merged)
        self._backend = keras.models.Model(
            inputs=model_inputs, outputs=prediction)
| 909 | 28.354839 | 74 | py |
MatchZoo | MatchZoo-master/matchzoo/models/dense_baseline.py | """A simple densely connected baseline model."""
import keras.layers
from matchzoo.engine.base_model import BaseModel
from matchzoo.engine.param_table import ParamTable
from matchzoo.engine import hyper_spaces
class DenseBaseline(BaseModel):
    """
    A simple densely connected baseline model.

    Examples:
        >>> model = DenseBaseline()
        >>> model.params['mlp_num_layers'] = 2
        >>> model.params['mlp_num_units'] = 300
        >>> model.params['mlp_num_fan_out'] = 128
        >>> model.params['mlp_activation_func'] = 'relu'
        >>> model.guess_and_fill_missing_params(verbose=0)
        >>> model.build()
        >>> model.compile()
    """
    @classmethod
    def get_default_params(cls) -> ParamTable:
        """:return: model default parameters."""
        params = super().get_default_params(with_multi_layer_perceptron=True)
        params['mlp_num_units'] = 256
        units_space = hyper_spaces.quniform(16, 512)
        layers_space = hyper_spaces.quniform(1, 5)
        params.get('mlp_num_units').hyper_space = units_space
        params.get('mlp_num_layers').hyper_space = layers_space
        return params
    def build(self):
        """Model structure."""
        model_inputs = self._make_inputs()
        hidden = keras.layers.concatenate(model_inputs)
        hidden = self._make_multi_layer_perceptron_layer()(hidden)
        prediction = self._make_output_layer()(hidden)
        self._backend = keras.models.Model(
            inputs=model_inputs, outputs=prediction)
| 1,420 | 31.295455 | 77 | py |
MatchZoo | MatchZoo-master/matchzoo/datasets/snli/load_data.py | """SNLI data loader."""
import typing
from pathlib import Path
import pandas as pd
import keras
import matchzoo
_url = "https://nlp.stanford.edu/projects/snli/snli_1.0.zip"
def load_data(
    stage: str = 'train',
    task: str = 'classification',
    target_label: str = 'entailment',
    return_classes: bool = False
) -> typing.Union[matchzoo.DataPack, tuple]:
    """
    Load SNLI data.

    :param stage: One of `train`, `dev`, and `test`. (default: `train`)
    :param task: Could be one of `ranking`, `classification` or a
        :class:`matchzoo.engine.BaseTask` instance.
        (default: `classification`)
    :param target_label: If `ranking`, choose one of `entailment`,
        `contradiction`, `neutral`, and `-` as the positive label.
        (default: `entailment`)
    :param return_classes: `True` to return classes for classification task,
        `False` otherwise.
    :return: A DataPack unless `task` is `classification` and `return_classes`
        is `True`: a tuple of `(DataPack, classes)` in that case.
    """
    if stage not in ('train', 'dev', 'test'):
        raise ValueError(f"{stage} is not a valid stage."
                         f"Must be one of `train`, `dev`, and `test`.")
    data_root = _download_data()
    file_path = data_root.joinpath(f'snli_1.0_{stage}.txt')
    data_pack = _read_data(file_path)
    # Normalise string task names into task instances.
    if task == 'ranking':
        task = matchzoo.tasks.Ranking()
    if task == 'classification':
        task = matchzoo.tasks.Classification()
    if isinstance(task, matchzoo.tasks.Ranking):
        if target_label not in ['entailment', 'contradiction', 'neutral', '-']:
            raise ValueError(f"{target_label} is not a valid target label."
                             f"Must be one of `entailment`, `contradiction`, "
                             f"`neutral` and `-`.")
        # Binarize: 1.0 where the gold label equals the target label.
        binary = (data_pack.relation['label'] == target_label).astype(float)
        data_pack.relation['label'] = binary
        return data_pack
    elif isinstance(task, matchzoo.tasks.Classification):
        classes = ['entailment', 'contradiction', 'neutral', '-']
        # Map string labels to class indices, then one-hot encode.
        label = data_pack.relation['label'].apply(classes.index)
        data_pack.relation['label'] = label
        data_pack.one_hot_encode_label(num_classes=4, inplace=True)
        if return_classes:
            return data_pack, classes
        else:
            return data_pack
    else:
        raise ValueError(f"{task} is not a valid task."
                         f"Must be one of `Ranking` and `Classification`.")
def _download_data():
    """Fetch and unpack the SNLI archive; return the extracted root dir."""
    archive = keras.utils.data_utils.get_file(
        'snli',
        _url,
        extract=True,
        cache_dir=matchzoo.USER_DATA_DIR,
        cache_subdir='snli',
    )
    return Path(archive).parent / 'snli_1.0'
def _read_data(path):
    """Load one SNLI split from ``path`` into a :class:`matchzoo.DataPack`."""
    table = pd.read_csv(path, sep='\t')
    renamed = pd.DataFrame({
        'text_left': table['sentence1'],
        'text_right': table['sentence2'],
        'label': table['gold_label'],
    })
    cleaned = renamed.dropna(axis=0, how='any').reset_index(drop=True)
    return matchzoo.pack(cleaned)
| 3,067 | 33.863636 | 79 | py |
MatchZoo | MatchZoo-master/matchzoo/datasets/quora_qp/load_data.py | """Quora Question Pairs data loader."""
import typing
from pathlib import Path
import keras
import pandas as pd
import matchzoo
_url = "https://firebasestorage.googleapis.com/v0/b/mtl-sentence" \
"-representations.appspot.com/o/data%2FQQP.zip?alt=media&" \
"token=700c6acf-160d-4d89-81d1-de4191d02cb5"
def load_data(
    stage: str = 'train',
    task: str = 'classification',
    return_classes: bool = False,
) -> typing.Union[matchzoo.DataPack, tuple]:
    """
    Load QuoraQP data.

    :param stage: One of `train`, `dev`, and `test`. (default: `train`)
    :param task: Could be one of `ranking`, `classification` or a
        :class:`matchzoo.engine.BaseTask` instance.
        (default: `classification`)
    :param return_classes: Whether return classes for classification task.
    :return: A DataPack if `ranking`, a tuple of (DataPack, classes) if
        `classification` and `return_classes` is `True`.
    """
    if stage not in ('train', 'dev', 'test'):
        raise ValueError(f"{stage} is not a valid stage."
                         f"Must be one of `train`, `dev`, and `test`.")
    data_root = _download_data()
    file_path = data_root.joinpath(f"{stage}.tsv")
    data_pack = _read_data(file_path, stage)
    # Normalise string task names into task instances.
    if task == 'ranking':
        task = matchzoo.tasks.Ranking()
    elif task == 'classification':
        task = matchzoo.tasks.Classification()
    if isinstance(task, matchzoo.tasks.Ranking):
        return data_pack
    elif isinstance(task, matchzoo.tasks.Classification):
        # The test split ships without labels, so only train/dev can be
        # one-hot encoded.
        if stage != 'test':
            data_pack.one_hot_encode_label(num_classes=2, inplace=True)
        if return_classes:
            return data_pack, [False, True]
        else:
            return data_pack
    else:
        raise ValueError(f"{task} is not a valid task.")
def _download_data():
    """Fetch and unpack the QQP archive; return the extracted directory."""
    archive = keras.utils.data_utils.get_file(
        'quora_qp',
        _url,
        extract=True,
        cache_dir=matchzoo.USER_DATA_DIR,
        cache_subdir='quora_qp',
    )
    return Path(archive).parent / 'QQP'
def _read_data(path, stage):
    """
    Read a QuoraQP ``tsv`` split into a :class:`matchzoo.DataPack`.

    :param path: Path of the tsv file to read.
    :param stage: One of `train`, `dev`, and `test`; the test split
        carries no question ids or labels.
    :return: A DataPack built from the file.
    """
    # `error_bad_lines` was removed in pandas 2.0 in favour of
    # `on_bad_lines`; try the modern keyword first and fall back on older
    # pandas so malformed rows are skipped either way.
    try:
        data = pd.read_csv(path, sep='\t', on_bad_lines='skip')
    except TypeError:
        data = pd.read_csv(path, sep='\t', error_bad_lines=False)
    data = data.dropna(axis=0, how='any').reset_index(drop=True)
    if stage in ['train', 'dev']:
        df = pd.DataFrame({
            'id_left': data['qid1'],
            'id_right': data['qid2'],
            'text_left': data['question1'],
            'text_right': data['question2'],
            'label': data['is_duplicate'].astype(int)
        })
    else:
        df = pd.DataFrame({
            'text_left': data['question1'],
            'text_right': data['question2']
        })
    return matchzoo.pack(df)
| 2,677 | 30.505882 | 74 | py |
MatchZoo | MatchZoo-master/matchzoo/datasets/cqa_ql_16/load_data.py | """CQA-QL-16 data loader."""
import xml
import typing
from pathlib import Path
import keras
import pandas as pd
import matchzoo
_train_dev_url = "http://alt.qcri.org/semeval2016/task3/data/uploads/" \
"semeval2016-task3-cqa-ql-traindev-v3.2.zip"
_test_url = "http://alt.qcri.org/semeval2016/task3/data/uploads/" \
"semeval2016_task3_test.zip"
def load_data(
    stage: str = 'train',
    task: str = 'classification',
    target_label: str = 'PerfectMatch',
    return_classes: bool = False,
    match_type: str = 'question',
    mode: str = 'both',
) -> typing.Union[matchzoo.DataPack, tuple]:
    """
    Load CQA-QL-16 data.

    :param stage: One of `train`, `dev`, and `test`.
        (default: `train`)
    :param task: Could be one of `ranking`, `classification` or instance
        of :class:`matchzoo.engine.BaseTask`. (default: `classification`)
    :param target_label: If `ranking`, choose one of classification
        label as the positive label. (default: `PerfectMatch`)
    :param return_classes: `True` to return classes for classification
        task, `False` otherwise.
    :param match_type: Matching text types. One of `question`,
        `answer`, and `external_answer`. (default: `question`)
    :param mode: Train data use method. One of `part1`, `part2`,
        and `both`. (default: `both`)
    :return: A DataPack unless `task` is `classification` and `return_classes`
        is `True`: a tuple of `(DataPack, classes)` in that case.
    """
    if stage not in ('train', 'dev', 'test'):
        raise ValueError(f"{stage} is not a valid stage."
                         f"Must be one of `train`, `dev`, and `test`.")
    if match_type not in ('question', 'answer', 'external_answer'):
        raise ValueError(f"{match_type} is not a valid method. Must be one of"
                         f" `question`, `answer`, `external_answer`.")
    if mode not in ('part1', 'part2', 'both'):
        raise ValueError(f"{mode} is not a valid method."
                         f"Must be one of `part1`, `part2`, `both`.")
    data_root = _download_data(stage)
    data_pack = _read_data(data_root, stage, match_type, mode)
    if task == 'ranking':
        # Fix: this previously tested the misspelled 'anwer', so the
        # `answer` match type skipped target-label validation entirely.
        if match_type in ('answer', 'external_answer') and target_label not in [
                'Good', 'PotentiallyUseful', 'Bad']:
            raise ValueError(f"{target_label} is not a valid target label."
                             f"Must be one of `Good`, `PotentiallyUseful`,"
                             f" `Bad`.")
        elif match_type == 'question' and target_label not in [
                'PerfectMatch', 'Relevant', 'Irrelevant']:
            raise ValueError(f"{target_label} is not a valid target label."
                             f" Must be one of `PerfectMatch`, `Relevant`,"
                             f" `Irrelevant`.")
        # Binarize: 1.0 for the chosen positive label, 0.0 otherwise.
        binary = (data_pack.relation['label'] == target_label).astype(float)
        data_pack.relation['label'] = binary
        return data_pack
    elif task == 'classification':
        if match_type in ('answer', 'external_answer'):
            classes = ['Good', 'PotentiallyUseful', 'Bad']
        else:
            classes = ['PerfectMatch', 'Relevant', 'Irrelevant']
        # Map string labels to class indices, then one-hot encode.
        label = data_pack.relation['label'].apply(classes.index)
        data_pack.relation['label'] = label
        data_pack.one_hot_encode_label(num_classes=3, inplace=True)
        if return_classes:
            return data_pack, classes
        else:
            return data_pack
    else:
        raise ValueError(f"{task} is not a valid task."
                         f"Must be one of `Ranking` and `Classification`.")
def _download_data(stage):
    """Return the extracted data root for ``stage``, downloading if needed."""
    if stage in ('train', 'dev'):
        return _download_train_dev_data()
    return _download_test_data()
def _download_train_dev_data():
    """Fetch the SemEval train/dev archive; return its extracted root."""
    archive = keras.utils.data_utils.get_file(
        'semeval_train',
        _train_dev_url,
        extract=True,
        cache_dir=matchzoo.USER_DATA_DIR,
        cache_subdir='semeval_train',
    )
    return Path(archive).parent / 'v3.2'
def _download_test_data():
    """Fetch the SemEval test archive; return its extracted English root."""
    archive = keras.utils.data_utils.get_file(
        'semeval_test',
        _test_url,
        extract=True,
        cache_dir=matchzoo.USER_DATA_DIR,
        cache_subdir='semeval_test',
    )
    return Path(archive).parent / 'SemEval2016_task3_test/English'
def _read_data(path, stage, match_type, mode='both'):
    """
    Read one CQA-QL-16 split from ``path`` into a :class:`matchzoo.DataPack`.

    :param path: Root directory of the extracted data.
    :param stage: One of `train`, `dev`, and `test`.
    :param match_type: One of `question`, `answer`, `external_answer`.
    :param mode: For `train` only: `part1`, `part2`, or `both`.
    """
    if stage == 'train':
        part1 = path.joinpath(
            'train/SemEval2016-Task3-CQA-QL-train-part1.xml')
        part2 = path.joinpath(
            'train/SemEval2016-Task3-CQA-QL-train-part2.xml')
        if mode == 'part1':
            data = _load_data_by_type(part1, match_type)
        elif mode == 'part2':
            data = _load_data_by_type(part2, match_type)
        else:
            # BUG FIX: `both` previously loaded part1 twice (the second
            # path also pointed at part1); it now concatenates part1 and
            # part2 as intended.
            p1 = _load_data_by_type(part1, match_type)
            p2 = _load_data_by_type(part2, match_type)
            data = pd.concat([p1, p2], ignore_index=True)
        return matchzoo.pack(data)
    elif stage == 'dev':
        dev_path = path.joinpath('dev/SemEval2016-Task3-CQA-QL-dev.xml')
        data = _load_data_by_type(dev_path, match_type)
        return matchzoo.pack(data)
    else:
        test_path = path.joinpath('SemEval2016-Task3-CQA-QL-test.xml')
        data = _load_data_by_type(test_path, match_type)
        return matchzoo.pack(data)
def _load_data_by_type(path, match_type):
    """Dispatch parsing of ``path`` according to ``match_type``."""
    loaders = {
        'question': _load_question,
        'answer': _load_answer,
    }
    loader = loaders.get(match_type, _load_external_answer)
    return loader(path)
def _load_question(path):
    """
    Parse question-question pairs from a SemEval XML file.

    :param path: Path of the XML file to parse.
    :return: A DataFrame with columns ``id_left``, ``id_right``,
        ``text_left``, ``text_right`` and ``label`` (the
        ``RELQ_RELEVANCE2ORGQ`` annotation).
    """
    # Robustness fix: import the submodule explicitly — the module-level
    # bare `import xml` does not guarantee that `xml.etree.ElementTree`
    # is available as an attribute of `xml`.
    from xml.etree import ElementTree
    doc = ElementTree.parse(path)
    dataset = []
    for question in doc.iterfind('OrgQuestion'):
        qid = question.attrib['ORGQ_ID']
        query = question.findtext('OrgQBody')
        rel_question = question.find('Thread').find('RelQuestion')
        question = rel_question.findtext('RelQBody')
        question_id = rel_question.attrib['RELQ_ID']
        dataset.append([qid, question_id, query, question,
                        rel_question.attrib['RELQ_RELEVANCE2ORGQ']])
    df = pd.DataFrame(dataset, columns=[
        'id_left', 'id_right', 'text_left', 'text_right', 'label'])
    return df
def _load_answer(path):
    """
    Parse related-question/comment (answer) pairs from a SemEval XML file.

    :param path: Path of the XML file to parse.
    :return: A DataFrame with columns ``id_left``, ``id_right``,
        ``text_left``, ``text_right`` and ``label`` (the
        ``RELC_RELEVANCE2RELQ`` annotation).
    """
    # Robustness fix: import the submodule explicitly — the module-level
    # bare `import xml` does not guarantee that `xml.etree.ElementTree`
    # is available as an attribute of `xml`.
    from xml.etree import ElementTree
    doc = ElementTree.parse(path)
    dataset = []
    for org_q in doc.iterfind('OrgQuestion'):
        for thread in org_q.iterfind('Thread'):
            ques = thread.find('RelQuestion')
            qid = ques.attrib['RELQ_ID']
            question = ques.findtext('RelQBody')
            for comment in thread.iterfind('RelComment'):
                aid = comment.attrib['RELC_ID']
                answer = comment.findtext('RelCText')
                dataset.append([qid, aid, question, answer,
                                comment.attrib['RELC_RELEVANCE2RELQ']])
    df = pd.DataFrame(dataset, columns=[
        'id_left', 'id_right', 'text_left', 'text_right', 'label'])
    return df
def _load_external_answer(path):
    """
    Parse original-question/comment pairs from a SemEval XML file.

    :param path: Path of the XML file to parse.
    :return: A DataFrame with columns ``id_left``, ``id_right``,
        ``text_left``, ``text_right`` and ``label`` (the
        ``RELC_RELEVANCE2ORGQ`` annotation).
    """
    # Robustness fix: import the submodule explicitly — the module-level
    # bare `import xml` does not guarantee that `xml.etree.ElementTree`
    # is available as an attribute of `xml`.
    from xml.etree import ElementTree
    doc = ElementTree.parse(path)
    dataset = []
    for question in doc.iterfind('OrgQuestion'):
        qid = question.attrib['ORGQ_ID']
        query = question.findtext('OrgQBody')
        thread = question.find('Thread')
        for comment in thread.iterfind('RelComment'):
            answer = comment.findtext('RelCText')
            aid = comment.attrib['RELC_ID']
            dataset.append([qid, aid, query, answer,
                            comment.attrib['RELC_RELEVANCE2ORGQ']])
    df = pd.DataFrame(dataset, columns=[
        'id_left', 'id_right', 'text_left', 'text_right', 'label'])
    return df
| 7,865 | 37.558824 | 79 | py |
MatchZoo | MatchZoo-master/matchzoo/datasets/embeddings/load_glove_embedding.py | """Embedding data loader."""
from pathlib import Path
import keras
import matchzoo as mz
_glove_embedding_url = "http://nlp.stanford.edu/data/glove.6B.zip"
def load_glove_embedding(dimension: int = 50) -> mz.embedding.Embedding:
    """
    Return the pretrained glove embedding.

    :param dimension: the size of embedding dimension, the value can only be
        50, 100, or 300.
    :return: The :class:`mz.embedding.Embedding` object.
    """
    file_name = f'glove.6B.{dimension}d.txt'
    target = Path(mz.USER_DATA_DIR) / 'glove' / file_name
    if not target.exists():
        # Download and extract the archive only on first use.
        keras.utils.data_utils.get_file('glove_embedding',
                                        _glove_embedding_url,
                                        extract=True,
                                        cache_dir=mz.USER_DATA_DIR,
                                        cache_subdir='glove')
    return mz.embedding.load_from_file(file_path=str(target), mode='glove')
| 995 | 33.344828 | 78 | py |
MatchZoo | MatchZoo-master/matchzoo/datasets/wiki_qa/load_data.py | """WikiQA data loader."""
import typing
import csv
from pathlib import Path
import keras
import pandas as pd
import matchzoo
_url = "https://download.microsoft.com/download/E/5/F/" \
"E5FCFCEE-7005-4814-853D-DAA7C66507E0/WikiQACorpus.zip"
def load_data(
    stage: str = 'train',
    task: str = 'ranking',
    filtered: bool = False,
    return_classes: bool = False
) -> typing.Union[matchzoo.DataPack, tuple]:
    """
    Load WikiQA data.

    :param stage: One of `train`, `dev`, and `test`.
    :param task: Could be one of `ranking`, `classification` or a
        :class:`matchzoo.engine.BaseTask` instance.
    :param filtered: Whether remove the questions without correct answers.
    :param return_classes: `True` to return classes for classification task,
        `False` otherwise.
    :return: A DataPack unless `task` is `classification` and `return_classes`
        is `True`: a tuple of `(DataPack, classes)` in that case.
    """
    if stage not in ('train', 'dev', 'test'):
        raise ValueError(f"{stage} is not a valid stage."
                         f"Must be one of `train`, `dev`, and `test`.")
    data_root = _download_data()
    file_path = data_root.joinpath(f'WikiQA-{stage}.tsv')
    data_pack = _read_data(file_path)
    # Optionally drop questions without a correct answer: keep only the
    # relation rows whose question id appears in the `*-filtered.ref` file.
    if filtered and stage in ('dev', 'test'):
        ref_path = data_root.joinpath(f'WikiQA-{stage}.ref')
        filter_ref_path = data_root.joinpath(f'WikiQA-{stage}-filtered.ref')
        with open(filter_ref_path, mode='r') as f:
            filtered_ids = set([line.split()[0] for line in f])
        filtered_lines = []
        with open(ref_path, mode='r') as f:
            for idx, line in enumerate(f.readlines()):
                if line.split()[0] in filtered_ids:
                    filtered_lines.append(idx)
        data_pack = data_pack[filtered_lines]
    # Normalise string task names into task instances.
    if task == 'ranking':
        task = matchzoo.tasks.Ranking()
    if task == 'classification':
        task = matchzoo.tasks.Classification()
    if isinstance(task, matchzoo.tasks.Ranking):
        return data_pack
    elif isinstance(task, matchzoo.tasks.Classification):
        data_pack.one_hot_encode_label(task.num_classes, inplace=True)
        if return_classes:
            return data_pack, [False, True]
        else:
            return data_pack
    else:
        raise ValueError(f"{task} is not a valid task."
                         f"Must be one of `Ranking` and `Classification`.")
def _download_data():
    """Fetch and unpack the WikiQA corpus; return the extracted root."""
    archive = keras.utils.data_utils.get_file(
        'wikiqa',
        _url,
        extract=True,
        cache_dir=matchzoo.USER_DATA_DIR,
        cache_subdir='wiki_qa',
    )
    return Path(archive).parent / 'WikiQACorpus'
def _read_data(path):
    """Parse one WikiQA tsv file into a :class:`matchzoo.DataPack`.

    :param path: Path to a `WikiQA-{stage}.tsv` file.
    :return: A DataPack built from the question/sentence pairs.
    """
    # QUOTE_NONE: the corpus contains unbalanced quote characters.
    raw = pd.read_csv(path, sep='\t', header=0, quoting=csv.QUOTE_NONE)
    frame = pd.DataFrame({
        'text_left': raw['Question'],
        'text_right': raw['Sentence'],
        'id_left': raw['QuestionID'],
        'id_right': raw['SentenceID'],
        'label': raw['Label'],
    })
    return matchzoo.pack(frame)
| 3,045 | 32.472527 | 79 | py |
MatchZoo | MatchZoo-master/matchzoo/layers/matching_layer.py | """An implementation of Matching Layer."""
import typing
import tensorflow as tf
from keras.engine import Layer
class MatchingLayer(Layer):
    """
    Layer that computes a matching matrix between samples in two tensors.

    :param normalize: Whether to L2-normalize samples along the
        dot product axis before taking the dot product.
        If set to True, then the output of the dot product
        is the cosine proximity between the two samples.
    :param matching_type: the similarity function for matching
    :param kwargs: Standard layer keyword arguments.

    Examples:
        >>> import matchzoo as mz
        >>> layer = mz.layers.MatchingLayer(matching_type='dot',
        ...                                 normalize=True)
        >>> num_batch, left_len, right_len, num_dim = 5, 3, 2, 10
        >>> layer.build([[num_batch, left_len, num_dim],
        ...              [num_batch, right_len, num_dim]])

    """

    def __init__(self, normalize: bool = False,
                 matching_type: str = 'dot', **kwargs):
        """:class:`MatchingLayer` constructor."""
        super().__init__(**kwargs)
        self._normalize = normalize
        # Validate before storing so an invalid type fails at construction.
        self._validate_matching_type(matching_type)
        self._matching_type = matching_type
        self._shape1 = None
        self._shape2 = None

    @classmethod
    def _validate_matching_type(cls, matching_type: str = 'dot'):
        """Raise :exc:`ValueError` if `matching_type` is not supported."""
        valid_matching_type = ['dot', 'mul', 'plus', 'minus', 'concat']
        if matching_type not in valid_matching_type:
            raise ValueError(f"{matching_type} is not a valid matching type, "
                             f"{valid_matching_type} expected.")

    def build(self, input_shape: list):
        """
        Build the layer.

        :param input_shape: the shapes of the input tensors,
            for MatchingLayer we need two input tensors.
        """
        # Used purely for shape validation.
        if not isinstance(input_shape, list) or len(input_shape) != 2:
            raise ValueError('A `MatchingLayer` layer should be called '
                             'on a list of 2 inputs.')
        self._shape1 = input_shape[0]
        self._shape2 = input_shape[1]
        # Batch (axis 0) and embedding (axis 2) dimensions must agree; only
        # the sequence-length axis (1) may differ between left and right.
        for idx in 0, 2:
            if self._shape1[idx] != self._shape2[idx]:
                raise ValueError(
                    'Incompatible dimensions: '
                    f'{self._shape1[idx]} != {self._shape2[idx]}.'
                    f'Layer shapes: {self._shape1}, {self._shape2}.'
                )

    def call(self, inputs: list, **kwargs) -> typing.Any:
        """
        The computation logic of MatchingLayer.

        :param inputs: two input tensors.
        """
        x1 = inputs[0]
        x2 = inputs[1]
        if self._matching_type == 'dot':
            if self._normalize:
                # Normalizing both sides turns the dot product into
                # cosine similarity.
                x1 = tf.math.l2_normalize(x1, axis=2)
                x2 = tf.math.l2_normalize(x2, axis=2)
            return tf.expand_dims(tf.einsum('abd,acd->abc', x1, x2), 3)
        else:
            if self._matching_type == 'mul':
                def func(x, y):
                    return x * y
            elif self._matching_type == 'plus':
                def func(x, y):
                    return x + y
            elif self._matching_type == 'minus':
                def func(x, y):
                    return x - y
            elif self._matching_type == 'concat':
                def func(x, y):
                    return tf.concat([x, y], axis=3)
            else:
                # Defensive: __init__ already validates, but keep the branch
                # with a readable message.  (Fixed: spaces between fragments
                # and the "Mut be" typo.)
                raise ValueError(f"Invalid matching type. "
                                 f"{self._matching_type} received. "
                                 f"Must be in `dot`, `mul`, `plus`, "
                                 f"`minus` and `concat`.")
            # Tile both sides to (batch, len1, len2, dim) so the element-wise
            # matching function can be applied position-by-position.
            x1_exp = tf.stack([x1] * self._shape2[1], 2)
            x2_exp = tf.stack([x2] * self._shape1[1], 1)
            return func(x1_exp, x2_exp)

    def compute_output_shape(self, input_shape: list) -> tuple:
        """
        Calculate the layer output shape.

        :param input_shape: the shapes of the input tensors,
            for MatchingLayer we need two input tensors.
        """
        if not isinstance(input_shape, list) or len(input_shape) != 2:
            raise ValueError('A `MatchingLayer` layer should be called '
                             'on a list of 2 inputs.')
        shape1 = list(input_shape[0])
        shape2 = list(input_shape[1])
        if len(shape1) != 3 or len(shape2) != 3:
            raise ValueError('A `MatchingLayer` layer should be called '
                             'on 2 inputs with 3 dimensions.')
        if shape1[0] != shape2[0] or shape1[2] != shape2[2]:
            raise ValueError('A `MatchingLayer` layer should be called '
                             'on 2 inputs with same 0,2 dimensions.')
        if self._matching_type in ['mul', 'plus', 'minus']:
            return shape1[0], shape1[1], shape2[1], shape1[2]
        elif self._matching_type == 'dot':
            return shape1[0], shape1[1], shape2[1], 1
        elif self._matching_type == 'concat':
            return shape1[0], shape1[1], shape2[1], shape1[2] + shape2[2]
        else:
            raise ValueError(f"Invalid `matching_type`. "
                             f"{self._matching_type} received. "
                             f"Must be in `mul`, `plus`, `minus` "
                             f"`dot` and `concat`.")

    def get_config(self) -> dict:
        """Get the config dict of MatchingLayer."""
        config = {
            'normalize': self._normalize,
            'matching_type': self._matching_type,
        }
        base_config = super(MatchingLayer, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
| 5,753 | 39.808511 | 78 | py |
MatchZoo | MatchZoo-master/matchzoo/layers/dynamic_pooling_layer.py | """An implementation of Dynamic Pooling Layer."""
import typing
import tensorflow as tf
from keras.engine import Layer
class DynamicPoolingLayer(Layer):
    """
    Layer that computes dynamic pooling of one tensor.

    Re-indexes a matching matrix with per-sample `dpool_index` coordinates,
    then max-pools it down to a fixed `(psize1, psize2)` feature map so
    variable-length text pairs produce fixed-size outputs.

    :param psize1: pooling size of dimension 1
    :param psize2: pooling size of dimension 2
    :param kwargs: Standard layer keyword arguments.

    Examples:
        >>> import matchzoo as mz
        >>> layer = mz.layers.DynamicPoolingLayer(3, 2)
        >>> num_batch, left_len, right_len, num_dim = 5, 3, 2, 10
        >>> layer.build([[num_batch, left_len, right_len, num_dim],
        ...              [num_batch, left_len, right_len, 3]])

    """

    def __init__(self,
                 psize1: int,
                 psize2: int,
                 **kwargs):
        """:class:`DynamicPoolingLayer` constructor."""
        super().__init__(**kwargs)
        self._psize1 = psize1
        self._psize2 = psize2

    def build(self, input_shape: typing.List[int]):
        """
        Build the layer.

        :param input_shape: the shapes of the input tensors,
            for DynamicPoolingLayer we need two input tensors.
        """
        super().build(input_shape)
        input_shape_one = input_shape[0]
        # Record the matching-matrix size; `call` needs it to tile the batch
        # indices and to derive the pooling strides.
        self._msize1 = input_shape_one[1]
        self._msize2 = input_shape_one[2]

    def call(self, inputs: list, **kwargs) -> typing.Any:
        """
        The computation logic of DynamicPoolingLayer.

        :param inputs: two input tensors: the matching matrix `x` and the
            per-sample 2-D coordinates `dpool_index`.
        """
        self._validate_dpool_size()
        x, dpool_index = inputs
        dpool_shape = tf.shape(dpool_index)
        # Build a (batch, msize1, msize2, 1) tensor of batch indices so the
        # 2-D coordinates in `dpool_index` become fully-qualified
        # [batch, row, col] indices for `tf.gather_nd`.
        batch_index_one = tf.expand_dims(
            tf.expand_dims(
                tf.range(dpool_shape[0]), axis=-1),
            axis=-1)
        batch_index = tf.expand_dims(
            tf.tile(batch_index_one, [1, self._msize1, self._msize2]),
            axis=-1)
        dpool_index_ex = tf.concat([batch_index, dpool_index], axis=3)
        x_expand = tf.gather_nd(x, dpool_index_ex)
        # Max-pool the re-indexed matrix down to (psize1, psize2).
        stride1 = self._msize1 // self._psize1
        stride2 = self._msize2 // self._psize2
        x_pool = tf.nn.max_pool(x_expand,
                                [1, stride1, stride2, 1],
                                [1, stride1, stride2, 1],
                                "VALID")
        return x_pool

    def compute_output_shape(self, input_shape: list) -> tuple:
        """
        Calculate the layer output shape.

        :param input_shape: the shapes of the input tensors,
            for DynamicPoolingLayer we need two input tensors.
        """
        input_shape_one = input_shape[0]
        return (None, self._psize1, self._psize2, input_shape_one[3])

    def get_config(self) -> dict:
        """Get the config dict of DynamicPoolingLayer."""
        config = {
            'psize1': self._psize1,
            'psize2': self._psize2
        }
        base_config = super(DynamicPoolingLayer, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))

    def _validate_dpool_size(self):
        # Fail early, with a concrete suggestion, when (psize1, psize2)
        # cannot be produced exactly by integer-stride pooling over
        # (msize1, msize2).
        suggestion = self.get_size_suggestion(
            self._msize1, self._msize2, self._psize1, self._psize2
        )
        if suggestion != (self._psize1, self._psize2):
            raise ValueError(
                "DynamicPooling Layer can not "
                f"generate ({self._psize1} x {self._psize2}) output "
                f"feature map, please use ({suggestion[0]} x {suggestion[1]})"
                f" instead. `model.params['dpool_size'] = {suggestion}` "
            )

    @classmethod
    def get_size_suggestion(
        cls,
        msize1: int,
        msize2: int,
        psize1: int,
        psize2: int
    ) -> typing.Tuple[int, int]:
        """
        Get `dpool_size` suggestion for a given shape.

        Returns the nearest legal `dpool_size` for the given combination of
        `(psize1, psize2)`.

        :param msize1: size of the left text.
        :param msize2: size of the right text.
        :param psize1: base size of the pool.
        :param psize2: base size of the pool.
        :return: the nearest legal `(psize1, psize2)` pair.
        """
        stride1 = msize1 // psize1
        stride2 = msize2 // psize2
        suggestion1 = msize1 // stride1
        suggestion2 = msize2 // stride2
        return (suggestion1, suggestion2)
| 4,315 | 32.457364 | 78 | py |
MatchZoo | MatchZoo-master/matchzoo/data_generator/data_generator.py | """Base generator."""
import math
import typing
import keras
import numpy as np
import pandas as pd
import matchzoo as mz
from matchzoo.data_generator.callbacks import Callback
class DataGenerator(keras.utils.Sequence):
    """
    Data Generator.

    Used to divide a :class:`matchzoo.DataPack` into batches. This is helpful
    for generating batch-wise features and delaying data preprocessing to the
    `fit` time.

    See `tutorials/data_handling.ipynb` for a walkthrough.

    :param data_pack: DataPack to generator data from.
    :param mode: One of "point", "pair", and "list". (default: "point")
    :param num_dup: Number of duplications per instance, only effective when
        `mode` is "pair". (default: 1)
    :param num_neg: Number of negative samples per instance, only effective
        when `mode` is "pair". (default: 1)
    :param resample: Whether to resample for each epoch, only effective when
        `mode` is "pair". (default: `True`)
    :param batch_size: Batch size. (default: 128)
    :param shuffle: Whether to shuffle the samples/instances.
        (default: `True`)
    :param callbacks: Callbacks. See `matchzoo.data_generator.callbacks` for
        more details.

    Examples::
        >>> import numpy as np
        >>> import matchzoo as mz
        >>> np.random.seed(0)
        >>> data_pack = mz.datasets.toy.load_data()
        >>> batch_size = 8

        To generate data points:
        >>> point_gen = mz.DataGenerator(
        ...     data_pack=data_pack,
        ...     batch_size=batch_size
        ... )
        >>> len(point_gen)
        13
        >>> x, y = point_gen[0]
        >>> for key, value in sorted(x.items()):
        ...     print(key, str(value)[:30])
        id_left ['Q6' 'Q17' 'Q1' 'Q13' 'Q16' '
        id_right ['D6-6' 'D17-1' 'D1-2' 'D13-3'
        text_left ['how long is the term for fed
        text_right ['See Article I and Article II

        To generate data pairs:
        >>> pair_gen = mz.DataGenerator(
        ...     data_pack=data_pack,
        ...     mode='pair',
        ...     num_dup=4,
        ...     num_neg=4,
        ...     batch_size=batch_size,
        ...     shuffle=False
        ... )
        >>> len(pair_gen)
        3
        >>> x, y = pair_gen[0]
        >>> for key, value in sorted(x.items()):
        ...     print(key, str(value)[:30])
        id_left ['Q1' 'Q1' 'Q1' 'Q1' 'Q1' 'Q1'
        id_right ['D1-3' 'D1-4' 'D1-0' 'D1-1' '
        text_left ['how are glacier caves formed
        text_right ['A glacier cave is a cave for

        To generate data lists:
        # TODO:

    """

    def __init__(
        self,
        data_pack: mz.DataPack,
        mode='point',
        num_dup: int = 1,
        num_neg: int = 1,
        resample: bool = True,
        batch_size: int = 128,
        shuffle: bool = True,
        callbacks: typing.List[Callback] = None
    ):
        """Init."""
        if callbacks is None:
            callbacks = []
        if mode not in ('point', 'pair', 'list'):
            raise ValueError(f"{mode} is not a valid mode type."
                             f"Must be one of `point`, `pair` or `list`.")
        self._mode = mode
        self._num_dup = num_dup
        self._num_neg = num_neg
        self._batch_size = batch_size
        self._shuffle = shuffle
        self._resample = resample
        # Keep the original relation so pair-wise resampling (on_epoch_end)
        # can re-derive fresh negative samples from scratch each epoch.
        self._orig_relation = data_pack.relation
        self._callbacks = callbacks
        if mode == 'pair':
            # Rewrite the relation table into (positive, negatives) groups of
            # `num_neg + 1` consecutive rows per training instance.
            data_pack.relation = self._reorganize_pair_wise(
                data_pack.relation,
                num_dup=num_dup,
                num_neg=num_neg
            )
        self._data_pack = data_pack
        self._batch_indices = None
        self.reset_index()

    def __getitem__(self, item: int) -> typing.Tuple[dict, np.ndarray]:
        """Get a batch from index idx.

        :param item: the index of the batch; a slice yields the
            concatenation of the selected batches.
        """
        if isinstance(item, slice):
            # Flatten the per-batch index lists into one flat list.
            indices = sum(self._batch_indices[item], [])
        else:
            indices = self._batch_indices[item]
        batch_data_pack = self._data_pack[indices]
        self._handle_callbacks_on_batch_data_pack(batch_data_pack)
        x, y = batch_data_pack.unpack()
        self._handle_callbacks_on_batch_unpacked(x, y)
        return x, y

    def __len__(self) -> int:
        """Get the total number of batches."""
        return len(self._batch_indices)

    def on_epoch_end(self):
        """Reorganize the index array while epoch is ended."""
        if self._mode == 'pair' and self._resample:
            self._data_pack.relation = self._reorganize_pair_wise(
                relation=self._orig_relation,
                num_dup=self._num_dup,
                num_neg=self._num_neg
            )
        self.reset_index()

    def reset_index(self):
        """
        Set the :attr:`index_array`.

        Here the :attr:`index_array` records the index of all the instances.
        """
        # index pool: index -> instance index
        if self._mode == 'point':
            num_instances = len(self._data_pack)
            index_pool = list(range(num_instances))
        elif self._mode == 'pair':
            # One "instance" is a (positive, negatives) group of
            # `num_neg + 1` consecutive rows; keep groups intact so a batch
            # never splits a pair.
            index_pool = []
            step_size = self._num_neg + 1
            num_instances = int(len(self._data_pack) / step_size)
            for i in range(num_instances):
                lower = i * step_size
                upper = (i + 1) * step_size
                indices = list(range(lower, upper))
                if indices:
                    index_pool.append(indices)
        elif self._mode == 'list':
            raise NotImplementedError(
                f'{self._mode} data generator not implemented.')
        else:
            raise ValueError(f"{self._mode} is not a valid mode type"
                             f"Must be one of `point`, `pair` or `list`.")
        if self._shuffle:
            np.random.shuffle(index_pool)
        # batch_indices: index -> batch of indices
        self._batch_indices = []
        for i in range(math.ceil(num_instances / self._batch_size)):
            lower = self._batch_size * i
            upper = self._batch_size * (i + 1)
            candidates = index_pool[lower:upper]
            if self._mode == 'pair':
                # Flatten the groups back into row indices for the batch.
                candidates = sum(candidates, [])
            if candidates:
                self._batch_indices.append(candidates)

    def _handle_callbacks_on_batch_data_pack(self, batch_data_pack):
        # Hook point before unpacking; callbacks may mutate the batch pack.
        for callback in self._callbacks:
            callback.on_batch_data_pack(batch_data_pack)

    def _handle_callbacks_on_batch_unpacked(self, x, y):
        # Hook point after unpacking; callbacks may mutate x/y in place.
        for callback in self._callbacks:
            callback.on_batch_unpacked(x, y)

    @property
    def callbacks(self):
        """`callbacks` getter."""
        return self._callbacks

    @callbacks.setter
    def callbacks(self, value):
        """`callbacks` setter."""
        self._callbacks = value

    @property
    def num_neg(self):
        """`num_neg` getter."""
        return self._num_neg

    @num_neg.setter
    def num_neg(self, value):
        """`num_neg` setter."""
        self._num_neg = value
        self.reset_index()

    @property
    def num_dup(self):
        """`num_dup` getter."""
        return self._num_dup

    @num_dup.setter
    def num_dup(self, value):
        """`num_dup` setter."""
        self._num_dup = value
        self.reset_index()

    @property
    def mode(self):
        """`mode` getter."""
        return self._mode

    @mode.setter
    def mode(self, value):
        """`mode` setter."""
        self._mode = value
        self.reset_index()

    @property
    def batch_size(self):
        """`batch_size` getter."""
        return self._batch_size

    @batch_size.setter
    def batch_size(self, value):
        """`batch_size` setter."""
        self._batch_size = value
        self.reset_index()

    @property
    def shuffle(self):
        """`shuffle` getter."""
        return self._shuffle

    @shuffle.setter
    def shuffle(self, value):
        """`shuffle` setter."""
        self._shuffle = value
        self.reset_index()

    @property
    def batch_indices(self):
        """`batch_indices` getter."""
        return self._batch_indices

    @classmethod
    def _reorganize_pair_wise(
        cls,
        relation: pd.DataFrame,
        num_dup: int = 1,
        num_neg: int = 1
    ):
        """Re-organize the data pack as pair-wise format."""
        pairs = []
        # For each query, pair every positive (duplicated `num_dup` times)
        # with `num_neg` sampled lower-labeled rows.
        groups = relation.sort_values(
            'label', ascending=False).groupby('id_left')
        for idx, group in groups:
            labels = group.label.unique()
            for label in labels[:-1]:
                pos_samples = group[group.label == label]
                pos_samples = pd.concat([pos_samples] * num_dup)
                neg_samples = group[group.label < label]
                for _, pos_sample in pos_samples.iterrows():
                    pos_sample = pd.DataFrame([pos_sample])
                    # Sample with replacement: a query may have fewer than
                    # `num_neg` negatives available.
                    neg_sample = neg_samples.sample(num_neg, replace=True)
                    pairs.extend((pos_sample, neg_sample))
        new_relation = pd.concat(pairs, ignore_index=True)
        return new_relation
| 9,187 | 30.251701 | 78 | py |
MatchZoo | MatchZoo-master/matchzoo/engine/base_model.py | """Base Model."""
import abc
import typing
from pathlib import Path
import dill
import numpy as np
import keras
import keras.backend as K
import pandas as pd
import matchzoo
from matchzoo import DataGenerator
from matchzoo.engine import hyper_spaces
from matchzoo.engine.base_preprocessor import BasePreprocessor
from matchzoo.engine.base_metric import BaseMetric
from matchzoo.engine.param_table import ParamTable
from matchzoo.engine.param import Param
from matchzoo import tasks
class BaseModel(abc.ABC):
    """
    Abstract base class of all MatchZoo models.

    MatchZoo models are wrapped over keras models, and the actual keras model
    built can be accessed by `model.backend`. `params` is a set of model
    hyper-parameters that deterministically builds a model. In other words,
    `params['model_class'](params=params)` of the same `params` always create
    models with the same structure.

    :param params: Model hyper-parameters. (default: return value from
        :meth:`get_default_params`)
    :param backend: A keras model as the model backend. Usually not passed as
        an argument.

    Example:
        >>> BaseModel() # doctest: +ELLIPSIS
        Traceback (most recent call last):
        ...
        TypeError: Can't instantiate abstract class BaseModel ...
        >>> class MyModel(BaseModel):
        ...     def build(self):
        ...         pass
        >>> isinstance(MyModel(), BaseModel)
        True

    """

    # Filenames used by :meth:`save` and :func:`load_model`.
    BACKEND_WEIGHTS_FILENAME = 'backend_weights.h5'
    PARAMS_FILENAME = 'params.dill'

    def __init__(
        self,
        params: typing.Optional[ParamTable] = None,
        backend: typing.Optional[keras.models.Model] = None
    ):
        """Init."""
        self._params = params or self.get_default_params()
        self._backend = backend

    @classmethod
    def get_default_params(
        cls,
        with_embedding=False,
        with_multi_layer_perceptron=False
    ) -> ParamTable:
        """
        Model default parameters.

        The common usage is to instantiate :class:`matchzoo.engine.ModelParams`
        first, then set the model specific parameters.

        Examples:
            >>> class MyModel(BaseModel):
            ...     def build(self):
            ...         print(self._params['num_eggs'], 'eggs')
            ...         print('and', self._params['ham_type'])
            ...
            ...     @classmethod
            ...     def get_default_params(cls):
            ...         params = ParamTable()
            ...         params.add(Param('num_eggs', 512))
            ...         params.add(Param('ham_type', 'Parma Ham'))
            ...         return params
            >>> my_model = MyModel()
            >>> my_model.build()
            512 eggs
            and Parma Ham

        Notice that all parameters must be serialisable for the entire model
        to be serialisable. Therefore, it's strongly recommended to use python
        native data types to store parameters.

        :return: model parameters

        """
        params = ParamTable()
        params.add(Param(
            name='model_class', value=cls,
            desc="Model class. Used internally for save/load. "
                 "Changing this may cause unexpected behaviors."
        ))
        params.add(Param(
            name='input_shapes',
            desc="Dependent on the model and data. Should be set manually."
        ))
        params.add(Param(
            name='task',
            desc="Decides model output shape, loss, and metrics."
        ))
        params.add(Param(
            name='optimizer', value='adam',
        ))
        if with_embedding:
            params.add(Param(
                name='with_embedding', value=True,
                desc="A flag used help `auto` module. Shouldn't be changed."
            ))
            params.add(Param(
                name='embedding_input_dim',
                desc='Usually equals vocab size + 1. Should be set manually.'
            ))
            params.add(Param(
                name='embedding_output_dim',
                desc='Should be set manually.'
            ))
            params.add(Param(
                name='embedding_trainable', value=True,
                desc='`True` to enable embedding layer training, '
                     '`False` to freeze embedding parameters.'
            ))
        if with_multi_layer_perceptron:
            params.add(Param(
                name='with_multi_layer_perceptron', value=True,
                desc="A flag of whether a multiple layer perceptron is used. "
                     "Shouldn't be changed."
            ))
            params.add(Param(
                name='mlp_num_units', value=128,
                desc="Number of units in first `mlp_num_layers` layers.",
                hyper_space=hyper_spaces.quniform(8, 256, 8)
            ))
            params.add(Param(
                name='mlp_num_layers', value=3,
                desc="Number of layers of the multiple layer percetron.",
                hyper_space=hyper_spaces.quniform(1, 6)
            ))
            params.add(Param(
                name='mlp_num_fan_out', value=64,
                desc="Number of units of the layer that connects the multiple "
                     "layer percetron and the output.",
                hyper_space=hyper_spaces.quniform(4, 128, 4)
            ))
            params.add(Param(
                name='mlp_activation_func', value='relu',
                desc='Activation function used in the multiple '
                     'layer perceptron.'
            ))
        return params

    @classmethod
    def get_default_preprocessor(cls) -> BasePreprocessor:
        """
        Model default preprocessor.

        The preprocessor's transform should produce a correctly shaped data
        pack that can be used for training. Some extra configuration (e.g.
        setting `input_shapes` in :class:`matchzoo.models.DSSMModel` may be
        required on the user's end.

        :return: Default preprocessor.
        """
        return matchzoo.preprocessors.BasicPreprocessor()

    @property
    def params(self) -> ParamTable:
        """:return: model parameters."""
        return self._params

    @params.setter
    def params(self, val):
        """`params` setter."""
        self._params = val

    @property
    def backend(self) -> keras.models.Model:
        """:return: model backend, a keras model instance."""
        if not self._backend:
            raise ValueError("Backend not found."
                             "Please build the model first.")
        else:
            return self._backend

    @abc.abstractmethod
    def build(self):
        """Build model, each subclass need to implement this method."""

    def compile(self):
        """
        Compile model for training.

        Only `keras` native metrics are compiled together with backend.
        MatchZoo metrics are evaluated only through :meth:`evaluate`.
        Notice that `keras` count `loss` as one of the metrics while MatchZoo
        :class:`matchzoo.engine.BaseTask` does not.

        Examples:
            >>> from matchzoo import models
            >>> model = models.Naive()
            >>> model.guess_and_fill_missing_params(verbose=0)
            >>> model.params['task'].metrics = ['mse', 'map']
            >>> model.params['task'].metrics
            ['mse', mean_average_precision(0.0)]
            >>> model.build()
            >>> model.compile()

        """
        self._backend.compile(optimizer=self._params['optimizer'],
                              loss=self._params['task'].loss)

    def fit(
        self,
        x: typing.Union[np.ndarray, typing.List[np.ndarray], dict],
        y: np.ndarray,
        batch_size: int = 128,
        epochs: int = 1,
        verbose: int = 1,
        **kwargs
    ) -> keras.callbacks.History:
        """
        Fit the model.

        See :meth:`keras.models.Model.fit` for more details.

        :param x: input data.
        :param y: labels.
        :param batch_size: number of samples per gradient update.
        :param epochs: number of epochs to train the model.
        :param verbose: 0, 1, or 2. Verbosity mode. 0 = silent, 1 = verbose,
            2 = one log line per epoch.

        Key word arguments not listed above will be propagated to keras's fit.

        :return: A `keras.callbacks.History` instance. Its history attribute
            contains all information collected during training.
        """
        return self._backend.fit(x=x, y=y,
                                 batch_size=batch_size, epochs=epochs,
                                 verbose=verbose, **kwargs)

    def fit_generator(
        self,
        generator: matchzoo.DataGenerator,
        epochs: int = 1,
        verbose: int = 1,
        **kwargs
    ) -> keras.callbacks.History:
        """
        Fit the model with matchzoo `generator`.

        See :meth:`keras.models.Model.fit_generator` for more details.

        :param generator: A generator, an instance of
            :class:`engine.DataGenerator`.
        :param epochs: Number of epochs to train the model.
        :param verbose: 0, 1, or 2. Verbosity mode. 0 = silent, 1 = verbose,
            2 = one log line per epoch.

        :return: A `keras.callbacks.History` instance. Its history attribute
            contains all information collected during training.
        """
        return self._backend.fit_generator(
            generator=generator,
            epochs=epochs,
            verbose=verbose, **kwargs
        )

    def evaluate(
        self,
        x: typing.Dict[str, np.ndarray],
        y: np.ndarray,
        batch_size: int = 128
    ) -> typing.Dict[BaseMetric, float]:
        """
        Evaluate the model.

        :param x: Input data.
        :param y: Labels.
        :param batch_size: Number of samples when `predict` for evaluation.
            (default: 128)

        Examples::
            >>> import matchzoo as mz
            >>> data_pack = mz.datasets.toy.load_data()
            >>> preprocessor = mz.preprocessors.NaivePreprocessor()
            >>> data_pack = preprocessor.fit_transform(data_pack, verbose=0)
            >>> m = mz.models.DenseBaseline()
            >>> m.params['task'] = mz.tasks.Ranking()
            >>> m.params['task'].metrics = [
            ...     'acc', 'mse', 'mae', 'ce',
            ...     'average_precision', 'precision', 'dcg', 'ndcg',
            ...     'mean_reciprocal_rank', 'mean_average_precision', 'mrr',
            ...     'map', 'MAP',
            ...     mz.metrics.AveragePrecision(threshold=1),
            ...     mz.metrics.Precision(k=2, threshold=2),
            ...     mz.metrics.DiscountedCumulativeGain(k=2),
            ...     mz.metrics.NormalizedDiscountedCumulativeGain(
            ...         k=3, threshold=-1),
            ...     mz.metrics.MeanReciprocalRank(threshold=2),
            ...     mz.metrics.MeanAveragePrecision(threshold=3)
            ... ]
            >>> m.guess_and_fill_missing_params(verbose=0)
            >>> m.build()
            >>> m.compile()
            >>> x, y = data_pack.unpack()
            >>> evals = m.evaluate(x, y)
            >>> type(evals)
            <class 'dict'>

        """
        result = dict()
        # Keras metrics are computed tensor-wise; MatchZoo metrics need the
        # predictions grouped per query, so the two sets are handled apart.
        matchzoo_metrics, keras_metrics = self._separate_metrics()
        y_pred = self.predict(x, batch_size)
        for metric in keras_metrics:
            metric_func = keras.metrics.get(metric)
            result[metric] = K.eval(K.mean(
                metric_func(K.variable(y), K.variable(y_pred))))
        if matchzoo_metrics:
            if not isinstance(self.params['task'], tasks.Ranking):
                raise ValueError("Matchzoo metrics only works on ranking.")
            for metric in matchzoo_metrics:
                result[metric] = self._eval_metric_on_data_frame(
                    metric, x['id_left'], y, y_pred)
        return result

    def evaluate_generator(
        self,
        generator: DataGenerator,
        batch_size: int = 128
    ) -> typing.Dict['BaseMetric', float]:
        """
        Evaluate the model.

        :param generator: DataGenerator to evaluate.
        :param batch_size: Batch size. (default: 128)
        """
        # `generator[:]` materializes every batch at once.
        x, y = generator[:]
        return self.evaluate(x, y, batch_size=batch_size)

    def _separate_metrics(self):
        # Split the task metrics into MatchZoo metrics (BaseMetric
        # instances) and everything else (treated as keras metrics).
        matchzoo_metrics = []
        keras_metrics = []
        for metric in self._params['task'].metrics:
            if isinstance(metric, BaseMetric):
                matchzoo_metrics.append(metric)
            else:
                keras_metrics.append(metric)
        return matchzoo_metrics, keras_metrics

    @classmethod
    def _eval_metric_on_data_frame(
        cls,
        metric: BaseMetric,
        id_left: typing.Union[list, np.array],
        y: typing.Union[list, np.array],
        y_pred: typing.Union[list, np.array]
    ):
        # Average the metric over per-query groups (grouped by `id_left`).
        eval_df = pd.DataFrame(data={
            'id': id_left,
            'true': y.squeeze(),
            'pred': y_pred.squeeze()
        })
        assert isinstance(metric, BaseMetric)
        val = eval_df.groupby(by='id').apply(
            lambda df: metric(df['true'].values, df['pred'].values)
        ).mean()
        return val

    def predict(
        self,
        x: typing.Dict[str, np.ndarray],
        batch_size=128
    ) -> np.ndarray:
        """
        Generate output predictions for the input samples.

        See :meth:`keras.models.Model.predict` for more details.

        :param x: input data
        :param batch_size: number of samples per gradient update
        :return: numpy array(s) of predictions
        """
        return self._backend.predict(x=x, batch_size=batch_size)

    def save(self, dirpath: typing.Union[str, Path]):
        """
        Save the model.

        A saved model is represented as a directory with two files. One is a
        model parameters file saved by `pickle`, and the other one is a model
        h5 file saved by `keras`.

        :param dirpath: directory path of the saved model

        Example:

            >>> import matchzoo as mz
            >>> model = mz.models.Naive()
            >>> model.guess_and_fill_missing_params(verbose=0)
            >>> model.build()
            >>> model.save('temp-model')
            >>> import shutil
            >>> shutil.rmtree('temp-model')

        """
        dirpath = Path(dirpath)
        params_path = dirpath.joinpath(self.PARAMS_FILENAME)
        weights_path = dirpath.joinpath(self.BACKEND_WEIGHTS_FILENAME)
        if not dirpath.exists():
            dirpath.mkdir(parents=True)
        else:
            # Refuse to overwrite an existing directory.
            raise FileExistsError(f'{dirpath} already exist, fail to save.')
        self._backend.save_weights(weights_path)
        with open(params_path, mode='wb') as params_file:
            dill.dump(self._params, params_file)

    def get_embedding_layer(
        self, name: str = 'embedding'
    ) -> keras.layers.Layer:
        """
        Get the embedding layer.

        All MatchZoo models with a single embedding layer set the embedding
        layer name to `embedding`, and this method should return that layer.

        :param name: Name of the embedding layer. (default: `embedding`)
        """
        for layer in self._backend.layers:
            if layer.name == name:
                return layer
        raise ValueError(f"Layer {name} not found. Initialize your embedding "
                         f"layer with `name='{name}'`.")

    def load_embedding_matrix(
        self,
        embedding_matrix: np.ndarray,
        name: str = 'embedding'
    ):
        """
        Load an embedding matrix.

        Load an embedding matrix into the model's embedding layer. The name
        of the embedding layer is specified by `name`. For models with only
        one embedding layer, set `name='embedding'` when creating the keras
        layer, and use the default `name` when load the matrix. For models
        with more than one embedding layers, initialize keras layer with
        different layer names, and set `name` accordingly to load a matrix
        to a chosen layer.

        :param embedding_matrix: Embedding matrix to be loaded.
        :param name: Name of the layer. (default: 'embedding')
        """
        self.get_embedding_layer(name).set_weights([embedding_matrix])

    def guess_and_fill_missing_params(self, verbose=1):
        """
        Guess and fill missing parameters in :attr:`params`.

        Use this method to automatically fill-in other hyper parameters.
        This involves some guessing so the parameter it fills could be
        wrong. For example, the default task is `Ranking`, and if we do not
        set it to `Classification` manually for data packs prepared for
        classification, then the shape of the model output and the data will
        mismatch.

        :param verbose: Verbosity.
        """
        self._params.get('task').set_default(tasks.Ranking(), verbose)
        self._params.get('input_shapes').set_default([(30,), (30,)], verbose)
        if 'with_embedding' in self._params:
            self._params.get('embedding_input_dim').set_default(300, verbose)
            self._params.get('embedding_output_dim').set_default(300, verbose)

    def _set_param_default(self, name: str,
                           default_val: str, verbose: int = 0):
        # Fill a single param with `default_val` only when it is unset.
        if self._params[name] is None:
            self._params[name] = default_val
            if verbose:
                print(f"Parameter \"{name}\" set to {default_val}.")

    def _make_inputs(self) -> list:
        # Build the pair of keras Input layers shared by all models.
        input_left = keras.layers.Input(
            name='text_left',
            shape=self._params['input_shapes'][0]
        )
        input_right = keras.layers.Input(
            name='text_right',
            shape=self._params['input_shapes'][1]
        )
        return [input_left, input_right]

    def _make_output_layer(self) -> keras.layers.Layer:
        """:return: a correctly shaped keras dense layer for model output."""
        task = self._params['task']
        if isinstance(task, tasks.Classification):
            return keras.layers.Dense(task.num_classes, activation='softmax')
        elif isinstance(task, tasks.Ranking):
            return keras.layers.Dense(1, activation='linear')
        else:
            raise ValueError(f"{task} is not a valid task type."
                             f"Must be in `Ranking` and `Classification`.")

    def _make_embedding_layer(
        self,
        name: str = 'embedding',
        **kwargs
    ) -> keras.layers.Layer:
        # Embedding layer configured from the model params; `name` matters
        # for :meth:`get_embedding_layer` / :meth:`load_embedding_matrix`.
        return keras.layers.Embedding(
            self._params['embedding_input_dim'],
            self._params['embedding_output_dim'],
            trainable=self._params['embedding_trainable'],
            name=name,
            **kwargs
        )

    def _make_multi_layer_perceptron_layer(self) -> keras.layers.Layer:
        # TODO: do not create new layers for a second call
        if not self._params['with_multi_layer_perceptron']:
            raise AttributeError(
                'Parameter `with_multi_layer_perception` not set.')

        def _wrapper(x):
            activation = self._params['mlp_activation_func']
            for _ in range(self._params['mlp_num_layers']):
                x = keras.layers.Dense(self._params['mlp_num_units'],
                                       activation=activation)(x)
            return keras.layers.Dense(self._params['mlp_num_fan_out'],
                                      activation=activation)(x)

        return _wrapper
def load_model(dirpath: typing.Union[str, Path]) -> BaseModel:
    """
    Load a model. The reverse function of :meth:`BaseModel.save`.

    :param dirpath: directory path of the saved model
    :return: a :class:`BaseModel` instance

    Example:

            >>> import matchzoo as mz
            >>> model = mz.models.Naive()
            >>> model.guess_and_fill_missing_params(verbose=0)
            >>> model.build()
            >>> model.save('my-model')
            >>> model.params.keys() == mz.load_model('my-model').params.keys()
            True
            >>> import shutil
            >>> shutil.rmtree('my-model')

    """
    dirpath = Path(dirpath)
    # Restore the hyper-parameter table first; it records the concrete
    # model class under 'model_class'.
    with open(dirpath.joinpath(BaseModel.PARAMS_FILENAME), mode='rb') as fh:
        saved_params = dill.load(fh)
    model = saved_params['model_class'](params=saved_params)
    # Rebuild the backend graph, then pour the saved weights into it.
    model.build()
    model.compile()
    model.backend.load_weights(
        dirpath.joinpath(BaseModel.BACKEND_WEIGHTS_FILENAME))
    return model
| 20,711 | 34.587629 | 79 | py |
MatchZoo | MatchZoo-master/matchzoo/engine/callbacks.py | """Callbacks."""
import typing
from pathlib import Path
import numpy as np
import keras
import matchzoo
from matchzoo.engine.base_model import BaseModel
class EvaluateAllMetrics(keras.callbacks.Callback):
    """
    Callback to evaluate all metrics.

    MatchZoo metrics can not be evaluated batch-wise since they require
    dataset-level information. As a result, MatchZoo metrics are not
    evaluated automatically when a Model `fit`. When this callback is used,
    all metrics, including MatchZoo metrics and Keras metrics, are evaluated
    once every `once_every` epochs.

    :param model: Model to evaluate.
    :param x: X.
    :param y: y.
    :param once_every: Evaluation only triggers when `epoch % once_every == 0`.
        (default: 1, i.e. evaluate on every epoch's end)
    :param batch_size: Number of samples per evaluation. This only affects the
        evaluation of Keras metrics, since MatchZoo metrics are always
        evaluated using the full data.
    :param model_save_path: Directory path to save the model after each
        evaluate callback, (default: None, i.e., no saving.)
    :param verbose: Verbosity.
    """

    def __init__(
        self,
        model: 'BaseModel',
        x: typing.Union[np.ndarray, typing.List[np.ndarray]],
        y: np.ndarray,
        once_every: int = 1,
        batch_size: int = 128,
        model_save_path: str = None,
        verbose=1
    ):
        """Initializer."""
        super().__init__()
        self._model = model
        self._dev_x = x
        self._dev_y = y
        self._valid_steps = once_every
        self._batch_size = batch_size
        self._model_save_path = model_save_path
        self._verbose = verbose

    def on_epoch_end(self, epoch: int, logs: dict = None):
        """
        Called at the end of an epoch.

        :param epoch: integer, index of epoch.
        :param logs: dictionary of logs; evaluation results are merged into
            it in place when provided.
        :return: dictionary of logs.
        """
        if (epoch + 1) % self._valid_steps == 0:
            val_logs = self._model.evaluate(self._dev_x, self._dev_y,
                                            self._batch_size)
            if self._verbose:
                print('Validation: ' + ' - '.join(
                    f'{k}: {v}' for k, v in val_logs.items()))
            # Fix: `logs` defaults to None; guard before mutating so calling
            # the callback without a logs dict no longer raises TypeError.
            if logs is not None:
                for k, v in val_logs.items():
                    logs[k] = v
            if self._model_save_path:
                # Each evaluation saves under a numbered sub-directory.
                curr_path = self._model_save_path + str('%d/' % (epoch + 1))
                self._model.save(curr_path)
| 2,513 | 32.972973 | 79 | py |
MatchZoo | MatchZoo-master/matchzoo/engine/parse_metric.py | import typing
import matchzoo
from matchzoo.engine.base_metric import BaseMetric
from matchzoo.engine import base_task
def parse_metric(
    metric: typing.Union[str, typing.Type[BaseMetric], BaseMetric],
    task: 'base_task.BaseTask' = None
) -> typing.Union['BaseMetric', str]:
    """
    Parse input metric in any form into a :class:`BaseMetric` instance.
    :param metric: Input metric in any form.
    :param task: Task type for determining specific metric.
    :return: A :class:`BaseMetric` instance
    Examples::
        >>> from matchzoo import metrics
        >>> from matchzoo.engine.parse_metric import parse_metric
        Use `str` as keras native metrics:
        >>> parse_metric('mse')
        'mse'
        Use `str` as MatchZoo metrics:
        >>> mz_metric = parse_metric('map')
        >>> type(mz_metric)
        <class 'matchzoo.metrics.mean_average_precision.MeanAveragePrecision'>
        Use :class:`matchzoo.engine.BaseMetric` subclasses as MatchZoo metrics:
        >>> type(parse_metric(metrics.AveragePrecision))
        <class 'matchzoo.metrics.average_precision.AveragePrecision'>
        Use :class:`matchzoo.engine.BaseMetric` instances as MatchZoo metrics:
        >>> type(parse_metric(metrics.AveragePrecision()))
        <class 'matchzoo.metrics.average_precision.AveragePrecision'>
    """
    if task is None:
        task = matchzoo.tasks.Ranking()
    if isinstance(metric, BaseMetric):
        # Already an instantiated MatchZoo metric: pass it through as-is.
        return metric
    if isinstance(metric, str):
        name = metric.lower()  # aliases are matched case-insensitively
        # First try MatchZoo metrics: a subclass matches when the name
        # equals its alias or is one entry of an alias collection.
        for candidate in BaseMetric.__subclasses__():
            if name == candidate.ALIAS or name in candidate.ALIAS:
                return candidate()
        # Otherwise assume a keras native metric name and remap per task.
        return _remap_keras_metric(name, task)
    if issubclass(metric, BaseMetric):
        # A BaseMetric subclass (not yet instantiated).
        return metric()
    raise ValueError(metric)
def _remap_keras_metric(metric: str, task) -> str:
    """Translate shorthand keras metric names according to the task type."""
    # We do not support sparse labels in classification, hence the
    # non-sparse accuracy/crossentropy variants below.
    ranking_aliases = {
        'acc': 'binary_accuracy',
        'accuracy': 'binary_accuracy',
        'crossentropy': 'binary_crossentropy',
        'ce': 'binary_crossentropy',
    }
    classification_aliases = {
        'acc': 'categorical_accuracy',
        'accuracy': 'categorical_accuracy',
        'crossentropy': 'categorical_crossentropy',
        'ce': 'categorical_crossentropy',
    }
    per_task = {
        matchzoo.tasks.Ranking: ranking_aliases,
        matchzoo.tasks.Classification: classification_aliases,
    }
    # Unknown names are returned unchanged so keras can resolve them.
    return per_task[type(task)].get(metric, metric)
| 2,559 | 31.405063 | 78 | py |
MatchZoo | MatchZoo-master/matchzoo/utils/make_keras_optimizer_picklable.py | import keras
def make_keras_optimizer_picklable():
    """
    Fix https://github.com/NTMC-Community/MatchZoo/issues/726.
    This function changes how keras behaves, use with caution.
    """
    # Monkey-patch pickle support onto the shared Optimizer base class:
    # serialize to keras' own config format instead of the raw __dict__.
    def _serialize_state(self):
        return keras.optimizers.serialize(self)
    def _restore_state(self, state):
        restored = keras.optimizers.deserialize(state)
        self.__dict__ = restored.__dict__
    base_cls = keras.optimizers.Optimizer
    base_cls.__getstate__ = _serialize_state
    base_cls.__setstate__ = _restore_state
| 517 | 24.9 | 62 | py |
MatchZoo | MatchZoo-master/matchzoo/utils/__init__.py | from .one_hot import one_hot
from .tensor_type import TensorType
from .list_recursive_subclasses import list_recursive_concrete_subclasses
from .make_keras_optimizer_picklable import make_keras_optimizer_picklable
| 214 | 42 | 74 | py |
MatchZoo | MatchZoo-master/matchzoo/contrib/models/esim.py | """ESIM model."""
import keras
import keras.backend as K
import tensorflow as tf
import matchzoo as mz
from matchzoo.engine.base_model import BaseModel
from matchzoo.engine.param import Param
from matchzoo.engine.param_table import ParamTable
class ESIM(BaseModel):
    """
    ESIM model.
    Enhanced Sequential Inference Model: encodes both texts with a BiLSTM,
    aligns them with soft attention, enhances the local matches, and
    composes the result with a second BiLSTM before pooling and an MLP.
    Examples:
        >>> model = ESIM()
        >>> task = classification_task = mz.tasks.Classification(num_classes=2)
        >>> model.params['task'] = task
        >>> model.params['input_shapes'] = [(20, ), (40, )]
        >>> model.params['lstm_dim'] = 300
        >>> model.params['mlp_num_units'] = 300
        >>> model.params['embedding_input_dim'] = 5000
        >>> model.params['embedding_output_dim'] = 10
        >>> model.params['embedding_trainable'] = False
        >>> model.params['mlp_num_layers'] = 0
        >>> model.params['mlp_num_fan_out'] = 300
        >>> model.params['mlp_activation_func'] = 'tanh'
        >>> model.params['mask_value'] = 0
        >>> model.params['dropout_rate'] = 0.5
        >>> model.params['optimizer'] = keras.optimizers.Adam(lr=4e-4)
        >>> model.guess_and_fill_missing_params()
        >>> model.build()
    """
    @classmethod
    def get_default_params(cls) -> ParamTable:
        """Get default parameters."""
        params = super().get_default_params(with_embedding=True,
                                            with_multi_layer_perceptron=True)
        params.add(Param(
            name='dropout_rate',
            value=0.5,
            desc="The dropout rate for all fully-connected layer"
        ))
        params.add(Param(
            name='lstm_dim',
            value=8,
            desc="The dimension of LSTM layer."
        ))
        params.add(Param(
            name='mask_value',
            value=0,
            desc="The value would be regarded as pad"
        ))
        return params
    def _expand_dim(self, inp: tf.Tensor, axis: int) -> keras.layers.Layer:
        """
        Wrap keras.backend.expand_dims into a Lambda layer.
        :param inp: input tensor to expand the dimension
        :param axis: the axis of new dimension
        :return: output tensor of the Lambda layer (one extra dimension)
        """
        return keras.layers.Lambda(lambda x: tf.expand_dims(x, axis=axis))(inp)
    def _make_atten_mask_layer(self) -> keras.layers.Layer:
        """
        Make mask layer for attention weight matrix so that
        each word won't pay attention to <PAD> timestep.
        """
        # Adds -1e7 to every logit whose mask entry is 0, pushing it toward
        # -inf so the subsequent softmax assigns it ~0 weight.
        return keras.layers.Lambda(
            lambda weight_mask: weight_mask[0] + (1.0 - weight_mask[1]) * -1e7,
            name="atten_mask")
    def _make_bilstm_layer(self, lstm_dim: int) -> keras.layers.Layer:
        """
        Bidirectional LSTM layer in ESIM.
        :param lstm_dim: int, dimension of LSTM layer
        :return: `keras.layers.Layer`.
        """
        # merge_mode='concat' makes the output feature size 2 * lstm_dim.
        return keras.layers.Bidirectional(
            layer=keras.layers.LSTM(lstm_dim, return_sequences=True),
            merge_mode='concat')
    def _max(self, texts: tf.Tensor, mask: tf.Tensor) -> tf.Tensor:
        """
        Compute the max of each text according to their real length
        :param texts: np.array with shape [B, T, H]
        :param mask: np.array with shape [B, T, ],
            where 1 means valid, 0 means pad
        """
        mask = self._expand_dim(mask, axis=2)
        # Zero out padded timesteps so they cannot win the max.
        # NOTE(review): this assumes valid activations are >= 0 at padded
        # positions' expense; confirm if activations can be all-negative.
        new_texts = keras.layers.Multiply()([texts, mask])
        text_max = keras.layers.Lambda(
            lambda x: tf.reduce_max(x, axis=1),
        )(new_texts)
        return text_max
    def _avg(self, texts: tf.Tensor, mask: tf.Tensor) -> tf.Tensor:
        """
        Compute the mean of each text according to their real length
        :param texts: np.array with shape [B, T, H]
        :param mask: np.array with shape [B, T, ],
            where 1 means valid, 0 means pad
        """
        mask = self._expand_dim(mask, axis=2)
        new_texts = keras.layers.Multiply()([texts, mask])
        # timestep-wise division, exclude the PAD number when calc avg
        text_avg = keras.layers.Lambda(
            lambda text_mask:
            tf.reduce_sum(text_mask[0], axis=1) / tf.reduce_sum(text_mask[1], axis=1),
        )([new_texts, mask])
        return text_avg
    def build(self):
        """Build model."""
        # parameters
        lstm_dim = self._params['lstm_dim']
        dropout_rate = self._params['dropout_rate']
        # layers
        # Binary mask: 1 where the token differs from `mask_value` (pad).
        create_mask = keras.layers.Lambda(
            lambda x:
            tf.cast(tf.not_equal(x, self._params['mask_value']), K.floatx())
        )
        embedding = self._make_embedding_layer()
        # Two separate BiLSTMs: one shared encoder, one shared composer.
        lstm_compare = self._make_bilstm_layer(lstm_dim)
        lstm_compose = self._make_bilstm_layer(lstm_dim)
        dense_compare = keras.layers.Dense(units=lstm_dim,
                                           activation='relu',
                                           use_bias=True)
        dropout = keras.layers.Dropout(dropout_rate)
        # model
        a, b = self._make_inputs()     # [B, T_a], [B, T_b]
        a_mask = create_mask(a)        # [B, T_a]
        b_mask = create_mask(b)        # [B, T_b]
        # encoding
        a_emb = dropout(embedding(a))  # [B, T_a, E_dim]
        b_emb = dropout(embedding(b))  # [B, T_b, E_dim]
        a_ = lstm_compare(a_emb)       # [B, T_a, H*2]
        b_ = lstm_compare(b_emb)       # [B, T_b, H*2]
        # mask a_ and b_, since the <pad> position is no more zero
        a_ = keras.layers.Multiply()([a_, self._expand_dim(a_mask, axis=2)])
        b_ = keras.layers.Multiply()([b_, self._expand_dim(b_mask, axis=2)])
        # local inference
        # e[i, j] is the (unnormalized) affinity of a_i with b_j.
        e = keras.layers.Dot(axes=-1)([a_, b_])  # [B, T_a, T_b]
        _ab_mask = keras.layers.Multiply()(      # _ab_mask: [B, T_a, T_b]
            [self._expand_dim(a_mask, axis=2),   # [B, T_a, 1]
             self._expand_dim(b_mask, axis=1)])  # [B, 1, T_b]
        pm = keras.layers.Permute((2, 1))
        mask_layer = self._make_atten_mask_layer()
        softmax_layer = keras.layers.Softmax(axis=-1)
        e_a = softmax_layer(mask_layer([e, _ab_mask]))           # [B, T_a, T_b]
        e_b = softmax_layer(mask_layer([pm(e), pm(_ab_mask)]))   # [B, T_b, T_a]
        # alignment (a_t = a~, b_t = b~ )
        a_t = keras.layers.Dot(axes=(2, 1))([e_a, b_])  # [B, T_a, H*2]
        b_t = keras.layers.Dot(axes=(2, 1))([e_b, a_])  # [B, T_b, H*2]
        # local inference info enhancement
        # Concatenate [x, x~, x - x~, x * x~] as in the ESIM formulation.
        m_a = keras.layers.Concatenate(axis=-1)([
            a_,
            a_t,
            keras.layers.Subtract()([a_, a_t]),
            keras.layers.Multiply()([a_, a_t])])  # [B, T_a, H*2*4]
        m_b = keras.layers.Concatenate(axis=-1)([
            b_,
            b_t,
            keras.layers.Subtract()([b_, b_t]),
            keras.layers.Multiply()([b_, b_t])])  # [B, T_b, H*2*4]
        # project m_a and m_b from 4*H*2 dim to H dim
        m_a = dropout(dense_compare(m_a))  # [B, T_a, H]
        m_b = dropout(dense_compare(m_b))  # [B, T_a, H]
        # inference composition
        v_a = lstm_compose(m_a)  # [B, T_a, H*2]
        v_b = lstm_compose(m_b)  # [B, T_b, H*2]
        # pooling
        # Average and max pooling over valid timesteps only.
        v_a = keras.layers.Concatenate(axis=-1)(
            [self._avg(v_a, a_mask), self._max(v_a, a_mask)])  # [B, H*4]
        v_b = keras.layers.Concatenate(axis=-1)(
            [self._avg(v_b, b_mask), self._max(v_b, b_mask)])  # [B, H*4]
        v = keras.layers.Concatenate(axis=-1)([v_a, v_b])      # [B, H*8]
        # mlp (multilayer perceptron) classifier
        output = self._make_multi_layer_perceptron_layer()(v)  # [B, H]
        output = dropout(output)
        output = self._make_output_layer()(output)  # [B, #classes]
        self._backend = keras.Model(inputs=[a, b], outputs=output)
| 7,807 | 35.657277 | 90 | py |
MatchZoo | MatchZoo-master/matchzoo/contrib/models/match_lstm.py | """Match LSTM model."""
import keras
import keras.backend as K
import tensorflow as tf
from matchzoo.engine.base_model import BaseModel
from matchzoo.engine.param import Param
from matchzoo.engine import hyper_spaces
class MatchLSTM(BaseModel):
    """
    Match LSTM model.
    Encodes each text with its own LSTM, attends the left encoding over the
    right one, then runs a merge LSTM over the concatenation to score the
    pair.
    Examples:
        >>> model = MatchLSTM()
        >>> model.guess_and_fill_missing_params(verbose=0)
        >>> model.params['embedding_input_dim'] = 10000
        >>> model.params['embedding_output_dim'] = 100
        >>> model.params['embedding_trainable'] = True
        >>> model.params['fc_num_units'] = 200
        >>> model.params['lstm_num_units'] = 256
        >>> model.params['dropout_rate'] = 0.5
        >>> model.build()
    """
    @classmethod
    def get_default_params(cls):
        """Get default parameters."""
        params = super().get_default_params(with_embedding=True)
        params.add(Param(
            'lstm_num_units', 256,
            hyper_space=hyper_spaces.quniform(low=128, high=384, q=32),
            desc="The hidden size in the LSTM layer."
        ))
        params.add(Param(
            'fc_num_units', 200,
            hyper_space=hyper_spaces.quniform(
                low=100, high=300, q=20),
            desc="The hidden size in the full connection layer."
        ))
        params.add(Param(
            'dropout_rate', 0.0,
            hyper_space=hyper_spaces.quniform(
                low=0.0, high=0.9, q=0.01),
            desc="The dropout rate."
        ))
        return params
    def build(self):
        """Build model."""
        input_left, input_right = self._make_inputs()
        # Static sequence lengths taken from the declared input shapes;
        # used below to tile the pairwise attention tensors.
        len_left = input_left.shape[1]
        len_right = input_right.shape[1]
        embedding = self._make_embedding_layer()
        embed_left = embedding(input_left)
        embed_right = embedding(input_right)
        lstm_left = keras.layers.LSTM(self._params['lstm_num_units'],
                                      return_sequences=True,
                                      name='lstm_left')
        lstm_right = keras.layers.LSTM(self._params['lstm_num_units'],
                                       return_sequences=True,
                                       name='lstm_right')
        encoded_left = lstm_left(embed_left)
        encoded_right = lstm_right(embed_right)
        def attention(tensors):
            """Attention layer."""
            left, right = tensors
            # Tile both encodings to form all (left_i, right_j) pairs.
            tensor_left = tf.expand_dims(left, axis=2)
            tensor_right = tf.expand_dims(right, axis=1)
            tensor_left = K.repeat_elements(tensor_left, len_right, 2)
            tensor_right = K.repeat_elements(tensor_right, len_left, 1)
            tensor_merged = tf.concat([tensor_left, tensor_right], axis=-1)
            # NOTE(review): these Dense layers are instantiated inside a
            # Lambda closure; their weights may not be tracked by the outer
            # keras Model — confirm they are actually trained.
            middle_output = keras.layers.Dense(self._params['fc_num_units'],
                                               activation='tanh')(
                tensor_merged)
            attn_scores = keras.layers.Dense(1)(middle_output)
            attn_scores = tf.squeeze(attn_scores, axis=3)
            # Manual numerically-stable softmax: subtract the row max
            # before exponentiating.
            exp_attn_scores = tf.math.exp(
                attn_scores - tf.reduce_max(attn_scores, axis=-1, keepdims=True))
            exp_sum = tf.reduce_sum(exp_attn_scores, axis=-1, keepdims=True)
            attention_weights = exp_attn_scores / exp_sum
            return K.batch_dot(attention_weights, right)
        attn_layer = keras.layers.Lambda(attention)
        left_attn_vec = attn_layer([encoded_left, encoded_right])
        # Concatenate along the time axis and compose with a second LSTM.
        concat = keras.layers.Concatenate(axis=1)(
            [left_attn_vec, encoded_right])
        lstm_merge = keras.layers.LSTM(self._params['lstm_num_units'] * 2,
                                       return_sequences=False,
                                       name='lstm_merge')
        merged = lstm_merge(concat)
        dropout = keras.layers.Dropout(
            rate=self._params['dropout_rate'])(merged)
        phi = keras.layers.Dense(self._params['fc_num_units'],
                                 activation='tanh')(dropout)
        inputs = [input_left, input_right]
        out = self._make_output_layer()(phi)
        self._backend = keras.Model(inputs=inputs, outputs=[out])
| 4,182 | 39.221154 | 81 | py |
MatchZoo | MatchZoo-master/matchzoo/contrib/models/hbmp.py | """HBMP model."""
import keras
import typing
from matchzoo.engine import hyper_spaces
from matchzoo.engine.param_table import ParamTable
from matchzoo.engine.param import Param
from matchzoo.engine.base_model import BaseModel
class HBMP(BaseModel):
    """
    HBMP model.
    Encodes each text with a hierarchy of three BiLSTM + max-pooling blocks,
    combines the two sentence embeddings with difference/product features,
    and classifies with a small MLP.
    Examples:
        >>> model = HBMP()
        >>> model.guess_and_fill_missing_params(verbose=0)
        >>> model.params['embedding_input_dim'] = 200
        >>> model.params['embedding_output_dim'] = 100
        >>> model.params['embedding_trainable'] = True
        >>> model.params['alpha'] = 0.1
        >>> model.params['mlp_num_layers'] = 3
        >>> model.params['mlp_num_units'] = [10, 10]
        >>> model.params['lstm_num_units'] = 5
        >>> model.params['dropout_rate'] = 0.1
        >>> model.build()
    """
    @classmethod
    def get_default_params(cls) -> ParamTable:
        """:return: model default parameters."""
        params = super().get_default_params(with_embedding=True)
        params['optimizer'] = 'adam'
        params.add(Param(name='alpha', value=0.1,
                         desc="Negative slope coefficient of LeakyReLU "
                              "function."))
        params.add(Param(name='mlp_num_layers', value=3,
                         desc="The number of layers of mlp."))
        params.add(Param(name='mlp_num_units', value=[10, 10],
                         desc="The hidden size of the FC layers, but not "
                              "include the final layer."))
        params.add(Param(name='lstm_num_units', value=5,
                         desc="The hidden size of the LSTM layer."))
        params.add(Param(
            name='dropout_rate', value=0.1,
            hyper_space=hyper_spaces.quniform(
                low=0.0, high=0.8, q=0.01),
            desc="The dropout rate."
        ))
        return params
    def build(self):
        """Build model structure."""
        input_left, input_right = self._make_inputs()
        embedding = self._make_embedding_layer()
        embed_left = embedding(input_left)
        embed_right = embedding(input_right)
        # Get sentence embedding
        embed_sen_left = self._sentence_encoder(
            embed_left,
            lstm_num_units=self._params['lstm_num_units'],
            drop_rate=self._params['dropout_rate'])
        embed_sen_right = self._sentence_encoder(
            embed_right,
            lstm_num_units=self._params['lstm_num_units'],
            drop_rate=self._params['dropout_rate'])
        # Concatenate two sentence embedding: [embed_sen_left, embed_sen_right,
        # |embed_sen_left-embed_sen_right|, embed_sen_left*embed_sen_right]
        embed_minus = keras.layers.Subtract()(
            [embed_sen_left, embed_sen_right])
        embed_minus_abs = keras.layers.Lambda(lambda x: abs(x))(embed_minus)
        embed_multiply = keras.layers.Multiply()(
            [embed_sen_left, embed_sen_right])
        concat = keras.layers.Concatenate(axis=1)(
            [embed_sen_left, embed_sen_right, embed_minus_abs, embed_multiply])
        # Multiply perception layers to classify
        mlp_out = self._classifier(
            concat,
            mlp_num_layers=self._params['mlp_num_layers'],
            mlp_num_units=self._params['mlp_num_units'],
            drop_rate=self._params['dropout_rate'],
            leaky_relu_alpah=self._params['alpha'])
        out = self._make_output_layer()(mlp_out)
        self._backend = keras.Model(
            inputs=[input_left, input_right], outputs=out)
    def _classifier(
        self,
        input_: typing.Any,
        mlp_num_layers: int,
        mlp_num_units: list,
        drop_rate: float,
        leaky_relu_alpah: float  # NOTE: typo'd name ("alpah") kept for compat
    ) -> typing.Any:
        """
        Stack `mlp_num_layers - 1` Dropout -> Dense -> LeakyReLU blocks.
        The final output layer is added separately by the caller.
        :param input_: input tensor.
        :param mlp_num_layers: total number of MLP layers (one less hidden
            block than this is built here).
        :param mlp_num_units: hidden sizes, indexed per block.
        :param drop_rate: dropout rate applied before each Dense layer.
        :param leaky_relu_alpah: LeakyReLU negative slope.
        :return: output tensor of the last hidden block.
        """
        for i in range(mlp_num_layers - 1):
            input_ = keras.layers.Dropout(rate=drop_rate)(input_)
            input_ = keras.layers.Dense(mlp_num_units[i])(input_)
            input_ = keras.layers.LeakyReLU(alpha=leaky_relu_alpah)(input_)
        return input_
    def _sentence_encoder(
        self,
        input_: typing.Any,
        lstm_num_units: int,
        drop_rate: float
    ) -> typing.Any:
        """
        Stack three BiLSTM MaxPooling blocks as a hierarchical structure.
        Concatenate the output of three blocs as the input sentence embedding.
        Each BiLSTM layer reads the input sentence as the input.
        Each BiLSTM layer except the first one is initialized(the initial
        hidden state and the cell state) with the final state of the previous
        layer.
        """
        # With return_state=True a Bidirectional LSTM returns
        # [sequences, fwd_h, fwd_c, bwd_h, bwd_c]; element [0] feeds the
        # pooling and [1:5] seeds the next block's initial state.
        emb1 = keras.layers.Bidirectional(
            keras.layers.LSTM(
                units=lstm_num_units,
                return_sequences=True,
                return_state=True,
                dropout=drop_rate,
                recurrent_dropout=drop_rate),
            merge_mode='concat')(input_)
        emb1_maxpooling = keras.layers.GlobalMaxPooling1D()(emb1[0])
        emb2 = keras.layers.Bidirectional(
            keras.layers.LSTM(
                units=lstm_num_units,
                return_sequences=True,
                return_state=True,
                dropout=drop_rate,
                recurrent_dropout=drop_rate),
            merge_mode='concat')(input_, initial_state=emb1[1:5])
        emb2_maxpooling = keras.layers.GlobalMaxPooling1D()(emb2[0])
        emb3 = keras.layers.Bidirectional(
            keras.layers.LSTM(
                units=lstm_num_units,
                return_sequences=True,
                return_state=True,
                dropout=drop_rate,
                recurrent_dropout=drop_rate),
            merge_mode='concat')(input_, initial_state=emb2[1:5])
        emb3_maxpooling = keras.layers.GlobalMaxPooling1D()(emb3[0])
        emb = keras.layers.Concatenate(axis=1)(
            [emb1_maxpooling, emb2_maxpooling, emb3_maxpooling])
        return emb
| 5,896 | 37.045161 | 79 | py |
MatchZoo | MatchZoo-master/matchzoo/contrib/models/diin.py | """DIIN model."""
import typing
import keras
import keras.backend as K
import tensorflow as tf
from matchzoo import preprocessors
from matchzoo.contrib.layers import DecayingDropoutLayer
from matchzoo.contrib.layers import EncodingLayer
from matchzoo.engine import hyper_spaces
from matchzoo.engine.base_model import BaseModel
from matchzoo.engine.param import Param
from matchzoo.engine.param_table import ParamTable
class DIIN(BaseModel):
    """
    DIIN model.
    Densely Interactive Inference Network: embeds words, characters and
    exact-match flags, encodes both texts, builds an element-wise
    interaction tensor, and extracts features with a DenseNet.
    Examples:
        >>> model = DIIN()
        >>> model.guess_and_fill_missing_params()
        >>> model.params['embedding_input_dim'] = 10000
        >>> model.params['embedding_output_dim'] = 300
        >>> model.params['embedding_trainable'] = True
        >>> model.params['optimizer'] = 'adam'
        >>> model.params['dropout_initial_keep_rate'] = 1.0
        >>> model.params['dropout_decay_interval'] = 10000
        >>> model.params['dropout_decay_rate'] = 0.977
        >>> model.params['char_embedding_input_dim'] = 100
        >>> model.params['char_embedding_output_dim'] = 8
        >>> model.params['char_conv_filters'] = 100
        >>> model.params['char_conv_kernel_size'] = 5
        >>> model.params['first_scale_down_ratio'] = 0.3
        >>> model.params['nb_dense_blocks'] = 3
        >>> model.params['layers_per_dense_block'] = 8
        >>> model.params['growth_rate'] = 20
        >>> model.params['transition_scale_down_ratio'] = 0.5
        >>> model.build()
    """
    @classmethod
    def get_default_params(cls) -> ParamTable:
        """:return: model default parameters."""
        params = super().get_default_params(with_embedding=True)
        params['optimizer'] = 'adam'
        params.add(Param(name='dropout_decay_interval', value=10000,
                         desc="The decay interval of decaying_dropout."))
        params.add(Param(name='char_embedding_input_dim', value=100,
                         desc="The input dimension of character embedding "
                              "layer."))
        params.add(Param(name='char_embedding_output_dim', value=2,
                         desc="The output dimension of character embedding "
                              "layer."))
        params.add(Param(name='char_conv_filters', value=8,
                         desc="The filter size of character convolution "
                              "layer."))
        params.add(Param(name='char_conv_kernel_size', value=2,
                         desc="The kernel size of character convolution "
                              "layer."))
        params.add(Param(name='first_scale_down_ratio', value=0.3,
                         desc="The channel scale down ratio of the "
                              "convolution layer before densenet."))
        params.add(Param(name='nb_dense_blocks', value=1,
                         desc="The number of blocks in densenet."))
        params.add(Param(name='layers_per_dense_block', value=2,
                         desc="The number of convolution layers in dense "
                              "block."))
        params.add(Param(name='growth_rate', value=2,
                         desc="The filter size of each convolution layer in "
                              "dense block."))
        params.add(Param(name='transition_scale_down_ratio', value=0.5,
                         desc="The channel scale down ratio of the "
                              "convolution layer in transition block."))
        params.add(Param(
            name='dropout_initial_keep_rate', value=1.0,
            hyper_space=hyper_spaces.quniform(
                low=0.8, high=1.0, q=0.02),
            desc="The initial keep rate of decaying_dropout."
        ))
        params.add(Param(
            name='dropout_decay_rate', value=0.97,
            hyper_space=hyper_spaces.quniform(
                low=0.90, high=0.99, q=0.01),
            desc="The decay rate of decaying_dropout."
        ))
        return params
    @classmethod
    def get_default_preprocessor(cls):
        """:return: Default preprocessor."""
        return preprocessors.DIINPreprocessor()
    def guess_and_fill_missing_params(self, verbose: int = 1):
        """
        Guess and fill missing parameters in :attr:'params'.
        Use this method to automatically fill-in hyper parameters.
        This involves some guessing so the parameter it fills could be
        wrong. For example, the default task is 'Ranking', and if we do not
        set it to 'Classification' manually for data packs prepared for
        classification, then the shape of the model output and the data will
        mismatch.
        :param verbose: Verbosity.
        """
        # Six inputs: text L/R, char L/R, exact-match L/R (see build()).
        self._params.get('input_shapes').set_default([(32,),
                                                      (32,),
                                                      (32, 16),
                                                      (32, 16),
                                                      (32,),
                                                      (32,)], verbose)
        super().guess_and_fill_missing_params(verbose)
    def _make_inputs(self) -> list:
        """Create the six symbolic inputs declared in `input_shapes`."""
        text_left = keras.layers.Input(
            name='text_left',
            shape=self._params['input_shapes'][0]
        )
        text_right = keras.layers.Input(
            name='text_right',
            shape=self._params['input_shapes'][1]
        )
        char_left = keras.layers.Input(
            name='char_left',
            shape=self._params['input_shapes'][2]
        )
        char_right = keras.layers.Input(
            name='char_right',
            shape=self._params['input_shapes'][3]
        )
        match_left = keras.layers.Input(
            name='match_left',
            shape=self._params['input_shapes'][4]
        )
        match_right = keras.layers.Input(
            name='match_right',
            shape=self._params['input_shapes'][5]
        )
        return [text_left, text_right,
                char_left, char_right,
                match_left, match_right]
    def build(self):
        """Build model structure."""
        # Scalar dimensions referenced here:
        #   B = batch size (number of sequences)
        #   D = word embedding size
        #   L = 'input_left' sequence length
        #   R = 'input_right' sequence length
        #   C = fixed word length
        inputs = self._make_inputs()
        # Left text and right text.
        # shape = [B, L]
        # shape = [B, R]
        text_left, text_right = inputs[0:2]
        # Left character and right character.
        # shape = [B, L, C]
        # shape = [B, R, C]
        char_left, char_right = inputs[2:4]
        # Left exact match and right exact match.
        # shape = [B, L]
        # shape = [B, R]
        match_left, match_right = inputs[4:6]
        # Embedding module
        left_embeddings = []
        right_embeddings = []
        # Word embedding feature
        word_embedding = self._make_embedding_layer()
        # shape = [B, L, D]
        left_word_embedding = word_embedding(text_left)
        # shape = [B, R, D]
        right_word_embedding = word_embedding(text_right)
        left_word_embedding = DecayingDropoutLayer(
            initial_keep_rate=self._params['dropout_initial_keep_rate'],
            decay_interval=self._params['dropout_decay_interval'],
            decay_rate=self._params['dropout_decay_rate']
        )(left_word_embedding)
        right_word_embedding = DecayingDropoutLayer(
            initial_keep_rate=self._params['dropout_initial_keep_rate'],
            decay_interval=self._params['dropout_decay_interval'],
            decay_rate=self._params['dropout_decay_rate']
        )(right_word_embedding)
        left_embeddings.append(left_word_embedding)
        right_embeddings.append(right_word_embedding)
        # Exact match feature
        # shape = [B, L, 1]
        left_exact_match = keras.layers.Reshape(
            target_shape=(K.int_shape(match_left)[1], 1,)
        )(match_left)
        # shape = [B, R, 1]
        # Bug fix: the reshape of the *right* match flags must use the
        # right input's length; the original used match_left's, which only
        # worked when both sides had the same sequence length.
        right_exact_match = keras.layers.Reshape(
            target_shape=(K.int_shape(match_right)[1], 1,)
        )(match_right)
        left_embeddings.append(left_exact_match)
        right_embeddings.append(right_exact_match)
        # Char embedding feature
        char_embedding = self._make_char_embedding_layer()
        char_embedding.build(
            input_shape=(None, None, K.int_shape(char_left)[-1]))
        left_char_embedding = char_embedding(char_left)
        right_char_embedding = char_embedding(char_right)
        left_embeddings.append(left_char_embedding)
        right_embeddings.append(right_char_embedding)
        # Concatenate word, exact-match and char features per side.
        left_embedding = keras.layers.Concatenate()(left_embeddings)
        right_embedding = keras.layers.Concatenate()(right_embeddings)
        d = K.int_shape(left_embedding)[-1]
        # Encoding module
        left_encoding = EncodingLayer(
            initial_keep_rate=self._params['dropout_initial_keep_rate'],
            decay_interval=self._params['dropout_decay_interval'],
            decay_rate=self._params['dropout_decay_rate']
        )(left_embedding)
        right_encoding = EncodingLayer(
            initial_keep_rate=self._params['dropout_initial_keep_rate'],
            decay_interval=self._params['dropout_decay_interval'],
            decay_rate=self._params['dropout_decay_rate']
        )(right_embedding)
        # Interaction module: element-wise product of all (left, right)
        # timestep pairs, shape [B, L, R, d].
        interaction = keras.layers.Lambda(self._make_interaction)(
            [left_encoding, right_encoding])
        # Feature extraction module
        # 1x1 conv scales the channel count down before the DenseNet.
        feature_extractor_input = keras.layers.Conv2D(
            filters=int(d * self._params['first_scale_down_ratio']),
            kernel_size=(1, 1),
            activation=None)(interaction)
        feature_extractor = self._create_densenet()
        features = feature_extractor(feature_extractor_input)
        # Output module
        features = DecayingDropoutLayer(
            initial_keep_rate=self._params['dropout_initial_keep_rate'],
            decay_interval=self._params['dropout_decay_interval'],
            decay_rate=self._params['dropout_decay_rate'])(features)
        out = self._make_output_layer()(features)
        self._backend = keras.Model(inputs=inputs, outputs=out)
    def _make_char_embedding_layer(self) -> keras.layers.Layer:
        """
        Apply embedding, conv and maxpooling operation over time dimension
        for each token to obtain a vector.
        :return: Wrapper Keras 'Layer' as character embedding feature
            extractor.
        """
        return keras.layers.TimeDistributed(keras.Sequential([
            keras.layers.Embedding(
                input_dim=self._params['char_embedding_input_dim'],
                output_dim=self._params['char_embedding_output_dim'],
                input_length=self._params['input_shapes'][2][-1]),
            keras.layers.Conv1D(
                filters=self._params['char_conv_filters'],
                kernel_size=self._params['char_conv_kernel_size']),
            keras.layers.GlobalMaxPooling1D()]))
    def _make_interaction(self, inputs_) -> typing.Any:
        """Broadcasted element-wise product of the two encodings."""
        left_encoding = inputs_[0]
        right_encoding = inputs_[1]
        # [B, L, 1, d] * [B, 1, R, d] -> [B, L, R, d]
        left_encoding = tf.expand_dims(left_encoding, axis=2)
        right_encoding = tf.expand_dims(right_encoding, axis=1)
        interaction = left_encoding * right_encoding
        return interaction
    def _create_densenet(self) -> typing.Callable:
        """
        DenseNet is consisted of 'nb_dense_blocks' sets of Dense block
        and Transition block pair.
        :return: Wrapper Keras 'Layer' as DenseNet, tensor in tensor out.
        """
        def _wrapper(x):
            for _ in range(self._params['nb_dense_blocks']):
                # Dense block
                # Apply 'layers_per_dense_block' convolution layers,
                # concatenating each layer's output onto its input
                # (dense connectivity).
                for _ in range(self._params['layers_per_dense_block']):
                    out_conv = keras.layers.Conv2D(
                        filters=self._params['growth_rate'],
                        kernel_size=(3, 3),
                        padding='same',
                        activation='relu')(x)
                    x = keras.layers.Concatenate(axis=-1)([x, out_conv])
                # Transition block
                # Apply a convolution layer and a maxpooling layer.
                scale_down_ratio = self._params['transition_scale_down_ratio']
                nb_filter = int(K.int_shape(x)[-1] * scale_down_ratio)
                x = keras.layers.Conv2D(
                    filters=nb_filter,
                    kernel_size=(1, 1),
                    padding='same',
                    activation=None)(x)
                x = keras.layers.MaxPool2D(strides=(2, 2))(x)
            out_densenet = keras.layers.Flatten()(x)
            return out_densenet
        return _wrapper
| 12,960 | 40.27707 | 78 | py |
MatchZoo | MatchZoo-master/matchzoo/contrib/models/match_srnn.py | """An implementation of Match-SRNN Model."""
import keras
from matchzoo.contrib.layers import MatchingTensorLayer
from matchzoo.contrib.layers import SpatialGRU
from matchzoo.engine import hyper_spaces
from matchzoo.engine.base_model import BaseModel
from matchzoo.engine.param import Param
from matchzoo.engine.param_table import ParamTable
class MatchSRNN(BaseModel):
    """
    Match-SRNN Model.
    Builds a multi-channel word-interaction tensor between the two texts
    and scans it with a spatial (2D) GRU to model the recursive matching
    structure.
    Examples:
        >>> model = MatchSRNN()
        >>> model.params['channels'] = 4
        >>> model.params['units'] = 10
        >>> model.params['dropout_rate'] = 0.0
        >>> model.params['direction'] = 'lt'
        >>> model.guess_and_fill_missing_params(verbose=0)
        >>> model.build()
    """
    @classmethod
    def get_default_params(cls) -> ParamTable:
        """:return: model default parameters."""
        params = super().get_default_params(with_embedding=True)
        params.add(Param(name='channels', value=4,
                         desc="Number of word interaction tensor channels"))
        params.add(Param(name='units', value=10,
                         desc="Number of SpatialGRU units"))
        params.add(Param(name='direction', value='lt',
                         desc="Direction of SpatialGRU scanning"))
        params.add(Param(
            name='dropout_rate', value=0.0,
            hyper_space=hyper_spaces.quniform(low=0.0, high=0.8,
                                              q=0.01),
            desc="The dropout rate."
        ))
        return params
    def build(self):
        """
        Build model structure.
        Match-SRNN: Modeling the Recursive Matching Structure
        with Spatial RNN
        """
        # Scalar dimensions referenced here:
        #   B = batch size (number of sequences)
        #   D = embedding size
        #   L = `input_left` sequence length
        #   R = `input_right` sequence length
        #   C = number of channels
        # Left input and right input.
        # query = [B, L]
        # doc = [B, R]
        query, doc = self._make_inputs()
        # Process left and right input.
        # embed_query = [B, L, D]
        # embed_doc = [B, R, D]
        embedding = self._make_embedding_layer()
        embed_query = embedding(query)
        embed_doc = embedding(doc)
        # Get matching tensor
        # matching_tensor = [B, C, L, R]
        matching_tensor_layer = MatchingTensorLayer(
            channels=self._params['channels'])
        matching_tensor = matching_tensor_layer([embed_query, embed_doc])
        # Apply spatial GRU to the word level interaction tensor
        # h_ij = [B, U]
        # The SpatialGRU scans the L x R grid in the configured direction
        # and its final hidden state summarizes the whole matching grid.
        spatial_gru = SpatialGRU(
            units=self._params['units'],
            direction=self._params['direction'])
        h_ij = spatial_gru(matching_tensor)
        # Apply Dropout
        x = keras.layers.Dropout(
            rate=self._params['dropout_rate'])(h_ij)
        # Make output layer
        x_out = self._make_output_layer()(x)
        self._backend = keras.Model(inputs=[query, doc], outputs=x_out)
| 3,058 | 31.542553 | 76 | py |
MatchZoo | MatchZoo-master/matchzoo/contrib/models/bimpm.py | """BiMPM."""
from keras.models import Model
from keras.layers import Dense, Concatenate, Dropout
from keras.layers import Bidirectional, LSTM
from matchzoo.engine.param import Param
from matchzoo.engine.param_table import ParamTable
from matchzoo.engine.base_model import BaseModel
from matchzoo.contrib.layers import MultiPerspectiveLayer
class BiMPM(BaseModel):
    """
    BiMPM.
    Bilateral Multi-Perspective Matching model for text matching.
    Reference:
    https://github.com/zhiguowang/BiMPM/blob/master/src/SentenceMatchModelGraph.py#L43-L186
    Examples:
        >>> import matchzoo as mz
        >>> model = mz.contrib.models.BiMPM()
        >>> model.guess_and_fill_missing_params(verbose=0)
        >>> model.build()
    """
    @classmethod
    def get_default_params(cls) -> ParamTable:
        """:return: model default parameters."""
        # `with_embedding=True` adds the standard embedding-related params
        # (input shapes, embedding matrix, trainability) from BaseModel.
        params = super().get_default_params(with_embedding=True)
        params['optimizer'] = 'adam'
        # params.add(Param('dim_word_embedding', 50))
        # TODO(tjf): remove unused params in the final version
        # params.add(Param('dim_char_embedding', 50))
        # params.add(Param('word_embedding_mat'))
        # params.add(Param('char_embedding_mat'))
        # params.add(Param('embedding_random_scale', 0.2))
        # params.add(Param('activation_embedding', 'softmax'))
        # BiMPM Setting
        # `perspective` toggles the four matching strategies applied by
        # MultiPerspectiveLayer (see section 3.2 of the BiMPM paper).
        params.add(Param('perspective', {'full': True,
                                         'max-pooling': True,
                                         'attentive': True,
                                         'max-attentive': True}))
        params.add(Param('mp_dim', 3))
        params.add(Param('att_dim', 3))
        params.add(Param('hidden_size', 4))
        params.add(Param('dropout_rate', 0.0))
        params.add(Param('w_initializer', 'glorot_uniform'))
        params.add(Param('b_initializer', 'zeros'))
        params.add(Param('activation_hidden', 'linear'))
        params.add(Param('with_match_highway', False))
        params.add(Param('with_aggregation_highway', False))
        return params
    def build(self):
        """
        Build model structure.

        Pipeline: inputs -> shared embedding (+ dropout) -> shared BiLSTM
        context encoding -> multi-perspective matching (both directions)
        -> dropout -> BiLSTM aggregation -> two dense layers -> output.
        """
        # ~ Input Layer
        input_left, input_right = self._make_inputs()
        # Word Representation Layer
        # TODO: concatenate word level embedding and character level embedding.
        # The same embedding layer is shared by both inputs.
        embedding = self._make_embedding_layer()
        embed_left = embedding(input_left)
        embed_right = embedding(input_right)
        # L119-L121
        # https://github.com/zhiguowang/BiMPM/blob/master/src/SentenceMatchModelGraph.py#L119-L121
        embed_left = Dropout(self._params['dropout_rate'])(embed_left)
        embed_right = Dropout(self._params['dropout_rate'])(embed_right)
        # ~ Word Level Matching Layer
        # Reference:
        # https://github.com/zhiguowang/BiMPM/blob/master/src/match_utils.py#L207-L223
        # TODO
        pass
        # ~ Encoding Layer
        # Note: When merge_mode = None, output will be [forward, backward],
        # The default merge_mode is concat, and the output will be [lstm].
        # If with return_state, then the output would append [h,c,h,c].
        # The encoder is shared between the two sides.
        bi_lstm = Bidirectional(
            LSTM(self._params['hidden_size'],
                 return_sequences=True,
                 return_state=True,
                 dropout=self._params['dropout_rate'],
                 kernel_initializer=self._params['w_initializer'],
                 bias_initializer=self._params['b_initializer']),
            merge_mode='concat')
        # x_left = [lstm_lt, forward_h_lt, _, backward_h_lt, _ ]
        x_left = bi_lstm(embed_left)
        x_right = bi_lstm(embed_right)
        # ~ Multi-Perspective Matching layer.
        # Output is two sequence of vectors.
        # Cons: Haven't support multiple context layer
        multi_perspective = MultiPerspectiveLayer(self._params['att_dim'],
                                                  self._params['mp_dim'],
                                                  self._params['perspective'])
        # Note: input to `keras layer` must be list of tensors.
        # `x_left + x_right` is list concatenation: 5 left tensors followed
        # by 5 right tensors; the layer splits them back internally.
        mp_left = multi_perspective(x_left + x_right)
        mp_right = multi_perspective(x_right + x_left)
        # ~ Dropout Layer
        mp_left = Dropout(self._params['dropout_rate'])(mp_left)
        mp_right = Dropout(self._params['dropout_rate'])(mp_right)
        # ~ Highway Layer
        # reference:
        # https://github.com/zhiguowang/BiMPM/blob/master/src/match_utils.py#L289-L295
        if self._params['with_match_highway']:
            # the input is left matching representations (question / passage)
            pass
        # ~ Aggregation layer
        # TODO: mask the above layer
        # return_sequences=False: only the final concatenated state is kept.
        aggregation = Bidirectional(
            LSTM(self._params['hidden_size'],
                 return_sequences=False,
                 return_state=False,
                 dropout=self._params['dropout_rate'],
                 kernel_initializer=self._params['w_initializer'],
                 bias_initializer=self._params['b_initializer']),
            merge_mode='concat')
        rep_left = aggregation(mp_left)
        rep_right = aggregation(mp_right)
        # Concatenate the concatenated vector of left and right.
        x = Concatenate()([rep_left, rep_right])
        # ~ Highway Network
        # reference:
        # https://github.com/zhiguowang/BiMPM/blob/master/src/match_utils.py#L289-L295
        if self._params['with_aggregation_highway']:
            pass
        # ~ Prediction layer.
        # reference:
        # https://github.com/zhiguowang/BiMPM/blob/master/src/SentenceMatchModelGraph.py#L140-L153
        x = Dense(self._params['hidden_size'],
                  activation=self._params['activation_hidden'])(x)
        x = Dense(self._params['hidden_size'],
                  activation=self._params['activation_hidden'])(x)
        x_out = self._make_output_layer()(x)
        self._backend = Model(inputs=[input_left, input_right],
                              outputs=x_out)
| 6,010 | 39.073333 | 98 | py |
MatchZoo | MatchZoo-master/matchzoo/contrib/layers/decaying_dropout_layer.py | """An implementation of Decaying Dropout Layer."""
import tensorflow as tf
from keras import backend as K
from keras.engine import Layer
class DecayingDropoutLayer(Layer):
    """
    Dropout whose keep rate decays exponentially during training.
    After ``t`` training updates the effective keep rate is
    ``initial_keep_rate * decay_rate ** (t / decay_interval)``.
    :param initial_keep_rate: the initial keep rate of decaying dropout.
    :param decay_interval: the decay interval of decaying dropout.
    :param decay_rate: the decay rate of decaying dropout.
    :param noise_shape: a 1D integer tensor representing the shape of the
        binary dropout mask that will be multiplied with the input.
    :param seed: a python integer to use as random seed.
    :param kwargs: standard layer keyword arguments.
    Examples:
        >>> import matchzoo as mz
        >>> layer = mz.contrib.layers.DecayingDropoutLayer(
        ...     initial_keep_rate=1.0,
        ...     decay_interval=10000,
        ...     decay_rate=0.977,
        ... )
        >>> num_batch, num_dim =5, 10
        >>> layer.build([num_batch, num_dim])
    """
    def __init__(self,
                 initial_keep_rate: float = 1.0,
                 decay_interval: int = 10000,
                 decay_rate: float = 0.977,
                 noise_shape=None,
                 seed=None,
                 **kwargs):
        """:class: 'DecayingDropoutLayer' constructor."""
        super(DecayingDropoutLayer, self).__init__(**kwargs)
        self._iterations = None
        self._initial_keep_rate = initial_keep_rate
        self._decay_interval = decay_interval
        # Clamp the decay rate into [0, 1].
        self._decay_rate = max(0.0, min(1.0, decay_rate))
        self._noise_shape = noise_shape
        self._seed = seed
    def _get_noise_shape(self, inputs):
        # Resolve any `None` entries of the requested noise shape against
        # the dynamic shape of `inputs`; `None` means "use the input shape".
        if self._noise_shape is None:
            return None
        dynamic_shape = tf.shape(inputs)
        return tuple(dim if dim is not None else dynamic_shape[axis]
                     for axis, dim in enumerate(self._noise_shape))
    def build(self, input_shape):
        """
        Build the layer.
        :param input_shape: the shape of the input tensor,
            for DecayingDropoutLayer we need one input tensor.
        """
        # Non-trainable step counter, incremented once per training batch.
        self._iterations = self.add_weight(name='iterations',
                                           shape=(1,),
                                           dtype=K.floatx(),
                                           initializer='zeros',
                                           trainable=False)
        super(DecayingDropoutLayer, self).build(input_shape)
    def call(self, inputs, training=None):
        """
        The computation logic of DecayingDropoutLayer.
        :param inputs: an input tensor.
        """
        mask_shape = self._get_noise_shape(inputs)
        step = tf.cast(self._iterations, K.floatx()) + 1
        exponent = step / float(self._decay_interval)
        keep_rate = self._initial_keep_rate * tf.pow(self._decay_rate,
                                                     exponent)
        def dropped_inputs():
            # Bump the step counter, then drop with the decayed rate.
            increment = self._iterations.assign_add([1])
            with tf.control_dependencies([increment]):
                return tf.nn.dropout(inputs, 1 - keep_rate[0], mask_shape,
                                     seed=self._seed)
        # Dropout only in the training phase; identity at inference time.
        return K.in_train_phase(dropped_inputs, inputs, training=training)
    def get_config(self):
        """Get the config dict of DecayingDropoutLayer."""
        own_config = {'initial_keep_rate': self._initial_keep_rate,
                      'decay_interval': self._decay_interval,
                      'decay_rate': self._decay_rate,
                      'noise_shape': self._noise_shape,
                      'seed': self._seed}
        merged = dict(super(DecayingDropoutLayer, self).get_config())
        merged.update(own_config)
        return merged
| 3,805 | 37.06 | 75 | py |
MatchZoo | MatchZoo-master/matchzoo/contrib/layers/spatial_gru.py | """An implementation of Spatial GRU Layer."""
import typing
import tensorflow as tf
from keras import backend as K
from keras.engine import Layer
from keras.layers import Permute
from keras.layers import Reshape
from keras import activations
from keras import initializers
class SpatialGRU(Layer):
    """
    Spatial GRU layer.
    Scans a 2-D matching grid (left position x right position) with a GRU
    whose state at cell (i, j) depends on the states at (i-1, j), (i, j-1)
    and (i-1, j-1); the final cell state is returned.
    :param units: Number of SpatialGRU units.
    :param activation: Activation function to use. Default:
        hyperbolic tangent (`tanh`). If you pass `None`, no
        activation is applied (ie. "linear" activation: `a(x) = x`).
    :param recurrent_activation: Activation function to use for
        the recurrent step. Default: sigmoid (`sigmoid`).
        If you pass `None`, no activation is applied (ie. "linear"
        activation: `a(x) = x`).
    :param kernel_initializer: Initializer for the `kernel` weights
        matrix, used for the linear transformation of the inputs.
    :param recurrent_initializer: Initializer for the `recurrent_kernel`
        weights matrix, used for the linear transformation of the
        recurrent state.
    :param direction: Scanning direction. `lt` (i.e., left top)
        indicates the scanning from left top to right bottom, and
        `rb` (i.e., right bottom) indicates the scanning from
        right bottom to left top.
    :param kwargs: Standard layer keyword arguments.
    Examples:
        >>> import matchzoo as mz
        >>> layer = mz.contrib.layers.SpatialGRU(units=10,
        ...                                      direction='lt')
        >>> num_batch, channel, left_len, right_len = 5, 5, 3, 2
        >>> layer.build([num_batch, channel, left_len, right_len])
    """
    def __init__(
        self,
        units: int = 10,
        activation: str = 'tanh',
        recurrent_activation: str = 'sigmoid',
        kernel_initializer: str = 'glorot_uniform',
        recurrent_initializer: str = 'orthogonal',
        direction: str = 'lt',
        **kwargs
    ):
        """:class:`SpatialGRU` constructor."""
        super().__init__(**kwargs)
        self._units = units
        self._activation = activations.get(activation)
        self._recurrent_activation = activations.get(recurrent_activation)
        self._kernel_initializer = initializers.get(kernel_initializer)
        self._recurrent_initializer = initializers.get(recurrent_initializer)
        self._direction = direction
    def build(self, input_shape: typing.Any):
        """
        Build the layer.
        :param input_shape: the shapes of the input tensors.
        """
        # Scalar dimensions referenced here:
        #   B = batch size (number of sequences)
        #   L = `input_left` sequence length
        #   R = `input_right` sequence length
        #   C = number of channels
        #   U = number of units
        # input_shape = [B, C, L, R]
        self._batch_size = input_shape[0]
        self._channel = input_shape[1]
        self._input_dim = self._channel + 3 * self._units
        self._text1_maxlen = input_shape[2]
        self._text2_maxlen = input_shape[3]
        # One recurrent step per grid cell.
        self._recurrent_step = self._text1_maxlen * self._text2_maxlen
        # W = [3*U+C, 7*U]
        self._W = self.add_weight(
            name='W',
            shape=(self._input_dim, self._units * 7),
            initializer=self._kernel_initializer,
            trainable=True
        )
        # U = [3*U, U]
        self._U = self.add_weight(
            name='U',
            shape=(self._units * 3, self._units),
            initializer=self._recurrent_initializer,
            trainable=True
        )
        # bias = [8*U,]
        self._bias = self.add_weight(
            name='bias',
            shape=(self._units * 8,),
            initializer='zeros',
            trainable=True
        )
        # The big W / bias tensors are sliced into the per-gate blocks below.
        # w_rl, w_rt, w_rd = [B, 3*U]
        self._wr = self._W[:, :self._units * 3]
        # b_rl, b_rt, b_rd = [B, 3*U]
        self._br = self._bias[:self._units * 3]
        # w_zi, w_zl, w_zt, w_zd = [B, 4*U]
        self._wz = self._W[:, self._units * 3: self._units * 7]
        # b_zi, b_zl, b_zt, b_zd = [B, 4*U]
        self._bz = self._bias[self._units * 3: self._units * 7]
        # w_ij = [C, U]
        self._w_ij = self.add_weight(
            name='W_ij',
            shape=(self._channel, self._units),
            initializer=self._recurrent_initializer,
            trainable=True
        )
        # b_ij = [7*U]
        self._b_ij = self._bias[self._units * 7:]
        super(SpatialGRU, self).build(input_shape)
    def softmax_by_row(self, z: typing.Any) -> tuple:
        """Conduct softmax on each dimension across the four gates."""
        # z_transform: [B, U, 4]
        z_transform = Permute((2, 1))(Reshape((4, self._units))(z))
        size = [-1, 1, -1]
        # Perform softmax on each slice
        for i in range(0, self._units):
            begin = [0, i, 0]
            # z_slice: [B, 1, 4]
            z_slice = tf.slice(z_transform, begin, size)
            if i == 0:
                z_s = tf.nn.softmax(z_slice)
            else:
                z_s = tf.concat([z_s, tf.nn.softmax(z_slice)], 1)
        # zi, zl, zt, zd: [B, U]
        zi, zl, zt, zd = tf.unstack(z_s, axis=2)
        return zi, zl, zt, zd
    def calculate_recurrent_unit(
        self,
        inputs: typing.Any,
        states: typing.Any,
        step: int,
        h: typing.Any,
    ) -> tuple:
        """
        Calculate recurrent unit.
        :param inputs: A TensorArray which contains interaction
            between left text and right text.
        :param states: A TensorArray which stores the hidden state
            of every step.
        :param step: Recurrent step.
        :param h: Hidden state from last operation.
        """
        # Get index i, j
        # `states` is laid out as an (L+1) x (R+1) grid flattened row-major,
        # with an extra zero-padded first row and first column, so cell
        # (i, j) of the input grid maps to state index
        # (i+1)*(R+1) + (j+1).
        i = tf.math.floordiv(step, tf.constant(self._text2_maxlen))
        j = tf.math.mod(step, tf.constant(self._text2_maxlen))
        # Get hidden state h_diag, h_top, h_left
        # h_diag, h_top, h_left = [B, U]
        h_diag = states.read(i * (self._text2_maxlen + 1) + j)
        h_top = states.read(i * (self._text2_maxlen + 1) + j + 1)
        h_left = states.read((i + 1) * (self._text2_maxlen + 1) + j)
        # Get interaction between word i, j: s_ij
        # s_ij = [B, C]
        s_ij = inputs.read(step)
        # Concatenate h_top, h_left, h_diag, s_ij
        # q = [B, 3*U+C]
        q = tf.concat([tf.concat([h_top, h_left], 1),
                       tf.concat([h_diag, s_ij], 1)], 1)
        # Calculate reset gate
        # r = [B, 3*U]
        r = self._recurrent_activation(
            self._time_distributed_dense(self._wr, q, self._br))
        # Calculate updating gate
        # z: [B, 4*U]
        z = self._time_distributed_dense(self._wz, q, self._bz)
        # Perform softmax
        # zi, zl, zt, zd: [B, U]
        zi, zl, zt, zd = self.softmax_by_row(z)
        # Get h_ij_
        # h_ij_ = [B, U]
        h_ij_l = self._time_distributed_dense(self._w_ij, s_ij, self._b_ij)
        h_ij_r = K.dot(r * (tf.concat([h_left, h_top, h_diag], 1)), self._U)
        h_ij_ = self._activation(h_ij_l + h_ij_r)
        # Calculate h_ij
        # h_ij = [B, U]: convex-like mix of neighbours and candidate,
        # weighted by the four softmax-normalized gates.
        h_ij = zl * h_left + zt * h_top + zd * h_diag + zi * h_ij_
        # Write h_ij to states
        states = states.write(((i + 1) * (self._text2_maxlen + 1) + j + 1),
                              h_ij)
        h_ij.set_shape(h_top.get_shape())
        return inputs, states, step + 1, h_ij
    def call(self, inputs: list, **kwargs) -> typing.Any:
        """
        The computation logic of SpatialGRU.
        :param inputs: input tensors.
        """
        batch_size = tf.shape(inputs)[0]
        # h0 = [B, U]
        self._bounder_state_h0 = tf.zeros([batch_size, self._units])
        # input_x = [L, R, B, C]
        input_x = tf.transpose(inputs, [2, 3, 0, 1])
        if self._direction == 'rb':
            # input_x: [R, L, B, C]
            # Reversing both grid axes implements the bottom-right scan.
            input_x = tf.reverse(input_x, [0, 1])
        elif self._direction != 'lt':
            raise ValueError(f"Invalid direction. "
                             f"`{self._direction}` received. "
                             f"Must be in `lt`, `rb`.")
        # input_x = [L*R*B, C]
        input_x = tf.reshape(input_x, [-1, self._channel])
        # input_x = L*R * [B, C]
        input_x = tf.split(
            axis=0,
            num_or_size_splits=self._text1_maxlen * self._text2_maxlen,
            value=input_x
        )
        # inputs = L*R * [B, C]
        inputs = tf.TensorArray(
            dtype=tf.float32,
            size=self._text1_maxlen * self._text2_maxlen,
            name='inputs'
        )
        inputs = inputs.unstack(input_x)
        # states = (L+1)*(R+1) * [B, U]
        states = tf.TensorArray(
            dtype=tf.float32,
            size=(self._text1_maxlen + 1) * (self._text2_maxlen + 1),
            name='states',
            clear_after_read=False
        )
        # Initialize states: zero-fill the first row and the first column
        # of the (L+1) x (R+1) state grid as boundary conditions.
        for i in range(self._text2_maxlen + 1):
            states = states.write(i, self._bounder_state_h0)
        for i in range(1, self._text1_maxlen + 1):
            states = states.write(i * (self._text2_maxlen + 1),
                                  self._bounder_state_h0)
        # Calculate h_ij
        # h_ij = [B, U]: state of the last visited grid cell.
        _, _, _, h_ij = tf.while_loop(
            cond=lambda _0, _1, i, _3: tf.less(i, self._recurrent_step),
            body=self.calculate_recurrent_unit,
            loop_vars=(
                inputs,
                states,
                tf.constant(0, dtype=tf.int32),
                self._bounder_state_h0
            ),
            parallel_iterations=1,
            swap_memory=True
        )
        return h_ij
    def compute_output_shape(self, input_shape: typing.Any) -> tuple:
        """
        Calculate the layer output shape.
        :param input_shape: the shapes of the input tensors.
        """
        output_shape = [input_shape[0], self._units]
        return tuple(output_shape)
    @classmethod
    def _time_distributed_dense(cls, w, x, b):
        # Affine transform x @ w + b, applied to each time step.
        x = K.dot(x, w)
        x = K.bias_add(x, b)
        return x
| 10,175 | 33.969072 | 77 | py |
MatchZoo | MatchZoo-master/matchzoo/contrib/layers/attention_layer.py | """An implementation of Attention Layer for Bimpm model."""
import tensorflow as tf
from keras import backend as K
from keras.engine import Layer
class AttentionLayer(Layer):
    """
    Layer that compute attention for BiMPM model.
    For detailed information, see Bilateral Multi-Perspective Matching for
    Natural Language Sentences, section 3.2.
    Reference:
    https://github.com/zhiguowang/BiMPM/blob/master/src/layer_utils.py#L145-L196
    Examples:
        >>> import matchzoo as mz
        >>> layer = mz.contrib.layers.AttentionLayer(att_dim=50)
        >>> layer.compute_output_shape([(32, 10, 100), (32, 40, 100)])
        (32, 10, 40)
    """
    def __init__(self,
                 att_dim: int,
                 att_type: str = 'default',
                 dropout_rate: float = 0.0):
        """
        class: `AttentionLayer` constructor.
        :param att_dim: projection dimension of the attention space.
        :param att_type: attention type (only 'default' is implemented).
        :param dropout_rate: stored for future use; not applied here.
        """
        super(AttentionLayer, self).__init__()
        self._att_dim = att_dim
        self._att_type = att_type
        self._dropout_rate = dropout_rate
    @property
    def att_dim(self):
        """Get the attention dimension."""
        return self._att_dim
    @property
    def att_type(self):
        """Get the attention type."""
        return self._att_type
    def build(self, input_shapes):
        """
        Build the layer.
        :param input_shapes: input_shape_lt, input_shape_rt
        """
        if not isinstance(input_shapes, list):
            raise ValueError('A attention layer should be called '
                             'on a list of inputs.')
        hidden_dim_lt = input_shapes[0][2]
        hidden_dim_rt = input_shapes[1][2]
        # Projection of the left representations into the attention space.
        self.attn_w1 = self.add_weight(name='attn_w1',
                                       shape=(hidden_dim_lt,
                                              self._att_dim),
                                       initializer='uniform',
                                       trainable=True)
        # Share the projection when both sides have the same hidden size.
        if hidden_dim_lt == hidden_dim_rt:
            self.attn_w2 = self.attn_w1
        else:
            self.attn_w2 = self.add_weight(name='attn_w2',
                                           shape=(hidden_dim_rt,
                                                  self._att_dim),
                                           initializer='uniform',
                                           trainable=True)
        # diagonal_W: (1, 1, a) -- per-dimension scaling of the left side.
        self.diagonal_W = self.add_weight(name='diagonal_W',
                                          shape=(1,
                                                 1,
                                                 self._att_dim),
                                          initializer='uniform',
                                          trainable=True)
        self.built = True
    def call(self, x: list, **kwargs):
        """
        Calculate attention.
        :param x: [reps_lt, reps_rt] or [reps_lt, reps_rt, mask_lt, mask_rt]
        :return attn_prob: [b, s_lt, s_rt]
        """
        if not isinstance(x, list):
            raise ValueError('A attention layer should be called '
                             'on a list of inputs.')
        # BUG FIX: the original `reps_lt, reps_rt = x` raised ValueError
        # whenever masks were supplied (len(x) == 4), which made the
        # masking branch below unreachable. Index explicitly instead.
        reps_lt, reps_rt = x[0], x[1]
        attn_w1 = self.attn_w1
        attn_w1 = tf.expand_dims(tf.expand_dims(attn_w1, axis=0), axis=0)
        # => [1, 1, d, a]
        reps_lt = tf.expand_dims(reps_lt, axis=-1)
        attn_reps_lt = tf.reduce_sum(reps_lt * attn_w1, axis=2)
        # => [b, s_lt, a]
        attn_w2 = self.attn_w2
        attn_w2 = tf.expand_dims(tf.expand_dims(attn_w2, axis=0), axis=0)
        # => [1, 1, d, a]
        reps_rt = tf.expand_dims(reps_rt, axis=-1)
        attn_reps_rt = tf.reduce_sum(reps_rt * attn_w2, axis=2)  # [b, s_rt, a]
        attn_reps_lt = tf.tanh(attn_reps_lt)  # [b, s_lt, a]
        attn_reps_rt = tf.tanh(attn_reps_rt)  # [b, s_rt, a]
        # diagonal_W
        attn_reps_lt = attn_reps_lt * self.diagonal_W  # [b, s_lt, a]
        attn_reps_rt = tf.transpose(attn_reps_rt, (0, 2, 1))
        # => [b, a, s_rt]
        attn_value = K.batch_dot(attn_reps_lt, attn_reps_rt)  # [b, s_lt, s_rt]
        # Softmax operation
        attn_prob = tf.nn.softmax(attn_value)  # [b, s_lt, s_rt]
        # TODO(tjf) remove diagonal or not for normalization
        # if remove_diagonal: attn_value = attn_value * diagonal
        if len(x) == 4:
            # Zero out attention to padded positions on both sides.
            mask_lt, mask_rt = x[2], x[3]
            attn_prob *= tf.expand_dims(mask_lt, axis=2)
            attn_prob *= tf.expand_dims(mask_rt, axis=1)
        return attn_prob
    def compute_output_shape(self, input_shapes):
        """Calculate the layer output shape."""
        if not isinstance(input_shapes, list):
            raise ValueError('A attention layer should be called '
                             'on a list of inputs.')
        input_shape_lt, input_shape_rt = input_shapes[0], input_shapes[1]
        return input_shape_lt[0], input_shape_lt[1], input_shape_rt[1]
| 4,960 | 33.213793 | 83 | py |
MatchZoo | MatchZoo-master/matchzoo/contrib/layers/semantic_composite_layer.py | """An implementation of EncodingModule for DIIN model."""
import tensorflow as tf
from keras import backend as K
from keras.engine import Layer
from matchzoo.contrib.layers import DecayingDropoutLayer
class EncodingLayer(Layer):
    """
    Apply a self-attention layer and a semantic composite fuse gate
    to compute the encoding result of one tensor.
    :param initial_keep_rate: the initial_keep_rate parameter of
        DecayingDropoutLayer.
    :param decay_interval: the decay_interval parameter of
        DecayingDropoutLayer.
    :param decay_rate: the decay_rate parameter of DecayingDropoutLayer.
    :param kwargs: standard layer keyword arguments.
    Example:
        >>> import matchzoo as mz
        >>> layer = mz.contrib.layers.EncodingLayer(1.0, 10000, 0.977)
        >>> num_batch, left_len, num_dim = 5, 32, 10
        >>> layer.build([num_batch, left_len, num_dim])
    """
    def __init__(self,
                 initial_keep_rate: float,
                 decay_interval: int,
                 decay_rate: float,
                 **kwargs):
        """:class: 'EncodingLayer' constructor."""
        super(EncodingLayer, self).__init__(**kwargs)
        self._initial_keep_rate = initial_keep_rate
        self._decay_interval = decay_interval
        self._decay_rate = decay_rate
        # Weights are created lazily in `build`.
        self._w_itr_att = None
        self._w1 = None
        self._w2 = None
        self._w3 = None
        self._b1 = None
        self._b2 = None
        self._b3 = None
    def build(self, input_shape):
        """
        Build the layer.
        :param input_shape: the shape of the input tensor,
            for EncodingLayer we need one input tensor.
        """
        d = input_shape[-1]
        # Attention scoring vector over [up; mid; up*mid] features.
        self._w_itr_att = self.add_weight(
            name='w_itr_att', shape=(3 * d,), initializer='glorot_uniform')
        # Fuse-gate projections: each maps [input; attention] (2d) -> d.
        self._w1 = self.add_weight(
            name='w1', shape=(2 * d, d,), initializer='glorot_uniform')
        self._w2 = self.add_weight(
            name='w2', shape=(2 * d, d,), initializer='glorot_uniform')
        self._w3 = self.add_weight(
            name='w3', shape=(2 * d, d,), initializer='glorot_uniform')
        self._b1 = self.add_weight(
            name='b1', shape=(d,), initializer='zeros')
        self._b2 = self.add_weight(
            name='b2', shape=(d,), initializer='zeros')
        self._b3 = self.add_weight(
            name='b3', shape=(d,), initializer='zeros')
        super(EncodingLayer, self).build(input_shape)
    def call(self, inputs, **kwargs):
        """
        The computation logic of EncodingLayer.
        :param inputs: an input tensor.
        """
        # Scalar dimensions referenced here:
        #   b = batch size
        #   p = inputs.shape()[1]
        #   d = inputs.shape()[2]
        # The input shape is [b, p, d]
        # shape = [b, 1, p, d]
        # NOTE: multiplied by 0 on purpose -- this builds a zero tensor of
        # the right shape that is only used to broadcast `inputs` against
        # itself when forming the pairwise feature tensor below.
        x = tf.expand_dims(inputs, 1) * 0
        # shape = [b, 1, d, p]
        x = tf.transpose(x, (0, 1, 3, 2))
        # shape = [b, p, d, p]
        mid = x + tf.expand_dims(inputs, -1)
        # shape = [b, p, d, p]
        up = tf.transpose(mid, (0, 3, 2, 1))
        # shape = [b, p, 3d, p]
        inputs_concat = tf.concat([up, mid, up * mid], axis=2)
        # Self-attention layer.
        # shape = [b, p, p]
        # K.dot contracts the (3d,) weight vector against axis 2.
        A = K.dot(self._w_itr_att, inputs_concat)
        # shape = [b, p, p]
        SA = tf.nn.softmax(A, axis=2)
        # shape = [b, p, d]
        itr_attn = K.batch_dot(SA, inputs)
        # Semantic composite fuse gate.
        # shape = [b, p, 2d]
        inputs_attn_concat = tf.concat([inputs, itr_attn], axis=2)
        concat_dropout = DecayingDropoutLayer(
            initial_keep_rate=self._initial_keep_rate,
            decay_interval=self._decay_interval,
            decay_rate=self._decay_rate
        )(inputs_attn_concat)
        # shape = [b, p, d] -- candidate values
        z = tf.tanh(K.dot(concat_dropout, self._w1) + self._b1)
        # shape = [b, p, d] -- retain gate for the original inputs
        r = tf.sigmoid(K.dot(concat_dropout, self._w2) + self._b2)
        # shape = [b, p, d] -- fuse gate for the candidate values
        f = tf.sigmoid(K.dot(concat_dropout, self._w3) + self._b3)
        # shape = [b, p, d]
        encoding = r * inputs + f * z
        return encoding
| 4,198 | 33.418033 | 75 | py |
MatchZoo | MatchZoo-master/matchzoo/contrib/layers/multi_perspective_layer.py | """An implementation of MultiPerspectiveLayer for Bimpm model."""
import tensorflow as tf
from keras import backend as K
from keras.engine import Layer
from matchzoo.contrib.layers.attention_layer import AttentionLayer
class MultiPerspectiveLayer(Layer):
    """
    A keras implementation of multi-perspective layer of BiMPM.
    For detailed information, see Bilateral Multi-Perspective
    Matching for Natural Language Sentences, section 3.2.
    Examples:
        >>> import matchzoo as mz
        >>> perspective={'full': True, 'max-pooling': True,
        ...              'attentive': True, 'max-attentive': True}
        >>> layer = mz.contrib.layers.MultiPerspectiveLayer(
        ...     att_dim=50, mp_dim=20, perspective=perspective)
        >>> layer.compute_output_shape(
        ...     [(32, 10, 100), (32, 50), None, (32, 50), None,
        ...     [(32, 40, 100), (32, 50), None, (32, 50), None]])
        (32, 10, 83)
    """
    def __init__(self,
                 att_dim: int,
                 mp_dim: int,
                 perspective: dict):
        """Class initialization.

        :param att_dim: attention dimension for the attentive strategies.
        :param mp_dim: number of matching perspectives per strategy.
        :param perspective: dict of strategy name -> bool, choosing which
            of the four matching strategies are applied.
        """
        super(MultiPerspectiveLayer, self).__init__()
        self._att_dim = att_dim
        self._mp_dim = mp_dim
        self._perspective = perspective
    @classmethod
    def list_available_perspectives(cls) -> list:
        """List available strategy for multi-perspective matching."""
        return ['full', 'max-pooling', 'attentive', 'max-attentive']
    @property
    def num_perspective(self):
        """Get the number of perspectives that is True."""
        return sum(self._perspective.values())
    def build(self, input_shape: list):
        """Input shape."""
        # The shape of the weights is l * d.
        # Instantiate one sub-layer per enabled strategy.
        if self._perspective.get('full'):
            self.full_match = MpFullMatch(self._mp_dim)
        if self._perspective.get('max-pooling'):
            self.max_pooling_match = MpMaxPoolingMatch(self._mp_dim)
        if self._perspective.get('attentive'):
            self.attentive_match = MpAttentiveMatch(self._att_dim,
                                                    self._mp_dim)
        if self._perspective.get('max-attentive'):
            self.max_attentive_match = MpMaxAttentiveMatch(self._att_dim)
        self.built = True
    def call(self, x: list, **kwargs):
        """Call.

        :param x: a flat list of 10 tensors -- 5 for the left side followed
            by 5 for the right side (see unpacking below).
        """
        seq_lt, seq_rt = x[:5], x[5:]
        # unpack seq_left and seq_right
        # all hidden states, last hidden state of forward pass,
        # last cell state of forward pass, last hidden state of
        # backward pass, last cell state of backward pass.
        lstm_reps_lt, forward_h_lt, _, backward_h_lt, _ = seq_lt
        lstm_reps_rt, forward_h_rt, _, backward_h_rt, _ = seq_rt
        match_tensor_list = []
        # `match_dim` mirrors the per-strategy widths added in
        # `compute_output_shape` below.
        match_dim = 0
        if self._perspective.get('full'):
            # Each forward & backward contextual embedding compare
            # with the last step of the last time step of the other sentence.
            h_lt = tf.concat([forward_h_lt, backward_h_lt], axis=-1)
            full_match_tensor = self.full_match([h_lt, lstm_reps_rt])
            match_tensor_list.append(full_match_tensor)
            match_dim += self._mp_dim + 1
        if self._perspective.get('max-pooling'):
            # Each contextual embedding compare with each contextual embedding.
            # retain the maximum of each dimension.
            max_match_tensor = self.max_pooling_match([lstm_reps_lt,
                                                       lstm_reps_rt])
            match_tensor_list.append(max_match_tensor)
            match_dim += self._mp_dim
        if self._perspective.get('attentive'):
            # Each contextual embedding compare with each contextual embedding.
            # retain sum of weighted mean of each dimension.
            attentive_tensor = self.attentive_match([lstm_reps_lt,
                                                     lstm_reps_rt])
            match_tensor_list.append(attentive_tensor)
            match_dim += self._mp_dim + 1
        if self._perspective.get('max-attentive'):
            # Each contextual embedding compare with each contextual embedding.
            # retain max of weighted mean of each dimension.
            relevancy_matrix = _calc_relevancy_matrix(lstm_reps_lt,
                                                      lstm_reps_rt)
            max_attentive_tensor = self.max_attentive_match([lstm_reps_lt,
                                                             lstm_reps_rt,
                                                             relevancy_matrix])
            match_tensor_list.append(max_attentive_tensor)
            match_dim += self._mp_dim + 1
        # Concatenate the enabled strategies along the feature axis.
        mp_tensor = tf.concat(match_tensor_list, axis=-1)
        return mp_tensor
    def compute_output_shape(self, input_shape: list):
        """Compute output shape."""
        shape_a = input_shape[0]
        match_dim = 0
        if self._perspective.get('full'):
            match_dim += self._mp_dim + 1
        if self._perspective.get('max-pooling'):
            match_dim += self._mp_dim
        if self._perspective.get('attentive'):
            match_dim += self._mp_dim + 1
        if self._perspective.get('max-attentive'):
            match_dim += self._mp_dim + 1
        return shape_a[0], shape_a[1], match_dim
class MpFullMatch(Layer):
    """Multi-perspective full matching layer.

    Matches every right-hand contextual embedding against the final
    (concatenated forward/backward) state of the left-hand sequence.
    """

    def __init__(self, mp_dim):
        """Initialize with ``mp_dim`` matching perspectives."""
        super(MpFullMatch, self).__init__()
        self.mp_dim = mp_dim

    def build(self, input_shapes):
        """Build (no weights of its own)."""
        self.built = True

    def call(self, x, **kwargs):
        """Compute the full-matching tensor.

        :param x: [last_state_lt, reps_rt]
        :return: [b, len_rt, mp_dim + 1]
        """
        last_state_lt, reps_rt = x
        # Broadcast the single left state across every right position.
        expanded_lt = tf.expand_dims(last_state_lt, 1)
        match_tensor, _ = _multi_perspective_match(self.mp_dim,
                                                   reps_rt,
                                                   expanded_lt)
        return match_tensor

    def compute_output_shape(self, input_shape):
        """Return (batch, len_rt, mp_dim + 1)."""
        rt_shape = input_shape[1]
        return rt_shape[0], rt_shape[1], self.mp_dim + 1
class MpMaxPoolingMatch(Layer):
    """Multi-perspective max-pooling matching layer.

    Compares every left contextual embedding with every right one under
    ``mp_dim`` perspectives and keeps, per right position, the maximum
    matching value over all left positions.
    """

    def __init__(self, mp_dim):
        """Initialize with ``mp_dim`` matching perspectives."""
        super(MpMaxPoolingMatch, self).__init__()
        self.mp_dim = mp_dim

    def build(self, input_shapes):
        """Create the per-perspective weight tensor."""
        hidden_dim = input_shapes[0][-1]
        # kernel: [1, 1, 1, mp_dim, d] -- broadcasts over batch and lengths.
        self.kernel = self.add_weight(name='kernel',
                                      shape=(1, 1, 1, self.mp_dim,
                                             hidden_dim),
                                      initializer='uniform',
                                      trainable=True)
        self.built = True

    def call(self, x, **kwargs):
        """Compute the max-pooled matching tensor.

        :param x: [reps_lt, reps_rt]
        :return: [b, len_rt, mp_dim]
        """
        reps_lt, reps_rt = x
        # Left side: [b, len_lt, 1, 1, d], scaled per perspective.
        weighted_lt = tf.expand_dims(tf.expand_dims(reps_lt, axis=2), axis=2)
        weighted_lt = weighted_lt * self.kernel
        # Right side: [b, 1, len_rt, 1, d].
        expanded_rt = tf.expand_dims(tf.expand_dims(reps_rt, axis=2), axis=1)
        # [b, len_lt, len_rt, mp_dim]; pool over the left length.
        matching = _cosine_distance(weighted_lt, expanded_rt,
                                    cosine_norm=False)
        return tf.reduce_max(matching, axis=1)

    def compute_output_shape(self, input_shape):
        """Return (batch, len_rt, mp_dim)."""
        rt_shape = input_shape[1]
        return rt_shape[0], rt_shape[1], self.mp_dim
class MpAttentiveMatch(Layer):
    """
    MpAttentiveMatch Layer.
    Matches each right-hand embedding against an attention-weighted mean
    of the left-hand embeddings.
    Reference:
    https://github.com/zhiguowang/BiMPM/blob/master/src/match_utils.py#L188-L193
    Examples:
        >>> import matchzoo as mz
        >>> layer = mz.contrib.layers.multi_perspective_layer.MpAttentiveMatch(
        ...     att_dim=30, mp_dim=20)
        >>> layer.compute_output_shape([(32, 10, 100), (32, 40, 100)])
        (32, 40, 20)
    """

    def __init__(self, att_dim, mp_dim):
        """Initialize with attention and perspective dimensions."""
        super(MpAttentiveMatch, self).__init__()
        self.att_dim = att_dim
        self.mp_dim = mp_dim

    def build(self, input_shapes):
        """Build (no weights of its own)."""
        self.built = True

    def call(self, x, **kwargs):
        """Compute the attentive matching tensor.

        :param x: [reps_lt, reps_rt]
        """
        reps_lt, reps_rt = x[0], x[1]
        # Attention probabilities over the left sequence.
        attn_prob = AttentionLayer(self.att_dim)([reps_rt, reps_lt])
        # Attention-weighted left representations.
        att_lt = K.batch_dot(attn_prob, reps_lt)
        # Multi-perspective matching of the right reps against att_lt.
        attn_match_tensor, _ = _multi_perspective_match(self.mp_dim,
                                                        reps_rt,
                                                        att_lt)
        return attn_match_tensor

    def compute_output_shape(self, input_shape):
        """Return (batch, len_rt, mp_dim)."""
        rt_shape = input_shape[1]
        return rt_shape[0], rt_shape[1], self.mp_dim
class MpMaxAttentiveMatch(Layer):
    """MpMaxAttentiveMatch.

    Matches each right-hand embedding against the single left-hand
    embedding with the highest relevancy (attention) score.
    """

    def __init__(self, mp_dim):
        """Initialize with ``mp_dim`` matching perspectives."""
        super(MpMaxAttentiveMatch, self).__init__()
        self.mp_dim = mp_dim

    def build(self, input_shapes):
        """Build (no weights of its own)."""
        # input_shape = input_shapes[0]
        self.built = True

    def call(self, x, **kwargs):
        """Compute the max-attentive matching tensor.

        :param x: [reps_lt, reps_rt, relevancy_matrix]
        :return: [b, len_rt, mp_dim + 1]
        """
        # FIX: accept **kwargs like every other matching layer -- Keras may
        # forward extra keyword arguments (e.g. a propagated mask) to
        # `call`, which previously raised TypeError.
        reps_lt, reps_rt = x[0], x[1]
        relevancy_matrix = x[2]
        # For each right position, pick the most relevant left embedding.
        max_att_lt = cal_max_question_representation(reps_lt, relevancy_matrix)
        max_attentive_tensor, match_dim = _multi_perspective_match(self.mp_dim,
                                                                   reps_rt,
                                                                   max_att_lt)
        return max_attentive_tensor
def cal_max_question_representation(reps_lt, attn_scores):
    """
    Calculate max_question_representation.
    For every position on the scoring axis, gather the left representation
    with the highest attention score.
    :param reps_lt: [batch_size, passage_len, hidden_size]
    :param attn_scores: [batch_size, passage_len, passage_len]
    :return: [batch_size, passage_len, hidden_size].
    """
    best_positions = tf.argmax(attn_scores, axis=2)
    return collect_representation(reps_lt, best_positions)
def collect_representation(representation, positions):
    """
    Collect representation vectors at the given positions.
    :param representation: [batch_size, node_num, feature_dim]
    :param positions: [batch_size, neighbour_num]
    :return: [batch_size, neighbour_num, feature_dim]
    """
    # Delegates to collect_probs, which performs the batched gather.
    return collect_probs(representation, positions)
def collect_final_step_of_lstm(lstm_representation, lengths):
    """
    Collect final step of lstm.
    Gathers, for each sequence in the batch, the LSTM output at the index
    given by ``lengths``.
    :param lstm_representation: [batch_size, len_rt, dim]
    :param lengths: [batch_size]
    :return: [batch_size, dim]
    """
    # Clip negative indices up to zero.
    lengths = tf.maximum(lengths, K.zeros_like(lengths))
    num_seqs = tf.shape(lengths)[0]
    # One row id per sequence: shape (batch_size,).
    seq_ids = tf.range(0, limit=num_seqs)
    # Pair each row id with its target step: shape (batch_size, 2).
    gather_idx = tf.stack((seq_ids, lengths), axis=1)
    # [batch_size, dim]
    return tf.gather_nd(lstm_representation, gather_idx,
                        name='last-forwar-lstm')
def collect_probs(probs, positions):
    """
    Collect Probabilities.
    Batched gather: for each batch row, pick the entries of ``probs``
    indexed by ``positions``.
    Reference:
    https://github.com/zhiguowang/BiMPM/blob/master/src/layer_utils.py#L128-L140
    :param probs: [batch_size, chunks_size]
    :param positions: [batch_size, pair_size]
    :return: [batch_size, pair_size]
    """
    batch_size = tf.shape(probs)[0]
    pair_size = tf.shape(positions)[1]
    # Row ids, reshaped to a column then tiled: [batch_size, pair_size].
    row_ids = tf.reshape(K.arange(0, batch_size), shape=[-1, 1])
    row_ids = K.tile(row_ids, [1, pair_size])
    # gather_nd requires integer indices.
    positions = tf.cast(positions, tf.int32)
    # (row, position) pairs: [batch_size, pair_size, 2].
    gather_idx = tf.stack([row_ids, positions], axis=2)
    return tf.gather_nd(probs, gather_idx)
def _multi_perspective_match(mp_dim, reps_rt, att_lt,
                             with_cosine=True, with_mp_cosine=True):
    """
    The core matching function of zhiguowang's implementation.
    reference:
    https://github.com/zhiguowang/BiMPM/blob/master/src/match_utils.py#L207-L223
    :param mp_dim: number of perspectives (about 20)
    :param reps_rt: [batch, len_rt, dim]
    :param att_lt: [batch, len_rt, dim]
    :param with_cosine: include the plain cosine channel
    :param with_mp_cosine: include the multi-perspective cosine channels
    :return: ([batch, len_rt, match_dim], match_dim)
    """
    rt_shape = tf.shape(reps_rt)
    batch_size, seq_len = rt_shape[0], rt_shape[1]
    channels = []
    match_dim = 0
    if with_cosine:
        # Plain (unnormalized, tanh-scaled) cosine channel.
        plain = _cosine_distance(reps_rt, att_lt, False)
        channels.append(tf.reshape(plain, [batch_size, seq_len, 1]))
        match_dim += 1
    if with_mp_cosine:
        # One channel per perspective.
        mp_layer = MpCosineLayer(mp_dim)
        mp = mp_layer([reps_rt, att_lt])
        channels.append(tf.reshape(mp, [batch_size, seq_len, mp_dim]))
        match_dim += mp_layer.mp_dim
    return tf.concat(channels, 2), match_dim
class MpCosineLayer(Layer):
    """
    Implementation of Multi-Perspective Cosine Distance.
    Reference:
    https://github.com/zhiguowang/BiMPM/blob/master/src/match_utils.py#L121-L129
    Examples:
        >>> import matchzoo as mz
        >>> layer = mz.contrib.layers.multi_perspective_layer.MpCosineLayer(
        ...     mp_dim=50)
        >>> layer.compute_output_shape([(32, 10, 100), (32, 10, 100)])
        (32, 10, 50)
    """

    def __init__(self, mp_dim, **kwargs):
        """Initialize with ``mp_dim`` perspectives."""
        self.mp_dim = mp_dim
        super(MpCosineLayer, self).__init__(**kwargs)

    def build(self, input_shape):
        """Create the per-perspective weight tensor [1, 1, mp_dim, d]."""
        self.kernel = self.add_weight(name='kernel',
                                      shape=(1, 1, self.mp_dim,
                                             input_shape[0][-1]),
                                      initializer='uniform',
                                      trainable=True)
        super(MpCosineLayer, self).build(input_shape)

    def call(self, x, **kwargs):
        """Return tanh-scaled dot products under each perspective."""
        lhs, rhs = x
        weighted_lhs = tf.expand_dims(lhs, 2) * self.kernel  # [b, s, m, d]
        expanded_rhs = tf.expand_dims(rhs, 2)                # [b, s, 1, d]
        return _cosine_distance(weighted_lhs, expanded_rhs, False)

    def compute_output_shape(self, input_shape):
        """Return (batch, seq_len, mp_dim)."""
        return input_shape[0][0], input_shape[0][1], self.mp_dim
def _calc_relevancy_matrix(reps_lt, reps_rt):
    """Pairwise cosine relevancy between left and right time steps."""
    expanded_lt = tf.expand_dims(reps_lt, 1)  # [b, 1, len_lt, d]
    expanded_rt = tf.expand_dims(reps_rt, 2)  # [b, len_rt, 1, d]
    # => [b, len_rt, len_lt]
    return _cosine_distance(expanded_lt, expanded_rt)
def _mask_relevancy_matrix(relevancy_matrix, mask_lt, mask_rt):
    """
    Zero out relevancy entries that fall on padded positions.

    :param relevancy_matrix: [b, len_rt, len_lt]
    :param mask_lt: [b, len_lt] or None (skips the left mask)
    :param mask_rt: [b, len_rt]
    :return: masked_matrix: [b, len_rt, len_lt]
    """
    masked = relevancy_matrix
    if mask_lt is not None:
        masked = masked * tf.expand_dims(mask_lt, 1)
    masked = masked * tf.expand_dims(mask_rt, 2)
    return masked
def _cosine_distance(v1, v2, cosine_norm=True, eps=1e-6):
    """
    Cosine-style similarity along the last axis.

    When ``cosine_norm`` is False the raw dot product is squashed with
    ``tanh`` instead of being normalized by the vector norms.

    :param v1: [batch, time_steps(v1), 1, m, d]
    :param v2: [batch, 1, time_steps(v2), m, d]
    :param cosine_norm: True
    :param eps: clamp applied to squared norms before the sqrt.
    :return: [batch, time_steps(v1), time_steps(v2), m]
    """
    numerator = tf.reduce_sum(v1 * v2, axis=-1)
    if not cosine_norm:
        return K.tanh(numerator)
    norm1 = K.sqrt(tf.maximum(tf.reduce_sum(tf.square(v1), axis=-1), eps))
    norm2 = K.sqrt(tf.maximum(tf.reduce_sum(tf.square(v2), axis=-1), eps))
    return numerator / norm1 / norm2
| 16,251 | 33.652452 | 80 | py |
MatchZoo | MatchZoo-master/matchzoo/contrib/layers/matching_tensor_layer.py | """An implementation of Matching Tensor Layer."""
import typing
import numpy as np
import tensorflow as tf
from keras import backend as K
from keras.engine import Layer
from keras.initializers import constant
class MatchingTensorLayer(Layer):
    """
    Layer that captures the basic interactions between two tensors.

    :param channels: Number of word interaction tensor channels
    :param normalize: Whether to L2-normalize samples along the
        dot product axis before taking the dot product.
        If set to True, then the output of the dot product
        is the cosine proximity between the two samples.
    :param init_diag: Whether to initialize the diagonal elements
        of the matrix.
    :param kwargs: Standard layer keyword arguments.

    Examples:
        >>> import matchzoo as mz
        >>> layer = mz.contrib.layers.MatchingTensorLayer(channels=4,
        ...                                               normalize=True,
        ...                                               init_diag=True)
        >>> num_batch, left_len, right_len, num_dim = 5, 3, 2, 10
        >>> layer.build([[num_batch, left_len, num_dim],
        ...              [num_batch, right_len, num_dim]])

    """

    def __init__(self, channels: int = 4, normalize: bool = True,
                 init_diag: bool = True, **kwargs):
        """:class:`MatchingTensorLayer` constructor."""
        super().__init__(**kwargs)
        self._channels = channels
        self._normalize = normalize
        self._init_diag = init_diag
        # Input shapes are recorded in `build` and reused for validation
        # and for sizing the interaction weight.
        self._shape1 = None
        self._shape2 = None

    def build(self, input_shape: list):
        """
        Build the layer.

        :param input_shape: the shapes of the input tensors,
            for MatchingTensorLayer we need two input tensors.
        """
        # Used purely for shape validation.
        if not isinstance(input_shape, list) or len(input_shape) != 2:
            raise ValueError('A `MatchingTensorLayer` layer should be called '
                             'on a list of 2 inputs.')
        self._shape1 = input_shape[0]
        self._shape2 = input_shape[1]
        # Batch (axis 0) and embedding (axis 2) dims must agree; only the
        # sequence lengths (axis 1) may differ between the two inputs.
        for idx in (0, 2):
            if self._shape1[idx] != self._shape2[idx]:
                raise ValueError(
                    'Incompatible dimensions: '
                    f'{self._shape1[idx]} != {self._shape2[idx]}.'
                    f'Layer shapes: {self._shape1}, {self._shape2}.'
                )
        if self._init_diag:
            # Small uniform noise plus a 0.1 diagonal per channel, so each
            # channel starts close to a scaled-identity interaction.
            interaction_matrix = np.float32(
                np.random.uniform(
                    -0.05, 0.05,
                    [self._channels, self._shape1[2], self._shape2[2]]
                )
            )
            for channel_index in range(self._channels):
                np.fill_diagonal(interaction_matrix[channel_index], 0.1)
            self.interaction_matrix = self.add_weight(
                name='interaction_matrix',
                shape=(self._channels, self._shape1[2], self._shape2[2]),
                initializer=constant(interaction_matrix),
                trainable=True
            )
        else:
            self.interaction_matrix = self.add_weight(
                name='interaction_matrix',
                shape=(self._channels, self._shape1[2], self._shape2[2]),
                initializer='uniform',
                trainable=True
            )
        super(MatchingTensorLayer, self).build(input_shape)

    def call(self, inputs: list, **kwargs) -> typing.Any:
        """
        The computation logic of MatchingTensorLayer.

        :param inputs: two input tensors.
        """
        x1 = inputs[0]
        x2 = inputs[1]
        # Normalize x1 and x2
        if self._normalize:
            x1 = K.l2_normalize(x1, axis=2)
            x2 = K.l2_normalize(x2, axis=2)
        # b = batch size
        # l = length of `x1`
        # r = length of `x2`
        # d, e = embedding size
        # c = number of channels
        # output = [b, c, l, r]
        output = tf.einsum(
            'bld,cde,bre->bclr',
            x1, self.interaction_matrix, x2
        )
        return output

    def compute_output_shape(self, input_shape: list) -> tuple:
        """
        Calculate the layer output shape.

        :param input_shape: the shapes of the input tensors,
            for MatchingTensorLayer we need two input tensors.
        """
        if not isinstance(input_shape, list) or len(input_shape) != 2:
            raise ValueError('A `MatchingTensorLayer` layer should be called '
                             'on a list of 2 inputs.')
        shape1 = list(input_shape[0])
        shape2 = list(input_shape[1])
        if len(shape1) != 3 or len(shape2) != 3:
            raise ValueError('A `MatchingTensorLayer` layer should be called '
                             'on 2 inputs with 3 dimensions.')
        if shape1[0] != shape2[0] or shape1[2] != shape2[2]:
            raise ValueError('A `MatchingTensorLayer` layer should be called '
                             'on 2 inputs with same 0,2 dimensions.')
        output_shape = [shape1[0], self._channels, shape1[1], shape2[1]]
        return tuple(output_shape)
| 5,169 | 37.014706 | 78 | py |
MatchZoo | MatchZoo-master/matchzoo/losses/rank_hinge_loss.py | """The rank hinge loss."""
import numpy as np
import tensorflow as tf
from keras import layers, backend as K
from keras.losses import Loss
from keras.utils import losses_utils
class RankHingeLoss(Loss):
    """
    Rank hinge loss.

    Predictions are expected in interleaved groups of ``1 + num_neg``
    rows per query: one positive score followed by ``num_neg``
    negative scores (see the slicing in :meth:`call`).

    Examples:
        >>> from keras import backend as K
        >>> x_pred = K.variable(np.array([[1.0], [1.2], [0.8], [1.4]]))
        >>> x_true = K.variable(np.array([[1], [0], [1], [0]]))
        >>> expect = ((1.0 + 1.2 - 1.0) + (1.0 + 1.4 - 0.8)) / 2
        >>> expect
        1.4
        >>> loss = K.eval(RankHingeLoss(num_neg=1, margin=1.0)(x_true, x_pred))
        >>> np.isclose(loss, expect)
        True

    """

    def __init__(self, num_neg: int = 1, margin: float = 1.0):
        """
        :class:`RankHingeLoss` constructor.

        :param num_neg: number of negative instances in hinge loss.
        :param margin: the margin between positive and negative scores.
        """
        super().__init__(reduction=losses_utils.Reduction.SUM_OVER_BATCH_SIZE,
                         name="rank_hinge")
        self._num_neg = num_neg
        self._margin = margin

    def call(self, y_true: np.ndarray, y_pred: np.ndarray,
             sample_weight=None) -> np.ndarray:
        """
        Calculate rank hinge loss.

        :param y_true: Label.
        :param y_pred: Predicted result.
        :return: Hinge loss computed by user-defined margin.
        """
        # Positive scores sit at every (num_neg + 1)-th row.
        y_pos = layers.Lambda(lambda a: a[::(self._num_neg + 1), :],
                              output_shape=(1,))(y_pred)
        y_neg = []
        for neg_idx in range(self._num_neg):
            # NOTE: each Lambda is invoked immediately in this iteration,
            # so the late-binding closure over `neg_idx` is harmless.
            y_neg.append(
                layers.Lambda(
                    lambda a: a[(neg_idx + 1)::(self._num_neg + 1), :],
                    output_shape=(1,))(y_pred))
        # Average the negative scores of each query before the hinge.
        y_neg = tf.concat(y_neg, axis=-1)
        y_neg = tf.reduce_mean(y_neg, axis=-1, keepdims=True)
        loss = tf.maximum(0., self._margin + y_neg - y_pos)
        return losses_utils.compute_weighted_loss(
            loss, sample_weight, reduction=self.reduction)

    @property
    def num_neg(self):
        """`num_neg` getter."""
        return self._num_neg

    @property
    def margin(self):
        """`margin` getter."""
        return self._margin
| 2,253 | 30.305556 | 79 | py |
MatchZoo | MatchZoo-master/matchzoo/losses/rank_cross_entropy_loss.py | """The rank cross entropy loss."""
import numpy as np
import tensorflow as tf
from keras import layers, backend as K
from keras.losses import Loss
from keras.utils import losses_utils
class RankCrossEntropyLoss(Loss):
    """
    Rank cross entropy loss.

    Predictions are expected in interleaved groups of ``1 + num_neg``
    rows per query: one positive score followed by ``num_neg`` negatives;
    the loss is the softmax cross entropy over each group.

    Examples:
        >>> from keras import backend as K
        >>> softmax = lambda x: np.exp(x)/np.sum(np.exp(x), axis=0)
        >>> x_pred = K.variable(np.array([[1.0], [1.2], [0.8]]))
        >>> x_true = K.variable(np.array([[1], [0], [0]]))
        >>> expect = -np.log(softmax(np.array([[1.0], [1.2], [0.8]])))
        >>> loss = K.eval(RankCrossEntropyLoss(num_neg=2)(x_true, x_pred))
        >>> np.isclose(loss, expect[0]).all()
        True

    """

    def __init__(self, num_neg: int = 1):
        """
        :class:`RankCrossEntropyLoss` constructor.

        :param num_neg: number of negative instances in cross entropy loss.
        """
        super().__init__(reduction=losses_utils.Reduction.SUM_OVER_BATCH_SIZE,
                         name="rank_crossentropy")
        self._num_neg = num_neg

    def call(self, y_true: np.ndarray, y_pred: np.ndarray,
             sample_weight=None) -> np.ndarray:
        """
        Calculate rank cross entropy loss.

        :param y_true: Label.
        :param y_pred: Predicted result.
        :return: Crossentropy loss computed by user-defined negative number.
        """
        # Gather the positive row of every group, then each negative row.
        logits = layers.Lambda(lambda a: a[::(self._num_neg + 1), :])(y_pred)
        labels = layers.Lambda(lambda a: a[::(self._num_neg + 1), :])(y_true)
        logits, labels = [logits], [labels]
        for neg_idx in range(self._num_neg):
            # NOTE: each Lambda is invoked immediately in this iteration,
            # so the late-binding closure over `neg_idx` is harmless.
            neg_logits = layers.Lambda(
                lambda a: a[neg_idx + 1::(self._num_neg + 1), :])(y_pred)
            neg_labels = layers.Lambda(
                lambda a: a[neg_idx + 1::(self._num_neg + 1), :])(y_true)
            logits.append(neg_logits)
            labels.append(neg_labels)
        logits = tf.concat(logits, axis=-1)
        labels = tf.concat(labels, axis=-1)
        # eps keeps log() finite when a softmax probability underflows to 0.
        smoothed_prob = tf.nn.softmax(logits) + np.finfo(float).eps
        loss = -(tf.reduce_sum(labels * tf.math.log(smoothed_prob), axis=-1))
        return losses_utils.compute_weighted_loss(
            loss, sample_weight, reduction=self.reduction)

    @property
    def num_neg(self):
        """`num_neg` getter."""
        return self._num_neg
| 2,389 | 35.212121 | 78 | py |
MatchZoo | MatchZoo-master/tests/unit_test/test_layers.py | import numpy as np
import pytest
from keras import backend as K
from matchzoo import layers
from matchzoo.contrib.layers import SpatialGRU
from matchzoo.contrib.layers import MatchingTensorLayer
def test_matching_layers():
    """MatchingLayer accepts all matching types and rejects bad inputs."""
    left = K.variable(np.array([[[1, 2], [2, 3], [3, 4]],
                                [[0.1, 0.2], [0.2, 0.3], [0.3, 0.4]]]))
    right = K.variable(np.array([[[1, 2], [2, 3]],
                                 [[0.1, 0.2], [0.2, 0.3]]]))
    bad_batch = K.variable(np.array([[[1, 2], [2, 3]],
                                     [[0.1, 0.2], [0.2, 0.3]],
                                     [[0.1, 0.2], [0.2, 0.3]]]))
    for matching_type in ['dot', 'mul', 'plus', 'minus', 'concat']:
        matched = layers.MatchingLayer(matching_type=matching_type)(
            [left, right])
        K.eval(matched)
    with pytest.raises(ValueError):
        layers.MatchingLayer(matching_type='error')
    with pytest.raises(ValueError):
        layers.MatchingLayer()([left, bad_batch])
def test_spatial_gru():
    """SpatialGRU runs in both directions and rejects unknown ones."""
    inputs = K.variable(np.array([[[[1, 2], [2, 3], [3, 4]],
                                   [[4, 5], [5, 6], [6, 7]]],
                                  [[[0.1, 0.2], [0.2, 0.3], [0.3, 0.4]],
                                   [[0.4, 0.5], [0.5, 0.6], [0.6, 0.7]]]]))
    for direction in ['lt', 'rb']:
        layer = SpatialGRU(direction=direction)
        K.eval(layer(inputs))
    with pytest.raises(ValueError):
        SpatialGRU(direction='lr')(inputs)
def test_matching_tensor_layer():
    """MatchingTensorLayer runs for both init modes, rejects bad batches."""
    left = K.variable(np.array([[[1, 2], [2, 3], [3, 4]],
                                [[0.1, 0.2], [0.2, 0.3], [0.3, 0.4]]]))
    right = K.variable(np.array([[[1, 2], [2, 3]],
                                 [[0.1, 0.2], [0.2, 0.3]]]))
    bad_batch = K.variable(np.array([[[1, 2], [2, 3]],
                                     [[0.1, 0.2], [0.2, 0.3]],
                                     [[0.1, 0.2], [0.2, 0.3]]]))
    for init_diag in (True, False):
        layer = MatchingTensorLayer(init_diag=init_diag)
        K.eval(layer([left, right]))
    with pytest.raises(ValueError):
        MatchingTensorLayer()([left, bad_batch])
| 2,369 | 37.852459 | 89 | py |
MatchZoo | MatchZoo-master/tests/unit_test/test_losses.py | import numpy as np
from keras import backend as K
from matchzoo import losses
def test_hinge_loss():
    """Check RankHingeLoss against hand-computed expectations."""
    y_true = K.variable(np.array([[1.2], [1],
                                  [1], [1]]))
    y_pred = K.variable(np.array([[1.2], [0.1],
                                  [0], [-0.3]]))
    expected = (0 + 1 - 0.3 + 0) / 2.0
    assert np.isclose(
        expected, K.eval(losses.RankHingeLoss()(y_true, y_pred)))
    expected = (2 + 0.1 - 1.2 + 2 - 0.3 + 0) / 2.0
    assert np.isclose(
        expected,
        K.eval(losses.RankHingeLoss(margin=2)(y_true, y_pred)))
    y_true = K.variable(np.array([[1.2], [1], [0.8],
                                  [1], [1], [0.8]]))
    y_pred = K.variable(np.array([[1.2], [0.1], [-0.5],
                                  [0], [0], [-0.3]]))
    expected = (0 + 1 - 0.15) / 2.0
    assert np.isclose(
        expected,
        K.eval(losses.RankHingeLoss(num_neg=2, margin=1)(y_true, y_pred)))
def test_rank_crossentropy_loss():
    """Check RankCrossEntropyLoss against a NumPy softmax reference."""
    losses.neg_num = 1  # kept from the original; not read by the loss API

    def softmax(x):
        return np.exp(x) / np.sum(np.exp(x), axis=0)

    y_true = K.variable(np.array([[1.], [0.],
                                  [0.], [1.]]))
    y_pred = K.variable(np.array([[0.8], [0.1],
                                  [0.8], [0.1]]))
    expected = (-np.log(softmax([0.8, 0.1])[0])
                - np.log(softmax([0.8, 0.1])[1])) / 2
    assert np.isclose(
        expected, K.eval(losses.RankCrossEntropyLoss()(y_true, y_pred)))
    y_true = K.variable(np.array([[1.], [0.], [0.],
                                  [0.], [1.], [0.]]))
    y_pred = K.variable(np.array([[0.8], [0.1], [0.1],
                                  [0.8], [0.1], [0.1]]))
    expected = (-np.log(softmax([0.8, 0.1, 0.1])[0])
                - np.log(softmax([0.8, 0.1, 0.1])[1])) / 2
    assert np.isclose(
        expected,
        K.eval(losses.RankCrossEntropyLoss(num_neg=2)(y_true, y_pred)))
| 2,082 | 39.843137 | 73 | py |
MatchZoo | MatchZoo-master/tests/unit_test/test_data_generator.py | import copy
import pytest
import keras
import matchzoo as mz
@pytest.fixture(scope='module')
def data_gen():
    """A `DataGenerator` over the toy data pack, shared across the module."""
    return mz.DataGenerator(mz.datasets.toy.load_data())
@pytest.mark.parametrize('attr', [
    'callbacks',
    'num_neg',
    'num_dup',
    'mode',
    'batch_size',
    'shuffle',
])
def test_data_generator_getters_setters(data_gen, attr):
    """Each public attribute round-trips through its setter unchanged."""
    assert hasattr(data_gen, attr)
    original = getattr(data_gen, attr)
    setattr(data_gen, attr, original)
    assert getattr(data_gen, attr) == original
def test_resample():
    """A pair-mode generator with resample=True must reshuffle its batch
    indices between consecutive epochs."""
    model = mz.models.Naive()
    prpr = model.get_default_preprocessor()
    data_raw = mz.datasets.toy.load_data()
    data = prpr.fit_transform(data_raw)
    model.params.update(prpr.context)
    model.params['task'] = mz.tasks.Ranking()
    model.build()
    model.compile()
    data_gen = mz.DataGenerator(
        data_pack=data,
        mode='pair',
        resample=True,
        batch_size=4
    )

    class CheckResample(keras.callbacks.Callback):
        # Records, for every epoch after the first, whether the generator's
        # `batch_indices` differ from the previous epoch's.
        def __init__(self, data_gen):
            super().__init__()
            self._data_gen = data_gen
            self._orig_indices = None
            self._flags = []

        def on_epoch_end(self, epoch, logs=None):
            curr_indices = self._data_gen.batch_indices
            if not self._orig_indices:
                # First epoch: just remember the initial ordering.
                self._orig_indices = copy.deepcopy(curr_indices)
            else:
                self._flags.append(self._orig_indices != curr_indices)
                self._orig_indices = curr_indices

    check_resample = CheckResample(data_gen)
    model.fit_generator(data_gen, epochs=5, callbacks=[check_resample])
    # At least one comparison happened, and every epoch resampled.
    assert check_resample._flags
    assert all(check_resample._flags)
| 1,700 | 24.772727 | 71 | py |
MatchZoo | MatchZoo-master/tests/unit_test/models/test_models.py | """
These tests are simplied because the original verion takes too much time to
run, making CI fails as it reaches the time limit.
"""
import pytest
import copy
from pathlib import Path
import shutil
import matchzoo as mz
from keras.backend import clear_session
@pytest.fixture(scope='module', params=[
    mz.tasks.Ranking(loss=mz.losses.RankCrossEntropyLoss(num_neg=2)),
    mz.tasks.Classification(num_classes=2),
])
def task(request):
    """Parametrized learning task: ranking and classification."""
    return request.param
@pytest.fixture(scope='module')
def train_raw(task):
    """First five rows of the toy training split for the current task."""
    return mz.datasets.toy.load_data('train', task)[:5]
@pytest.fixture(scope='module', params=mz.models.list_available())
def model_class(request):
    """Every available model class, one per parametrized run."""
    return request.param
@pytest.fixture(scope='module')
def embedding():
    """Toy embedding shared across the module."""
    return mz.datasets.toy.load_embedding()
@pytest.fixture(scope='module')
def setup(task, model_class, train_raw, embedding):
    """Prepared (model, preprocessor, generator builder, matrix) tuple."""
    clear_session()  # prevent OOM during CI tests
    return mz.auto.prepare(
        task=task,
        model_class=model_class,
        data_pack=train_raw,
        embedding=embedding
    )
@pytest.fixture(scope='module')
def model(setup):
    """Prepared model from the setup tuple."""
    return setup[0]
@pytest.fixture(scope='module')
def preprocessor(setup):
    """Fitted preprocessor from the setup tuple."""
    return setup[1]
@pytest.fixture(scope='module')
def gen_builder(setup):
    """Data generator builder from the setup tuple."""
    return setup[2]
@pytest.fixture(scope='module')
def embedding_matrix(setup):
    """Embedding matrix from the setup tuple."""
    return setup[3]
@pytest.fixture(scope='module')
def data(train_raw, preprocessor, gen_builder):
    """One generated (x, y) batch built from the transformed toy data."""
    return gen_builder.build(preprocessor.transform(train_raw))[0]
@pytest.mark.slow
def test_model_fit_eval_predict(model, data):
    """Smoke-test fit/evaluate/predict on a single small batch."""
    x, y = data
    n_rows = len(x['id_left'])
    assert model.fit(x, y, batch_size=n_rows, verbose=0)
    assert model.evaluate(x, y, batch_size=n_rows)
    assert model.predict(x, batch_size=n_rows) is not None
@pytest.mark.cron
def test_save_load_model(model):
    """Round-trip save/load; saving to an existing directory must fail."""
    save_dir = '.matchzoo_test_save_load_tmpdir'
    if Path(save_dir).exists():
        shutil.rmtree(save_dir)
    try:
        model.save(save_dir)
        assert mz.load_model(save_dir)
        with pytest.raises(FileExistsError):
            model.save(save_dir)
    finally:
        if Path(save_dir).exists():
            shutil.rmtree(save_dir)
@pytest.mark.cron
def test_hyper_space(model):
    """Models rebuilt from sampled hyper-parameters must build and compile."""
    for _ in range(2):
        params_copy = copy.deepcopy(model.params)
        sampled = mz.hyper_spaces.sample(params_copy.hyper_space)
        for key, value in sampled.items():
            params_copy[key] = value
        rebuilt = params_copy['model_class'](params=params_copy)
        rebuilt.build()
        rebuilt.compile()
| 2,603 | 22.889908 | 75 | py |
MatchZoo | MatchZoo-master/docs/source/conf.py | # -*- coding: utf-8 -*-
"""Sphinx configuration for building the MatchZoo documentation."""
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('../..'))
sys.path.insert(0, os.path.abspath('../../matchzoo'))
sys.path.insert(0, os.path.abspath('../../matchzoo/auto'))
sys.path.insert(0, os.path.abspath('../../matchzoo/data_generator'))
sys.path.insert(0, os.path.abspath('../../matchzoo/data_pack'))
sys.path.insert(0, os.path.abspath('../../matchzoo/datasets'))
sys.path.insert(0, os.path.abspath('../../matchzoo/embedding'))
sys.path.insert(0, os.path.abspath('../../matchzoo/engine'))
sys.path.insert(0, os.path.abspath('../../matchzoo/layers'))
sys.path.insert(0, os.path.abspath('../../matchzoo/losses'))
sys.path.insert(0, os.path.abspath('../../matchzoo/metrics'))
sys.path.insert(0, os.path.abspath('../../matchzoo/models'))
sys.path.insert(0, os.path.abspath('../../matchzoo/preprocessors'))
sys.path.insert(0, os.path.abspath('../../matchzoo/tasks'))
sys.path.insert(0, os.path.abspath('../../matchzoo/utils'))
# -- Project information -----------------------------------------------------
project = 'MatchZoo'
copyright = '2018, MatchZoo'
author = 'MatchZoo'
# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = '2.1'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.mathjax',
    'sphinx.ext.napoleon',
    'sphinx_autodoc_typehints',
    'nbsphinx',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# from recommonmark.parser import CommonMarkParser
# source_parsers = {
#     '.md':CommonMarkParser
# }
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store','**.ipynb_checkpoints']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself.  Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'MatchZoodoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',
    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'MatchZoo.tex', 'MatchZoo Documentation',
     'MatchZoo', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'matchzoo', 'MatchZoo Documentation',
     [author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'MatchZoo', 'MatchZoo Documentation',
     author, 'MatchZoo', 'One line description of project.',
     'Miscellaneous'),
]
# -- Extension configuration -------------------------------------------------
| 5,927 | 31.751381 | 79 | py |
Analyzing-the-Generalization-Capability-of-SGLD-Using-Properties-of-Gaussian-Channels | Analyzing-the-Generalization-Capability-of-SGLD-Using-Properties-of-Gaussian-Channels-main/code/main.py | import numpy as np
import torch
import math
from torch import nn, optim
from torch.utils.data import DataLoader
from torch.utils.data import SubsetRandomSampler
import importlib
import copy
import argparse
from torchvision import transforms, datasets
from torch.autograd import Variable
from torch.optim import Optimizer
from torch.optim.sgd import SGD
from torch.nn.utils import clip_grad_norm_
class CIFAR10RandomLabels(datasets.CIFAR10):
    """CIFAR-10 with a fraction of labels replaced by random classes."""

    def __init__(self, corrupt_prob=0.0, num_classes=10, **kwargs):
        super(CIFAR10RandomLabels, self).__init__(**kwargs)
        self.n_classes = num_classes
        if corrupt_prob > 0:
            self.corrupt_labels(corrupt_prob)

    def corrupt_labels(self, corrupt_prob):
        """Randomize each label with probability `corrupt_prob` (fixed seed)."""
        relabeled = np.array(self.targets)
        # Fixed seed keeps the corruption identical across runs.
        np.random.seed(12345)
        flip_mask = np.random.rand(len(relabeled)) <= corrupt_prob
        relabeled[flip_mask] = np.random.choice(self.n_classes,
                                                flip_mask.sum())
        self.targets = [int(label) for label in relabeled]
class MNISTRandomLabels(datasets.MNIST):
    """MNIST with a fraction of labels replaced by random classes."""

    def __init__(self, corrupt_prob=0.0, num_classes=10, **kwargs):
        super(MNISTRandomLabels, self).__init__(**kwargs)
        self.n_classes = num_classes
        if corrupt_prob > 0:
            self.corrupt_labels(corrupt_prob)

    def corrupt_labels(self, corrupt_prob):
        """Randomize each label with probability `corrupt_prob` (fixed seed)."""
        relabeled = np.array(self.targets)
        # Fixed seed keeps the corruption identical across runs.
        np.random.seed(12345)
        flip_mask = np.random.rand(len(relabeled)) <= corrupt_prob
        relabeled[flip_mask] = np.random.choice(self.n_classes,
                                                flip_mask.sum())
        self.targets = [int(label) for label in relabeled]
class Langevin_SGD(Optimizer):
def __init__(self, params, lr, weight_decay=0, nesterov=False, beta=1, K=100, D=50, sigma=0.5, decay_rate = 0.96, decay_steps=2000):
if lr < 0.0:
raise ValueError("Invalid learning rate: {}".format(lr))
if weight_decay < 0.0:
raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
defaults = dict(lr=lr, weight_decay=weight_decay)
self.beta = beta
self.K = K
self.D = D
self.lr = lr
self.sigma = sigma
self.decay_rate = decay_rate
self.decay_steps = decay_steps
self.steps = 0
super(Langevin_SGD, self).__init__(params, defaults)
def __setstate__(self, state):
super(SGD, self).__setstate__(state)
for group in self.param_groups:
group.setdefault('nesterov', False)
def step(self, closure=None):
self.beta = 4 * self.lr / ((0.002*math.sqrt(2) * self.lr)**2)
gradient = []
for group in self.param_groups:
weight_decay = group['weight_decay']
clip_grad_norm_(group['params'], self.K, norm_type=2)
for p in group['params']:
if p.grad is None:
continue
d_p = p.grad.data
if len(p.shape) == 1 and p.shape[0] == 1:
p.data.add_(-self.lr, d_p)
else:
if weight_decay != 0:
d_p.add_(weight_decay, p.data)
unit_noise = Variable(p.data.new(p.size()).normal_())
p.data.add_(-self.lr, d_p)
p.data.add_((2*self.lr/self.beta)**0.5, unit_noise)
if torch.norm(p.data).item() >= self.D/2:
p.data = p.data / torch.norm(p.data) * (self.D/2)
gradient = gradient + (d_p.cpu().numpy().flatten().tolist())
if (self.steps > 0 and self.steps % self.decay_steps==0):
self.lr = self.lr * self.decay_rate
self.steps = self.steps + 1
if self.lr < 0.0005:
self.lr = 0.0005
return (np.array(gradient)).flatten()
def train(args, model, device, train_loader, criterion, optimizer, epoch, batchsize, num_batches):
    """Run one training epoch and collect the per-batch gradient vectors.

    :param args: parsed command-line arguments (unused in the loop; kept
        for signature symmetry with `validate`).
    :param train_loader: yields (data, target) batches.
    :param optimizer: a `Langevin_SGD`; its `step()` returns the flattened
        gradient of the current batch.
    :param epoch: current epoch index (unused inside the loop).
    :param batchsize: nominal batch size (unused inside the loop).
    :param num_batches: expected number of batches; sizes the gradient buffer.
    :return: (training error rate, average loss, gradient matrix of shape
        [num_batches, num_parameters]).
    """
    sum_loss, sum_correct = 0, 0
    model.train()
    # One row per batch. NOTE(review): assumes the loader yields exactly
    # `num_batches` batches; a trailing partial batch would overrun the
    # buffer -- confirm against the sampler configuration in main().
    gradient_array = np.zeros((num_batches, count_parameters(model)))
    for i, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)
        output = model(data)
        loss = criterion(output, target)
        pred = output.max(1)[1]
        sum_correct += pred.eq(target).sum().item()
        sum_loss += len(data) * loss.item()
        optimizer.zero_grad()
        loss.backward()
        # Langevin_SGD.step() returns the flattened batch gradient.
        gradient = optimizer.step()
        gradient_array[i] = gradient
    return 1 - (sum_correct / len(train_loader.dataset)), sum_loss / len(train_loader.dataset), np.array(gradient_array)
def validate(args, model, device, val_loader, criterion, optimizer, length=0):
sum_loss, sum_correct = 0, 0
model.eval()
total_grad = []
count = 0
for i, (data, target) in enumerate(val_loader):
count = count + 1
data, target = data.to(device), target.to(device)
output = model(data)
loss = criterion(output, target)
pred = output.max(1)[1]
sum_correct += pred.eq(target).sum().item()
sum_loss += len(data) * criterion(output, target).item()
optimizer.zero_grad()
loss.backward()
gradient = []
params = list(model.parameters())
for p in params:
if p.grad is None:
continue
d_p = p.grad.data
gradient = gradient + (d_p.cpu().numpy().flatten().tolist())
gradient = (np.array(gradient)).flatten()
if (total_grad == []):
total_grad = gradient
else:
total_grad = total_grad + gradient
if (length == 0):
length = len(val_loader.dataset)
return 1 - (sum_correct / length), sum_loss / length, total_grad / count
def count_parameters(model):
    """Return the number of trainable parameters in `model`."""
    total = 0
    for p in model.parameters():
        if p.requires_grad:
            total += p.numel()
    return total
def main():
    """Train SGLD sample paths and evaluate the generalization bound.

    Runs `num_sample_path` independent SGLD training runs on a 5000-example
    subset, saving each batch's gradient vector under `gradient/`; then uses
    the across-path gradient variance to compute an epoch-wise
    generalization bound, printed and saved alongside the empirical
    generalization gap.
    """
    parser = argparse.ArgumentParser(description='Training a fully connected NN with one hidden layer')
    parser.add_argument('--no-cuda', default=False, action='store_true',
                        help='disables CUDA training')
    parser.add_argument('--datadir', default='datasets', type=str,
                        help='path to the directory that contains the datasets (default: datasets)')
    parser.add_argument('--dataset', default='CIFAR10', type=str,
                        help='name of the dataset (options: MNIST | CIFAR10 | CIFAR100 | SVHN, default: CIFAR10)')
    parser.add_argument('--model', default='vgg', type=str,
                        help='architecture (options: fc | vgg, default: vgg)')
    parser.add_argument('--epochs', default=1000, type=int,
                        help='number of epochs to train (default: 1000)')
    parser.add_argument('--stopcond', default=0.01, type=float,
                        help='stopping condtion based on the cross-entropy loss (default: 0.01)')
    parser.add_argument('--batchsize', default=64, type=int,
                        help='input batch size (default: 64)')
    parser.add_argument('--learningrate', default=0.05, type=float,
                        help='learning rate (default: 0.01)')
    parser.add_argument('--momentum', default=0.9, type=float,
                        help='momentum (default: 0.9)')
    parser.add_argument('--label_corrupt_prob', default=0, type=float,
                        help='label_corrupt_prob (default: 0)')
    parser.add_argument('--num_sample_path', default=1, type=float,
                        help='num_sample_path (default: 0)')
    args = parser.parse_args()
    use_cuda = torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda else "cpu")
    print(device)
    kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
    nchannels, nclasses, img_dim, = 3, 10, 32
    if args.dataset == 'MNIST': nchannels = 1
    if args.dataset == 'CIFAR100': nclasses = 100
    num_sample_path = int(args.num_sample_path)
    size_of_training_set = 5000
    num_batches = size_of_training_set // args.batchsize
    # Per-epoch, per-path statistics.
    tr_err_list = np.empty((args.epochs, num_sample_path))
    tr_loss_list = np.empty((args.epochs, num_sample_path))
    val_err_list = np.empty((args.epochs, num_sample_path))
    val_loss_list = np.empty((args.epochs, num_sample_path))
    # Across-path gradient variance, indexed [batch, epoch].
    variance_list = np.empty((num_batches, args.epochs))
    optimizer = None
    # Fixed 5000-example subset shared by all sample paths.
    subset_indices = np.random.choice(50000, size_of_training_set, replace=False)
    if args.dataset == 'MNIST':
        normalize = transforms.Normalize(mean=[0.131], std=[0.289])
    else:
        normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    transform_train = transforms.Compose([transforms.Resize(32), transforms.ToTensor(), normalize])
    transform_test = transforms.Compose([transforms.Resize(32), transforms.ToTensor(), normalize])
    if args.dataset == 'MNIST':
        train_dataset = MNISTRandomLabels(root='./data', train=True, download=True,
                                          transform=transform_train, num_classes=10,
                                          corrupt_prob=args.label_corrupt_prob)
        train_loader = torch.utils.data.DataLoader(
            train_dataset, batch_size=args.batchsize, sampler=subset_indices, shuffle=False, **kwargs)
        val_loader = torch.utils.data.DataLoader(
            MNISTRandomLabels(root='./data', train=False,
                              transform=transform_test, num_classes=10,
                              corrupt_prob=args.label_corrupt_prob), batch_size=args.batchsize, shuffle=False, **kwargs)
    else:
        train_dataset = CIFAR10RandomLabels(root='./data', train=True, download=True,
                                            transform=transform_train, num_classes=10,
                                            corrupt_prob=args.label_corrupt_prob)
        train_loader = torch.utils.data.DataLoader(
            train_dataset, batch_size=args.batchsize, sampler=subset_indices, shuffle=False, **kwargs)
        val_loader = torch.utils.data.DataLoader(
            CIFAR10RandomLabels(root='./data', train=False,
                                transform=transform_test, num_classes=10,
                                corrupt_prob=args.label_corrupt_prob), batch_size=args.batchsize, shuffle=False, **kwargs)
    for i in range(num_sample_path):
        # Fresh model/optimizer per path; architecture chosen by --model.
        model = getattr(importlib.import_module('models.{}'.format(args.model)), 'Network')(nchannels, nclasses)
        model = model.to(device)
        criterion = nn.CrossEntropyLoss().to(device)
        optimizer = Langevin_SGD(model.parameters(), args.learningrate)
        for epoch in range(0, args.epochs):
            tr_err, tr_loss, gradient = train(args, model, device, train_loader, criterion, optimizer, epoch, args.batchsize, num_batches)
            # Re-evaluate on the training subset for post-step statistics.
            tr_err, tr_loss, _ = validate(args, model, device, train_loader, criterion, optimizer, length=size_of_training_set)
            val_err, val_loss, _ = validate(args, model, device, val_loader, criterion, optimizer)
            tr_err_list[epoch, i] = tr_err
            tr_loss_list[epoch, i] = tr_loss
            val_err_list[epoch, i] = val_err
            val_loss_list[epoch, i] = val_loss
            # Persist per-batch gradients as gradient/<batch>_<epoch>_<path>.
            for t in range(gradient.shape[0]):
                filename = "gradient/" + str(t) + "_" + str(epoch) + "_" + str(i)
                np.save(filename, gradient[t])
            print("epoch " + str(epoch+1) + " completed")
        print("Sample path " + str(i+1) + " completed")
    average_tr_err = np.mean(tr_err_list, axis=1)
    average_tr_loss = np.mean(tr_loss_list, axis=1)
    average_val_err = np.mean(val_err_list, axis=1)
    average_val_loss = np.mean(val_loss_list, axis=1)
    # Across-path gradient variance per (batch, epoch): mean squared
    # distance to the across-path mean gradient.
    for i in range(num_batches):
        for j in range(args.epochs):
            sum_var = 0
            bar = 0
            for k in range(num_sample_path):
                filename = "gradient/" + str(i) + "_" + str(j) + "_" + str(k) + ".npy"
                bar = bar + np.load(filename)
            bar = bar / num_sample_path
            for k in range(num_sample_path):
                filename = "gradient/" + str(i) + "_" + str(j) + "_" + str(k) + ".npy"
                foo = np.load(filename)
                sum_var = sum_var + np.linalg.norm(foo-bar)**2
            sum_var = sum_var/num_sample_path
            variance_list[i,j] = sum_var
    print('Print the average result from multiple sample path:')
    left_hand = []
    right_hand = []
    surrogate_loss = []
    for epoch in range(0, args.epochs):
        tr_err = average_tr_err[epoch]
        tr_loss = average_tr_loss[epoch]
        val_err = average_val_err[epoch]
        val_loss = average_val_loss[epoch]
        bound_var = 0
        for m in range(num_batches):
            sum_var = 0
            for t in range(m, num_batches * (epoch+1), num_batches):
                # Learning rate and inverse temperature at global step t,
                # mirroring Langevin_SGD's decay schedule.
                eta = args.learningrate * (optimizer.decay_rate ** (t // optimizer.decay_steps))
                beta = 4 * eta / ((0.002*math.sqrt(2) * eta)**2)
                product_var = eta * beta * variance_list[m, t // num_batches]
                # NOTE(review): this inner loop is a no-op
                # (`product_var = product_var`); presumably a per-step
                # contraction factor was intended here -- confirm against
                # the paper's bound before relying on these numbers.
                for t_q in range(t+1, (epoch+1) * num_batches):
                    if not (t_q % num_batches == m):
                        product_var = product_var
                sum_var = sum_var + product_var
            bound_var = bound_var + math.sqrt(sum_var)
        bound_var = bound_var * math.sqrt(2 * args.batchsize) * 0.5 / (2.0 * size_of_training_set)
        left_hand.append(abs(average_tr_err[epoch] - average_val_err[epoch]))
        surrogate_loss.append(abs(average_tr_loss[epoch] - average_val_loss[epoch]))
        right_hand.append(bound_var)
        print(f'Epoch: {epoch + 1}/{args.epochs}\t Average Training loss: {tr_loss:.8f}', f'Average Training error: {tr_err:.8f}\t Average Validation error: {val_err:.8f}', f'Average Validation loss: {val_loss:.8f}\t Average Bound: {bound_var:.8f}\t')
    acc_hand = average_tr_err
    np.save("left_hand", left_hand)
    np.save("right_hand", right_hand)
    np.save("acc", acc_hand)
# Script entry point.
if __name__ == '__main__':
    main()
| 14,060 | 39.059829 | 251 | py |
Analyzing-the-Generalization-Capability-of-SGLD-Using-Properties-of-Gaussian-Channels | Analyzing-the-Generalization-Capability-of-SGLD-Using-Properties-of-Gaussian-Channels-main/code/models/fc.py | import torch.nn as nn
class Network(nn.Module):
    """Three-layer fully connected classifier over flattened 32x32 inputs."""

    def __init__(self, nchannels, nclasses):
        super(Network, self).__init__()
        input_dim = nchannels * 32 * 32
        self.classifier = nn.Sequential(
            nn.Linear(input_dim, 32, bias=True),
            nn.ReLU(inplace=True),
            nn.Linear(32, 32, bias=True),
            nn.ReLU(inplace=True),
            nn.Linear(32, nclasses, bias=True),
        )

    def forward(self, x):
        # Flatten everything except the batch dimension.
        flat = x.view(x.size(0), -1)
        return self.classifier(flat)
| 525 | 36.571429 | 110 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.