repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
DiffPure | DiffPure-master/score_sde/models/layers.py | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: skip-file
"""Common layers for defining score networks.
"""
import math
import string
from functools import partial
import torch.nn as nn
import torch
import torch.nn.functional as F
import numpy as np
from .normalization import ConditionalInstanceNorm2dPlus
def get_act(config):
    """Return the activation module named by ``config.model.nonlinearity``.

    Supported names (case-insensitive): 'elu', 'relu', 'lrelu', 'swish'.

    Raises:
        NotImplementedError: if the configured name is not supported.
    """
    name = config.model.nonlinearity.lower()
    # Dispatch table of activation constructors.
    factories = {
        'elu': nn.ELU,
        'relu': nn.ReLU,
        'lrelu': partial(nn.LeakyReLU, negative_slope=0.2),
        'swish': nn.SiLU,
    }
    if name not in factories:
        raise NotImplementedError('activation function does not exist!')
    return factories[name]()
def ncsn_conv1x1(in_planes, out_planes, stride=1, bias=True, dilation=1, init_scale=1., padding=0):
    """1x1 convolution. Same as NCSNv1/v2.

    The default PyTorch initialization is rescaled by `init_scale`; an
    `init_scale` of exactly 0 is replaced by 1e-10 so weights stay nonzero.
    """
    conv = nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=bias, dilation=dilation,
                     padding=padding)
    init_scale = 1e-10 if init_scale == 0 else init_scale
    conv.weight.data *= init_scale
    # Fix: with bias=False, `conv.bias` is None and must not be scaled.
    if conv.bias is not None:
        conv.bias.data *= init_scale
    return conv
def variance_scaling(scale, mode, distribution,
                     in_axis=1, out_axis=0,
                     dtype=torch.float32,
                     device='cpu'):
    """Variance-scaling initializer, ported from JAX.

    Returns an ``init(shape)`` callable producing a tensor whose variance is
    ``scale / denominator``, with the denominator chosen by ``mode``
    ('fan_in', 'fan_out' or 'fan_avg') and values drawn from ``distribution``
    ('normal' or 'uniform').
    """
    def _fans(shape, in_axis=1, out_axis=0):
        # Receptive-field size = product of all non-fan dimensions.
        rf_size = np.prod(shape) / shape[in_axis] / shape[out_axis]
        return shape[in_axis] * rf_size, shape[out_axis] * rf_size

    def init(shape, dtype=dtype, device=device):
        fan_in, fan_out = _fans(shape, in_axis, out_axis)
        if mode == "fan_in":
            denom = fan_in
        elif mode == "fan_out":
            denom = fan_out
        elif mode == "fan_avg":
            denom = (fan_in + fan_out) / 2
        else:
            raise ValueError(
                "invalid mode for variance scaling initializer: {}".format(mode))
        var = scale / denom
        if distribution == "normal":
            return torch.randn(*shape, dtype=dtype, device=device) * np.sqrt(var)
        if distribution == "uniform":
            # Uniform on [-b, b] with b chosen so the variance equals `var`.
            return (torch.rand(*shape, dtype=dtype, device=device) * 2. - 1.) * np.sqrt(3 * var)
        raise ValueError("invalid distribution for variance scaling initializer")

    return init
def default_init(scale=1.):
    """The same initialization used in DDPM: fan-average uniform scaling."""
    # An exact zero scale would freeze the weights at zero; substitute a tiny
    # positive value instead.
    effective_scale = 1e-10 if scale == 0 else scale
    return variance_scaling(effective_scale, 'fan_avg', 'uniform')
class Dense(nn.Module):
    """Linear layer with `default_init`."""
    # NOTE(review): this class is an empty stub -- it creates no parameters and
    # defines no forward(); presumably kept as a placeholder for checkpoint or
    # naming compatibility. Confirm before removing.
    def __init__(self):
        super().__init__()
def ddpm_conv1x1(in_planes, out_planes, stride=1, bias=True, init_scale=1., padding=0):
    """1x1 convolution with DDPM initialization.

    Weights use the DDPM fan-average uniform init scaled by `init_scale`;
    the bias (when present) starts at zero.
    """
    conv = nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, padding=padding, bias=bias)
    conv.weight.data = default_init(init_scale)(conv.weight.data.shape)
    # Fix: honor bias=False -- `conv.bias` is None then and must not be touched.
    if conv.bias is not None:
        nn.init.zeros_(conv.bias)
    return conv
def ncsn_conv3x3(in_planes, out_planes, stride=1, bias=True, dilation=1, init_scale=1., padding=1):
    """3x3 convolution with PyTorch initialization. Same as NCSNv1/NCSNv2.

    The default PyTorch initialization is rescaled by `init_scale`; an
    `init_scale` of exactly 0 is replaced by 1e-10 so weights stay nonzero.
    """
    init_scale = 1e-10 if init_scale == 0 else init_scale
    conv = nn.Conv2d(in_planes, out_planes, stride=stride, bias=bias,
                     dilation=dilation, padding=padding, kernel_size=3)
    conv.weight.data *= init_scale
    # Fix: with bias=False, `conv.bias` is None and must not be scaled.
    if conv.bias is not None:
        conv.bias.data *= init_scale
    return conv
def ddpm_conv3x3(in_planes, out_planes, stride=1, bias=True, dilation=1, init_scale=1., padding=1):
    """3x3 convolution with DDPM initialization.

    Weights use the DDPM fan-average uniform init scaled by `init_scale`;
    the bias (when present) starts at zero.
    """
    conv = nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=padding,
                     dilation=dilation, bias=bias)
    conv.weight.data = default_init(init_scale)(conv.weight.data.shape)
    # Fix: honor bias=False -- `conv.bias` is None then and must not be touched.
    if conv.bias is not None:
        nn.init.zeros_(conv.bias)
    return conv
###########################################################################
# Functions below are ported over from the NCSNv1/NCSNv2 codebase:
# https://github.com/ermongroup/ncsn
# https://github.com/ermongroup/ncsnv2
###########################################################################
class CRPBlock(nn.Module):
    """Chained residual pooling: repeated (pool -> conv) stages summed into the input."""

    def __init__(self, features, n_stages, act=nn.ReLU(), maxpool=True):
        super().__init__()
        self.convs = nn.ModuleList(
            ncsn_conv3x3(features, features, stride=1, bias=False)
            for _ in range(n_stages)
        )
        self.n_stages = n_stages
        # Either pooling flavor keeps the spatial size (stride 1, padding 2).
        pool_cls = nn.MaxPool2d if maxpool else nn.AvgPool2d
        self.pool = pool_cls(kernel_size=5, stride=1, padding=2)
        self.act = act

    def forward(self, x):
        x = self.act(x)
        path = x
        for conv in self.convs:
            path = conv(self.pool(path))
            x = x + path
        return x
class CondCRPBlock(nn.Module):
    """Conditional CRP block: each stage normalizes on label `y`, pools, convolves, and accumulates."""

    def __init__(self, features, n_stages, num_classes, normalizer, act=nn.ReLU()):
        super().__init__()
        self.convs = nn.ModuleList()
        self.norms = nn.ModuleList()
        self.normalizer = normalizer
        for _ in range(n_stages):
            self.norms.append(normalizer(features, num_classes, bias=True))
            self.convs.append(ncsn_conv3x3(features, features, stride=1, bias=False))
        self.n_stages = n_stages
        self.pool = nn.AvgPool2d(kernel_size=5, stride=1, padding=2)
        self.act = act

    def forward(self, x, y):
        x = self.act(x)
        path = x
        for norm, conv in zip(self.norms, self.convs):
            path = conv(self.pool(norm(path, y)))
            x = x + path
        return x
class RCUBlock(nn.Module):
    """Residual conv unit: `n_blocks` residual blocks of (act -> conv) x `n_stages`."""

    def __init__(self, features, n_blocks, n_stages, act=nn.ReLU()):
        super().__init__()
        # Convs are registered as attributes named '{block}_{stage}_conv'
        # (1-indexed) to match the original NCSN checkpoint layout.
        for b in range(1, n_blocks + 1):
            for s in range(1, n_stages + 1):
                setattr(self, '{}_{}_conv'.format(b, s),
                        ncsn_conv3x3(features, features, stride=1, bias=False))
        self.stride = 1
        self.n_blocks = n_blocks
        self.n_stages = n_stages
        self.act = act

    def forward(self, x):
        for b in range(1, self.n_blocks + 1):
            residual = x
            for s in range(1, self.n_stages + 1):
                x = getattr(self, '{}_{}_conv'.format(b, s))(self.act(x))
            x = x + residual
        return x
class CondRCUBlock(nn.Module):
    """Conditional RCU: stages of (conditional norm -> act -> conv) with a residual per block."""

    def __init__(self, features, n_blocks, n_stages, num_classes, normalizer, act=nn.ReLU()):
        super().__init__()
        # Attribute names follow the 1-indexed '{block}_{stage}_{kind}' scheme
        # used by the original NCSN checkpoints.
        for b in range(1, n_blocks + 1):
            for s in range(1, n_stages + 1):
                setattr(self, '{}_{}_norm'.format(b, s),
                        normalizer(features, num_classes, bias=True))
                setattr(self, '{}_{}_conv'.format(b, s),
                        ncsn_conv3x3(features, features, stride=1, bias=False))
        self.stride = 1
        self.n_blocks = n_blocks
        self.n_stages = n_stages
        self.act = act
        self.normalizer = normalizer

    def forward(self, x, y):
        for b in range(1, self.n_blocks + 1):
            residual = x
            for s in range(1, self.n_stages + 1):
                x = getattr(self, '{}_{}_norm'.format(b, s))(x, y)
                x = getattr(self, '{}_{}_conv'.format(b, s))(self.act(x))
            x = x + residual
        return x
class MSFBlock(nn.Module):
    """Multi-scale fusion: conv each input, resize to a common `shape`, and sum."""

    def __init__(self, in_planes, features):
        super().__init__()
        assert isinstance(in_planes, (list, tuple))
        self.features = features
        self.convs = nn.ModuleList(
            ncsn_conv3x3(planes, features, stride=1, bias=True)
            for planes in in_planes
        )

    def forward(self, xs, shape):
        fused = torch.zeros(xs[0].shape[0], self.features, *shape, device=xs[0].device)
        for conv, x in zip(self.convs, xs):
            h = F.interpolate(conv(x), size=shape, mode='bilinear', align_corners=True)
            fused = fused + h
        return fused
class CondMSFBlock(nn.Module):
    """Conditional multi-scale fusion: norm (on `y`), conv, resize, and sum."""

    def __init__(self, in_planes, features, num_classes, normalizer):
        super().__init__()
        assert isinstance(in_planes, (list, tuple))
        self.convs = nn.ModuleList()
        self.norms = nn.ModuleList()
        self.features = features
        self.normalizer = normalizer
        for planes in in_planes:
            self.convs.append(ncsn_conv3x3(planes, features, stride=1, bias=True))
            self.norms.append(normalizer(planes, num_classes, bias=True))

    def forward(self, xs, y, shape):
        fused = torch.zeros(xs[0].shape[0], self.features, *shape, device=xs[0].device)
        for conv, norm, x in zip(self.convs, self.norms, xs):
            h = conv(norm(x, y))
            fused = fused + F.interpolate(h, size=shape, mode='bilinear', align_corners=True)
        return fused
class RefineBlock(nn.Module):
    """RefineNet block: per-input RCU adaptation, multi-scale fusion, CRP, output RCU."""

    def __init__(self, in_planes, features, act=nn.ReLU(), start=False, end=False, maxpool=True):
        super().__init__()
        assert isinstance(in_planes, (tuple, list))
        self.n_blocks = n_blocks = len(in_planes)
        self.adapt_convs = nn.ModuleList(
            RCUBlock(planes, 2, 2, act) for planes in in_planes
        )
        # The network's final block uses a deeper output RCU (3 blocks vs 1).
        self.output_convs = RCUBlock(features, 3 if end else 1, 2, act)
        if not start:
            self.msf = MSFBlock(in_planes, features)
        self.crp = CRPBlock(features, 2, act, maxpool=maxpool)

    def forward(self, xs, output_shape):
        assert isinstance(xs, (tuple, list))
        hs = [adapt(x) for adapt, x in zip(self.adapt_convs, xs)]
        h = self.msf(hs, output_shape) if self.n_blocks > 1 else hs[0]
        h = self.crp(h)
        return self.output_convs(h)
class CondRefineBlock(nn.Module):
    """Conditional RefineNet block: per-input CondRCU, fusion, CondCRP, output CondRCU."""

    def __init__(self, in_planes, features, num_classes, normalizer, act=nn.ReLU(), start=False, end=False):
        super().__init__()
        assert isinstance(in_planes, (tuple, list))
        self.n_blocks = n_blocks = len(in_planes)
        self.adapt_convs = nn.ModuleList(
            CondRCUBlock(planes, 2, 2, num_classes, normalizer, act)
            for planes in in_planes
        )
        # The network's final block uses a deeper output RCU (3 blocks vs 1).
        self.output_convs = CondRCUBlock(features, 3 if end else 1, 2, num_classes, normalizer, act)
        if not start:
            self.msf = CondMSFBlock(in_planes, features, num_classes, normalizer)
        self.crp = CondCRPBlock(features, 2, num_classes, normalizer, act)

    def forward(self, xs, y, output_shape):
        assert isinstance(xs, (tuple, list))
        hs = [adapt(x, y) for adapt, x in zip(self.adapt_convs, xs)]
        h = self.msf(hs, y, output_shape) if self.n_blocks > 1 else hs[0]
        h = self.crp(h, y)
        return self.output_convs(h, y)
class ConvMeanPool(nn.Module):
    """Convolution followed by 2x2 mean pooling (via strided slicing)."""

    def __init__(self, input_dim, output_dim, kernel_size=3, biases=True, adjust_padding=False):
        super().__init__()
        conv = nn.Conv2d(input_dim, output_dim, kernel_size, stride=1,
                         padding=kernel_size // 2, bias=biases)
        if adjust_padding:
            # Zero-pad one pixel on the top/left so odd-sized inputs pool evenly.
            self.conv = nn.Sequential(nn.ZeroPad2d((1, 0, 1, 0)), conv)
        else:
            self.conv = conv

    def forward(self, inputs):
        out = self.conv(inputs)
        # Averaging the four 2x2-subsampled grids == 2x2 mean pooling.
        pooled = (out[:, :, ::2, ::2] + out[:, :, 1::2, ::2]
                  + out[:, :, ::2, 1::2] + out[:, :, 1::2, 1::2]) / 4.
        return pooled
class MeanPoolConv(nn.Module):
    """2x2 mean pooling (via strided slicing) followed by a convolution."""

    def __init__(self, input_dim, output_dim, kernel_size=3, biases=True):
        super().__init__()
        self.conv = nn.Conv2d(input_dim, output_dim, kernel_size, stride=1,
                              padding=kernel_size // 2, bias=biases)

    def forward(self, inputs):
        # Average the four 2x2-subsampled grids == 2x2 mean pooling.
        pooled = (inputs[:, :, ::2, ::2] + inputs[:, :, 1::2, ::2]
                  + inputs[:, :, ::2, 1::2] + inputs[:, :, 1::2, 1::2]) / 4.
        return self.conv(pooled)
class UpsampleConv(nn.Module):
    """2x upsampling via channel duplication + PixelShuffle, then a convolution.

    For a single input channel this equals nearest-neighbor upsampling; for
    multiple channels the shuffle interleaves the duplicated channels within
    each 2x2 block (behavior preserved from the original NCSN code).
    """

    def __init__(self, input_dim, output_dim, kernel_size=3, biases=True):
        super().__init__()
        self.conv = nn.Conv2d(input_dim, output_dim, kernel_size, stride=1,
                              padding=kernel_size // 2, bias=biases)
        self.pixelshuffle = nn.PixelShuffle(upscale_factor=2)

    def forward(self, inputs):
        h = torch.cat([inputs] * 4, dim=1)
        return self.conv(self.pixelshuffle(h))
class ConditionalResidualBlock(nn.Module):
    """Residual block with class-conditional normalization (NCSNv1).

    Pre-activation layout: norm -> act -> conv -> norm -> act -> conv plus a
    (possibly convolutional) shortcut; every normalization is conditioned on
    the label `y`. Mirrors `ResidualBlock`.
    """

    def __init__(self, input_dim, output_dim, num_classes, resample=1, act=nn.ELU(),
                 normalization=ConditionalInstanceNorm2dPlus, adjust_padding=False, dilation=None):
        super().__init__()
        self.non_linearity = act
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.resample = resample
        self.normalization = normalization
        # Fix: the default `dilation=None` made `dilation > 1` raise TypeError
        # on Python 3 for every default construction; treat None as "no dilation".
        dilated = dilation is not None and dilation > 1
        if resample == 'down':
            if dilated:
                self.conv1 = ncsn_conv3x3(input_dim, input_dim, dilation=dilation)
                self.normalize2 = normalization(input_dim, num_classes)
                self.conv2 = ncsn_conv3x3(input_dim, output_dim, dilation=dilation)
                conv_shortcut = partial(ncsn_conv3x3, dilation=dilation)
            else:
                self.conv1 = ncsn_conv3x3(input_dim, input_dim)
                self.normalize2 = normalization(input_dim, num_classes)
                self.conv2 = ConvMeanPool(input_dim, output_dim, 3, adjust_padding=adjust_padding)
                conv_shortcut = partial(ConvMeanPool, kernel_size=1, adjust_padding=adjust_padding)
        elif resample is None:
            if dilated:
                conv_shortcut = partial(ncsn_conv3x3, dilation=dilation)
                self.conv1 = ncsn_conv3x3(input_dim, output_dim, dilation=dilation)
                self.normalize2 = normalization(output_dim, num_classes)
                self.conv2 = ncsn_conv3x3(output_dim, output_dim, dilation=dilation)
            else:
                # Fix: the bare `nn.Conv2d` here lacked a kernel_size and crashed
                # whenever the shortcut was needed; use a 1x1 conv exactly as
                # `ResidualBlock` does.
                conv_shortcut = partial(ncsn_conv1x1)
                self.conv1 = ncsn_conv3x3(input_dim, output_dim)
                self.normalize2 = normalization(output_dim, num_classes)
                self.conv2 = ncsn_conv3x3(output_dim, output_dim)
        else:
            raise Exception('invalid resample value')

        # The identity shortcut is only valid when the shape is unchanged.
        if output_dim != input_dim or resample is not None:
            self.shortcut = conv_shortcut(input_dim, output_dim)
        self.normalize1 = normalization(input_dim, num_classes)

    def forward(self, x, y):
        output = self.normalize1(x, y)
        output = self.non_linearity(output)
        output = self.conv1(output)
        output = self.normalize2(output, y)
        output = self.non_linearity(output)
        output = self.conv2(output)
        if self.output_dim == self.input_dim and self.resample is None:
            shortcut = x
        else:
            shortcut = self.shortcut(x)
        return shortcut + output
class ResidualBlock(nn.Module):
    """Unconditional residual block used by NCSNv2.

    Pre-activation layout: norm -> act -> conv -> norm -> act -> conv, plus a
    (possibly convolutional) shortcut. ``resample='down'`` halves the spatial
    resolution via `ConvMeanPool`; ``resample=None`` keeps it.
    """

    def __init__(self, input_dim, output_dim, resample=None, act=nn.ELU(),
                 normalization=nn.InstanceNorm2d, adjust_padding=False, dilation=1):
        super().__init__()
        self.non_linearity = act
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.resample = resample
        self.normalization = normalization
        if resample == 'down':
            if dilation > 1:
                # Dilated variant keeps spatial size in the convs; the 'down'
                # semantics then come from the dilation itself.
                self.conv1 = ncsn_conv3x3(input_dim, input_dim, dilation=dilation)
                self.normalize2 = normalization(input_dim)
                self.conv2 = ncsn_conv3x3(input_dim, output_dim, dilation=dilation)
                conv_shortcut = partial(ncsn_conv3x3, dilation=dilation)
            else:
                self.conv1 = ncsn_conv3x3(input_dim, input_dim)
                self.normalize2 = normalization(input_dim)
                # Second conv also downsamples 2x via mean pooling.
                self.conv2 = ConvMeanPool(input_dim, output_dim, 3, adjust_padding=adjust_padding)
                conv_shortcut = partial(ConvMeanPool, kernel_size=1, adjust_padding=adjust_padding)
        elif resample is None:
            if dilation > 1:
                conv_shortcut = partial(ncsn_conv3x3, dilation=dilation)
                self.conv1 = ncsn_conv3x3(input_dim, output_dim, dilation=dilation)
                self.normalize2 = normalization(output_dim)
                self.conv2 = ncsn_conv3x3(output_dim, output_dim, dilation=dilation)
            else:
                # conv_shortcut = nn.Conv2d ### Something wierd here.
                # A 1x1 conv replaces the bare nn.Conv2d above, which could not
                # be called with only (in, out) arguments.
                conv_shortcut = partial(ncsn_conv1x1)
                self.conv1 = ncsn_conv3x3(input_dim, output_dim)
                self.normalize2 = normalization(output_dim)
                self.conv2 = ncsn_conv3x3(output_dim, output_dim)
        else:
            raise Exception('invalid resample value')

        # The identity shortcut is only valid when the shape is unchanged.
        if output_dim != input_dim or resample is not None:
            self.shortcut = conv_shortcut(input_dim, output_dim)
        self.normalize1 = normalization(input_dim)

    def forward(self, x):
        output = self.normalize1(x)
        output = self.non_linearity(output)
        output = self.conv1(output)
        output = self.normalize2(output)
        output = self.non_linearity(output)
        output = self.conv2(output)
        if self.output_dim == self.input_dim and self.resample is None:
            shortcut = x
        else:
            shortcut = self.shortcut(x)
        return shortcut + output
###########################################################################
# Functions below are ported over from the DDPM codebase:
# https://github.com/hojonathanho/diffusion/blob/master/diffusion_tf/nn.py
###########################################################################
def get_timestep_embedding(timesteps, embedding_dim, max_positions=10000):
    """Sinusoidal timestep embeddings, ported from DDPM.

    Args:
        timesteps: 1-D tensor of timestep values.
        embedding_dim: size of the last dimension of the result.
        max_positions: wavelength scale (the transformers' "magic" 10000).

    Returns:
        Float tensor of shape ``(len(timesteps), embedding_dim)``.
    """
    assert timesteps.dim() == 1  # and timesteps.dtype == tf.int32
    half_dim = embedding_dim // 2
    # Geometric progression of frequencies, as in "Attention Is All You Need".
    log_scale = math.log(max_positions) / (half_dim - 1)
    freqs = torch.exp(
        torch.arange(half_dim, dtype=torch.float32, device=timesteps.device) * -log_scale)
    args = timesteps.float()[:, None] * freqs[None, :]
    emb = torch.cat([torch.sin(args), torch.cos(args)], dim=1)
    if embedding_dim % 2 == 1:
        # Odd dims: zero-pad one column on the right.
        emb = F.pad(emb, (0, 1), mode='constant')
    assert emb.shape == (timesteps.shape[0], embedding_dim)
    return emb
def _einsum(a, b, c, x, y):
einsum_str = '{},{}->{}'.format(''.join(a), ''.join(b), ''.join(c))
return torch.einsum(einsum_str, x, y)
def contract_inner(x, y):
    """tensordot(x, y, 1): contract the last axis of `x` with the first axis of `y`."""
    n_x = len(x.shape)
    x_chars = list(string.ascii_lowercase[:n_x])
    y_chars = list(string.ascii_lowercase[n_x:n_x + len(y.shape)])
    # The contracted pair shares one label: last of x == first of y.
    y_chars[0] = x_chars[-1]
    out_chars = x_chars[:-1] + y_chars[1:]
    return _einsum(x_chars, y_chars, out_chars, x, y)
class NIN(nn.Module):
    """Network-in-network layer: a dense map applied over the channel axis."""

    def __init__(self, in_dim, num_units, init_scale=0.1):
        super().__init__()
        self.W = nn.Parameter(default_init(scale=init_scale)((in_dim, num_units)),
                              requires_grad=True)
        self.b = nn.Parameter(torch.zeros(num_units), requires_grad=True)

    def forward(self, x):
        # NCHW -> NHWC, contract channels against W, then back to NCHW.
        h = x.permute(0, 2, 3, 1)
        h = contract_inner(h, self.W) + self.b
        return h.permute(0, 3, 1, 2)
class AttnBlock(nn.Module):
    """Channel-wise self-attention block."""

    def __init__(self, channels):
        super().__init__()
        self.GroupNorm_0 = nn.GroupNorm(num_groups=32, num_channels=channels, eps=1e-6)
        self.NIN_0 = NIN(channels, channels)
        self.NIN_1 = NIN(channels, channels)
        self.NIN_2 = NIN(channels, channels)
        # Output projection starts near zero so the block is initially ~identity.
        self.NIN_3 = NIN(channels, channels, init_scale=0.)

    def forward(self, x):
        batch, ch, height, width = x.shape
        h = self.GroupNorm_0(x)
        q, k, v = self.NIN_0(h), self.NIN_1(h), self.NIN_2(h)
        # Scaled dot-product attention over all spatial positions.
        logits = torch.einsum('bchw,bcij->bhwij', q, k) * (int(ch) ** (-0.5))
        logits = torch.reshape(logits, (batch, height, width, height * width))
        weights = F.softmax(logits, dim=-1)
        weights = torch.reshape(weights, (batch, height, width, height, width))
        h = torch.einsum('bhwij,bcij->bchw', weights, v)
        return x + self.NIN_3(h)
class Upsample(nn.Module):
    """2x nearest-neighbor upsampling, optionally followed by a 3x3 conv."""

    def __init__(self, channels, with_conv=False):
        super().__init__()
        if with_conv:
            self.Conv_0 = ddpm_conv3x3(channels, channels)
        self.with_conv = with_conv

    def forward(self, x):
        _, _, height, width = x.shape
        h = F.interpolate(x, (height * 2, width * 2), mode='nearest')
        return self.Conv_0(h) if self.with_conv else h
class Downsample(nn.Module):
    """2x downsampling via a strided conv (TF 'SAME'-style padding) or average pooling."""

    def __init__(self, channels, with_conv=False):
        super().__init__()
        if with_conv:
            self.Conv_0 = ddpm_conv3x3(channels, channels, stride=2, padding=0)
        self.with_conv = with_conv

    def forward(self, x):
        batch, ch, height, width = x.shape
        if self.with_conv:
            # Pad one pixel on the right/bottom to emulate 'SAME' padding for
            # a stride-2 3x3 conv.
            h = self.Conv_0(F.pad(x, (0, 1, 0, 1)))
        else:
            h = F.avg_pool2d(x, kernel_size=2, stride=2, padding=0)
        assert h.shape == (batch, ch, height // 2, width // 2)
        return h
class ResnetBlockDDPM(nn.Module):
    """The ResNet Blocks used in DDPM.

    Layout: GroupNorm -> act -> conv -> (+ time-embedding bias) ->
    GroupNorm -> act -> dropout -> conv, with a learned shortcut when the
    channel count changes.
    """

    def __init__(self, act, in_ch, out_ch=None, temb_dim=None, conv_shortcut=False, dropout=0.1):
        super().__init__()
        if out_ch is None:
            out_ch = in_ch
        self.GroupNorm_0 = nn.GroupNorm(num_groups=32, num_channels=in_ch, eps=1e-6)
        self.act = act
        self.Conv_0 = ddpm_conv3x3(in_ch, out_ch)
        if temb_dim is not None:
            # Projects the time embedding to a per-channel bias.
            self.Dense_0 = nn.Linear(temb_dim, out_ch)
            self.Dense_0.weight.data = default_init()(self.Dense_0.weight.data.shape)
            nn.init.zeros_(self.Dense_0.bias)
        self.GroupNorm_1 = nn.GroupNorm(num_groups=32, num_channels=out_ch, eps=1e-6)
        self.Dropout_0 = nn.Dropout(dropout)
        # Second conv starts near zero so the block is initially ~identity.
        self.Conv_1 = ddpm_conv3x3(out_ch, out_ch, init_scale=0.)
        if in_ch != out_ch:
            if conv_shortcut:
                self.Conv_2 = ddpm_conv3x3(in_ch, out_ch)
            else:
                self.NIN_0 = NIN(in_ch, out_ch)
        self.out_ch = out_ch
        self.in_ch = in_ch
        self.conv_shortcut = conv_shortcut

    def forward(self, x, temb=None):
        _, ch, _, _ = x.shape
        assert ch == self.in_ch
        out_ch = self.out_ch if self.out_ch else self.in_ch
        h = self.act(self.GroupNorm_0(x))
        h = self.Conv_0(h)
        if temb is not None:
            # Add bias to each feature map conditioned on the time embedding.
            h = h + self.Dense_0(self.act(temb))[:, :, None, None]
        h = self.act(self.GroupNorm_1(h))
        h = self.Dropout_0(h)
        h = self.Conv_1(h)
        if ch != out_ch:
            x = self.Conv_2(x) if self.conv_shortcut else self.NIN_0(x)
        return x + h
DiffPure | DiffPure-master/score_sde/models/ddpm.py | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: skip-file
"""DDPM model.
This code is the pytorch equivalent of:
https://github.com/hojonathanho/diffusion/blob/master/diffusion_tf/models/unet.py
"""
import torch
import torch.nn as nn
import functools
from . import utils, layers, normalization
RefineBlock = layers.RefineBlock
ResidualBlock = layers.ResidualBlock
ResnetBlockDDPM = layers.ResnetBlockDDPM
Upsample = layers.Upsample
Downsample = layers.Downsample
conv3x3 = layers.ddpm_conv3x3
get_act = layers.get_act
get_normalization = normalization.get_normalization
default_initializer = layers.default_init
@utils.register_model(name='ddpm')
class DDPM(nn.Module):
    """DDPM U-Net (PyTorch port of Ho et al.'s TF implementation).

    The network is stored as one flat `nn.ModuleList` (`self.all_modules`);
    `forward` walks it with a running index `m_idx`, so the construction order
    here and the consumption order in `forward` must match exactly.
    """

    def __init__(self, config):
        super().__init__()
        self.act = act = get_act(config)
        # Noise scales; a buffer so they move with the model's device/dtype.
        self.register_buffer('sigmas', torch.tensor(utils.get_sigmas(config)))
        self.nf = nf = config.model.nf
        ch_mult = config.model.ch_mult
        self.num_res_blocks = num_res_blocks = config.model.num_res_blocks
        self.attn_resolutions = attn_resolutions = config.model.attn_resolutions
        dropout = config.model.dropout
        resamp_with_conv = config.model.resamp_with_conv
        self.num_resolutions = num_resolutions = len(ch_mult)
        # Spatial resolution at each depth level (halved per level).
        self.all_resolutions = all_resolutions = [config.data.image_size // (2 ** i) for i in range(num_resolutions)]
        AttnBlock = functools.partial(layers.AttnBlock)
        self.conditional = conditional = config.model.conditional
        ResnetBlock = functools.partial(ResnetBlockDDPM, act=act, temb_dim=4 * nf, dropout=dropout)
        # NOTE(review): `modules` is only created inside this branch, so
        # conditional=False would raise NameError below -- presumably the ddpm
        # configs always set conditional=True; confirm before relying on it.
        if conditional:
            # Condition on noise levels.
            modules = [nn.Linear(nf, nf * 4)]
            modules[0].weight.data = default_initializer()(modules[0].weight.data.shape)
            nn.init.zeros_(modules[0].bias)
            modules.append(nn.Linear(nf * 4, nf * 4))
            modules[1].weight.data = default_initializer()(modules[1].weight.data.shape)
            nn.init.zeros_(modules[1].bias)
        self.centered = config.data.centered
        channels = config.data.num_channels
        # Downsampling block
        modules.append(conv3x3(channels, nf))
        hs_c = [nf]  # channel counts of the skip connections, in push order
        in_ch = nf
        for i_level in range(num_resolutions):
            # Residual blocks for this resolution
            for i_block in range(num_res_blocks):
                out_ch = nf * ch_mult[i_level]
                modules.append(ResnetBlock(in_ch=in_ch, out_ch=out_ch))
                in_ch = out_ch
                if all_resolutions[i_level] in attn_resolutions:
                    modules.append(AttnBlock(channels=in_ch))
                hs_c.append(in_ch)
            if i_level != num_resolutions - 1:
                modules.append(Downsample(channels=in_ch, with_conv=resamp_with_conv))
                hs_c.append(in_ch)
        # Bottleneck: ResNet -> attention -> ResNet at the lowest resolution.
        in_ch = hs_c[-1]
        modules.append(ResnetBlock(in_ch=in_ch))
        modules.append(AttnBlock(channels=in_ch))
        modules.append(ResnetBlock(in_ch=in_ch))
        # Upsampling block
        for i_level in reversed(range(num_resolutions)):
            for i_block in range(num_res_blocks + 1):
                out_ch = nf * ch_mult[i_level]
                # Each up block consumes one skip connection (popped in reverse).
                modules.append(ResnetBlock(in_ch=in_ch + hs_c.pop(), out_ch=out_ch))
                in_ch = out_ch
            if all_resolutions[i_level] in attn_resolutions:
                modules.append(AttnBlock(channels=in_ch))
            if i_level != 0:
                modules.append(Upsample(channels=in_ch, with_conv=resamp_with_conv))
        assert not hs_c  # every skip connection must be consumed
        modules.append(nn.GroupNorm(num_channels=in_ch, num_groups=32, eps=1e-6))
        modules.append(conv3x3(in_ch, channels, init_scale=0.))
        self.all_modules = nn.ModuleList(modules)
        self.scale_by_sigma = config.model.scale_by_sigma

    def forward(self, x, labels):
        """Run the U-Net; `labels` are noise-level indices into `self.sigmas`."""
        modules = self.all_modules
        m_idx = 0  # running index into the flat module list -- order-critical
        if self.conditional:
            # timestep/scale embedding
            timesteps = labels
            temb = layers.get_timestep_embedding(timesteps, self.nf)
            temb = modules[m_idx](temb)
            m_idx += 1
            temb = modules[m_idx](self.act(temb))
            m_idx += 1
        else:
            temb = None
        if self.centered:
            # Input is in [-1, 1]
            h = x
        else:
            # Input is in [0, 1]
            h = 2 * x - 1.
        # Downsampling block
        hs = [modules[m_idx](h)]
        m_idx += 1
        for i_level in range(self.num_resolutions):
            # Residual blocks for this resolution
            for i_block in range(self.num_res_blocks):
                h = modules[m_idx](hs[-1], temb)
                m_idx += 1
                if h.shape[-1] in self.attn_resolutions:
                    h = modules[m_idx](h)
                    m_idx += 1
                hs.append(h)
            if i_level != self.num_resolutions - 1:
                hs.append(modules[m_idx](hs[-1]))
                m_idx += 1
        # Bottleneck: ResNet -> attention -> ResNet.
        h = hs[-1]
        h = modules[m_idx](h, temb)
        m_idx += 1
        h = modules[m_idx](h)
        m_idx += 1
        h = modules[m_idx](h, temb)
        m_idx += 1
        # Upsampling block
        for i_level in reversed(range(self.num_resolutions)):
            for i_block in range(self.num_res_blocks + 1):
                # Concatenate the matching skip connection along channels.
                h = modules[m_idx](torch.cat([h, hs.pop()], dim=1), temb)
                m_idx += 1
            if h.shape[-1] in self.attn_resolutions:
                h = modules[m_idx](h)
                m_idx += 1
            if i_level != 0:
                h = modules[m_idx](h)
                m_idx += 1
        assert not hs  # every skip connection must be consumed
        h = self.act(modules[m_idx](h))
        m_idx += 1
        h = modules[m_idx](h)
        m_idx += 1
        assert m_idx == len(modules)  # construction/consumption orders agree
        if self.scale_by_sigma:
            # Divide the output by sigmas. Useful for training with the NCSN loss.
            # The DDPM loss scales the network output by sigma in the loss function,
            # so no need of doing it here.
            used_sigmas = self.sigmas[labels, None, None, None]
            h = h / used_sigmas
        return h
| 6,082 | 32.423077 | 113 | py |
DiffPure | DiffPure-master/score_sde/models/ncsnv2.py | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: skip-file
"""The NCSNv2 model."""
import torch
import torch.nn as nn
import functools
from .utils import get_sigmas, register_model
from .layers import (CondRefineBlock, RefineBlock, ResidualBlock, ncsn_conv3x3,
ConditionalResidualBlock, get_act)
from .normalization import get_normalization
CondResidualBlock = ConditionalResidualBlock
conv3x3 = ncsn_conv3x3
def get_network(config):
    """Select the NCSNv2 variant appropriate for `config.data.image_size`.

    Returns a `functools.partial` that constructs the network with the
    config already bound.

    Raises:
        NotImplementedError: if no variant covers the requested image size.
    """
    size = config.data.image_size
    if size < 96:
        return functools.partial(NCSNv2, config=config)
    if size <= 128:  # i.e. 96 <= size <= 128
        return functools.partial(NCSNv2_128, config=config)
    if size <= 256:  # i.e. 128 < size <= 256
        return functools.partial(NCSNv2_256, config=config)
    raise NotImplementedError(
        f'No network suitable for {config.data.image_size}px implemented yet.')
@register_model(name='ncsnv2_64')
class NCSNv2(nn.Module):
    """NCSNv2 score network for images smaller than 96px.

    RefineNet-style encoder/decoder: four residual stages downsample the
    input, four RefineBlocks fuse the multi-scale features back up, and the
    final output is divided by the per-label noise scale `sigmas[y]`.
    """

    def __init__(self, config):
        super().__init__()
        self.centered = config.data.centered
        self.norm = get_normalization(config)
        self.nf = nf = config.model.nf
        self.act = act = get_act(config)
        # Per-scale noise levels; a buffer so they follow .to(device).
        self.register_buffer('sigmas', torch.tensor(get_sigmas(config)))
        self.config = config
        self.begin_conv = nn.Conv2d(config.data.channels, nf, 3, stride=1, padding=1)
        self.normalizer = self.norm(nf, config.model.num_scales)
        self.end_conv = nn.Conv2d(nf, config.data.channels, 3, stride=1, padding=1)
        # Encoder: res1 keeps resolution; res2-res4 each downsample 2x
        # (res3/res4 additionally use dilation).
        self.res1 = nn.ModuleList([
            ResidualBlock(self.nf, self.nf, resample=None, act=act,
                          normalization=self.norm),
            ResidualBlock(self.nf, self.nf, resample=None, act=act,
                          normalization=self.norm)]
        )
        self.res2 = nn.ModuleList([
            ResidualBlock(self.nf, 2 * self.nf, resample='down', act=act,
                          normalization=self.norm),
            ResidualBlock(2 * self.nf, 2 * self.nf, resample=None, act=act,
                          normalization=self.norm)]
        )
        self.res3 = nn.ModuleList([
            ResidualBlock(2 * self.nf, 2 * self.nf, resample='down', act=act,
                          normalization=self.norm, dilation=2),
            ResidualBlock(2 * self.nf, 2 * self.nf, resample=None, act=act,
                          normalization=self.norm, dilation=2)]
        )
        if config.data.image_size == 28:
            # 28px (e.g. MNIST) needs extra padding so odd sizes pool evenly.
            self.res4 = nn.ModuleList([
                ResidualBlock(2 * self.nf, 2 * self.nf, resample='down', act=act,
                              normalization=self.norm, adjust_padding=True, dilation=4),
                ResidualBlock(2 * self.nf, 2 * self.nf, resample=None, act=act,
                              normalization=self.norm, dilation=4)]
            )
        else:
            self.res4 = nn.ModuleList([
                ResidualBlock(2 * self.nf, 2 * self.nf, resample='down', act=act,
                              normalization=self.norm, adjust_padding=False, dilation=4),
                ResidualBlock(2 * self.nf, 2 * self.nf, resample=None, act=act,
                              normalization=self.norm, dilation=4)]
            )
        # Decoder: RefineBlocks fuse the encoder features coarsest-first.
        self.refine1 = RefineBlock([2 * self.nf], 2 * self.nf, act=act, start=True)
        self.refine2 = RefineBlock([2 * self.nf, 2 * self.nf], 2 * self.nf, act=act)
        self.refine3 = RefineBlock([2 * self.nf, 2 * self.nf], self.nf, act=act)
        self.refine4 = RefineBlock([self.nf, self.nf], self.nf, act=act, end=True)

    def _compute_cond_module(self, module, x):
        # Apply a ModuleList sequentially.
        for m in module:
            x = m(x)
        return x

    def forward(self, x, y):
        """Compute the network output for images `x` at noise-level indices `y`."""
        if not self.centered:
            # Rescale [0, 1] inputs to [-1, 1].
            h = 2 * x - 1.
        else:
            h = x
        output = self.begin_conv(h)
        layer1 = self._compute_cond_module(self.res1, output)
        layer2 = self._compute_cond_module(self.res2, layer1)
        layer3 = self._compute_cond_module(self.res3, layer2)
        layer4 = self._compute_cond_module(self.res4, layer3)
        ref1 = self.refine1([layer4], layer4.shape[2:])
        ref2 = self.refine2([layer3, ref1], layer3.shape[2:])
        ref3 = self.refine3([layer2, ref2], layer2.shape[2:])
        output = self.refine4([layer1, ref3], layer1.shape[2:])
        output = self.normalizer(output)
        output = self.act(output)
        output = self.end_conv(output)
        # Scale the output by 1/sigma for the selected noise level, broadcast
        # over all non-batch dimensions.
        used_sigmas = self.sigmas[y].view(x.shape[0], *([1] * len(x.shape[1:])))
        output = output / used_sigmas
        return output
@register_model(name='ncsn')
class NCSN(nn.Module):
    """NCSNv1 score network: class-conditional variant of `NCSNv2`.

    Same encoder/decoder topology, but every residual and refine block is
    conditioned on the noise-level label `y` via conditional normalization.
    Unlike `NCSNv2`, the output is NOT divided by sigma here.
    """

    def __init__(self, config):
        super().__init__()
        self.centered = config.data.centered
        self.norm = get_normalization(config)
        self.nf = nf = config.model.nf
        self.act = act = get_act(config)
        self.config = config
        self.begin_conv = nn.Conv2d(config.data.channels, nf, 3, stride=1, padding=1)
        self.normalizer = self.norm(nf, config.model.num_scales)
        self.end_conv = nn.Conv2d(nf, config.data.channels, 3, stride=1, padding=1)
        # Encoder: res1 keeps resolution; res2-res4 each downsample 2x
        # (res3/res4 additionally use dilation).
        self.res1 = nn.ModuleList([
            ConditionalResidualBlock(self.nf, self.nf, config.model.num_scales, resample=None, act=act,
                                     normalization=self.norm),
            ConditionalResidualBlock(self.nf, self.nf, config.model.num_scales, resample=None, act=act,
                                     normalization=self.norm)]
        )
        self.res2 = nn.ModuleList([
            ConditionalResidualBlock(self.nf, 2 * self.nf, config.model.num_scales, resample='down', act=act,
                                     normalization=self.norm),
            ConditionalResidualBlock(2 * self.nf, 2 * self.nf, config.model.num_scales, resample=None, act=act,
                                     normalization=self.norm)]
        )
        self.res3 = nn.ModuleList([
            ConditionalResidualBlock(2 * self.nf, 2 * self.nf, config.model.num_scales, resample='down', act=act,
                                     normalization=self.norm, dilation=2),
            ConditionalResidualBlock(2 * self.nf, 2 * self.nf, config.model.num_scales, resample=None, act=act,
                                     normalization=self.norm, dilation=2)]
        )
        if config.data.image_size == 28:
            # 28px (e.g. MNIST) needs extra padding so odd sizes pool evenly.
            self.res4 = nn.ModuleList([
                ConditionalResidualBlock(2 * self.nf, 2 * self.nf, config.model.num_scales, resample='down', act=act,
                                         normalization=self.norm, adjust_padding=True, dilation=4),
                ConditionalResidualBlock(2 * self.nf, 2 * self.nf, config.model.num_scales, resample=None, act=act,
                                         normalization=self.norm, dilation=4)]
            )
        else:
            self.res4 = nn.ModuleList([
                ConditionalResidualBlock(2 * self.nf, 2 * self.nf, config.model.num_scales, resample='down', act=act,
                                         normalization=self.norm, adjust_padding=False, dilation=4),
                ConditionalResidualBlock(2 * self.nf, 2 * self.nf, config.model.num_scales, resample=None, act=act,
                                         normalization=self.norm, dilation=4)]
            )
        # Decoder: conditional RefineBlocks fuse encoder features coarsest-first.
        self.refine1 = CondRefineBlock([2 * self.nf], 2 * self.nf, config.model.num_scales, self.norm, act=act, start=True)
        self.refine2 = CondRefineBlock([2 * self.nf, 2 * self.nf], 2 * self.nf, config.model.num_scales, self.norm, act=act)
        self.refine3 = CondRefineBlock([2 * self.nf, 2 * self.nf], self.nf, config.model.num_scales, self.norm, act=act)
        self.refine4 = CondRefineBlock([self.nf, self.nf], self.nf, config.model.num_scales, self.norm, act=act, end=True)

    def _compute_cond_module(self, module, x, y):
        # Apply a ModuleList sequentially, passing the condition `y` through.
        for m in module:
            x = m(x, y)
        return x

    def forward(self, x, y):
        """Compute the network output for images `x` conditioned on labels `y`."""
        if not self.centered:
            # Rescale [0, 1] inputs to [-1, 1].
            h = 2 * x - 1.
        else:
            h = x
        output = self.begin_conv(h)
        layer1 = self._compute_cond_module(self.res1, output, y)
        layer2 = self._compute_cond_module(self.res2, layer1, y)
        layer3 = self._compute_cond_module(self.res3, layer2, y)
        layer4 = self._compute_cond_module(self.res4, layer3, y)
        ref1 = self.refine1([layer4], y, layer4.shape[2:])
        ref2 = self.refine2([layer3, ref1], y, layer3.shape[2:])
        ref3 = self.refine3([layer2, ref2], y, layer2.shape[2:])
        output = self.refine4([layer1, ref3], y, layer1.shape[2:])
        output = self.normalizer(output, y)
        output = self.act(output)
        output = self.end_conv(output)
        return output
@register_model(name='ncsnv2_128')
class NCSNv2_128(nn.Module):
    """NCSNv2 model architecture for 128px images."""

    def __init__(self, config):
        super().__init__()
        # NOTE: construction order below is significant — it fixes both the
        # parameter registration order and the RNG draws consumed by weight
        # initialization, so it must not be reordered (checkpoint compat).
        self.centered = config.data.centered  # whether inputs are already in [-1, 1]
        self.norm = get_normalization(config)
        self.nf = nf = config.model.nf  # base channel count
        self.act = act = get_act(config)
        # Noise scales; a buffer so they follow .to(device) and are checkpointed
        # without being trained.
        self.register_buffer('sigmas', torch.tensor(get_sigmas(config)))
        self.config = config

        self.begin_conv = nn.Conv2d(config.data.channels, nf, 3, stride=1, padding=1)
        self.normalizer = self.norm(nf, config.model.num_scales)
        self.end_conv = nn.Conv2d(nf, config.data.channels, 3, stride=1, padding=1)

        # Encoder: five stages of residual blocks; stages 2-5 downsample, the
        # last two use dilation to enlarge the receptive field.
        self.res1 = nn.ModuleList([
            ResidualBlock(self.nf, self.nf, resample=None, act=act,
                          normalization=self.norm),
            ResidualBlock(self.nf, self.nf, resample=None, act=act,
                          normalization=self.norm)]
        )

        self.res2 = nn.ModuleList([
            ResidualBlock(self.nf, 2 * self.nf, resample='down', act=act,
                          normalization=self.norm),
            ResidualBlock(2 * self.nf, 2 * self.nf, resample=None, act=act,
                          normalization=self.norm)]
        )

        self.res3 = nn.ModuleList([
            ResidualBlock(2 * self.nf, 2 * self.nf, resample='down', act=act,
                          normalization=self.norm),
            ResidualBlock(2 * self.nf, 2 * self.nf, resample=None, act=act,
                          normalization=self.norm)]
        )

        self.res4 = nn.ModuleList([
            ResidualBlock(2 * self.nf, 4 * self.nf, resample='down', act=act,
                          normalization=self.norm, dilation=2),
            ResidualBlock(4 * self.nf, 4 * self.nf, resample=None, act=act,
                          normalization=self.norm, dilation=2)]
        )

        self.res5 = nn.ModuleList([
            ResidualBlock(4 * self.nf, 4 * self.nf, resample='down', act=act,
                          normalization=self.norm, dilation=4),
            ResidualBlock(4 * self.nf, 4 * self.nf, resample=None, act=act,
                          normalization=self.norm, dilation=4)]
        )

        # Decoder: RefineNet-style cascade fusing each encoder stage back in.
        self.refine1 = RefineBlock([4 * self.nf], 4 * self.nf, act=act, start=True)
        self.refine2 = RefineBlock([4 * self.nf, 4 * self.nf], 2 * self.nf, act=act)
        self.refine3 = RefineBlock([2 * self.nf, 2 * self.nf], 2 * self.nf, act=act)
        self.refine4 = RefineBlock([2 * self.nf, 2 * self.nf], self.nf, act=act)
        self.refine5 = RefineBlock([self.nf, self.nf], self.nf, act=act, end=True)

    def _compute_cond_module(self, module, x):
        # Apply a ModuleList sequentially (unconditional variant: no `y`).
        for m in module:
            x = m(x)
        return x

    def forward(self, x, y):
        # x: image batch; y: integer noise-scale indices into self.sigmas.
        if not self.centered:
            h = 2 * x - 1.  # rescale [0, 1] inputs to [-1, 1]
        else:
            h = x

        output = self.begin_conv(h)

        layer1 = self._compute_cond_module(self.res1, output)
        layer2 = self._compute_cond_module(self.res2, layer1)
        layer3 = self._compute_cond_module(self.res3, layer2)
        layer4 = self._compute_cond_module(self.res4, layer3)
        layer5 = self._compute_cond_module(self.res5, layer4)

        ref1 = self.refine1([layer5], layer5.shape[2:])
        ref2 = self.refine2([layer4, ref1], layer4.shape[2:])
        ref3 = self.refine3([layer3, ref2], layer3.shape[2:])
        ref4 = self.refine4([layer2, ref3], layer2.shape[2:])
        output = self.refine5([layer1, ref4], layer1.shape[2:])

        output = self.normalizer(output)
        output = self.act(output)
        output = self.end_conv(output)

        # NCSNv2 parameterization: the raw output is divided by sigma(y).
        used_sigmas = self.sigmas[y].view(x.shape[0], *([1] * len(x.shape[1:])))

        output = output / used_sigmas

        return output
@register_model(name='ncsnv2_256')
class NCSNv2_256(nn.Module):
    """NCSNv2 model architecture for 256px images.

    Same design as NCSNv2_128 with one extra encoder stage (`res31`) and the
    matching refine block (`refine31`). Fix: the final ``return output`` line
    had a fused table artifact (``| 16,043 | …``) appended; removed here.
    """

    def __init__(self, config):
        super().__init__()
        # NOTE: construction order is significant — it fixes the parameter
        # registration order and the RNG draws used by weight init, so it must
        # not be reordered (checkpoint compatibility).
        self.centered = config.data.centered  # whether inputs are already in [-1, 1]
        self.norm = get_normalization(config)
        self.nf = nf = config.model.nf  # base channel count
        self.act = act = get_act(config)
        # Noise scales; buffer so they follow .to(device) and are checkpointed.
        self.register_buffer('sigmas', torch.tensor(get_sigmas(config)))
        self.config = config

        self.begin_conv = nn.Conv2d(config.data.channels, nf, 3, stride=1, padding=1)
        self.normalizer = self.norm(nf, config.model.num_scales)
        self.end_conv = nn.Conv2d(nf, config.data.channels, 3, stride=1, padding=1)

        # Encoder: six stages; stages 2-5 (and 31) downsample, the deepest two
        # use dilation to enlarge the receptive field.
        self.res1 = nn.ModuleList([
            ResidualBlock(self.nf, self.nf, resample=None, act=act,
                          normalization=self.norm),
            ResidualBlock(self.nf, self.nf, resample=None, act=act,
                          normalization=self.norm)]
        )

        self.res2 = nn.ModuleList([
            ResidualBlock(self.nf, 2 * self.nf, resample='down', act=act,
                          normalization=self.norm),
            ResidualBlock(2 * self.nf, 2 * self.nf, resample=None, act=act,
                          normalization=self.norm)]
        )

        self.res3 = nn.ModuleList([
            ResidualBlock(2 * self.nf, 2 * self.nf, resample='down', act=act,
                          normalization=self.norm),
            ResidualBlock(2 * self.nf, 2 * self.nf, resample=None, act=act,
                          normalization=self.norm)]
        )

        # Extra stage for the larger 256px resolution.
        self.res31 = nn.ModuleList([
            ResidualBlock(2 * self.nf, 2 * self.nf, resample='down', act=act,
                          normalization=self.norm),
            ResidualBlock(2 * self.nf, 2 * self.nf, resample=None, act=act,
                          normalization=self.norm)]
        )

        self.res4 = nn.ModuleList([
            ResidualBlock(2 * self.nf, 4 * self.nf, resample='down', act=act,
                          normalization=self.norm, dilation=2),
            ResidualBlock(4 * self.nf, 4 * self.nf, resample=None, act=act,
                          normalization=self.norm, dilation=2)]
        )

        self.res5 = nn.ModuleList([
            ResidualBlock(4 * self.nf, 4 * self.nf, resample='down', act=act,
                          normalization=self.norm, dilation=4),
            ResidualBlock(4 * self.nf, 4 * self.nf, resample=None, act=act,
                          normalization=self.norm, dilation=4)]
        )

        # Decoder: RefineNet cascade. NOTE: refine31 is *applied* between
        # refine2 and refine3 in forward(), despite being constructed here
        # after refine3 (ordering kept for checkpoint compatibility).
        self.refine1 = RefineBlock([4 * self.nf], 4 * self.nf, act=act, start=True)
        self.refine2 = RefineBlock([4 * self.nf, 4 * self.nf], 2 * self.nf, act=act)
        self.refine3 = RefineBlock([2 * self.nf, 2 * self.nf], 2 * self.nf, act=act)
        self.refine31 = RefineBlock([2 * self.nf, 2 * self.nf], 2 * self.nf, act=act)
        self.refine4 = RefineBlock([2 * self.nf, 2 * self.nf], self.nf, act=act)
        self.refine5 = RefineBlock([self.nf, self.nf], self.nf, act=act, end=True)

    def _compute_cond_module(self, module, x):
        # Apply a ModuleList sequentially (unconditional variant: no `y`).
        for m in module:
            x = m(x)
        return x

    def forward(self, x, y):
        # x: image batch; y: integer noise-scale indices into self.sigmas.
        if not self.centered:
            h = 2 * x - 1.  # rescale [0, 1] inputs to [-1, 1]
        else:
            h = x

        output = self.begin_conv(h)

        layer1 = self._compute_cond_module(self.res1, output)
        layer2 = self._compute_cond_module(self.res2, layer1)
        layer3 = self._compute_cond_module(self.res3, layer2)
        layer31 = self._compute_cond_module(self.res31, layer3)
        layer4 = self._compute_cond_module(self.res4, layer31)
        layer5 = self._compute_cond_module(self.res5, layer4)

        ref1 = self.refine1([layer5], layer5.shape[2:])
        ref2 = self.refine2([layer4, ref1], layer4.shape[2:])
        ref31 = self.refine31([layer31, ref2], layer31.shape[2:])
        ref3 = self.refine3([layer3, ref31], layer3.shape[2:])
        ref4 = self.refine4([layer2, ref3], layer2.shape[2:])
        output = self.refine5([layer1, ref4], layer1.shape[2:])

        output = self.normalizer(output)
        output = self.act(output)
        output = self.end_conv(output)

        # NCSNv2 parameterization: the raw output is divided by sigma(y).
        used_sigmas = self.sigmas[y].view(x.shape[0], *([1] * len(x.shape[1:])))

        output = output / used_sigmas

        return output
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Normalization layers."""
import torch.nn as nn
import torch
import functools
def get_normalization(config, conditional=False):
    """Resolve the normalization layer class requested in the config.

    Args:
        config: config object; reads ``config.model.normalization`` (and
            ``config.model.num_classes`` for the conditional variant).
        conditional: if True, return a class-conditional normalization
            (only 'InstanceNorm++' is supported conditionally).

    Returns:
        A ``nn.Module`` subclass (or a ``functools.partial`` of one).

    Raises:
        NotImplementedError: conditional variant requested for an
            unsupported normalization.
        ValueError: unknown normalization name.
    """
    norm = config.model.normalization
    if conditional:
        if norm == 'InstanceNorm++':
            return functools.partial(ConditionalInstanceNorm2dPlus,
                                     num_classes=config.model.num_classes)
        raise NotImplementedError(f'{norm} not implemented yet.')

    if norm == 'InstanceNorm':
        return nn.InstanceNorm2d
    if norm == 'InstanceNorm++':
        return InstanceNorm2dPlus
    if norm == 'VarianceNorm':
        return VarianceNorm2d
    if norm == 'GroupNorm':
        return nn.GroupNorm
    raise ValueError('Unknown normalization: %s' % norm)
class ConditionalBatchNorm2d(nn.Module):
    """BatchNorm2d whose affine scale/shift are looked up from a class embedding."""

    def __init__(self, num_features, num_classes, bias=True):
        super().__init__()
        self.num_features = num_features
        self.bias = bias
        # Affine parameters come from the class embedding, so BN is non-affine.
        self.bn = nn.BatchNorm2d(num_features, affine=False)
        if self.bias:
            # One embedding row per class: [gamma (num_features) | beta (num_features)].
            self.embed = nn.Embedding(num_classes, num_features * 2)
            self.embed.weight.data[:, :num_features].uniform_()  # scale init: U[0, 1) (upstream comment claimed N(1, 0.02), but uniform_() is used)
            self.embed.weight.data[:, num_features:].zero_()  # Initialise bias at 0
        else:
            self.embed = nn.Embedding(num_classes, num_features)
            self.embed.weight.data.uniform_()  # scale init: U[0, 1)

    def forward(self, x, y):
        # x: (B, C, H, W) feature map; y: (B,) integer class labels.
        out = self.bn(x)
        if self.bias:
            gamma, beta = self.embed(y).chunk(2, dim=1)
            out = gamma.view(-1, self.num_features, 1, 1) * out + beta.view(-1, self.num_features, 1, 1)
        else:
            gamma = self.embed(y)
            out = gamma.view(-1, self.num_features, 1, 1) * out
        return out
class ConditionalInstanceNorm2d(nn.Module):
    """InstanceNorm2d whose affine scale/shift come from a class embedding."""

    def __init__(self, num_features, num_classes, bias=True):
        super().__init__()
        self.num_features = num_features
        self.bias = bias
        self.instance_norm = nn.InstanceNorm2d(num_features, affine=False, track_running_stats=False)
        if bias:
            # One embedding row per class: [gamma (num_features) | beta (num_features)].
            self.embed = nn.Embedding(num_classes, num_features * 2)
            self.embed.weight.data[:, :num_features].uniform_()  # scale init: U[0, 1) (upstream comment claimed N(1, 0.02), but uniform_() is used)
            self.embed.weight.data[:, num_features:].zero_()  # Initialise bias at 0
        else:
            self.embed = nn.Embedding(num_classes, num_features)
            self.embed.weight.data.uniform_()  # scale init: U[0, 1)

    def forward(self, x, y):
        # x: (B, C, H, W); y: (B,) class labels.
        h = self.instance_norm(x)
        if self.bias:
            gamma, beta = self.embed(y).chunk(2, dim=-1)
            out = gamma.view(-1, self.num_features, 1, 1) * h + beta.view(-1, self.num_features, 1, 1)
        else:
            gamma = self.embed(y)
            out = gamma.view(-1, self.num_features, 1, 1) * h
        return out
class ConditionalVarianceNorm2d(nn.Module):
    """Variance-only normalization with a per-class learned scale.

    Divides each (sample, channel) plane by its spatial standard deviation
    (no mean subtraction), then multiplies by a class-dependent gamma.
    """

    def __init__(self, num_features, num_classes, bias=False):
        super().__init__()
        self.num_features = num_features
        self.bias = bias  # accepted for interface parity; no beta is learned
        self.embed = nn.Embedding(num_classes, num_features)
        self.embed.weight.data.normal_(1, 0.02)  # gammas start near 1

    def forward(self, x, y):
        variance = x.var(dim=(2, 3), keepdim=True)
        normalized = x / torch.sqrt(variance + 1e-5)
        scale = self.embed(y).view(-1, self.num_features, 1, 1)
        return scale * normalized
class VarianceNorm2d(nn.Module):
    """Variance-only normalization: divide each (sample, channel) plane by its
    spatial standard deviation (no mean subtraction), then scale by a learned
    per-channel alpha."""

    def __init__(self, num_features, bias=False):
        super().__init__()
        self.num_features = num_features
        self.bias = bias  # accepted for interface parity; no shift is learned
        self.alpha = nn.Parameter(torch.zeros(num_features))
        self.alpha.data.normal_(1, 0.02)  # scales start near 1

    def forward(self, x):
        variance = x.var(dim=(2, 3), keepdim=True)
        normalized = x / torch.sqrt(variance + 1e-5)
        return self.alpha.view(-1, self.num_features, 1, 1) * normalized
class ConditionalNoneNorm2d(nn.Module):
    """Class-conditional affine transform with no actual normalization:
    out = gamma(y) * x (+ beta(y) when `bias`)."""

    def __init__(self, num_features, num_classes, bias=True):
        super().__init__()
        self.num_features = num_features
        self.bias = bias
        if bias:
            # One row per class: [gamma (num_features) | beta (num_features)].
            self.embed = nn.Embedding(num_classes, num_features * 2)
            self.embed.weight.data[:, :num_features].uniform_()  # gammas from U[0, 1)
            self.embed.weight.data[:, num_features:].zero_()     # betas start at 0
        else:
            self.embed = nn.Embedding(num_classes, num_features)
            self.embed.weight.data.uniform_()

    def forward(self, x, y):
        if not self.bias:
            gamma = self.embed(y).view(-1, self.num_features, 1, 1)
            return gamma * x
        gamma, beta = self.embed(y).chunk(2, dim=-1)
        gamma = gamma.view(-1, self.num_features, 1, 1)
        beta = beta.view(-1, self.num_features, 1, 1)
        return gamma * x + beta
class NoneNorm2d(nn.Module):
    """Identity stand-in used when no normalization is wanted."""

    def __init__(self, num_features, bias=True):
        # Arguments are accepted only for interface compatibility with the
        # other normalization classes; they are ignored.
        super().__init__()

    def forward(self, x):
        return x
class InstanceNorm2dPlus(nn.Module):
    """InstanceNorm++: instance norm plus a channel-normalized mean term.

    Plain instance norm discards each channel's spatial mean; the alpha path
    re-injects those means after normalizing them across channels.
    """

    def __init__(self, num_features, bias=True):
        super().__init__()
        self.num_features = num_features
        self.bias = bias
        self.instance_norm = nn.InstanceNorm2d(num_features, affine=False, track_running_stats=False)
        self.alpha = nn.Parameter(torch.zeros(num_features))
        self.gamma = nn.Parameter(torch.zeros(num_features))
        # Both scales initialised near 1.
        self.alpha.data.normal_(1, 0.02)
        self.gamma.data.normal_(1, 0.02)
        if bias:
            self.beta = nn.Parameter(torch.zeros(num_features))

    def forward(self, x):
        # Per-(sample, channel) spatial means, normalized across channels.
        means = torch.mean(x, dim=(2, 3))
        m = torch.mean(means, dim=-1, keepdim=True)
        v = torch.var(means, dim=-1, keepdim=True)
        means = (means - m) / (torch.sqrt(v + 1e-5))
        h = self.instance_norm(x)

        if self.bias:
            h = h + means[..., None, None] * self.alpha[..., None, None]
            out = self.gamma.view(-1, self.num_features, 1, 1) * h + self.beta.view(-1, self.num_features, 1, 1)
        else:
            h = h + means[..., None, None] * self.alpha[..., None, None]
            out = self.gamma.view(-1, self.num_features, 1, 1) * h
        return out
class ConditionalInstanceNorm2dPlus(nn.Module):
    """Class-conditional InstanceNorm++ (see InstanceNorm2dPlus): gamma, alpha
    and beta are looked up from a per-class embedding instead of being shared
    parameters."""

    def __init__(self, num_features, num_classes, bias=True):
        super().__init__()
        self.num_features = num_features
        self.bias = bias
        self.instance_norm = nn.InstanceNorm2d(num_features, affine=False, track_running_stats=False)
        if bias:
            # One row per class: [gamma | alpha | beta], each num_features wide.
            self.embed = nn.Embedding(num_classes, num_features * 3)
            self.embed.weight.data[:, :2 * num_features].normal_(1, 0.02)  # Initialise scale at N(1, 0.02)
            self.embed.weight.data[:, 2 * num_features:].zero_()  # Initialise bias at 0
        else:
            self.embed = nn.Embedding(num_classes, 2 * num_features)
            self.embed.weight.data.normal_(1, 0.02)

    def forward(self, x, y):
        # Per-(sample, channel) spatial means, normalized across channels
        # (the "++" part of InstanceNorm++).
        means = torch.mean(x, dim=(2, 3))
        m = torch.mean(means, dim=-1, keepdim=True)
        v = torch.var(means, dim=-1, keepdim=True)
        means = (means - m) / (torch.sqrt(v + 1e-5))
        h = self.instance_norm(x)

        if self.bias:
            gamma, alpha, beta = self.embed(y).chunk(3, dim=-1)
            h = h + means[..., None, None] * alpha[..., None, None]
            out = gamma.view(-1, self.num_features, 1, 1) * h + beta.view(-1, self.num_features, 1, 1)
        else:
            gamma, alpha = self.embed(y).chunk(2, dim=-1)
            h = h + means[..., None, None] * alpha[..., None, None]
            out = gamma.view(-1, self.num_features, 1, 1) * h
        return out
| 7,657 | 34.453704 | 106 | py |
# ---------------------------------------------------------------
# Taken from the following link as is from:
# https://github.com/yang-song/score_sde_pytorch/blob/main/models/ema.py
#
# The license for the original version of this file can be
# found in the `score_sde` directory (LICENSE_SCORE_SDE).
# ---------------------------------------------------------------
# Modified from https://raw.githubusercontent.com/fadel/pytorch_ema/master/torch_ema/ema.py
from __future__ import division
from __future__ import unicode_literals
import torch
# Partially based on: https://github.com/tensorflow/tensorflow/blob/r1.13/tensorflow/python/training/moving_averages.py
class ExponentialMovingAverage:
    """
    Maintains (exponential) moving average of a set of parameters.

    Fix: the final line of ``load_state_dict`` was corrupted by a fused table
    artifact (``| 3,783 | …``); restored here. Also removed a redundant
    ``requires_grad`` re-check in ``copy_to`` (the list is already filtered).
    """

    def __init__(self, parameters, decay, use_num_updates=True):
        """
        Args:
          parameters: Iterable of `torch.nn.Parameter`; usually the result of
            `model.parameters()`.
          decay: The exponential decay. Must lie in [0, 1].
          use_num_updates: Whether to use number of updates when computing
            averages (warms the effective decay up from small values).

        Raises:
          ValueError: if `decay` is outside [0, 1].
        """
        if decay < 0.0 or decay > 1.0:
            raise ValueError('Decay must be between 0 and 1')
        self.decay = decay
        self.num_updates = 0 if use_num_updates else None
        # Only trainable parameters are tracked.
        self.shadow_params = [p.clone().detach()
                              for p in parameters if p.requires_grad]
        self.collected_params = []

    def update(self, parameters):
        """
        Update currently maintained parameters.

        Call this every time the parameters are updated, such as the result of
        the `optimizer.step()` call.

        Args:
          parameters: Iterable of `torch.nn.Parameter`; usually the same set of
            parameters used to initialize this object.
        """
        decay = self.decay
        if self.num_updates is not None:
            self.num_updates += 1
            # Warm-up: effective decay ramps from ~0.18 toward `decay`.
            decay = min(decay, (1 + self.num_updates) / (10 + self.num_updates))
        one_minus_decay = 1.0 - decay
        with torch.no_grad():
            parameters = [p for p in parameters if p.requires_grad]
            for s_param, param in zip(self.shadow_params, parameters):
                # s <- s - (1 - decay) * (s - p)  ==  decay * s + (1 - decay) * p
                s_param.sub_(one_minus_decay * (s_param - param))

    def copy_to(self, parameters):
        """
        Copy current averaged parameters into given collection of parameters.

        Args:
          parameters: Iterable of `torch.nn.Parameter`; the parameters to be
            updated with the stored moving averages.
        """
        parameters = [p for p in parameters if p.requires_grad]
        for s_param, param in zip(self.shadow_params, parameters):
            param.data.copy_(s_param.data)

    def store(self, parameters):
        """
        Save the current parameters for restoring later.

        Args:
          parameters: Iterable of `torch.nn.Parameter`; the parameters to be
            temporarily stored.
        """
        self.collected_params = [param.clone() for param in parameters]

    def restore(self, parameters):
        """
        Restore the parameters stored with the `store` method.

        Useful to validate the model with EMA parameters without affecting the
        original optimization process. Store the parameters before the
        `copy_to` method. After validation (or model saving), use this to
        restore the former parameters.

        Args:
          parameters: Iterable of `torch.nn.Parameter`; the parameters to be
            updated with the stored parameters.
        """
        for c_param, param in zip(self.collected_params, parameters):
            param.data.copy_(c_param.data)

    def state_dict(self):
        # NOTE: returns the live shadow tensor list (not a copy).
        return dict(decay=self.decay, num_updates=self.num_updates,
                    shadow_params=self.shadow_params)

    def load_state_dict(self, state_dict):
        self.decay = state_dict['decay']
        self.num_updates = state_dict['num_updates']
        self.shadow_params = state_dict['shadow_params']
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: skip-file
from . import utils, layers, layerspp, normalization
import torch.nn as nn
import functools
import torch
import numpy as np
ResnetBlockDDPM = layerspp.ResnetBlockDDPMpp
ResnetBlockBigGAN = layerspp.ResnetBlockBigGANpp
Combine = layerspp.Combine
conv3x3 = layerspp.conv3x3
conv1x1 = layerspp.conv1x1
get_act = layers.get_act
get_normalization = normalization.get_normalization
default_initializer = layers.default_init
@utils.register_model(name='ncsnpp')
class NCSNpp(nn.Module):
    """NCSN++ model (U-Net with optional FIR resampling and progressive paths).

    IMPORTANT: all sub-modules live in one flat ``self.all_modules`` list;
    ``forward`` walks that list with an ``m_idx`` cursor. The construction
    order in ``__init__`` and the consumption order in ``forward`` must stay
    in exact lock-step — do not reorder either side independently.
    """

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.act = act = get_act(config)
        # Noise scales; buffer so they follow .to(device) and are checkpointed.
        self.register_buffer('sigmas', torch.tensor(utils.get_sigmas(config)))

        self.nf = nf = config.model.nf  # base channel count
        ch_mult = config.model.ch_mult  # per-resolution channel multipliers
        self.num_res_blocks = num_res_blocks = config.model.num_res_blocks
        self.attn_resolutions = attn_resolutions = config.model.attn_resolutions
        dropout = config.model.dropout
        resamp_with_conv = config.model.resamp_with_conv
        self.num_resolutions = num_resolutions = len(ch_mult)
        # Spatial size at each level: image_size, image_size/2, image_size/4, ...
        self.all_resolutions = all_resolutions = [config.data.image_size // (2 ** i) for i in range(num_resolutions)]

        self.conditional = conditional = config.model.conditional  # noise-conditional
        fir = config.model.fir  # use StyleGAN-2 FIR kernels for up/down sampling
        fir_kernel = config.model.fir_kernel
        self.skip_rescale = skip_rescale = config.model.skip_rescale
        self.resblock_type = resblock_type = config.model.resblock_type.lower()
        self.progressive = progressive = config.model.progressive.lower()
        self.progressive_input = progressive_input = config.model.progressive_input.lower()
        self.embedding_type = embedding_type = config.model.embedding_type.lower()
        init_scale = config.model.init_scale
        assert progressive in ['none', 'output_skip', 'residual']
        assert progressive_input in ['none', 'input_skip', 'residual']
        assert embedding_type in ['fourier', 'positional']
        combine_method = config.model.progressive_combine.lower()
        combiner = functools.partial(Combine, method=combine_method)

        modules = []
        # timestep/noise_level embedding; only for continuous training
        if embedding_type == 'fourier':
            # Gaussian Fourier features embeddings.
            assert config.training.continuous, "Fourier features are only used for continuous training."

            modules.append(layerspp.GaussianFourierProjection(
                embedding_size=nf, scale=config.model.fourier_scale
            ))
            embed_dim = 2 * nf  # sin + cos halves

        elif embedding_type == 'positional':
            embed_dim = nf

        else:
            raise ValueError(f'embedding type {embedding_type} unknown.')

        if conditional:
            # Two-layer MLP mapping the raw embedding to temb (dim nf * 4).
            modules.append(nn.Linear(embed_dim, nf * 4))
            modules[-1].weight.data = default_initializer()(modules[-1].weight.shape)
            nn.init.zeros_(modules[-1].bias)
            modules.append(nn.Linear(nf * 4, nf * 4))
            modules[-1].weight.data = default_initializer()(modules[-1].weight.shape)
            nn.init.zeros_(modules[-1].bias)

        AttnBlock = functools.partial(layerspp.AttnBlockpp,
                                      init_scale=init_scale,
                                      skip_rescale=skip_rescale)

        Upsample = functools.partial(layerspp.Upsample,
                                     with_conv=resamp_with_conv, fir=fir, fir_kernel=fir_kernel)

        if progressive == 'output_skip':
            self.pyramid_upsample = layerspp.Upsample(fir=fir, fir_kernel=fir_kernel, with_conv=False)
        elif progressive == 'residual':
            pyramid_upsample = functools.partial(layerspp.Upsample,
                                                 fir=fir, fir_kernel=fir_kernel, with_conv=True)

        Downsample = functools.partial(layerspp.Downsample,
                                       with_conv=resamp_with_conv, fir=fir, fir_kernel=fir_kernel)

        if progressive_input == 'input_skip':
            self.pyramid_downsample = layerspp.Downsample(fir=fir, fir_kernel=fir_kernel, with_conv=False)
        elif progressive_input == 'residual':
            pyramid_downsample = functools.partial(layerspp.Downsample,
                                                   fir=fir, fir_kernel=fir_kernel, with_conv=True)

        if resblock_type == 'ddpm':
            ResnetBlock = functools.partial(ResnetBlockDDPM,
                                            act=act,
                                            dropout=dropout,
                                            init_scale=init_scale,
                                            skip_rescale=skip_rescale,
                                            temb_dim=nf * 4)

        elif resblock_type == 'biggan':
            ResnetBlock = functools.partial(ResnetBlockBigGAN,
                                            act=act,
                                            dropout=dropout,
                                            fir=fir,
                                            fir_kernel=fir_kernel,
                                            init_scale=init_scale,
                                            skip_rescale=skip_rescale,
                                            temb_dim=nf * 4)

        else:
            raise ValueError(f'resblock type {resblock_type} unrecognized.')

        # Downsampling block
        channels = config.data.num_channels
        if progressive_input != 'none':
            input_pyramid_ch = channels

        modules.append(conv3x3(channels, nf))
        hs_c = [nf]  # tracks channel counts of the skip connections

        in_ch = nf
        for i_level in range(num_resolutions):
            # Residual blocks for this resolution
            for i_block in range(num_res_blocks):
                out_ch = nf * ch_mult[i_level]
                modules.append(ResnetBlock(in_ch=in_ch, out_ch=out_ch))
                in_ch = out_ch

                if all_resolutions[i_level] in attn_resolutions:
                    modules.append(AttnBlock(channels=in_ch))
                hs_c.append(in_ch)

            if i_level != num_resolutions - 1:
                if resblock_type == 'ddpm':
                    modules.append(Downsample(in_ch=in_ch))
                else:
                    modules.append(ResnetBlock(down=True, in_ch=in_ch))

                if progressive_input == 'input_skip':
                    modules.append(combiner(dim1=input_pyramid_ch, dim2=in_ch))
                    if combine_method == 'cat':
                        in_ch *= 2

                elif progressive_input == 'residual':
                    modules.append(pyramid_downsample(in_ch=input_pyramid_ch, out_ch=in_ch))
                    input_pyramid_ch = in_ch

                hs_c.append(in_ch)

        # Bottleneck: ResBlock -> Attention -> ResBlock.
        in_ch = hs_c[-1]
        modules.append(ResnetBlock(in_ch=in_ch))
        modules.append(AttnBlock(channels=in_ch))
        modules.append(ResnetBlock(in_ch=in_ch))

        pyramid_ch = 0
        # Upsampling block
        for i_level in reversed(range(num_resolutions)):
            for i_block in range(num_res_blocks + 1):
                out_ch = nf * ch_mult[i_level]
                modules.append(ResnetBlock(in_ch=in_ch + hs_c.pop(),
                                           out_ch=out_ch))
                in_ch = out_ch

            if all_resolutions[i_level] in attn_resolutions:
                modules.append(AttnBlock(channels=in_ch))

            if progressive != 'none':
                if i_level == num_resolutions - 1:
                    if progressive == 'output_skip':
                        modules.append(nn.GroupNorm(num_groups=min(in_ch // 4, 32),
                                                    num_channels=in_ch, eps=1e-6))
                        modules.append(conv3x3(in_ch, channels, init_scale=init_scale))
                        pyramid_ch = channels
                    elif progressive == 'residual':
                        modules.append(nn.GroupNorm(num_groups=min(in_ch // 4, 32),
                                                    num_channels=in_ch, eps=1e-6))
                        modules.append(conv3x3(in_ch, in_ch, bias=True))
                        pyramid_ch = in_ch
                    else:
                        raise ValueError(f'{progressive} is not a valid name.')
                else:
                    if progressive == 'output_skip':
                        modules.append(nn.GroupNorm(num_groups=min(in_ch // 4, 32),
                                                    num_channels=in_ch, eps=1e-6))
                        modules.append(conv3x3(in_ch, channels, bias=True, init_scale=init_scale))
                        pyramid_ch = channels
                    elif progressive == 'residual':
                        modules.append(pyramid_upsample(in_ch=pyramid_ch, out_ch=in_ch))
                        pyramid_ch = in_ch
                    else:
                        raise ValueError(f'{progressive} is not a valid name')

            if i_level != 0:
                if resblock_type == 'ddpm':
                    modules.append(Upsample(in_ch=in_ch))
                else:
                    modules.append(ResnetBlock(in_ch=in_ch, up=True))

        assert not hs_c  # every skip connection must have been consumed

        if progressive != 'output_skip':
            modules.append(nn.GroupNorm(num_groups=min(in_ch // 4, 32),
                                        num_channels=in_ch, eps=1e-6))
            modules.append(conv3x3(in_ch, channels, init_scale=init_scale))

        self.all_modules = nn.ModuleList(modules)

    def forward(self, x, time_cond):
        """Run the U-Net; `time_cond` is sigmas (fourier) or timesteps (positional).

        Consumes self.all_modules strictly in construction order via m_idx.
        """
        # timestep/noise_level embedding; only for continuous training
        modules = self.all_modules
        m_idx = 0
        if self.embedding_type == 'fourier':
            # Gaussian Fourier features embeddings.
            used_sigmas = time_cond
            temb = modules[m_idx](torch.log(used_sigmas))
            m_idx += 1

        elif self.embedding_type == 'positional':
            # Sinusoidal positional embeddings.
            timesteps = time_cond
            used_sigmas = self.sigmas[time_cond.long()]
            temb = layers.get_timestep_embedding(timesteps, self.nf)

        else:
            raise ValueError(f'embedding type {self.embedding_type} unknown.')

        if self.conditional:
            temb = modules[m_idx](temb)
            m_idx += 1
            temb = modules[m_idx](self.act(temb))
            m_idx += 1
        else:
            temb = None

        if not self.config.data.centered:
            # If input data is in [0, 1]
            x = 2 * x - 1.

        # Downsampling block
        input_pyramid = None
        if self.progressive_input != 'none':
            input_pyramid = x

        hs = [modules[m_idx](x)]
        m_idx += 1
        for i_level in range(self.num_resolutions):
            # Residual blocks for this resolution
            for i_block in range(self.num_res_blocks):
                h = modules[m_idx](hs[-1], temb)
                m_idx += 1
                if h.shape[-1] in self.attn_resolutions:
                    h = modules[m_idx](h)
                    m_idx += 1

                hs.append(h)

            if i_level != self.num_resolutions - 1:
                if self.resblock_type == 'ddpm':
                    h = modules[m_idx](hs[-1])
                    m_idx += 1
                else:
                    h = modules[m_idx](hs[-1], temb)
                    m_idx += 1

                if self.progressive_input == 'input_skip':
                    # Downsample the raw input and merge it into the features.
                    input_pyramid = self.pyramid_downsample(input_pyramid)
                    h = modules[m_idx](input_pyramid, h)
                    m_idx += 1

                elif self.progressive_input == 'residual':
                    input_pyramid = modules[m_idx](input_pyramid)
                    m_idx += 1
                    if self.skip_rescale:
                        # /sqrt(2) keeps the residual sum variance-preserving.
                        input_pyramid = (input_pyramid + h) / np.sqrt(2.)
                    else:
                        input_pyramid = input_pyramid + h
                    h = input_pyramid

                hs.append(h)

        # Bottleneck: ResBlock -> Attention -> ResBlock.
        h = hs[-1]
        h = modules[m_idx](h, temb)
        m_idx += 1
        h = modules[m_idx](h)
        m_idx += 1
        h = modules[m_idx](h, temb)
        m_idx += 1

        pyramid = None

        # Upsampling block
        for i_level in reversed(range(self.num_resolutions)):
            for i_block in range(self.num_res_blocks + 1):
                h = modules[m_idx](torch.cat([h, hs.pop()], dim=1), temb)
                m_idx += 1

            if h.shape[-1] in self.attn_resolutions:
                h = modules[m_idx](h)
                m_idx += 1

            if self.progressive != 'none':
                if i_level == self.num_resolutions - 1:
                    if self.progressive == 'output_skip':
                        pyramid = self.act(modules[m_idx](h))
                        m_idx += 1
                        pyramid = modules[m_idx](pyramid)
                        m_idx += 1
                    elif self.progressive == 'residual':
                        pyramid = self.act(modules[m_idx](h))
                        m_idx += 1
                        pyramid = modules[m_idx](pyramid)
                        m_idx += 1
                    else:
                        raise ValueError(f'{self.progressive} is not a valid name.')
                else:
                    if self.progressive == 'output_skip':
                        # Accumulate per-level output contributions.
                        pyramid = self.pyramid_upsample(pyramid)
                        pyramid_h = self.act(modules[m_idx](h))
                        m_idx += 1
                        pyramid_h = modules[m_idx](pyramid_h)
                        m_idx += 1
                        pyramid = pyramid + pyramid_h
                    elif self.progressive == 'residual':
                        pyramid = modules[m_idx](pyramid)
                        m_idx += 1
                        if self.skip_rescale:
                            pyramid = (pyramid + h) / np.sqrt(2.)
                        else:
                            pyramid = pyramid + h
                        h = pyramid
                    else:
                        raise ValueError(f'{self.progressive} is not a valid name')

            if i_level != 0:
                if self.resblock_type == 'ddpm':
                    h = modules[m_idx](h)
                    m_idx += 1
                else:
                    h = modules[m_idx](h, temb)
                    m_idx += 1

        assert not hs  # all skip connections consumed

        if self.progressive == 'output_skip':
            h = pyramid
        else:
            h = self.act(modules[m_idx](h))
            m_idx += 1
            h = modules[m_idx](h)
            m_idx += 1

        assert m_idx == len(modules)  # cursor must have consumed everything
        if self.config.model.scale_by_sigma:
            # Parameterize the output as score * sigma.
            used_sigmas = used_sigmas.reshape((x.shape[0], *([1] * len(x.shape[1:]))))
            h = h / used_sigmas

        return h
| 13,653 | 34.743455 | 113 | py |
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: skip-file
"""Layers for defining NCSN++.
"""
from . import layers
from . import up_or_down_sampling
import torch.nn as nn
import torch
import torch.nn.functional as F
import numpy as np
conv1x1 = layers.ddpm_conv1x1
conv3x3 = layers.ddpm_conv3x3
NIN = layers.NIN
default_init = layers.default_init
class GaussianFourierProjection(nn.Module):
    """Random Fourier feature embedding for noise levels."""

    def __init__(self, embedding_size=256, scale=1.0):
        super().__init__()
        # Frozen random projection frequencies; registered as a non-trainable
        # parameter so they are saved in checkpoints.
        self.W = nn.Parameter(torch.randn(embedding_size) * scale, requires_grad=False)

    def forward(self, x):
        angles = x[:, None] * self.W[None, :] * 2 * np.pi
        sin_part = torch.sin(angles)
        cos_part = torch.cos(angles)
        return torch.cat([sin_part, cos_part], dim=-1)
class Combine(nn.Module):
    """Combine information from skip connections: project x with a 1x1 conv,
    then concatenate with or add to y."""

    def __init__(self, dim1, dim2, method='cat'):
        super().__init__()
        self.Conv_0 = conv1x1(dim1, dim2)
        self.method = method

    def forward(self, x, y):
        projected = self.Conv_0(x)
        if self.method == 'cat':
            return torch.cat([projected, y], dim=1)
        if self.method == 'sum':
            return projected + y
        raise ValueError(f'Method {self.method} not recognized.')
class AttnBlockpp(nn.Module):
    """Channel-wise self-attention block. Modified from DDPM."""

    def __init__(self, channels, skip_rescale=False, init_scale=0.):
        super().__init__()
        # GroupNorm with at most 32 groups (fewer when channels < 128).
        self.GroupNorm_0 = nn.GroupNorm(num_groups=min(channels // 4, 32), num_channels=channels,
                                        eps=1e-6)
        # 1x1 projections (NIN = network-in-network) for query/key/value.
        self.NIN_0 = NIN(channels, channels)
        self.NIN_1 = NIN(channels, channels)
        self.NIN_2 = NIN(channels, channels)
        # Output projection; init_scale near 0 makes the block start near identity.
        self.NIN_3 = NIN(channels, channels, init_scale=init_scale)
        self.skip_rescale = skip_rescale

    def forward(self, x):
        B, C, H, W = x.shape
        h = self.GroupNorm_0(x)
        q = self.NIN_0(h)
        k = self.NIN_1(h)
        v = self.NIN_2(h)

        # Attention logits between every pair of spatial positions, scaled 1/sqrt(C).
        w = torch.einsum('bchw,bcij->bhwij', q, k) * (int(C) ** (-0.5))
        w = torch.reshape(w, (B, H, W, H * W))
        w = F.softmax(w, dim=-1)  # softmax over flattened key positions
        w = torch.reshape(w, (B, H, W, H, W))
        h = torch.einsum('bhwij,bcij->bchw', w, v)
        h = self.NIN_3(h)
        if not self.skip_rescale:
            return x + h
        else:
            # /sqrt(2) keeps the residual sum variance-preserving.
            return (x + h) / np.sqrt(2.)
class Upsample(nn.Module):
    """2x spatial upsampling via nearest-neighbour interpolation or a
    StyleGAN-2 FIR filter, optionally followed by (or fused with) a conv."""

    def __init__(self, in_ch=None, out_ch=None, with_conv=False, fir=False,
                 fir_kernel=(1, 3, 3, 1)):
        super().__init__()
        out_ch = out_ch if out_ch else in_ch
        if not fir:
            if with_conv:
                self.Conv_0 = conv3x3(in_ch, out_ch)
        else:
            if with_conv:
                # Fused FIR upsample + 3x3 conv.
                self.Conv2d_0 = up_or_down_sampling.Conv2d(in_ch, out_ch,
                                                           kernel=3, up=True,
                                                           resample_kernel=fir_kernel,
                                                           use_bias=True,
                                                           kernel_init=default_init())
        self.fir = fir
        self.with_conv = with_conv
        self.fir_kernel = fir_kernel
        self.out_ch = out_ch

    def forward(self, x):
        B, C, H, W = x.shape
        if not self.fir:
            h = F.interpolate(x, (H * 2, W * 2), 'nearest')
            if self.with_conv:
                h = self.Conv_0(h)
        else:
            if not self.with_conv:
                h = up_or_down_sampling.upsample_2d(x, self.fir_kernel, factor=2)
            else:
                h = self.Conv2d_0(x)

        return h
class Downsample(nn.Module):
    """2x spatial downsampling layer.

    fir=False: 2x2 average pooling, or an asymmetrically padded stride-2 3x3 conv.
    fir=True: FIR-filtered downsampling (StyleGAN2-style), optionally fused with a conv.
    """

    def __init__(self, in_ch=None, out_ch=None, with_conv=False, fir=False,
                 fir_kernel=(1, 3, 3, 1)):
        super().__init__()
        out_ch = out_ch if out_ch else in_ch
        if fir:
            if with_conv:
                self.Conv2d_0 = up_or_down_sampling.Conv2d(in_ch, out_ch,
                                                           kernel=3, down=True,
                                                           resample_kernel=fir_kernel,
                                                           use_bias=True,
                                                           kernel_init=default_init())
        else:
            if with_conv:
                self.Conv_0 = conv3x3(in_ch, out_ch, stride=2, padding=0)
        self.fir = fir
        self.fir_kernel = fir_kernel
        self.with_conv = with_conv
        self.out_ch = out_ch

    def forward(self, x):
        _, _, height, width = x.shape
        if self.fir:
            if self.with_conv:
                return self.Conv2d_0(x)
            return up_or_down_sampling.downsample_2d(x, self.fir_kernel, factor=2)
        if self.with_conv:
            # Pad only on the right/bottom, then valid stride-2 convolution.
            return self.Conv_0(F.pad(x, (0, 1, 0, 1)))
        return F.avg_pool2d(x, 2, stride=2)
class ResnetBlockDDPMpp(nn.Module):
    """ResBlock adapted from DDPM.

    GroupNorm -> act -> conv3x3 -> (+ time-embedding bias) -> GroupNorm -> act
    -> dropout -> conv3x3, with a projected skip connection when channel
    counts differ.
    """

    def __init__(self, act, in_ch, out_ch=None, temb_dim=None, conv_shortcut=False,
                 dropout=0.1, skip_rescale=False, init_scale=0.):
        super().__init__()
        out_ch = out_ch if out_ch else in_ch
        self.GroupNorm_0 = nn.GroupNorm(num_groups=min(in_ch // 4, 32), num_channels=in_ch, eps=1e-6)
        self.Conv_0 = conv3x3(in_ch, out_ch)
        if temb_dim is not None:
            # Linear projection of the time embedding, added as a per-channel bias.
            self.Dense_0 = nn.Linear(temb_dim, out_ch)
            self.Dense_0.weight.data = default_init()(self.Dense_0.weight.data.shape)
            nn.init.zeros_(self.Dense_0.bias)
        self.GroupNorm_1 = nn.GroupNorm(num_groups=min(out_ch // 4, 32), num_channels=out_ch, eps=1e-6)
        self.Dropout_0 = nn.Dropout(dropout)
        # The last conv's init is scaled by init_scale (0 makes the block start near identity).
        self.Conv_1 = conv3x3(out_ch, out_ch, init_scale=init_scale)
        if in_ch != out_ch:
            # Skip-path projection: 3x3 conv or per-pixel linear (NIN).
            if conv_shortcut:
                self.Conv_2 = conv3x3(in_ch, out_ch)
            else:
                self.NIN_0 = NIN(in_ch, out_ch)

        self.skip_rescale = skip_rescale
        self.act = act
        self.out_ch = out_ch
        self.conv_shortcut = conv_shortcut

    def forward(self, x, temb=None):
        h = self.act(self.GroupNorm_0(x))
        h = self.Conv_0(h)
        if temb is not None:
            # Broadcast the projected time embedding over the spatial dims.
            h += self.Dense_0(self.act(temb))[:, :, None, None]
        h = self.act(self.GroupNorm_1(h))
        h = self.Dropout_0(h)
        h = self.Conv_1(h)
        if x.shape[1] != self.out_ch:
            if self.conv_shortcut:
                x = self.Conv_2(x)
            else:
                x = self.NIN_0(x)
        if not self.skip_rescale:
            return x + h
        else:
            # Divide by sqrt(2) to keep the residual sum's variance roughly constant.
            return (x + h) / np.sqrt(2.)
class ResnetBlockBigGANpp(nn.Module):
    """BigGAN-style residual block with optional in-block 2x up/downsampling.

    Both the residual branch and the skip connection are resampled so their
    shapes stay aligned; a 1x1 conv projects the skip path whenever channels
    change or resampling occurs.
    """

    def __init__(self, act, in_ch, out_ch=None, temb_dim=None, up=False, down=False,
                 dropout=0.1, fir=False, fir_kernel=(1, 3, 3, 1),
                 skip_rescale=True, init_scale=0.):
        super().__init__()

        out_ch = out_ch if out_ch else in_ch
        self.GroupNorm_0 = nn.GroupNorm(num_groups=min(in_ch // 4, 32), num_channels=in_ch, eps=1e-6)
        self.up = up
        self.down = down
        self.fir = fir
        self.fir_kernel = fir_kernel

        self.Conv_0 = conv3x3(in_ch, out_ch)
        if temb_dim is not None:
            # Linear projection of the time embedding, added as a per-channel bias.
            self.Dense_0 = nn.Linear(temb_dim, out_ch)
            self.Dense_0.weight.data = default_init()(self.Dense_0.weight.shape)
            nn.init.zeros_(self.Dense_0.bias)

        self.GroupNorm_1 = nn.GroupNorm(num_groups=min(out_ch // 4, 32), num_channels=out_ch, eps=1e-6)
        self.Dropout_0 = nn.Dropout(dropout)
        # Last conv's init scaled by init_scale (0 makes the block start near identity).
        self.Conv_1 = conv3x3(out_ch, out_ch, init_scale=init_scale)
        if in_ch != out_ch or up or down:
            # 1x1 projection so the skip path matches the residual path.
            self.Conv_2 = conv1x1(in_ch, out_ch)

        self.skip_rescale = skip_rescale
        self.act = act
        self.in_ch = in_ch
        self.out_ch = out_ch

    def forward(self, x, temb=None):
        h = self.act(self.GroupNorm_0(x))

        # Resample both branches (FIR-filtered or naive nearest/average).
        if self.up:
            if self.fir:
                h = up_or_down_sampling.upsample_2d(h, self.fir_kernel, factor=2)
                x = up_or_down_sampling.upsample_2d(x, self.fir_kernel, factor=2)
            else:
                h = up_or_down_sampling.naive_upsample_2d(h, factor=2)
                x = up_or_down_sampling.naive_upsample_2d(x, factor=2)
        elif self.down:
            if self.fir:
                h = up_or_down_sampling.downsample_2d(h, self.fir_kernel, factor=2)
                x = up_or_down_sampling.downsample_2d(x, self.fir_kernel, factor=2)
            else:
                h = up_or_down_sampling.naive_downsample_2d(h, factor=2)
                x = up_or_down_sampling.naive_downsample_2d(x, factor=2)

        h = self.Conv_0(h)
        # Add bias to each feature map conditioned on the time embedding
        if temb is not None:
            h += self.Dense_0(self.act(temb))[:, :, None, None]
        h = self.act(self.GroupNorm_1(h))
        h = self.Dropout_0(h)
        h = self.Conv_1(h)

        if self.in_ch != self.out_ch or self.up or self.down:
            x = self.Conv_2(x)

        if not self.skip_rescale:
            return x + h
        else:
            # Keep the residual sum's variance roughly constant.
            return (x + h) / np.sqrt(2.)
| 9,001 | 31.734545 | 99 | py |
DiffPure | DiffPure-master/score_sde/op/upfirdn2d.py | # ---------------------------------------------------------------
# Taken from the following link as is from:
# https://github.com/yang-song/score_sde_pytorch/blob/main/op/upfirdn2d.py
#
# The license for the original version of this file can be
# found in the `score_sde` directory (LICENSE_SCORE_SDE).
# ---------------------------------------------------------------
import os
import torch
from torch.nn import functional as F
from torch.autograd import Function
from torch.utils.cpp_extension import load
# JIT-compile the custom upfirdn2d CUDA extension (sources live next to this
# file) at import time; `upfirdn2d_op` exposes the compiled kernel.
module_path = os.path.dirname(__file__)
upfirdn2d_op = load(
    "upfirdn2d",
    sources=[
        os.path.join(module_path, "upfirdn2d.cpp"),
        os.path.join(module_path, "upfirdn2d_kernel.cu"),
    ],
)
class UpFirDn2dBackward(Function):
    """Backward pass of UpFirDn2d, itself differentiable (double backward).

    The gradient of upfirdn2d is another upfirdn2d call with up/down factors
    swapped, the kernel flipped, and the adjusted `g_pad` padding computed in
    UpFirDn2d.forward.
    """

    @staticmethod
    def forward(
        ctx, grad_output, kernel, grad_kernel, up, down, pad, g_pad, in_size, out_size
    ):

        up_x, up_y = up
        down_x, down_y = down
        g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 = g_pad

        # The CUDA op expects (N*C, H, W, 1) layout.
        grad_output = grad_output.reshape(-1, out_size[0], out_size[1], 1)

        # Transposed operation: down/up swapped, pre-flipped kernel, gradient padding.
        grad_input = upfirdn2d_op.upfirdn2d(
            grad_output,
            grad_kernel,
            down_x,
            down_y,
            up_x,
            up_y,
            g_pad_x0,
            g_pad_x1,
            g_pad_y0,
            g_pad_y1,
        )
        grad_input = grad_input.view(in_size[0], in_size[1], in_size[2], in_size[3])

        ctx.save_for_backward(kernel)

        pad_x0, pad_x1, pad_y0, pad_y1 = pad

        # Stash the forward op's parameters for the double-backward pass.
        ctx.up_x = up_x
        ctx.up_y = up_y
        ctx.down_x = down_x
        ctx.down_y = down_y
        ctx.pad_x0 = pad_x0
        ctx.pad_x1 = pad_x1
        ctx.pad_y0 = pad_y0
        ctx.pad_y1 = pad_y1
        ctx.in_size = in_size
        ctx.out_size = out_size

        return grad_input

    @staticmethod
    def backward(ctx, gradgrad_input):
        kernel, = ctx.saved_tensors

        gradgrad_input = gradgrad_input.reshape(-1, ctx.in_size[2], ctx.in_size[3], 1)

        # Double backward re-applies the original forward op with the un-flipped kernel.
        gradgrad_out = upfirdn2d_op.upfirdn2d(
            gradgrad_input,
            kernel,
            ctx.up_x,
            ctx.up_y,
            ctx.down_x,
            ctx.down_y,
            ctx.pad_x0,
            ctx.pad_x1,
            ctx.pad_y0,
            ctx.pad_y1,
        )
        # gradgrad_out = gradgrad_out.view(ctx.in_size[0], ctx.out_size[0], ctx.out_size[1], ctx.in_size[3])
        gradgrad_out = gradgrad_out.view(
            ctx.in_size[0], ctx.in_size[1], ctx.out_size[0], ctx.out_size[1]
        )

        # One gradient per forward argument; only grad_output receives one.
        return gradgrad_out, None, None, None, None, None, None, None, None
class UpFirDn2d(Function):
    """Autograd wrapper around the compiled upfirdn2d CUDA op
    (upsample -> FIR filter -> downsample)."""

    @staticmethod
    def forward(ctx, input, kernel, up, down, pad):
        up_x, up_y = up
        down_x, down_y = down
        pad_x0, pad_x1, pad_y0, pad_y1 = pad

        kernel_h, kernel_w = kernel.shape
        batch, channel, in_h, in_w = input.shape
        ctx.in_size = input.shape

        # The CUDA op works on (N*C, H, W, 1) layout.
        input = input.reshape(-1, in_h, in_w, 1)

        # Save both the kernel and its flipped version for the backward pass.
        ctx.save_for_backward(kernel, torch.flip(kernel, [0, 1]))

        out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1
        out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1
        ctx.out_size = (out_h, out_w)

        ctx.up = (up_x, up_y)
        ctx.down = (down_x, down_y)
        ctx.pad = (pad_x0, pad_x1, pad_y0, pad_y1)

        # Padding for the transposed (gradient) operation used in backward.
        g_pad_x0 = kernel_w - pad_x0 - 1
        g_pad_y0 = kernel_h - pad_y0 - 1
        g_pad_x1 = in_w * up_x - out_w * down_x + pad_x0 - up_x + 1
        g_pad_y1 = in_h * up_y - out_h * down_y + pad_y0 - up_y + 1

        ctx.g_pad = (g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1)

        out = upfirdn2d_op.upfirdn2d(
            input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1
        )
        # out = out.view(major, out_h, out_w, minor)
        out = out.view(-1, channel, out_h, out_w)

        return out

    @staticmethod
    def backward(ctx, grad_output):
        kernel, grad_kernel = ctx.saved_tensors

        grad_input = UpFirDn2dBackward.apply(
            grad_output,
            kernel,
            grad_kernel,
            ctx.up,
            ctx.down,
            ctx.pad,
            ctx.g_pad,
            ctx.in_size,
            ctx.out_size,
        )

        # Gradients only flow to `input`; kernel/up/down/pad get None.
        return grad_input, None, None, None, None
def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)):
    """Upsample, FIR-filter, and downsample `input` (NCHW) with a 2-D `kernel`.

    Dispatches to the pure-PyTorch reference implementation on CPU and to the
    compiled CUDA op elsewhere. `up`/`down` apply to both axes; `pad` is
    (before, after) on each axis.
    """
    if input.device.type == "cpu":
        return upfirdn2d_native(
            input, kernel, up, up, down, down, pad[0], pad[1], pad[0], pad[1]
        )
    return UpFirDn2d.apply(
        input, kernel, (up, up), (down, down), (pad[0], pad[1], pad[0], pad[1])
    )
def upfirdn2d_native(
    input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1
):
    """Pure-PyTorch upfirdn: zero-stuff upsample, pad/crop, filter, stride-downsample.

    `input` is (N, C, H, W) and the 2-D `kernel` is applied depthwise to every
    channel. Negative pad values crop instead of padding. Returns a tensor of
    shape (N, C, out_h, out_w).
    """
    _, channel, in_h, in_w = input.shape
    input = input.reshape(-1, in_h, in_w, 1)

    _, in_h, in_w, minor = input.shape
    kernel_h, kernel_w = kernel.shape

    padded_h = in_h * up_y + pad_y0 + pad_y1
    padded_w = in_w * up_x + pad_x0 + pad_x1
    out_h = (padded_h - kernel_h) // down_y + 1
    out_w = (padded_w - kernel_w) // down_x + 1

    # Zero-stuffing upsample: insert (up - 1) zeros after every sample.
    buf = input.view(-1, in_h, 1, in_w, 1, minor)
    buf = F.pad(buf, [0, 0, 0, up_x - 1, 0, 0, 0, up_y - 1])
    buf = buf.view(-1, in_h * up_y, in_w * up_x, minor)

    # Positive pads extend with zeros; negative pads crop the borders.
    buf = F.pad(
        buf, [0, 0, max(pad_x0, 0), max(pad_x1, 0), max(pad_y0, 0), max(pad_y1, 0)]
    )
    buf = buf[
        :,
        max(-pad_y0, 0): buf.shape[1] - max(-pad_y1, 0),
        max(-pad_x0, 0): buf.shape[2] - max(-pad_x1, 0),
        :,
    ]

    # F.conv2d computes cross-correlation, so flip the kernel to convolve.
    buf = buf.permute(0, 3, 1, 2).reshape([-1, 1, padded_h, padded_w])
    flipped = torch.flip(kernel, [0, 1]).view(1, 1, kernel_h, kernel_w)
    buf = F.conv2d(buf, flipped)
    buf = buf.reshape(-1, minor, padded_h - kernel_h + 1, padded_w - kernel_w + 1)

    # Downsample by striding, then restore the (N, C, H, W) layout.
    buf = buf.permute(0, 2, 3, 1)[:, ::down_y, ::down_x, :]
    return buf.view(-1, channel, out_h, out_w)
| 6,043 | 27.91866 | 108 | py |
DiffPure | DiffPure-master/score_sde/op/__init__.py | # ---------------------------------------------------------------
# Taken from the following link as is from:
# https://github.com/yang-song/score_sde_pytorch/blob/main/op/__init__.py
#
# The license for the original version of this file can be
# found in the `score_sde` directory (LICENSE_SCORE_SDE).
# ---------------------------------------------------------------
from .fused_act import FusedLeakyReLU, fused_leaky_relu
from .upfirdn2d import upfirdn2d
| 459 | 40.818182 | 73 | py |
DiffPure | DiffPure-master/score_sde/op/fused_act.py | # ---------------------------------------------------------------
# Taken from the following link as is from:
# https://github.com/yang-song/score_sde_pytorch/blob/main/op/fused_act.py
#
# The license for the original version of this file can be
# found in the `score_sde` directory (LICENSE_SCORE_SDE).
# ---------------------------------------------------------------
import os
import torch
from torch import nn
from torch.nn import functional as F
from torch.autograd import Function
from torch.utils.cpp_extension import load
# JIT-compile the fused bias + activation CUDA extension (sources live next to
# this file) at import time; `fused` exposes the compiled kernel.
module_path = os.path.dirname(__file__)
fused = load(
    "fused",
    sources=[
        os.path.join(module_path, "fused_bias_act.cpp"),
        os.path.join(module_path, "fused_bias_act_kernel.cu"),
    ],
)
class FusedLeakyReLUFunctionBackward(Function):
    """Backward pass of FusedLeakyReLUFunction, differentiable for double backward.

    Returns gradients w.r.t. the input and the bias; the integer flags passed
    to the kernel (3, 1) select its backward behavior — see fused_bias_act.cpp.
    """

    @staticmethod
    def forward(ctx, grad_output, out, negative_slope, scale):
        ctx.save_for_backward(out)
        ctx.negative_slope = negative_slope
        ctx.scale = scale

        empty = grad_output.new_empty(0)

        grad_input = fused.fused_bias_act(
            grad_output, empty, out, 3, 1, negative_slope, scale
        )

        # Bias gradient: sum over every dim except the channel dim (dim 1).
        dim = [0]

        if grad_input.ndim > 2:
            dim += list(range(2, grad_input.ndim))

        grad_bias = grad_input.sum(dim).detach()

        return grad_input, grad_bias

    @staticmethod
    def backward(ctx, gradgrad_input, gradgrad_bias):
        out, = ctx.saved_tensors
        gradgrad_out = fused.fused_bias_act(
            gradgrad_input, gradgrad_bias, out, 3, 1, ctx.negative_slope, ctx.scale
        )

        # Gradients only for grad_output; out/negative_slope/scale get None.
        return gradgrad_out, None, None, None
class FusedLeakyReLUFunction(Function):
    """Autograd wrapper for the fused bias + LeakyReLU + scale CUDA kernel."""

    @staticmethod
    def forward(ctx, input, bias, negative_slope, scale):
        empty = input.new_empty(0)
        # Flags (3, 0) select the kernel's forward path — see fused_bias_act.cpp.
        out = fused.fused_bias_act(input, bias, empty, 3, 0, negative_slope, scale)
        # Only the output is needed to compute the LeakyReLU gradient.
        ctx.save_for_backward(out)
        ctx.negative_slope = negative_slope
        ctx.scale = scale

        return out

    @staticmethod
    def backward(ctx, grad_output):
        out, = ctx.saved_tensors

        grad_input, grad_bias = FusedLeakyReLUFunctionBackward.apply(
            grad_output, out, ctx.negative_slope, ctx.scale
        )

        return grad_input, grad_bias, None, None
class FusedLeakyReLU(nn.Module):
    """Module form of fused_leaky_relu with a learnable per-channel bias
    (initialized to zero)."""

    def __init__(self, channel, negative_slope=0.2, scale=2 ** 0.5):
        super().__init__()
        self.bias = nn.Parameter(torch.zeros(channel))
        self.negative_slope = negative_slope
        self.scale = scale

    def forward(self, input):
        out = fused_leaky_relu(input, self.bias, self.negative_slope, self.scale)
        return out
def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5):
    """Add a per-channel `bias`, apply LeakyReLU, and multiply by `scale`.

    Uses the fused CUDA kernel on GPU; falls back to plain PyTorch ops on CPU.
    `bias` is broadcast over dim 1 of `input`.
    """
    if input.device.type == "cpu":
        rest_dim = [1] * (input.ndim - bias.ndim - 1)
        return (
            F.leaky_relu(
                # BUG FIX: the CPU fallback previously hard-coded
                # negative_slope=0.2, silently ignoring the argument and
                # diverging from the CUDA path for any other slope.
                input + bias.view(1, bias.shape[0], *rest_dim),
                negative_slope=negative_slope,
            )
            * scale
        )

    else:
        return FusedLeakyReLUFunction.apply(input, bias, negative_slope, scale)
| 3,061 | 27.886792 | 83 | py |
DiffPure | DiffPure-master/runners/diffpure_guided.py | # ---------------------------------------------------------------
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the NVIDIA Source Code License
# for DiffPure. To view a copy of this license, see the LICENSE file.
# ---------------------------------------------------------------
import os
import random
import torch
import torchvision.utils as tvu
from guided_diffusion.script_util import create_model_and_diffusion, model_and_diffusion_defaults
class GuidedDiffusion(torch.nn.Module):
    """DiffPure purifier built on OpenAI's guided-diffusion 256x256 ImageNet model.

    `image_editing_sample` forward-diffuses an input image for `args.t` steps
    and then denoises it back to t=0 with the pretrained unconditional model.
    """

    def __init__(self, args, config, device=None, model_dir='pretrained/guided_diffusion'):
        super().__init__()
        self.args = args
        self.config = config
        if device is None:
            device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
        self.device = device

        # load model
        model_config = model_and_diffusion_defaults()
        model_config.update(vars(self.config.model))
        print(f'model_config: {model_config}')
        model, diffusion = create_model_and_diffusion(**model_config)
        model.load_state_dict(torch.load(f'{model_dir}/256x256_diffusion_uncond.pt', map_location='cpu'))
        # Inference-only: freeze parameters and switch to eval mode.
        model.requires_grad_(False).eval().to(self.device)

        if model_config['use_fp16']:
            model.convert_to_fp16()

        self.model = model
        self.diffusion = diffusion
        self.betas = torch.from_numpy(diffusion.betas).float().to(self.device)

    def image_editing_sample(self, img, bs_id=0, tag=None):
        """Purify `img` (NCHW, presumably scaled to [-1, 1] given the (x+1)*0.5
        saving below — confirm against the caller): add noise up to level
        `args.t`, run the reverse diffusion to t=0, repeat `args.sample_step`
        times, and concatenate the results along the batch dimension.
        """
        with torch.no_grad():
            assert isinstance(img, torch.Tensor)
            batch_size = img.shape[0]

            if tag is None:
                tag = 'rnd' + str(random.randint(0, 10000))
            out_dir = os.path.join(self.args.log_dir, 'bs' + str(bs_id) + '_' + tag)

            assert img.ndim == 4, img.ndim
            img = img.to(self.device)
            x0 = img

            # Only the first two batches dump debug images to disk.
            if bs_id < 2:
                os.makedirs(out_dir, exist_ok=True)
                tvu.save_image((x0 + 1) * 0.5, os.path.join(out_dir, f'original_input.png'))

            xs = []
            for it in range(self.args.sample_step):
                e = torch.randn_like(x0)
                total_noise_levels = self.args.t
                a = (1 - self.betas).cumprod(dim=0)
                # Closed-form forward diffusion: x_t = sqrt(a_t) x_0 + sqrt(1 - a_t) eps.
                x = x0 * a[total_noise_levels - 1].sqrt() + e * (1.0 - a[total_noise_levels - 1]).sqrt()

                if bs_id < 2:
                    tvu.save_image((x + 1) * 0.5, os.path.join(out_dir, f'init_{it}.png'))

                # Reverse diffusion from t = args.t - 1 down to 0.
                for i in reversed(range(total_noise_levels)):
                    t = torch.tensor([i] * batch_size, device=self.device)
                    x = self.diffusion.p_sample(self.model, x, t,
                                                clip_denoised=True,
                                                denoised_fn=None,
                                                cond_fn=None,
                                                model_kwargs=None)["sample"]

                    # added intermediate step vis
                    if (i - 99) % 100 == 0 and bs_id < 2:
                        tvu.save_image((x + 1) * 0.5, os.path.join(out_dir, f'noise_t_{i}_{it}.png'))

                # Later sample_step iterations re-purify the previous result.
                x0 = x

                if bs_id < 2:
                    torch.save(x0, os.path.join(out_dir, f'samples_{it}.pth'))
                    tvu.save_image((x0 + 1) * 0.5, os.path.join(out_dir, f'samples_{it}.png'))

                xs.append(x0)

            return torch.cat(xs, dim=0)
| 3,551 | 38.466667 | 105 | py |
DiffPure | DiffPure-master/runners/diffpure_ode.py | # ---------------------------------------------------------------
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the NVIDIA Source Code License
# for DiffPure. To view a copy of this license, see the LICENSE file.
# ---------------------------------------------------------------
import os
import random
import numpy as np
import torch
import torchvision.utils as tvu
from torchdiffeq import odeint_adjoint
from guided_diffusion.script_util import create_model_and_diffusion, model_and_diffusion_defaults
from score_sde.losses import get_optimizer
from score_sde.models import utils as mutils
from score_sde.models.ema import ExponentialMovingAverage
from score_sde import sde_lib
def _extract_into_tensor(arr_or_func, timesteps, broadcast_shape):
"""
Extract values from a 1-D numpy array for a batch of indices.
:param arr: the 1-D numpy array or a func.
:param timesteps: a tensor of indices into the array to extract.
:param broadcast_shape: a larger shape of K dimensions with the batch
dimension equal to the length of timesteps.
:return: a tensor of shape [batch_size, 1, ...] where the shape has K dims.
"""
if callable(arr_or_func):
res = arr_or_func(timesteps).float()
else:
res = arr_or_func.to(device=timesteps.device)[timesteps].float()
while len(res.shape) < len(broadcast_shape):
res = res[..., None]
return res.expand(broadcast_shape)
def restore_checkpoint(ckpt_dir, state, device):
    """Load optimizer/model/EMA weights and the step count from `ckpt_dir`
    into the `state` dict, in place."""
    checkpoint = torch.load(ckpt_dir, map_location=device)
    state['optimizer'].load_state_dict(checkpoint['optimizer'])
    # strict=False: tolerate missing/unexpected keys in the model weights.
    state['model'].load_state_dict(checkpoint['model'], strict=False)
    state['ema'].load_state_dict(checkpoint['ema'])
    state['step'] = checkpoint['step']
class VPODE(torch.nn.Module):
    """Probability-flow ODE of a variance-preserving diffusion, as a
    torchdiffeq-compatible module.

    The state is a flattened image batch of shape (batch, prod(img_shape));
    `forward(t, states)` returns the reverse-time derivative tuple.
    """

    def __init__(self, model, score_type='guided_diffusion', beta_min=0.1, beta_max=20, N=1000,
                 img_shape=(3, 256, 256), model_kwargs=None):
        """Construct a Variance Preserving SDE.

        Args:
            model: diffusion model
            score_type: [guided_diffusion, score_sde, ddpm]
            beta_min: value of beta(0)
            beta_max: value of beta(1)
        """
        super().__init__()
        self.model = model
        self.score_type = score_type
        self.model_kwargs = model_kwargs
        self.img_shape = img_shape
        self.beta_0 = beta_min
        self.beta_1 = beta_max
        self.N = N
        # Discrete-time VP (DDPM-style) quantities.
        self.discrete_betas = torch.linspace(beta_min / N, beta_max / N, N)
        self.alphas = 1. - self.discrete_betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        self.sqrt_alphas_cumprod = torch.sqrt(self.alphas_cumprod)
        self.sqrt_1m_alphas_cumprod = torch.sqrt(1. - self.alphas_cumprod)
        # Continuous-time alpha_bar(t) and -1/sqrt(1 - alpha_bar(t)), used to
        # convert the model's epsilon prediction into a score.
        self.alphas_cumprod_cont = lambda t: torch.exp(-0.5 * (beta_max - beta_min) * t**2 - beta_min * t)
        self.sqrt_1m_alphas_cumprod_neg_recip_cont = lambda t: -1. / torch.sqrt(1. - self.alphas_cumprod_cont(t))

    def _scale_timesteps(self, t):
        # Map continuous t in [0, 1] to the model's integer timestep in [0, N].
        assert torch.all(t <= 1) and torch.all(t >= 0), f't has to be in [0, 1], but get {t} with shape {t.shape}'
        return (t.float() * self.N).long()

    def vpsde_fn(self, t, x):
        # VP-SDE drift f(x, t) = -0.5 beta(t) x and diffusion g(t) = sqrt(beta(t)).
        beta_t = self.beta_0 + t * (self.beta_1 - self.beta_0)
        drift = -0.5 * beta_t[:, None] * x
        diffusion = torch.sqrt(beta_t)
        return drift, diffusion

    def ode_fn(self, t, x):
        """Create the drift and diffusion functions for the reverse SDE"""
        drift, diffusion = self.vpsde_fn(t, x)

        assert x.ndim == 2 and np.prod(self.img_shape) == x.shape[1], x.shape
        x_img = x.view(-1, *self.img_shape)

        if self.score_type == 'guided_diffusion':
            # model output is epsilon
            if self.model_kwargs is None:
                self.model_kwargs = {}

            disc_steps = self._scale_timesteps(t)  # (batch_size, ), from float in [0,1] to int in [0, 1000]
            model_output = self.model(x_img, disc_steps, **self.model_kwargs)
            # with learned sigma, so model_output contains (mean, val)
            model_output, _ = torch.split(model_output, self.img_shape[0], dim=1)
            assert x_img.shape == model_output.shape, f'{x_img.shape}, {model_output.shape}'
            model_output = model_output.view(x.shape[0], -1)
            # score = -eps / sqrt(1 - alpha_bar(t)).
            score = _extract_into_tensor(self.sqrt_1m_alphas_cumprod_neg_recip_cont, t, x.shape) * model_output

        elif self.score_type == 'score_sde':
            # model output is epsilon
            sde = sde_lib.VPSDE(beta_min=self.beta_0, beta_max=self.beta_1, N=self.N)
            score_fn = mutils.get_score_fn(sde, self.model, train=False, continuous=True)
            score = score_fn(x_img, t)
            assert x_img.shape == score.shape, f'{x_img.shape}, {score.shape}'
            score = score.view(x.shape[0], -1)

        else:
            raise NotImplementedError(f'Unknown score type in RevVPSDE: {self.score_type}!')

        # Probability-flow ODE: dx/dt = f(x, t) - 0.5 g(t)^2 * score.
        ode_coef = drift - 0.5 * diffusion[:, None] ** 2 * score
        return ode_coef

    def forward(self, t, states):
        # torchdiffeq passes the state as a tuple; t is a scalar time.
        x = states[0]

        t = t.expand(x.shape[0])  # (batch_size, )
        dx_dt = self.ode_fn(t, x)
        assert dx_dt.shape == x.shape

        return dx_dt,
class OdeGuidedDiffusion(torch.nn.Module):
    """DiffPure purifier that denoises via the probability-flow ODE (VPODE).

    Supports the guided-diffusion ImageNet model and the score_sde CIFAR-10
    model; `image_editing_sample` forward-diffuses the input and integrates
    the reverse-time ODE back toward t=0 with torchdiffeq.
    """

    def __init__(self, args, config, device=None):
        super().__init__()
        self.args = args
        self.config = config
        if device is None:
            device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
        self.device = device

        # load model
        if config.data.dataset == 'ImageNet':
            img_shape = (3, 256, 256)
            model_dir = 'pretrained/guided_diffusion'
            model_config = model_and_diffusion_defaults()
            model_config.update(vars(self.config.model))
            print(f'model_config: {model_config}')
            model, _ = create_model_and_diffusion(**model_config)
            model.load_state_dict(torch.load(f'{model_dir}/256x256_diffusion_uncond.pt', map_location='cpu'))

            if model_config['use_fp16']:
                model.convert_to_fp16()

        elif config.data.dataset == 'CIFAR10':
            img_shape = (3, 32, 32)
            model_dir = 'pretrained/score_sde'
            print(f'model_config: {config}')
            model = mutils.create_model(config)

            # Restore the EMA weights into the model for evaluation.
            optimizer = get_optimizer(config, model.parameters())
            ema = ExponentialMovingAverage(model.parameters(), decay=config.model.ema_rate)
            state = dict(step=0, optimizer=optimizer, model=model, ema=ema)
            restore_checkpoint(f'{model_dir}/checkpoint_8.pth', state, device)
            ema.copy_to(model.parameters())
        else:
            raise NotImplementedError(f'Unknown dataset {config.data.dataset}!')

        model.eval().to(self.device)

        self.model = model
        self.vpode = VPODE(model=model, score_type=args.score_type, img_shape=img_shape,
                           model_kwargs=None).to(self.device)
        self.betas = self.vpode.discrete_betas.float().to(self.device)

        # Solver settings for torchdiffeq (tolerances only matter for adaptive methods).
        self.atol, self.rtol = 1e-3, 1e-3
        self.method = 'euler'

        print(f'method: {self.method}, atol: {self.atol}, rtol: {self.rtol}, step_size: {self.args.step_size}')

    def image_editing_sample(self, img, bs_id=0, tag=None):
        """Purify `img` (NCHW, presumably in [-1, 1] given the (x+1)*0.5 saving
        below — confirm against the caller): diffuse to level `args.t`, then
        integrate the probability-flow ODE back to ~0. Repeats
        `args.sample_step` times and concatenates results along the batch dim.
        """
        assert isinstance(img, torch.Tensor)
        batch_size = img.shape[0]

        if tag is None:
            tag = 'rnd' + str(random.randint(0, 10000))
        out_dir = os.path.join(self.args.log_dir, 'bs' + str(bs_id) + '_' + tag)

        assert img.ndim == 4, img.ndim
        img = img.to(self.device)
        x0 = img

        # Only the first two batches dump debug images to disk.
        if bs_id < 2:
            os.makedirs(out_dir, exist_ok=True)
            tvu.save_image((x0 + 1) * 0.5, os.path.join(out_dir, f'original_input.png'))

        xs = []
        for it in range(self.args.sample_step):
            if self.args.fix_rand:
                # fix initial randomness
                noise_fixed = torch.FloatTensor(1, *x0.shape[1:]).\
                    normal_(0, 1, generator=torch.manual_seed(self.args.seed)).to(self.device)
                print(f'noise_fixed: {noise_fixed[0, 0, 0, :3]}')
                e = noise_fixed.repeat(x0.shape[0], 1, 1, 1)
            else:
                e = torch.randn_like(x0).to(self.device)
            assert e.shape == x0.shape

            total_noise_levels = self.args.t
            a = (1 - self.betas).cumprod(dim=0).to(self.device)
            # Closed-form forward diffusion: x_t = sqrt(a_t) x_0 + sqrt(1 - a_t) eps.
            x = x0 * a[total_noise_levels - 1].sqrt() + e * (1.0 - a[total_noise_levels - 1]).sqrt()

            if bs_id < 2:
                tvu.save_image((x + 1) * 0.5, os.path.join(out_dir, f'init_{it}.png'))

            # Integrate from t = args.t / 1000 down to just above 0.
            epsilon_dt0, epsilon_dt1 = 0, 1e-5
            t0, t1 = self.args.t * 1. / 1000 - epsilon_dt0, epsilon_dt1
            t_size = 2
            ts = torch.linspace(t0, t1, t_size).to(self.device)

            x_ = x.view(batch_size, -1)  # (batch_size, state_size)
            states = (x_, )

            # ODE solver
            odeint = odeint_adjoint
            state_t = odeint(
                self.vpode,
                states,
                ts,
                atol=self.atol,
                rtol=self.rtol,
                method=self.method,
                options=None if self.method != 'euler' else dict(step_size=self.args.step_size)  # only used for fixed-point method
            )  # 'euler', 'dopri5'

            # Final ODE state of the first (only) state tensor.
            x0_ = state_t[0][-1]
            x0 = x0_.view(x.shape)  # (batch_size, c, h, w)

            if bs_id < 2:
                torch.save(x0, os.path.join(out_dir, f'samples_{it}.pth'))
                tvu.save_image((x0 + 1) * 0.5, os.path.join(out_dir, f'samples_{it}.png'))

            xs.append(x0)

        return torch.cat(xs, dim=0)
| 9,933 | 38.736 | 131 | py |
DiffPure | DiffPure-master/runners/diffpure_ldsde.py | # ---------------------------------------------------------------
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the NVIDIA Source Code License
# for DiffPure. To view a copy of this license, see the LICENSE file.
# ---------------------------------------------------------------
import os
import random
import numpy as np
import torch
import torchvision.utils as tvu
import torchsde
from guided_diffusion.script_util import create_model_and_diffusion, model_and_diffusion_defaults
from score_sde.losses import get_optimizer
from score_sde.models import utils as mutils
from score_sde.models.ema import ExponentialMovingAverage
from score_sde import sde_lib
def _extract_into_tensor(arr_or_func, timesteps, broadcast_shape):
"""
Extract values from a 1-D numpy array for a batch of indices.
:param arr: the 1-D numpy array or a func.
:param timesteps: a tensor of indices into the array to extract.
:param broadcast_shape: a larger shape of K dimensions with the batch
dimension equal to the length of timesteps.
:return: a tensor of shape [batch_size, 1, ...] where the shape has K dims.
"""
if callable(arr_or_func):
res = arr_or_func(timesteps).float()
else:
res = arr_or_func.to(device=timesteps.device)[timesteps].float()
while len(res.shape) < len(broadcast_shape):
res = res[..., None]
return res.expand(broadcast_shape)
def restore_checkpoint(ckpt_dir, state, device):
    """Load optimizer/model/EMA weights and the step count from `ckpt_dir`
    into the `state` dict, in place."""
    checkpoint = torch.load(ckpt_dir, map_location=device)
    state['optimizer'].load_state_dict(checkpoint['optimizer'])
    # strict=False: tolerate missing/unexpected keys in the model weights.
    state['model'].load_state_dict(checkpoint['model'], strict=False)
    state['ema'].load_state_dict(checkpoint['ema'])
    state['step'] = checkpoint['step']
class LDSDE(torch.nn.Module):
    """Langevin-dynamics SDE anchored at `x_init`, solvable with torchsde.

    The drift combines the diffusion model's score with a quadratic pull
    toward the initial image (strength 1/sigma2); `lambda_ld` damps the drift
    and `eta` scales the constant diagonal diffusion.
    """

    def __init__(self, model, x_init, score_type='guided_diffusion', beta_min=0.1, beta_max=20, N=1000,
                 img_shape=(3, 256, 256), sigma2=0.001, lambda_ld=0.01, eta=5, model_kwargs=None):
        """Construct a Variance Preserving SDE.

        Args:
            model: diffusion model
            score_type: [guided_diffusion, score_sde, ddpm]
            beta_min: value of beta(0)
            beta_max: value of beta(1)
        """
        super().__init__()
        self.model = model
        self.x_init = x_init
        self.sigma2 = sigma2
        self.eta = eta
        self.lambda_ld = lambda_ld  # damping coefficient
        self.score_type = score_type
        self.model_kwargs = model_kwargs
        self.img_shape = img_shape
        self.beta_0 = beta_min
        self.beta_1 = beta_max
        self.N = N
        # Discrete and continuous VP-diffusion quantities used to turn the
        # model's epsilon prediction into a score.
        self.discrete_betas = torch.linspace(beta_min / N, beta_max / N, N)
        self.alphas = 1. - self.discrete_betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        self.sqrt_alphas_cumprod = torch.sqrt(self.alphas_cumprod)
        self.sqrt_1m_alphas_cumprod = torch.sqrt(1. - self.alphas_cumprod)
        self.alphas_cumprod_cont = lambda t: torch.exp(-0.5 * (beta_max - beta_min) * t**2 - beta_min * t)
        self.sqrt_1m_alphas_cumprod_neg_recip_cont = lambda t: -1. / torch.sqrt(1. - self.alphas_cumprod_cont(t))

        # Attributes required by torchsde's solver interface.
        self.noise_type = "diagonal"
        self.sde_type = "ito"

        print(f'sigma2: {self.sigma2}, lambda_ld: {self.lambda_ld}, eta: {self.eta}')

    def _scale_timesteps(self, t):
        # Map continuous t in [0, 1] to the model's integer timestep in [0, N].
        assert torch.all(t <= 1) and torch.all(t >= 0), f't has to be in [0, 1], but get {t} with shape {t.shape}'
        return (t.float() * self.N).long()

    def ldsde_fn(self, t, x, return_type='drift'):
        """Create the drift and diffusion functions for the reverse SDE"""
        # The score is always evaluated at a small fixed time (1e-2),
        # regardless of the solver time t.
        t = torch.zeros_like(t, dtype=torch.float, device=t.device) + 1e-2
        if return_type == 'drift':

            assert x.ndim == 2 and np.prod(self.img_shape) == x.shape[1], x.shape
            x_img = x.view(-1, *self.img_shape)

            if self.score_type == 'guided_diffusion':
                # model output is epsilon
                if self.model_kwargs is None:
                    self.model_kwargs = {}

                disc_steps = self._scale_timesteps(t)  # (batch_size, ), from float in [0,1] to int in [0, 1000]
                model_output = self.model(x_img, disc_steps, **self.model_kwargs)
                # with learned sigma, so model_output contains (mean, val)
                model_output, _ = torch.split(model_output, self.img_shape[0], dim=1)
                assert x_img.shape == model_output.shape, f'{x_img.shape}, {model_output.shape}'
                model_output = model_output.view(x.shape[0], -1)
                # score = -eps / sqrt(1 - alpha_bar(t)).
                score = _extract_into_tensor(self.sqrt_1m_alphas_cumprod_neg_recip_cont, t, x.shape) * model_output

            elif self.score_type == 'score_sde':
                # model output is epsilon
                sde = sde_lib.VPSDE(beta_min=self.beta_0, beta_max=self.beta_1, N=self.N)
                score_fn = mutils.get_score_fn(sde, self.model, train=False, continuous=True)
                score = score_fn(x_img, t)
                assert x_img.shape == score.shape, f'{x_img.shape}, {score.shape}'
                score = score.view(x.shape[0], -1)

            else:
                raise NotImplementedError(f'Unknown score type in RevVPSDE: {self.score_type}!')

            # Damped Langevin drift: climb the score while staying near x_init.
            drift = -0.5 * (-score + (x - self.x_init) / self.sigma2) * self.lambda_ld  # TODO
            return drift

        else:
            # Constant, state-independent diagonal diffusion.
            diffusion_coef = np.sqrt(self.lambda_ld) * self.eta
            return torch.tensor([diffusion_coef], dtype=torch.float).expand(x.shape[0]).to(x.device)

    def f(self, t, x):
        """Create the drift function f(x, t)
        sdeint only support a 2D tensor (batch_size, c*h*w)
        """
        t = t.expand(x.shape[0])  # (batch_size, )
        drift = self.ldsde_fn(t, x, return_type='drift')
        assert drift.shape == x.shape
        return drift

    def g(self, t, x):
        """Create the diffusion function g(t)
        sdeint only support a 2D tensor (batch_size, c*h*w)
        """
        t = t.expand(x.shape[0])  # (batch_size, )
        diffusion = self.ldsde_fn(t, x, return_type='diffusion')
        assert diffusion.shape == (x.shape[0], )
        # Broadcast the per-sample scalar over the flattened state dims.
        return diffusion[:, None].expand(x.shape)
class LDGuidedDiffusion(torch.nn.Module):
    """DiffPure purifier using Langevin dynamics (LDSDE) instead of reverse diffusion.

    Loads the guided-diffusion ImageNet model or the score_sde CIFAR-10 model
    and purifies inputs by integrating an LDSDE anchored at the input image
    with torchsde.
    """

    def __init__(self, args, config, device=None):
        super().__init__()
        self.args = args
        self.config = config
        if device is None:
            device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
        self.device = device

        # load model
        if config.data.dataset == 'ImageNet':
            img_shape = (3, 256, 256)
            model_dir = 'pretrained/guided_diffusion'
            model_config = model_and_diffusion_defaults()
            model_config.update(vars(self.config.model))
            print(f'model_config: {model_config}')
            model, _ = create_model_and_diffusion(**model_config)
            model.load_state_dict(torch.load(f'{model_dir}/256x256_diffusion_uncond.pt', map_location='cpu'))

            if model_config['use_fp16']:
                model.convert_to_fp16()

        elif config.data.dataset == 'CIFAR10':
            img_shape = (3, 32, 32)
            model_dir = 'pretrained/score_sde'
            print(f'model_config: {config}')
            model = mutils.create_model(config)

            # Restore the EMA weights into the model for evaluation.
            optimizer = get_optimizer(config, model.parameters())
            ema = ExponentialMovingAverage(model.parameters(), decay=config.model.ema_rate)
            state = dict(step=0, optimizer=optimizer, model=model, ema=ema)
            restore_checkpoint(f'{model_dir}/checkpoint_8.pth', state, device)
            ema.copy_to(model.parameters())
        else:
            raise NotImplementedError(f'Unknown dataset {config.data.dataset}!')

        model.eval().to(self.device)

        self.model = model
        self.img_shape = img_shape

        print(f'use_bm: {args.use_bm}')

        # Solver settings passed through to torchsde.sdeint_adjoint.
        self.args_dict = {
            'method': 'euler',  # ["srk", "euler", None]
            'adaptive': False,
            'dt': 1e-2,
        }
        print(f'args_dict: {self.args_dict}')

    def image_editing_sample(self, img, bs_id=0, tag=None):
        """Purify `img` (NCHW, presumably in [-1, 1] given the (x+1)*0.5 saving
        below — confirm against the caller) by integrating the LDSDE anchored
        at the input; repeats `args.sample_step` times and concatenates the
        results along the batch dimension.
        """
        assert isinstance(img, torch.Tensor)
        batch_size = img.shape[0]
        state_size = int(np.prod(img.shape[1:]))  # c*h*w

        if tag is None:
            tag = 'rnd' + str(random.randint(0, 10000))
        out_dir = os.path.join(self.args.log_dir, 'bs' + str(bs_id) + '_' + tag)

        assert img.ndim == 4, img.ndim
        img = img.to(self.device)
        x0 = img

        x0_ = x0.view(batch_size, -1)  # (batch_size, state_size)
        # The SDE is rebuilt on every call because its drift is anchored at x0_.
        self.ldsde = LDSDE(model=self.model, x_init=x0_, score_type=self.args.score_type, img_shape=self.img_shape,
                           sigma2=self.args.sigma2, lambda_ld=self.args.lambda_ld, eta=self.args.eta,
                           model_kwargs=None).to(self.device)
        self.betas = self.ldsde.discrete_betas.float().to(self.device)

        # Only the first two batches dump debug images to disk.
        if bs_id < 2:
            os.makedirs(out_dir, exist_ok=True)
            tvu.save_image((x0 + 1) * 0.5, os.path.join(out_dir, f'original_input.png'))

        xs = []
        for it in range(self.args.sample_step):
            x = x0

            if bs_id < 2:
                tvu.save_image((x + 1) * 0.5, os.path.join(out_dir, f'init_{it}.png'))

            # Integration window in reverse-VP time: from 1 - args.t/1000 up to ~1.
            epsilon_dt0, epsilon_dt1 = 0, 1e-5
            t0, t1 = 1 - self.args.t * 1. / 1000 + epsilon_dt0, 1 - epsilon_dt1
            t_size = 2
            ts = torch.linspace(t0, t1, t_size).to(self.device)

            x_ = x.view(batch_size, -1)  # (batch_size, state_size)
            if self.args.use_bm:
                # Fixed Brownian sample path for reproducibility.
                bm = torchsde.BrownianInterval(t0=t0, t1=t1, size=(batch_size, state_size), device=self.device)
                xs_ = torchsde.sdeint_adjoint(self.ldsde, x_, ts, bm=bm, **self.args_dict)
            else:
                xs_ = torchsde.sdeint_adjoint(self.ldsde, x_, ts, **self.args_dict)
            x0 = xs_[-1].view(x.shape)  # (batch_size, c, h, w)

            if bs_id < 2:
                torch.save(x0, os.path.join(out_dir, f'samples_{it}.pth'))
                tvu.save_image((x0 + 1) * 0.5, os.path.join(out_dir, f'samples_{it}.png'))

            xs.append(x0)

        return torch.cat(xs, dim=0)
| 10,418 | 40.181818 | 115 | py |
DiffPure | DiffPure-master/runners/diffpure_ddpm.py | # ---------------------------------------------------------------
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the NVIDIA Source Code License
# for DiffPure. To view a copy of this license, see the LICENSE file.
# ---------------------------------------------------------------
import os
import random
import numpy as np
import torch
import torchvision.utils as tvu
from ddpm.unet_ddpm import Model
def get_beta_schedule(*, beta_start, beta_end, num_diffusion_timesteps):
    """Return a linear beta (noise-variance) schedule for a DDPM.

    Args:
        beta_start: beta value at the first diffusion step.
        beta_end: beta value at the last diffusion step.
        num_diffusion_timesteps: number of steps in the schedule.

    Returns:
        A 1-D float64 numpy array of length ``num_diffusion_timesteps``.
    """
    schedule = np.linspace(
        beta_start, beta_end, num_diffusion_timesteps, dtype=np.float64
    )
    assert schedule.shape == (num_diffusion_timesteps,)
    return schedule
def extract(a, t, x_shape):
    """Extract per-timestep coefficients from `a` and reshape for broadcasting.

    :param a: 1-D array-like of per-step values (numpy array or torch tensor;
        in this file these are schedule constants that carry no grad).
    :param t: (bs,) tensor of timestep indices.
    :param x_shape: shape of the tensor the result will broadcast against;
        x_shape[0] must equal bs.
    :return: float tensor of shape (bs, 1, ..., 1) with len(x_shape) dims.
    """
    bs, = t.shape
    assert x_shape[0] == bs
    # as_tensor avoids the extra copy (and the UserWarning) that torch.tensor
    # emits when `a` is already a tensor (e.g. the betas passed at call sites);
    # numpy inputs are converted exactly as before.
    out = torch.gather(torch.as_tensor(a, dtype=torch.float, device=t.device), 0, t.long())
    assert out.shape == (bs,)
    out = out.reshape((bs,) + (1,) * (len(x_shape) - 1))
    return out
def image_editing_denoising_step_flexible_mask(x, t, *, model, logvar, betas):
    """
    Sample from p(x_{t-1} | x_t)

    One DDPM ancestral-sampling step: the model predicts the noise epsilon,
    from which the posterior mean of x_{t-1} is reconstructed, then Gaussian
    noise with the given log-variance is added (except at t == 0).

    :param x: current noisy images x_t, shape (bs, c, h, w).
    :param t: (bs,) tensor of timestep indices.
    :param model: callable model(x, t) returning predicted noise epsilon.
    :param logvar: per-step log-variance array, indexed by t via `extract`.
    :param betas: per-step beta schedule tensor.
    """
    alphas = 1.0 - betas
    alphas_cumprod = alphas.cumprod(dim=0)
    model_output = model(x, t)
    # Coefficient of the predicted noise in the posterior-mean formula:
    # beta_t / sqrt(1 - alpha_bar_t).
    weighted_score = betas / torch.sqrt(1 - alphas_cumprod)
    mean = extract(1 / torch.sqrt(alphas), t, x.shape) * (x - extract(weighted_score, t, x.shape) * model_output)
    logvar = extract(logvar, t, x.shape)
    noise = torch.randn_like(x)
    # No noise is injected at t == 0 (the final denoising step).
    mask = 1 - (t == 0).float()
    mask = mask.reshape((x.shape[0],) + (1,) * (len(x.shape) - 1))
    sample = mean + mask * torch.exp(0.5 * logvar) * noise
    sample = sample.float()
    return sample
class Diffusion(torch.nn.Module):
    """DiffPure purification wrapper around a pretrained DDPM (CelebA-HQ).

    Downloads the CelebA-HQ checkpoint, precomputes the beta/alpha schedule,
    and exposes `image_editing_sample`, which noises an input image up to
    timestep `args.t` and runs the reverse diffusion chain to purify it.
    """

    def __init__(self, args, config, device=None):
        """
        :param args: namespace providing at least `t`, `sample_step`, `log_dir`.
        :param config: config object with `data`, `model` and `diffusion`
            sections (dataset name, var_type, beta schedule parameters).
        :param device: torch device; defaults to CUDA when available.
        """
        super().__init__()
        self.args = args
        self.config = config
        if device is None:
            device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
        self.device = device
        print("Loading model")
        # Only the CelebA-HQ checkpoint is supported by this runner.
        if self.config.data.dataset == "CelebA_HQ":
            url = "https://image-editing-test-12345.s3-us-west-2.amazonaws.com/checkpoints/celeba_hq.ckpt"
        else:
            raise ValueError
        model = Model(self.config)
        ckpt = torch.hub.load_state_dict_from_url(url, map_location='cpu')
        model.load_state_dict(ckpt)
        model.eval()
        self.model = model
        self.model_var_type = config.model.var_type
        betas = get_beta_schedule(
            beta_start=config.diffusion.beta_start,
            beta_end=config.diffusion.beta_end,
            num_diffusion_timesteps=config.diffusion.num_diffusion_timesteps
        )
        self.betas = torch.from_numpy(betas).float()
        self.num_timesteps = betas.shape[0]
        # Precompute the DDPM posterior variance of q(x_{t-1} | x_t, x_0).
        alphas = 1.0 - betas
        alphas_cumprod = np.cumprod(alphas, axis=0)
        alphas_cumprod_prev = np.append(1.0, alphas_cumprod[:-1])
        posterior_variance = betas * \
                             (1.0 - alphas_cumprod_prev) / (1.0 - alphas_cumprod)
        # "fixedlarge" uses beta_t itself as the sampling variance;
        # "fixedsmall" uses the (clipped) posterior variance.
        if self.model_var_type == "fixedlarge":
            self.logvar = np.log(np.append(posterior_variance[1], betas[1:]))
        elif self.model_var_type == 'fixedsmall':
            self.logvar = np.log(np.maximum(posterior_variance, 1e-20))

    def image_editing_sample(self, img=None, bs_id=0, tag=None):
        """Purify a batch of images (values in [-1, 1], per the saving code).

        Each of `args.sample_step` rounds adds Gaussian noise in closed form
        up to timestep `args.t` and denoises back step-by-step. Intermediate
        images are saved for the first two batches only (bs_id < 2).

        :return: tensor of shape (sample_step * batch, c, h, w).
        """
        assert isinstance(img, torch.Tensor)
        batch_size = img.shape[0]
        with torch.no_grad():
            if tag is None:
                tag = 'rnd' + str(random.randint(0, 10000))
            out_dir = os.path.join(self.args.log_dir, 'bs' + str(bs_id) + '_' + tag)
            assert img.ndim == 4, img.ndim
            x0 = img
            if bs_id < 2:
                os.makedirs(out_dir, exist_ok=True)
                tvu.save_image((x0 + 1) * 0.5, os.path.join(out_dir, f'original_input.png'))
            xs = []
            for it in range(self.args.sample_step):
                e = torch.randn_like(x0)
                total_noise_levels = self.args.t
                # Closed-form forward diffusion:
                # x_t = sqrt(a_bar_t) * x_0 + sqrt(1 - a_bar_t) * eps.
                a = (1 - self.betas).cumprod(dim=0).to(x0.device)
                x = x0 * a[total_noise_levels - 1].sqrt() + e * (1.0 - a[total_noise_levels - 1]).sqrt()
                if bs_id < 2:
                    tvu.save_image((x + 1) * 0.5, os.path.join(out_dir, f'init_{it}.png'))
                # Reverse chain from t-1 down to 0.
                for i in reversed(range(total_noise_levels)):
                    t = torch.tensor([i] * batch_size, device=img.device)
                    x = image_editing_denoising_step_flexible_mask(x, t=t, model=self.model,
                                                                  logvar=self.logvar,
                                                                  betas=self.betas.to(img.device))
                    # added intermediate step vis
                    if (i - 49) % 50 == 0 and bs_id < 2:
                        tvu.save_image((x + 1) * 0.5, os.path.join(out_dir, f'noise_t_{i}_{it}.png'))
                x0 = x
                if bs_id < 2:
                    torch.save(x0, os.path.join(out_dir, f'samples_{it}.pth'))
                    tvu.save_image((x0 + 1) * 0.5, os.path.join(out_dir, f'samples_{it}.png'))
                xs.append(x0)
            return torch.cat(xs, dim=0)
| 5,358 | 36.475524 | 113 | py |
DiffPure | DiffPure-master/runners/diffpure_sde.py | # ---------------------------------------------------------------
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the NVIDIA Source Code License
# for DiffPure. To view a copy of this license, see the LICENSE file.
# ---------------------------------------------------------------
import os
import random
import numpy as np
import torch
import torchvision.utils as tvu
import torchsde
from guided_diffusion.script_util import create_model_and_diffusion, model_and_diffusion_defaults
from score_sde.losses import get_optimizer
from score_sde.models import utils as mutils
from score_sde.models.ema import ExponentialMovingAverage
from score_sde import sde_lib
def _extract_into_tensor(arr_or_func, timesteps, broadcast_shape):
"""
Extract values from a 1-D numpy array for a batch of indices.
:param arr: the 1-D numpy array or a func.
:param timesteps: a tensor of indices into the array to extract.
:param broadcast_shape: a larger shape of K dimensions with the batch
dimension equal to the length of timesteps.
:return: a tensor of shape [batch_size, 1, ...] where the shape has K dims.
"""
if callable(arr_or_func):
res = arr_or_func(timesteps).float()
else:
res = arr_or_func.to(device=timesteps.device)[timesteps].float()
while len(res.shape) < len(broadcast_shape):
res = res[..., None]
return res.expand(broadcast_shape)
def restore_checkpoint(ckpt_dir, state, device):
    """Load a training checkpoint into `state` in place.

    `state` is a dict with 'optimizer', 'model' and 'ema' entries exposing
    load_state_dict, plus an integer 'step'. The model is restored with
    strict=False, so checkpoints with extra or missing keys still load.
    """
    checkpoint = torch.load(ckpt_dir, map_location=device)
    state['optimizer'].load_state_dict(checkpoint['optimizer'])
    state['model'].load_state_dict(checkpoint['model'], strict=False)
    state['ema'].load_state_dict(checkpoint['ema'])
    state['step'] = checkpoint['step']
class RevVPSDE(torch.nn.Module):
    """Reverse-time Variance Preserving SDE as a torchsde-compatible module.

    Implements the drift `f` and diffusion `g` of the reverse VP-SDE under
    the time reversal t' = 1 - t, so integrating forward in t' with torchsde
    denoises a sample. States are flattened 2D tensors (batch_size, c*h*w)
    because sdeint only supports 2D states.
    """

    def __init__(self, model, score_type='guided_diffusion', beta_min=0.1, beta_max=20, N=1000,
                 img_shape=(3, 256, 256), model_kwargs=None):
        """Construct a Variance Preserving SDE.

        Args:
          model: diffusion model
          score_type: [guided_diffusion, score_sde, ddpm]
          beta_min: value of beta(0)
          beta_max: value of beta(1)
          N: number of steps in the discrete beta schedule
          img_shape: (c, h, w) the flattened state represents
          model_kwargs: extra keyword args forwarded to `model`
        """
        super().__init__()
        self.model = model
        self.score_type = score_type
        self.model_kwargs = model_kwargs
        self.img_shape = img_shape
        self.beta_0 = beta_min
        self.beta_1 = beta_max
        self.N = N
        # Discrete-time (DDPM-style) schedule derived from the linear beta(t).
        self.discrete_betas = torch.linspace(beta_min / N, beta_max / N, N)
        self.alphas = 1. - self.discrete_betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        self.sqrt_alphas_cumprod = torch.sqrt(self.alphas_cumprod)
        self.sqrt_1m_alphas_cumprod = torch.sqrt(1. - self.alphas_cumprod)
        # Continuous-time alpha_bar(t) = exp(-0.5 (b1-b0) t^2 - b0 t), and the
        # coefficient -1/sqrt(1 - alpha_bar(t)) converting predicted noise
        # (epsilon) into a score.
        self.alphas_cumprod_cont = lambda t: torch.exp(-0.5 * (beta_max - beta_min) * t**2 - beta_min * t)
        self.sqrt_1m_alphas_cumprod_neg_recip_cont = lambda t: -1. / torch.sqrt(1. - self.alphas_cumprod_cont(t))
        # Attributes required by the torchsde solver interface.
        self.noise_type = "diagonal"
        self.sde_type = "ito"

    def _scale_timesteps(self, t):
        """Map continuous t in [0, 1] to discrete step indices via t * N."""
        assert torch.all(t <= 1) and torch.all(t >= 0), f't has to be in [0, 1], but get {t} with shape {t.shape}'
        return (t.float() * self.N).long()

    def vpsde_fn(self, t, x):
        """Forward VP-SDE: dx = -0.5 beta(t) x dt + sqrt(beta(t)) dw."""
        beta_t = self.beta_0 + t * (self.beta_1 - self.beta_0)
        drift = -0.5 * beta_t[:, None] * x
        diffusion = torch.sqrt(beta_t)
        return drift, diffusion

    def rvpsde_fn(self, t, x, return_type='drift'):
        """Create the drift and diffusion functions for the reverse SDE"""
        drift, diffusion = self.vpsde_fn(t, x)
        if return_type == 'drift':
            assert x.ndim == 2 and np.prod(self.img_shape) == x.shape[1], x.shape
            x_img = x.view(-1, *self.img_shape)
            if self.score_type == 'guided_diffusion':
                # model output is epsilon
                if self.model_kwargs is None:
                    self.model_kwargs = {}
                disc_steps = self._scale_timesteps(t)  # (batch_size, ), from float in [0,1] to int in [0, 1000]
                model_output = self.model(x_img, disc_steps, **self.model_kwargs)
                # with learned sigma, so model_output contains (mean, val)
                model_output, _ = torch.split(model_output, self.img_shape[0], dim=1)
                assert x_img.shape == model_output.shape, f'{x_img.shape}, {model_output.shape}'
                model_output = model_output.view(x.shape[0], -1)
                # Convert epsilon prediction to a score estimate.
                score = _extract_into_tensor(self.sqrt_1m_alphas_cumprod_neg_recip_cont, t, x.shape) * model_output
            elif self.score_type == 'score_sde':
                # model output is epsilon
                sde = sde_lib.VPSDE(beta_min=self.beta_0, beta_max=self.beta_1, N=self.N)
                score_fn = mutils.get_score_fn(sde, self.model, train=False, continuous=True)
                score = score_fn(x_img, t)
                assert x_img.shape == score.shape, f'{x_img.shape}, {score.shape}'
                score = score.view(x.shape[0], -1)
            else:
                raise NotImplementedError(f'Unknown score type in RevVPSDE: {self.score_type}!')
            # Reverse-SDE drift: f(x, t) - g(t)^2 * score(x, t).
            drift = drift - diffusion[:, None] ** 2 * score
            return drift
        else:
            return diffusion

    def f(self, t, x):
        """Create the drift function -f(x, 1-t) (by t' = 1 - t)
        sdeint only support a 2D tensor (batch_size, c*h*w)
        """
        t = t.expand(x.shape[0])  # (batch_size, )
        drift = self.rvpsde_fn(1 - t, x, return_type='drift')
        assert drift.shape == x.shape
        return -drift

    def g(self, t, x):
        """Create the diffusion function g(1-t) (by t' = 1 - t)
        sdeint only support a 2D tensor (batch_size, c*h*w)
        """
        t = t.expand(x.shape[0])  # (batch_size, )
        diffusion = self.rvpsde_fn(1 - t, x, return_type='diffusion')
        assert diffusion.shape == (x.shape[0], )
        return diffusion[:, None].expand(x.shape)
class RevGuidedDiffusion(torch.nn.Module):
    """DiffPure purifier that integrates the reverse VP-SDE with torchsde.

    Loads a pretrained diffusion model (guided-diffusion for ImageNet,
    score_sde for CIFAR-10) and purifies images by adding forward-diffusion
    noise up to timestep `args.t`, then solving the reverse SDE.
    """

    def __init__(self, args, config, device=None):
        """
        :param args: namespace with t, rand_t, t_delta, use_bm, sample_step,
            score_type and log_dir.
        :param config: config matching the chosen dataset (`data`, `model`).
        :param device: torch device; defaults to CUDA when available.
        """
        super().__init__()
        self.args = args
        self.config = config
        if device is None:
            device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
        self.device = device
        # load model
        if config.data.dataset == 'ImageNet':
            img_shape = (3, 256, 256)
            model_dir = 'pretrained/guided_diffusion'
            model_config = model_and_diffusion_defaults()
            model_config.update(vars(self.config.model))
            print(f'model_config: {model_config}')
            model, _ = create_model_and_diffusion(**model_config)
            model.load_state_dict(torch.load(f'{model_dir}/256x256_diffusion_uncond.pt', map_location='cpu'))
            if model_config['use_fp16']:
                model.convert_to_fp16()
        elif config.data.dataset == 'CIFAR10':
            img_shape = (3, 32, 32)
            model_dir = 'pretrained/score_sde'
            print(f'model_config: {config}')
            model = mutils.create_model(config)
            optimizer = get_optimizer(config, model.parameters())
            ema = ExponentialMovingAverage(model.parameters(), decay=config.model.ema_rate)
            state = dict(step=0, optimizer=optimizer, model=model, ema=ema)
            restore_checkpoint(f'{model_dir}/checkpoint_8.pth', state, device)
            # Evaluate with the EMA weights rather than the raw parameters.
            ema.copy_to(model.parameters())
        else:
            raise NotImplementedError(f'Unknown dataset {config.data.dataset}!')
        model.eval().to(self.device)
        self.model = model
        self.rev_vpsde = RevVPSDE(model=model, score_type=args.score_type, img_shape=img_shape,
                                  model_kwargs=None).to(self.device)
        self.betas = self.rev_vpsde.discrete_betas.float().to(self.device)
        print(f't: {args.t}, rand_t: {args.rand_t}, t_delta: {args.t_delta}')
        print(f'use_bm: {args.use_bm}')

    def image_editing_sample(self, img, bs_id=0, tag=None):
        """Purify a batch of images in [-1, 1] via the reverse SDE.

        Each of `args.sample_step` rounds: (1) diffuse the input to timestep
        `args.t` (jittered by +-t_delta when rand_t is set) in closed form,
        then (2) integrate the reverse SDE from t' = 1 - t/1000 to t' ~ 1
        with the Euler scheme. Intermediates are saved for bs_id < 2.

        :return: tensor of shape (sample_step * batch, c, h, w).
        """
        assert isinstance(img, torch.Tensor)
        batch_size = img.shape[0]
        state_size = int(np.prod(img.shape[1:]))  # c*h*w
        if tag is None:
            tag = 'rnd' + str(random.randint(0, 10000))
        out_dir = os.path.join(self.args.log_dir, 'bs' + str(bs_id) + '_' + tag)
        assert img.ndim == 4, img.ndim
        img = img.to(self.device)
        x0 = img
        if bs_id < 2:
            os.makedirs(out_dir, exist_ok=True)
            tvu.save_image((x0 + 1) * 0.5, os.path.join(out_dir, f'original_input.png'))
        xs = []
        for it in range(self.args.sample_step):
            e = torch.randn_like(x0).to(self.device)
            total_noise_levels = self.args.t
            if self.args.rand_t:
                total_noise_levels = self.args.t + np.random.randint(-self.args.t_delta, self.args.t_delta)
                print(f'total_noise_levels: {total_noise_levels}')
            # Closed-form forward diffusion to the chosen noise level.
            a = (1 - self.betas).cumprod(dim=0).to(self.device)
            x = x0 * a[total_noise_levels - 1].sqrt() + e * (1.0 - a[total_noise_levels - 1]).sqrt()
            if bs_id < 2:
                tvu.save_image((x + 1) * 0.5, os.path.join(out_dir, f'init_{it}.png'))
            # Small epsilons keep the solver away from the endpoint t' = 1.
            epsilon_dt0, epsilon_dt1 = 0, 1e-5
            t0, t1 = 1 - self.args.t * 1. / 1000 + epsilon_dt0, 1 - epsilon_dt1
            t_size = 2
            ts = torch.linspace(t0, t1, t_size).to(self.device)
            x_ = x.view(batch_size, -1)  # (batch_size, state_size)
            if self.args.use_bm:
                # Fixed Brownian path for reproducible sampling.
                bm = torchsde.BrownianInterval(t0=t0, t1=t1, size=(batch_size, state_size), device=self.device)
                xs_ = torchsde.sdeint_adjoint(self.rev_vpsde, x_, ts, method='euler', bm=bm)
            else:
                xs_ = torchsde.sdeint_adjoint(self.rev_vpsde, x_, ts, method='euler')
            x0 = xs_[-1].view(x.shape)  # (batch_size, c, h, w)
            if bs_id < 2:
                torch.save(x0, os.path.join(out_dir, f'samples_{it}.pth'))
                tvu.save_image((x0 + 1) * 0.5, os.path.join(out_dir, f'samples_{it}.png'))
            xs.append(x0)
        return torch.cat(xs, dim=0)
| 10,334 | 40.673387 | 115 | py |
DiffPure | DiffPure-master/bpda_eot/bpda_eot_attack.py | # ---------------------------------------------------------------
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# This file has been modified from ebm-defense.
#
# Source:
# https://github.com/point0bar1/ebm-defense/blob/master/bpda_eot_attack.py
#
# The license for the original version of this file can be
# found in this directory (LICENSE_BPDA).
# The modifications to this file are subject to the same license.
# ---------------------------------------------------------------
import torch
import torch.nn.functional as F
# Shared cross-entropy objective used by BPDA_EOT_Attack.eot_attack_loss.
criterion = torch.nn.CrossEntropyLoss()
class BPDA_EOT_Attack():
    """PGD attack combining BPDA (Backward Pass Differentiable Approximation)
    with EOT (Expectation Over Transformation) for classifiers guarded by a
    stochastic purification defense.

    `model` must be callable as model(x, mode=...) with mode 'purify' (runs
    the defense) or 'classify' (runs the classifier). Gradients are taken
    through the classifier only — BPDA treats the detached purification as
    the identity on the backward pass — and averaged over repeated stochastic
    purifications (EOT).
    """

    def __init__(self, model, adv_eps=8.0/255, eot_defense_reps=150, eot_attack_reps=15):
        """
        :param model: defended model exposing 'purify' and 'classify' modes.
        :param adv_eps: perturbation budget (l_inf radius or l_2 norm).
        :param eot_defense_reps: purification repetitions used when verifying
            whether a candidate adversarial example defeats the defense.
        :param eot_attack_reps: purification repetitions per gradient step.
        """
        self.model = model
        self.config = {
            'eot_defense_ave': 'logits',
            'eot_attack_ave': 'logits',
            'eot_defense_reps': eot_defense_reps,
            'eot_attack_reps': eot_attack_reps,
            'adv_steps': 50,
            'adv_norm': 'l_inf',
            'adv_eps': adv_eps,
            'adv_eta': 2.0 / 255,
            'log_freq': 10
        }
        print(f'BPDA_EOT config: {self.config}')

    def purify(self, x):
        """Run the (stochastic, possibly non-differentiable) defense."""
        return self.model(x, mode='purify')

    # NOTE: the first parameter was previously misspelled `seslf`; renamed to
    # the conventional `self` (behavior unchanged — it was passed positionally).
    def eot_defense_prediction(self, logits, reps=1, eot_defense_ave=None):
        """Aggregate `reps` stochastic forward passes into one prediction.

        `logits` has shape (reps * batch, n_classes); rows are grouped per
        repetition and averaged in the space chosen by `eot_defense_ave`
        ('logits', 'softmax' or 'logsoftmax').

        :return: predicted class indices of shape (batch,).
        """
        if eot_defense_ave == 'logits':
            logits_pred = logits.view([reps, int(logits.shape[0] / reps), logits.shape[1]]).mean(0)
        elif eot_defense_ave == 'softmax':
            logits_pred = F.softmax(logits, dim=1).view([reps, int(logits.shape[0] / reps), logits.shape[1]]).mean(0)
        elif eot_defense_ave == 'logsoftmax':
            logits_pred = F.log_softmax(logits, dim=1).view([reps, int(logits.shape[0] / reps), logits.shape[1]]).mean(0)
        elif reps == 1:
            logits_pred = logits
        else:
            raise RuntimeError('Invalid ave_method_pred (use "logits" or "softmax" or "logsoftmax")')
        _, y_pred = torch.max(logits_pred, 1)
        return y_pred

    def eot_attack_loss(self, logits, y, reps=1, eot_attack_ave='loss'):
        """Cross-entropy attack objective, averaged over EOT repetitions in
        the space chosen by `eot_attack_ave` ('logits', 'softmax',
        'logsoftmax', or per-sample 'loss' averaging)."""
        if eot_attack_ave == 'logits':
            logits_loss = logits.view([reps, int(logits.shape[0] / reps), logits.shape[1]]).mean(0)
            y_loss = y
        elif eot_attack_ave == 'softmax':
            logits_loss = torch.log(F.softmax(logits, dim=1).view([reps, int(logits.shape[0] / reps), logits.shape[1]]).mean(0))
            y_loss = y
        elif eot_attack_ave == 'logsoftmax':
            logits_loss = F.log_softmax(logits, dim=1).view([reps, int(logits.shape[0] / reps), logits.shape[1]]).mean(0)
            y_loss = y
        elif eot_attack_ave == 'loss':
            logits_loss = logits
            y_loss = y.repeat(reps)
        else:
            raise RuntimeError('Invalid ave_method_eot ("logits", "softmax", "logsoftmax", "loss")')
        loss = criterion(logits_loss, y_loss)
        return loss

    def predict(self, X, y, requires_grad=True, reps=1, eot_defense_ave=None, eot_attack_ave='loss'):
        """Classify (already purified) inputs and compute the attack loss.

        :return: (detached per-sample correctness mask, scalar loss).
        """
        if requires_grad:
            logits = self.model(X, mode='classify')
        else:
            with torch.no_grad():
                logits = self.model(X.data, mode='classify')
        y_pred = self.eot_defense_prediction(logits.detach(), reps, eot_defense_ave)
        correct = torch.eq(y_pred, y)
        loss = self.eot_attack_loss(logits, y, reps, eot_attack_ave)
        return correct.detach(), loss

    def pgd_update(self, X_adv, grad, X, adv_norm, adv_eps, adv_eta, eps=1e-10):
        """One projected-gradient ascent step.

        Keeps X_adv inside the adv_eps ball around X (l_inf or l_2) and
        clamped to the valid pixel range [0, 1].
        """
        if adv_norm == 'l_inf':
            X_adv.data += adv_eta * torch.sign(grad)
            X_adv = torch.clamp(torch.min(X + adv_eps, torch.max(X - adv_eps, X_adv)), min=0, max=1)
        elif adv_norm == 'l_2':
            X_adv.data += adv_eta * grad / grad.view(X.shape[0], -1).norm(p=2, dim=1).view(X.shape[0], 1, 1, 1)
            dists = (X_adv - X).view(X.shape[0], -1).norm(dim=1, p=2).view(X.shape[0], 1, 1, 1)
            X_adv = torch.clamp(X + torch.min(dists, adv_eps*torch.ones_like(dists))*(X_adv-X)/(dists+eps), min=0, max=1)
        else:
            # Fixed the previously unbalanced parenthesis in this message.
            raise RuntimeError('Invalid adv_norm ("l_inf" or "l_2")')
        return X_adv

    def purify_and_predict(self, X, y, purify_reps=1, requires_grad=True):
        """Purify `purify_reps` stochastic copies of X, classify them, and
        (optionally) return the EOT-averaged BPDA gradient w.r.t. X."""
        X_repeat = X.repeat([purify_reps, 1, 1, 1])
        # BPDA: detach purification so gradients start at the classifier input.
        X_repeat_purified = self.purify(X_repeat).detach().clone()
        X_repeat_purified.requires_grad_()
        correct, loss = self.predict(X_repeat_purified, y, requires_grad, purify_reps,
                                     self.config['eot_defense_ave'], self.config['eot_attack_ave'])
        if requires_grad:
            X_grads = torch.autograd.grad(loss, [X_repeat_purified])[0]
            # average gradients over parallel samples for EOT attack
            attack_grad = X_grads.view([purify_reps]+list(X.shape)).mean(dim=0)
            return correct, attack_grad
        else:
            return correct, None

    def eot_defense_verification(self, X_adv, y, correct, defended):
        """Re-check samples that just flipped to misclassified using the
        stronger `eot_defense_reps` averaging before declaring them broken."""
        for verify_ind in range(correct.nelement()):
            if correct[verify_ind] == 0 and defended[verify_ind] == 1:
                defended[verify_ind] = self.purify_and_predict(X_adv[verify_ind].unsqueeze(0), y[verify_ind].view([1]),
                                                               self.config['eot_defense_reps'], requires_grad=False)[0]
        return defended

    def eval_and_bpda_eot_grad(self, X_adv, y, defended, requires_grad=True):
        """Evaluate the defense on X_adv and compute the BPDA+EOT gradient.

        :return: (updated defended mask, attack gradient or None).
        """
        correct, attack_grad = self.purify_and_predict(X_adv, y, self.config['eot_attack_reps'], requires_grad)
        if self.config['eot_defense_reps'] > 0:
            defended = self.eot_defense_verification(X_adv, y, correct, defended)
        else:
            defended *= correct
        return defended, attack_grad

    def attack_batch(self, X, y):
        """Run the full PGD attack on one batch.

        :return: (per-step defended mask of shape (adv_steps + 2, batch),
            adversarial images for each input).
        """
        # get baseline accuracy for natural images
        defended = self.eval_and_bpda_eot_grad(X, y, torch.ones_like(y).bool(), False)[0]
        print('Baseline: {} of {}'.format(defended.sum(), len(defended)))
        class_batch = torch.zeros([self.config['adv_steps'] + 2, X.shape[0]]).bool()
        class_batch[0] = defended.cpu()
        ims_adv_batch = torch.zeros(X.shape)
        # Inputs misclassified from the start need no perturbation.
        for ind in range(defended.nelement()):
            if defended[ind] == 0:
                ims_adv_batch[ind] = X[ind].cpu()
        X_adv = X.clone()
        # adversarial attacks on a single batch of images
        for step in range(self.config['adv_steps'] + 1):
            defended, attack_grad = self.eval_and_bpda_eot_grad(X_adv, y, defended)
            class_batch[step+1] = defended.cpu()
            # Record the image at the step where the defense first fails.
            for ind in range(defended.nelement()):
                if class_batch[step, ind] == 1 and defended[ind] == 0:
                    ims_adv_batch[ind] = X_adv[ind].cpu()
            # update adversarial images (except on final iteration so final adv images match final eval)
            if step < self.config['adv_steps']:
                X_adv = self.pgd_update(X_adv, attack_grad, X, self.config['adv_norm'], self.config['adv_eps'], self.config['adv_eta'])
                X_adv = X_adv.detach().clone()
            if step == 1 or step % self.config['log_freq'] == 0 or step == self.config['adv_steps']:
                print('Attack {} of {}   Batch defended: {} of {}'.
                      format(step, self.config['adv_steps'], int(torch.sum(defended).cpu().numpy()), X_adv.shape[0]))
            if int(torch.sum(defended).cpu().numpy()) == 0:
                print('Attack successfully to the batch!')
                break
        # Samples that survived keep their final adversarial candidate.
        for ind in range(defended.nelement()):
            if defended[ind] == 1:
                ims_adv_batch[ind] = X_adv[ind].cpu()
        return class_batch, ims_adv_batch

    def attack_all(self, X, y, batch_size):
        """Attack X in batches of `batch_size` and concatenate the results.

        NOTE(review): when X.shape[0] is larger than batch_size but not a
        multiple of it, the trailing partial batch is skipped (floor
        division below) — confirm this is intended before relying on it.
        """
        class_path = torch.zeros([self.config['adv_steps'] + 2, 0]).bool()
        ims_adv = torch.zeros(0)
        n_batches = X.shape[0] // batch_size
        if n_batches == 0 and X.shape[0] > 0:
            n_batches = 1
        for counter in range(n_batches):
            X_batch = X[counter * batch_size:min((counter + 1) * batch_size, X.shape[0])].clone().to(X.device)
            y_batch = y[counter * batch_size:min((counter + 1) * batch_size, X.shape[0])].clone().to(X.device)
            class_batch, ims_adv_batch = self.attack_batch(X_batch.contiguous(), y_batch.contiguous())
            class_path = torch.cat((class_path, class_batch), dim=1)
            ims_adv = torch.cat((ims_adv, ims_adv_batch), dim=0)
            print(f'finished {counter}-th batch in attack_all')
        return class_path, ims_adv
| 8,620 | 45.349462 | 135 | py |
DiffPure | DiffPure-master/guided_diffusion/resample.py | # ---------------------------------------------------------------
# Taken from the following link as is from:
# https://github.com/openai/guided-diffusion/blob/main/guided_diffusion/resample.py
#
# The license for the original version of this file can be
# found in this directory (LICENSE_GUIDED_DIFFUSION).
# ---------------------------------------------------------------
from abc import ABC, abstractmethod
import numpy as np
import torch as th
import torch.distributed as dist
def create_named_schedule_sampler(name, diffusion):
    """
    Create a ScheduleSampler from a library of pre-defined samplers.

    :param name: the name of the sampler ("uniform" or "loss-second-moment").
    :param diffusion: the diffusion object to sample for.
    :raises NotImplementedError: for any unrecognized sampler name.
    """
    if name == "uniform":
        return UniformSampler(diffusion)
    if name == "loss-second-moment":
        return LossSecondMomentResampler(diffusion)
    raise NotImplementedError(f"unknown schedule sampler: {name}")
class ScheduleSampler(ABC):
    """
    A distribution over timesteps in the diffusion process, intended to reduce
    variance of the objective.

    By default, samplers perform unbiased importance sampling, in which the
    objective's mean is unchanged.
    However, subclasses may override sample() to change how the resampled
    terms are reweighted, allowing for actual changes in the objective.
    """

    @abstractmethod
    def weights(self):
        """
        Get a numpy array of weights, one per diffusion step.

        The weights needn't be normalized, but must be positive.
        """

    def sample(self, batch_size, device):
        """
        Importance-sample timesteps for a batch.

        :param batch_size: the number of timesteps.
        :param device: the torch device to save to.
        :return: a tuple (timesteps, weights):
                 - timesteps: a tensor of timestep indices.
                 - weights: a tensor of weights to scale the resulting losses.
        """
        raw = self.weights()
        probs = raw / np.sum(raw)
        chosen = np.random.choice(len(probs), size=(batch_size,), p=probs)
        # Importance weights 1 / (N * p_i) keep the loss estimator unbiased.
        importance = 1 / (len(probs) * probs[chosen])
        timesteps = th.from_numpy(chosen).long().to(device)
        loss_weights = th.from_numpy(importance).float().to(device)
        return timesteps, loss_weights
class UniformSampler(ScheduleSampler):
    """Sampler assigning equal weight to every diffusion timestep."""

    def __init__(self, diffusion):
        self.diffusion = diffusion
        self._weights = np.full(diffusion.num_timesteps, 1.0)

    def weights(self):
        return self._weights
class LossAwareSampler(ScheduleSampler):
    """Schedule sampler whose weights are updated from observed training
    losses, kept synchronized across all distributed ranks."""

    def update_with_local_losses(self, local_ts, local_losses):
        """
        Update the reweighting using losses from a model.

        Call this method from each rank with a batch of timesteps and the
        corresponding losses for each of those timesteps.
        This method will perform synchronization to make sure all of the ranks
        maintain the exact same reweighting.

        :param local_ts: an integer Tensor of timesteps.
        :param local_losses: a 1D Tensor of losses.
        """
        # Gather each rank's batch size first: all_gather requires
        # equally-sized tensors, so everything is padded to the max size.
        batch_sizes = [
            th.tensor([0], dtype=th.int32, device=local_ts.device)
            for _ in range(dist.get_world_size())
        ]
        dist.all_gather(
            batch_sizes,
            th.tensor([len(local_ts)], dtype=th.int32, device=local_ts.device),
        )
        # Pad all_gather batches to be the maximum batch size.
        batch_sizes = [x.item() for x in batch_sizes]
        max_bs = max(batch_sizes)
        timestep_batches = [th.zeros(max_bs).to(local_ts) for bs in batch_sizes]
        loss_batches = [th.zeros(max_bs).to(local_losses) for bs in batch_sizes]
        dist.all_gather(timestep_batches, local_ts)
        dist.all_gather(loss_batches, local_losses)
        # Strip the padding back off using each rank's true batch size.
        timesteps = [
            x.item() for y, bs in zip(timestep_batches, batch_sizes) for x in y[:bs]
        ]
        losses = [x.item() for y, bs in zip(loss_batches, batch_sizes) for x in y[:bs]]
        self.update_with_all_losses(timesteps, losses)

    @abstractmethod
    def update_with_all_losses(self, ts, losses):
        """
        Update the reweighting using losses from a model.

        Sub-classes should override this method to update the reweighting
        using losses from the model.
        This method directly updates the reweighting without synchronizing
        between workers. It is called by update_with_local_losses from all
        ranks with identical arguments. Thus, it should have deterministic
        behavior to maintain state across workers.

        :param ts: a list of int timesteps.
        :param losses: a list of float losses, one per timestep.
        """
class LossSecondMomentResampler(LossAwareSampler):
    """Importance-samples timesteps proportionally to the RMS (second moment)
    of recent losses at each timestep, mixed with a small uniform probability
    so every timestep stays reachable."""

    def __init__(self, diffusion, history_per_term=10, uniform_prob=0.001):
        """
        :param diffusion: diffusion object providing num_timesteps.
        :param history_per_term: losses retained per timestep (ring buffer).
        :param uniform_prob: mass of the uniform mixture component.
        """
        self.diffusion = diffusion
        self.history_per_term = history_per_term
        self.uniform_prob = uniform_prob
        self._loss_history = np.zeros(
            [diffusion.num_timesteps, history_per_term], dtype=np.float64
        )
        # np.int was deprecated in NumPy 1.20 and removed in 1.24 (it raised
        # AttributeError here); use the explicit np.int64 instead.
        self._loss_counts = np.zeros([diffusion.num_timesteps], dtype=np.int64)

    def weights(self):
        """Per-timestep sampling weights; uniform until the loss history is
        fully populated for every timestep."""
        if not self._warmed_up():
            return np.ones([self.diffusion.num_timesteps], dtype=np.float64)
        weights = np.sqrt(np.mean(self._loss_history ** 2, axis=-1))
        weights /= np.sum(weights)
        weights *= 1 - self.uniform_prob
        weights += self.uniform_prob / len(weights)
        return weights

    def update_with_all_losses(self, ts, losses):
        """Record new losses, evicting the oldest entry per timestep once its
        history buffer is full."""
        for t, loss in zip(ts, losses):
            if self._loss_counts[t] == self.history_per_term:
                # Shift out the oldest loss term.
                self._loss_history[t, :-1] = self._loss_history[t, 1:]
                self._loss_history[t, -1] = loss
            else:
                self._loss_history[t, self._loss_counts[t]] = loss
                self._loss_counts[t] += 1

    def _warmed_up(self):
        # True once every timestep has a full loss history.
        return (self._loss_counts == self.history_per_term).all()
| 6,065 | 36.214724 | 87 | py |
DiffPure | DiffPure-master/guided_diffusion/losses.py | # ---------------------------------------------------------------
# Taken from the following link as is from:
# https://github.com/openai/guided-diffusion/blob/main/guided_diffusion/losses.py
#
# The license for the original version of this file can be
# found in this directory (LICENSE_GUIDED_DIFFUSION).
# ---------------------------------------------------------------
"""
Helpers for various likelihood-based losses. These are ported from the original
Ho et al. diffusion models codebase:
https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/utils.py
"""
import numpy as np
import torch as th
def normal_kl(mean1, logvar1, mean2, logvar2):
    """
    Compute the KL divergence between two gaussians.

    Shapes are automatically broadcasted, so batches can be compared to
    scalars, among other use cases.
    """
    # Locate any Tensor argument so scalar inputs can be promoted onto the
    # same device/dtype.
    tensor = next(
        (obj for obj in (mean1, logvar1, mean2, logvar2) if isinstance(obj, th.Tensor)),
        None,
    )
    assert tensor is not None, "at least one argument must be a Tensor"
    # Force variances to be Tensors. Broadcasting helps convert scalars to
    # Tensors, but it does not work for th.exp().
    if not isinstance(logvar1, th.Tensor):
        logvar1 = th.tensor(logvar1).to(tensor)
    if not isinstance(logvar2, th.Tensor):
        logvar2 = th.tensor(logvar2).to(tensor)
    return 0.5 * (
        -1.0
        + logvar2
        - logvar1
        + th.exp(logvar1 - logvar2)
        + ((mean1 - mean2) ** 2) * th.exp(-logvar2)
    )
def approx_standard_normal_cdf(x):
    """
    A fast approximation of the cumulative distribution function of the
    standard normal, via the tanh-based formula 0.5 * (1 + tanh(...)).
    """
    inner = np.sqrt(2.0 / np.pi) * (x + 0.044715 * th.pow(x, 3))
    return 0.5 * (1.0 + th.tanh(inner))
def discretized_gaussian_log_likelihood(x, *, means, log_scales):
    """
    Compute the log-likelihood of a Gaussian distribution discretizing to a
    given image.

    :param x: the target images. It is assumed that this was uint8 values,
              rescaled to the range [-1, 1].
    :param means: the Gaussian mean Tensor.
    :param log_scales: the Gaussian log stddev Tensor.
    :return: a tensor like x of log probabilities (in nats).
    """
    assert x.shape == means.shape == log_scales.shape
    dx = x - means
    inv_std = th.exp(-log_scales)
    # Probability mass of the bin [x - 1/255, x + 1/255] under the Gaussian,
    # evaluated via the approximate CDF at both bin edges.
    upper_cdf = approx_standard_normal_cdf(inv_std * (dx + 1.0 / 255.0))
    lower_cdf = approx_standard_normal_cdf(inv_std * (dx - 1.0 / 255.0))
    log_upper = th.log(upper_cdf.clamp(min=1e-12))
    log_one_minus_lower = th.log((1.0 - lower_cdf).clamp(min=1e-12))
    bin_mass = upper_cdf - lower_cdf
    # Edge pixels integrate the full tail: (-inf, -1] and [1, inf).
    log_probs = th.where(
        x < -0.999,
        log_upper,
        th.where(x > 0.999, log_one_minus_lower, th.log(bin_mass.clamp(min=1e-12))),
    )
    assert log_probs.shape == x.shape
    return log_probs
| 2,908 | 32.825581 | 109 | py |
DiffPure | DiffPure-master/guided_diffusion/image_datasets.py | # ---------------------------------------------------------------
# Taken from the following link as is from:
# https://github.com/openai/guided-diffusion/blob/main/guided_diffusion/image_datasets.py
#
# The license for the original version of this file can be
# found in this directory (LICENSE_GUIDED_DIFFUSION).
# ---------------------------------------------------------------
import math
import random
from PIL import Image
import blobfile as bf
from mpi4py import MPI
import numpy as np
from torch.utils.data import DataLoader, Dataset
def load_data(
    *,
    data_dir,
    batch_size,
    image_size,
    class_cond=False,
    deterministic=False,
    random_crop=False,
    random_flip=True,
):
    """
    For a dataset, create a generator over (images, kwargs) pairs.

    Each images is an NCHW float tensor, and the kwargs dict contains zero or
    more keys, each of which map to a batched Tensor of their own.
    The kwargs dict can be used for class labels, in which case the key is "y"
    and the values are integer tensors of class labels.

    :param data_dir: a dataset directory.
    :param batch_size: the batch size of each returned pair.
    :param image_size: the size to which images are resized.
    :param class_cond: if True, include a "y" key in returned dicts for class
                       label. If classes are not available and this is true, an
                       exception will be raised.
    :param deterministic: if True, yield results in a deterministic order.
    :param random_crop: if True, randomly crop the images for augmentation.
    :param random_flip: if True, randomly flip the images for augmentation.
    """
    if not data_dir:
        raise ValueError("unspecified data directory")
    all_files = _list_image_files_recursively(data_dir)
    classes = None
    if class_cond:
        # Assume classes are the first part of the filename,
        # before an underscore.
        class_names = [bf.basename(path).split("_")[0] for path in all_files]
        sorted_classes = {x: i for i, x in enumerate(sorted(set(class_names)))}
        classes = [sorted_classes[x] for x in class_names]
    # Each MPI rank gets a disjoint shard of the file list (see ImageDataset).
    dataset = ImageDataset(
        image_size,
        all_files,
        classes=classes,
        shard=MPI.COMM_WORLD.Get_rank(),
        num_shards=MPI.COMM_WORLD.Get_size(),
        random_crop=random_crop,
        random_flip=random_flip,
    )
    # drop_last=True so every yielded batch has exactly batch_size images.
    if deterministic:
        loader = DataLoader(
            dataset, batch_size=batch_size, shuffle=False, num_workers=1, drop_last=True
        )
    else:
        loader = DataLoader(
            dataset, batch_size=batch_size, shuffle=True, num_workers=1, drop_last=True
        )
    # Infinite generator: restart the loader whenever it is exhausted.
    while True:
        yield from loader
def _list_image_files_recursively(data_dir):
    """Recursively collect paths of image files (jpg/jpeg/png/gif,
    case-insensitive extension) under `data_dir`, in sorted directory order."""
    image_exts = ("jpg", "jpeg", "png", "gif")
    found = []
    for entry in sorted(bf.listdir(data_dir)):
        path = bf.join(data_dir, entry)
        if "." in entry and entry.split(".")[-1].lower() in image_exts:
            found.append(path)
        elif bf.isdir(path):
            found.extend(_list_image_files_recursively(path))
    return found
class ImageDataset(Dataset):
    """Dataset over image files, resized/cropped to `resolution`, scaled to
    [-1, 1], and sharded across workers via (shard, num_shards)."""

    def __init__(
        self,
        resolution,
        image_paths,
        classes=None,
        shard=0,
        num_shards=1,
        random_crop=False,
        random_flip=True,
    ):
        super().__init__()
        self.resolution = resolution
        # Each shard takes every num_shards-th file starting at its own index.
        self.local_images = image_paths[shard:][::num_shards]
        self.local_classes = None if classes is None else classes[shard:][::num_shards]
        self.random_crop = random_crop
        self.random_flip = random_flip

    def __len__(self):
        return len(self.local_images)

    def __getitem__(self, idx):
        """Return (CHW float32 array in [-1, 1], dict with optional 'y' label)."""
        path = self.local_images[idx]
        with bf.BlobFile(path, "rb") as f:
            pil_image = Image.open(f)
            # Force the lazy PIL read before the file handle closes.
            pil_image.load()
        pil_image = pil_image.convert("RGB")
        if self.random_crop:
            arr = random_crop_arr(pil_image, self.resolution)
        else:
            arr = center_crop_arr(pil_image, self.resolution)
        # Horizontal flip with probability 0.5.
        if self.random_flip and random.random() < 0.5:
            arr = arr[:, ::-1]
        arr = arr.astype(np.float32) / 127.5 - 1
        out_dict = {}
        if self.local_classes is not None:
            out_dict["y"] = np.array(self.local_classes[idx], dtype=np.int64)
        return np.transpose(arr, [2, 0, 1]), out_dict
def center_crop_arr(pil_image, image_size):
    """Scale `pil_image` so its smaller side equals image_size, then take the
    central image_size x image_size crop as a numpy array."""
    # We are not on a new enough PIL to support the `reducing_gap` argument,
    # which uses BOX downsampling at powers of two first. Thus, we do it by
    # hand to improve downsample quality.
    while min(*pil_image.size) >= 2 * image_size:
        half = tuple(side // 2 for side in pil_image.size)
        pil_image = pil_image.resize(half, resample=Image.BOX)
    ratio = image_size / min(*pil_image.size)
    scaled = tuple(round(side * ratio) for side in pil_image.size)
    pil_image = pil_image.resize(scaled, resample=Image.BICUBIC)
    arr = np.array(pil_image)
    top = (arr.shape[0] - image_size) // 2
    left = (arr.shape[1] - image_size) // 2
    return arr[top : top + image_size, left : left + image_size]
def random_crop_arr(pil_image, image_size, min_crop_frac=0.8, max_crop_frac=1.0):
    """Take a random image_size x image_size crop at a random scale.

    The smaller image side is first resized to a value drawn uniformly from
    [ceil(image_size / max_crop_frac), ceil(image_size / min_crop_frac)],
    then a uniformly random crop is taken.
    """
    lo = math.ceil(image_size / max_crop_frac)
    hi = math.ceil(image_size / min_crop_frac)
    smaller_dim_size = random.randrange(lo, hi + 1)

    # We are not on a new enough PIL to support the `reducing_gap`
    # argument, which uses BOX downsampling at powers of two first.
    # Thus, we do it by hand to improve downsample quality.
    while min(*pil_image.size) >= 2 * smaller_dim_size:
        halved = tuple(side // 2 for side in pil_image.size)
        pil_image = pil_image.resize(halved, resample=Image.BOX)

    scale = smaller_dim_size / min(*pil_image.size)
    target = tuple(round(side * scale) for side in pil_image.size)
    pil_image = pil_image.resize(target, resample=Image.BICUBIC)

    arr = np.array(pil_image)
    top = random.randrange(arr.shape[0] - image_size + 1)
    left = random.randrange(arr.shape[1] - image_size + 1)
    return arr[top : top + image_size, left : left + image_size]
| 6,312 | 34.869318 | 89 | py |
DiffPure | DiffPure-master/guided_diffusion/nn.py | # ---------------------------------------------------------------
# Taken from the following link as is from:
# https://github.com/openai/guided-diffusion/blob/main/guided_diffusion/nn.py
#
# The license for the original version of this file can be
# found in this directory (LICENSE_GUIDED_DIFFUSION).
# ---------------------------------------------------------------
"""
Various utilities for neural networks.
"""
import math
import torch as th
import torch.nn as nn
# PyTorch 1.7 has SiLU, but we support PyTorch 1.5.
class SiLU(nn.Module):
    """Sigmoid-weighted linear unit, kept for PyTorch < 1.7 compatibility."""

    def forward(self, x):
        return th.sigmoid(x) * x
class GroupNorm32(nn.GroupNorm):
    """GroupNorm that computes in float32 and casts back to the input dtype."""

    def forward(self, x):
        normed = super().forward(x.float())
        return normed.type(x.dtype)
def conv_nd(dims, *args, **kwargs):
    """
    Create a 1D, 2D, or 3D convolution module.
    """
    conv_classes = {1: nn.Conv1d, 2: nn.Conv2d, 3: nn.Conv3d}
    conv_cls = conv_classes.get(dims)
    if conv_cls is None:
        raise ValueError(f"unsupported dimensions: {dims}")
    return conv_cls(*args, **kwargs)
def linear(*args, **kwargs):
    """
    Create a linear module.

    Thin wrapper around ``nn.Linear``; accepts the same arguments.
    """
    return nn.Linear(*args, **kwargs)
def avg_pool_nd(dims, *args, **kwargs):
    """
    Create a 1D, 2D, or 3D average pooling module.
    """
    pool_classes = {1: nn.AvgPool1d, 2: nn.AvgPool2d, 3: nn.AvgPool3d}
    pool_cls = pool_classes.get(dims)
    if pool_cls is None:
        raise ValueError(f"unsupported dimensions: {dims}")
    return pool_cls(*args, **kwargs)
def update_ema(target_params, source_params, rate=0.99):
    """
    Update target parameters to be closer to those of source parameters using
    an exponential moving average.
    :param target_params: the target parameter sequence.
    :param source_params: the source parameter sequence.
    :param rate: the EMA rate (closer to 1 means slower).
    """
    blend = 1 - rate
    for target, source in zip(target_params, source_params):
        # target <- rate * target + (1 - rate) * source, outside autograd.
        target.detach().mul_(rate).add_(source, alpha=blend)
def zero_module(module):
    """
    Reset every parameter of `module` to zero in place, then return it.
    """
    for param in module.parameters():
        param.detach().zero_()
    return module
def scale_module(module, scale):
    """
    Multiply every parameter of `module` by `scale` in place, then return it.
    """
    for param in module.parameters():
        param.detach().mul_(scale)
    return module
def mean_flat(tensor):
    """
    Take the mean over all non-batch dimensions.
    """
    reduce_dims = list(range(1, tensor.dim()))
    return tensor.mean(dim=reduce_dims)
def normalization(channels):
    """
    Make a standard normalization layer.
    :param channels: number of input channels.
    :return: an nn.Module for normalization.
    """
    # 32-group GroupNorm that normalizes in fp32 (see GroupNorm32 above).
    return GroupNorm32(32, channels)
def timestep_embedding(timesteps, dim, max_period=10000):
    """
    Create sinusoidal timestep embeddings.
    :param timesteps: a 1-D Tensor of N indices, one per batch element.
                      These may be fractional.
    :param dim: the dimension of the output.
    :param max_period: controls the minimum frequency of the embeddings.
    :return: an [N x dim] Tensor of positional embeddings.
    """
    half = dim // 2
    # Geometric frequency ladder from 1 down to ~1/max_period.
    exponents = th.arange(start=0, end=half, dtype=th.float32) / half
    freqs = th.exp(-math.log(max_period) * exponents).to(device=timesteps.device)
    angles = timesteps[:, None].float() * freqs[None]
    embedding = th.cat([th.cos(angles), th.sin(angles)], dim=-1)
    if dim % 2:
        # Odd dim: pad one zero column so the output is exactly [N x dim].
        pad = th.zeros_like(embedding[:, :1])
        embedding = th.cat([embedding, pad], dim=-1)
    return embedding
def checkpoint(func, inputs, params, flag):
    """
    Evaluate a function without caching intermediate activations, allowing for
    reduced memory at the expense of extra compute in the backward pass.
    :param func: the function to evaluate.
    :param inputs: the argument sequence to pass to `func`.
    :param params: a sequence of parameters `func` depends on but does not
                   explicitly take as arguments.
    :param flag: if False, disable gradient checkpointing.
    """
    if not flag:
        return func(*inputs)
    # Pack inputs and params together; CheckpointFunction re-splits them by
    # the recorded input count.
    packed = tuple(inputs) + tuple(params)
    return CheckpointFunction.apply(func, len(inputs), *packed)
class CheckpointFunction(th.autograd.Function):
    """Gradient checkpointing: run the wrapped function under no_grad in the
    forward pass, and re-run it with gradients enabled during backward."""

    @staticmethod
    def forward(ctx, run_function, length, *args):
        ctx.run_function = run_function
        # The first `length` args are real inputs; the remainder are extra
        # parameters the function depends on (kept so grads reach them too).
        ctx.input_tensors = list(args[:length])
        ctx.input_params = list(args[length:])
        with th.no_grad():
            output_tensors = ctx.run_function(*ctx.input_tensors)
        return output_tensors

    @staticmethod
    def backward(ctx, *output_grads):
        # Recompute the forward pass with grad tracking on detached inputs.
        ctx.input_tensors = [x.detach().requires_grad_(True) for x in ctx.input_tensors]
        with th.enable_grad():
            # Fixes a bug where the first op in run_function modifies the
            # Tensor storage in place, which is not allowed for detach()'d
            # Tensors.
            shallow_copies = [x.view_as(x) for x in ctx.input_tensors]
            output_tensors = ctx.run_function(*shallow_copies)
        input_grads = th.autograd.grad(
            output_tensors,
            ctx.input_tensors + ctx.input_params,
            output_grads,
            allow_unused=True,
        )
        # Drop references so the recomputed graph can be freed promptly.
        del ctx.input_tensors
        del ctx.input_params
        del output_tensors
        # Two leading Nones: no gradients for run_function and length.
        return (None, None) + input_grads
| 5,390 | 29.117318 | 88 | py |
DiffPure | DiffPure-master/guided_diffusion/fp16_util.py | # ---------------------------------------------------------------
# Taken from the following link as is from:
# https://github.com/openai/guided-diffusion/blob/main/guided_diffusion/fp16_util.py
#
# The license for the original version of this file can be
# found in this directory (LICENSE_GUIDED_DIFFUSION).
# ---------------------------------------------------------------
"""
Helpers to train with 16-bit precision.
"""
import numpy as np
import torch as th
import torch.nn as nn
from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors
from . import logger
# Initial log2 of the fp16 dynamic loss scale (the scale starts at 2**20;
# see MixedPrecisionTrainer, which uses 2 ** lg_loss_scale).
INITIAL_LOG_LOSS_SCALE = 20.0
def convert_module_to_f16(l):
    """
    Convert primitive modules to float16.
    """
    # Only conv layers are converted; other module types are left untouched.
    if not isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Conv3d)):
        return
    l.weight.data = l.weight.data.half()
    if l.bias is not None:
        l.bias.data = l.bias.data.half()
def convert_module_to_f32(l):
    """
    Convert primitive modules to float32, undoing convert_module_to_f16().
    """
    # Mirror of convert_module_to_f16: conv layers only.
    if not isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Conv3d)):
        return
    l.weight.data = l.weight.data.float()
    if l.bias is not None:
        l.bias.data = l.bias.data.float()
def make_master_params(param_groups_and_shapes):
    """
    Copy model parameters into a (differently-shaped) list of full-precision
    parameters.
    """
    master_params = []
    for group, shape in param_groups_and_shapes:
        # One flat fp32 tensor per group, detached from the model weights.
        flat = _flatten_dense_tensors(
            [param.detach().float() for (_, param) in group]
        )
        master = nn.Parameter(flat.view(shape))
        master.requires_grad = True
        master_params.append(master)
    return master_params
def model_grads_to_master_grads(param_groups_and_shapes, master_params):
    """
    Copy the gradients from the model parameters into the master parameters
    from make_master_params().
    """
    for master_param, (group, shape) in zip(master_params, param_groups_and_shapes):
        # Missing grads become zeros so the flattened layout stays aligned.
        grads = [param_grad_or_zeros(param) for (_, param) in group]
        master_param.grad = _flatten_dense_tensors(grads).view(shape)
def master_params_to_model_params(param_groups_and_shapes, master_params):
    """
    Copy the master parameter data back into the model parameters.
    """
    # Without copying to a list, if a generator is passed, this will
    # silently not copy any parameters.
    for master_param, (group, _) in zip(master_params, param_groups_and_shapes):
        unflat = unflatten_master_params(group, master_param.view(-1))
        for (_, param), chunk in zip(group, unflat):
            param.detach().copy_(chunk)
def unflatten_master_params(param_group, master_param):
    """Split the flat master tensor back into per-parameter tensors shaped
    like the parameters in *param_group*."""
    templates = [param for (_, param) in param_group]
    return _unflatten_dense_tensors(master_param, templates)
def get_param_groups_and_shapes(named_model_params):
    """Split named params into (scalars/vectors, matrices) groups, paired with
    the flat shape each group's master tensor should take."""
    named_model_params = list(named_model_params)
    scalars_and_vectors = [(n, p) for (n, p) in named_model_params if p.ndim <= 1]
    matrices = [(n, p) for (n, p) in named_model_params if p.ndim > 1]
    # Vectors flatten to 1-D; matrices flatten to a single row.
    return [(scalars_and_vectors, (-1)), (matrices, (1, -1))]
def master_params_to_state_dict(
    model, param_groups_and_shapes, master_params, use_fp16
):
    """Build a state dict for *model* whose parameter entries come from the
    master params (fp16 path) or directly from *master_params* (fp32 path)."""
    state_dict = model.state_dict()
    if use_fp16:
        for master_param, (group, _) in zip(master_params, param_groups_and_shapes):
            unflat = unflatten_master_params(group, master_param.view(-1))
            for (name, _), value in zip(group, unflat):
                assert name in state_dict
                state_dict[name] = value
    else:
        # Master params are the model params in order; overwrite by position.
        for i, (name, _value) in enumerate(model.named_parameters()):
            assert name in state_dict
            state_dict[name] = master_params[i]
    return state_dict
def state_dict_to_master_params(model, state_dict, use_fp16):
    """Inverse of master_params_to_state_dict: rebuild the master params for
    *model* from a saved state dict."""
    if not use_fp16:
        return [state_dict[name] for name, _ in model.named_parameters()]
    named_model_params = [
        (name, state_dict[name]) for name, _ in model.named_parameters()
    ]
    param_groups_and_shapes = get_param_groups_and_shapes(named_model_params)
    return make_master_params(param_groups_and_shapes)
def zero_master_grads(master_params):
    """Drop (set to None) the gradient of every master parameter."""
    for master_param in master_params:
        master_param.grad = None
def zero_grad(model_params):
    """Zero existing gradients in place (grads stay allocated, unlike
    zero_master_grads)."""
    # Taken from https://pytorch.org/docs/stable/_modules/torch/optim/optimizer.html#Optimizer.add_param_group
    for param in model_params:
        if param.grad is None:
            continue
        param.grad.detach_()
        param.grad.zero_()
def param_grad_or_zeros(param):
    """Return the detached gradient data of *param*, or zeros of the same
    shape when no gradient exists."""
    if param.grad is None:
        return th.zeros_like(param)
    return param.grad.data.detach()
class MixedPrecisionTrainer:
    """Wraps a model's backward/step cycle with optional fp16 training.

    With ``use_fp16=False`` this is a thin pass-through. With ``use_fp16=True``
    the model weights are converted to half precision, a flat fp32 copy
    ("master params") is what the optimizer updates, and the loss is scaled by
    ``2 ** lg_loss_scale`` with dynamic adjustment on overflow.
    """

    def __init__(
        self,
        *,
        model,
        use_fp16=False,
        fp16_scale_growth=1e-3,
        initial_lg_loss_scale=INITIAL_LOG_LOSS_SCALE,
    ):
        self.model = model
        self.use_fp16 = use_fp16
        self.fp16_scale_growth = fp16_scale_growth
        self.model_params = list(self.model.parameters())
        self.master_params = self.model_params
        self.param_groups_and_shapes = None
        self.lg_loss_scale = initial_lg_loss_scale  # log2 of the loss scale
        if self.use_fp16:
            # Build the flat fp32 master copy, then convert the model to fp16.
            self.param_groups_and_shapes = get_param_groups_and_shapes(
                self.model.named_parameters()
            )
            self.master_params = make_master_params(self.param_groups_and_shapes)
            self.model.convert_to_fp16()

    def zero_grad(self):
        # Zeroes model grads in place (master grads are replaced each step).
        zero_grad(self.model_params)

    def backward(self, loss: th.Tensor):
        if self.use_fp16:
            # Scale up so small fp16 gradients don't underflow to zero.
            loss_scale = 2 ** self.lg_loss_scale
            (loss * loss_scale).backward()
        else:
            loss.backward()

    def optimize(self, opt: th.optim.Optimizer):
        """Run one optimizer step; returns False if the step was skipped."""
        if self.use_fp16:
            return self._optimize_fp16(opt)
        else:
            return self._optimize_normal(opt)

    def _optimize_fp16(self, opt: th.optim.Optimizer):
        logger.logkv_mean("lg_loss_scale", self.lg_loss_scale)
        model_grads_to_master_grads(self.param_groups_and_shapes, self.master_params)
        grad_norm, param_norm = self._compute_norms(grad_scale=2 ** self.lg_loss_scale)
        if check_overflow(grad_norm):
            # Overflow/NaN: halve the loss scale and skip this step entirely.
            self.lg_loss_scale -= 1
            logger.log(f"Found NaN, decreased lg_loss_scale to {self.lg_loss_scale}")
            zero_master_grads(self.master_params)
            return False
        logger.logkv_mean("grad_norm", grad_norm)
        logger.logkv_mean("param_norm", param_norm)
        # Undo the loss scaling before the optimizer sees the gradients.
        self.master_params[0].grad.mul_(1.0 / (2 ** self.lg_loss_scale))
        opt.step()
        zero_master_grads(self.master_params)
        master_params_to_model_params(self.param_groups_and_shapes, self.master_params)
        # No overflow: slowly grow the loss scale again.
        self.lg_loss_scale += self.fp16_scale_growth
        return True

    def _optimize_normal(self, opt: th.optim.Optimizer):
        grad_norm, param_norm = self._compute_norms()
        logger.logkv_mean("grad_norm", grad_norm)
        logger.logkv_mean("param_norm", param_norm)
        opt.step()
        return True

    def _compute_norms(self, grad_scale=1.0):
        # Global L2 norms over all master params; grad norm is reported
        # unscaled (divided by grad_scale).
        grad_norm = 0.0
        param_norm = 0.0
        for p in self.master_params:
            with th.no_grad():
                param_norm += th.norm(p, p=2, dtype=th.float32).item() ** 2
                if p.grad is not None:
                    grad_norm += th.norm(p.grad, p=2, dtype=th.float32).item() ** 2
        return np.sqrt(grad_norm) / grad_scale, np.sqrt(param_norm)

    def master_params_to_state_dict(self, master_params):
        return master_params_to_state_dict(
            self.model, self.param_groups_and_shapes, master_params, self.use_fp16
        )

    def state_dict_to_master_params(self, state_dict):
        return state_dict_to_master_params(self.model, state_dict, self.use_fp16)
def check_overflow(value):
    """True if *value* is +inf, -inf, or NaN (NaN compares unequal to itself)."""
    inf = float("inf")
    return value == inf or value == -inf or value != value
| 8,318 | 32.955102 | 114 | py |
DiffPure | DiffPure-master/guided_diffusion/unet.py | # ---------------------------------------------------------------
# Taken from the following link as is from:
# https://github.com/openai/guided-diffusion/blob/main/guided_diffusion/unet.py
#
# The license for the original version of this file can be
# found in this directory (LICENSE_GUIDED_DIFFUSION).
# ---------------------------------------------------------------
from abc import abstractmethod
import math
import numpy as np
import torch as th
import torch.nn as nn
import torch.nn.functional as F
from .fp16_util import convert_module_to_f16, convert_module_to_f32
from .nn import (
checkpoint,
conv_nd,
linear,
avg_pool_nd,
zero_module,
normalization,
timestep_embedding,
)
class AttentionPool2d(nn.Module):
    """
    Adapted from CLIP: https://github.com/openai/CLIP/blob/main/clip/model.py

    Pools a spatial feature map into a single vector by attending over all
    positions plus a prepended mean token.
    """

    def __init__(
        self,
        spacial_dim: int,
        embed_dim: int,
        num_heads_channels: int,
        output_dim: int = None,
    ):
        super().__init__()
        self.positional_embedding = nn.Parameter(
            th.randn(embed_dim, spacial_dim ** 2 + 1) / embed_dim ** 0.5
        )
        self.qkv_proj = conv_nd(1, embed_dim, 3 * embed_dim, 1)
        self.c_proj = conv_nd(1, embed_dim, output_dim or embed_dim, 1)
        self.num_heads = embed_dim // num_heads_channels
        self.attention = QKVAttention(self.num_heads)

    def forward(self, x):
        b, c = x.shape[:2]
        x = x.reshape(b, c, -1)  # flatten spatial dims -> NC(HW)
        mean_token = x.mean(dim=-1, keepdim=True)
        x = th.cat([mean_token, x], dim=-1)  # prepend pooled token -> NC(HW+1)
        x = x + self.positional_embedding[None, :, :].to(x.dtype)
        x = self.c_proj(self.attention(self.qkv_proj(x)))
        return x[:, :, 0]  # the attended mean token
class TimestepBlock(nn.Module):
    """
    Any module where forward() takes timestep embeddings as a second argument.

    Subclasses must implement ``forward(x, emb)``; TimestepEmbedSequential
    uses this type to decide which children receive the embedding.
    """

    @abstractmethod
    def forward(self, x, emb):
        """
        Apply the module to `x` given `emb` timestep embeddings.
        """
class TimestepEmbedSequential(nn.Sequential, TimestepBlock):
    """
    A sequential container that forwards the timestep embedding `emb` to every
    child that accepts it (any TimestepBlock) and calls the rest normally.
    """

    def forward(self, x, emb):
        for layer in self:
            x = layer(x, emb) if isinstance(layer, TimestepBlock) else layer(x)
        return x
class Upsample(nn.Module):
    """
    An upsampling layer with an optional convolution.
    :param channels: channels in the inputs and outputs.
    :param use_conv: a bool determining if a convolution is applied.
    :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
                 upsampling occurs in the inner-two dimensions.
    """

    def __init__(self, channels, use_conv, dims=2, out_channels=None):
        super().__init__()
        self.channels = channels
        self.out_channels = out_channels or channels
        self.use_conv = use_conv
        self.dims = dims
        if use_conv:
            self.conv = conv_nd(dims, self.channels, self.out_channels, 3, padding=1)

    def forward(self, x):
        assert x.shape[1] == self.channels
        if self.dims == 3:
            # 3D input: keep depth, double only height and width.
            target = (x.shape[2], x.shape[3] * 2, x.shape[4] * 2)
            x = F.interpolate(x, target, mode="nearest")
        else:
            x = F.interpolate(x, scale_factor=2, mode="nearest")
        return self.conv(x) if self.use_conv else x
class Downsample(nn.Module):
    """
    A downsampling layer with an optional convolution.
    :param channels: channels in the inputs and outputs.
    :param use_conv: a bool determining if a convolution is applied.
    :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
                 downsampling occurs in the inner-two dimensions.
    """

    def __init__(self, channels, use_conv, dims=2, out_channels=None):
        super().__init__()
        self.channels = channels
        self.out_channels = out_channels or channels
        self.use_conv = use_conv
        self.dims = dims
        # 3D inputs keep their depth dimension; only H and W are halved.
        stride = (1, 2, 2) if dims == 3 else 2
        if use_conv:
            self.op = conv_nd(
                dims, self.channels, self.out_channels, 3, stride=stride, padding=1
            )
        else:
            # Average pooling cannot change the channel count.
            assert self.channels == self.out_channels
            self.op = avg_pool_nd(dims, kernel_size=stride, stride=stride)

    def forward(self, x):
        assert x.shape[1] == self.channels
        return self.op(x)
class ResBlock(TimestepBlock):
    """
    A residual block that can optionally change the number of channels.
    :param channels: the number of input channels.
    :param emb_channels: the number of timestep embedding channels.
    :param dropout: the rate of dropout.
    :param out_channels: if specified, the number of out channels.
    :param use_conv: if True and out_channels is specified, use a spatial
        convolution instead of a smaller 1x1 convolution to change the
        channels in the skip connection.
    :param dims: determines if the signal is 1D, 2D, or 3D.
    :param use_checkpoint: if True, use gradient checkpointing on this module.
    :param up: if True, use this block for upsampling.
    :param down: if True, use this block for downsampling.
    """

    def __init__(
        self,
        channels,
        emb_channels,
        dropout,
        out_channels=None,
        use_conv=False,
        use_scale_shift_norm=False,
        dims=2,
        use_checkpoint=False,
        up=False,
        down=False,
    ):
        super().__init__()
        self.channels = channels
        self.emb_channels = emb_channels
        self.dropout = dropout
        self.out_channels = out_channels or channels
        self.use_conv = use_conv
        self.use_checkpoint = use_checkpoint
        self.use_scale_shift_norm = use_scale_shift_norm

        # norm -> SiLU -> conv; when up/down-sampling, the conv is applied
        # after resampling (the Sequential is split apart in _forward).
        self.in_layers = nn.Sequential(
            normalization(channels),
            nn.SiLU(),
            conv_nd(dims, channels, self.out_channels, 3, padding=1),
        )

        self.updown = up or down

        if up:
            self.h_upd = Upsample(channels, False, dims)
            self.x_upd = Upsample(channels, False, dims)
        elif down:
            self.h_upd = Downsample(channels, False, dims)
            self.x_upd = Downsample(channels, False, dims)
        else:
            self.h_upd = self.x_upd = nn.Identity()

        # Projects the timestep embedding; twice the width when it is split
        # into a (scale, shift) pair for scale-shift conditioning.
        self.emb_layers = nn.Sequential(
            nn.SiLU(),
            linear(
                emb_channels,
                2 * self.out_channels if use_scale_shift_norm else self.out_channels,
            ),
        )
        # Final conv is zero-initialized, so the residual branch contributes
        # nothing at init and the block starts as (roughly) the skip path.
        self.out_layers = nn.Sequential(
            normalization(self.out_channels),
            nn.SiLU(),
            nn.Dropout(p=dropout),
            zero_module(
                conv_nd(dims, self.out_channels, self.out_channels, 3, padding=1)
            ),
        )

        if self.out_channels == channels:
            self.skip_connection = nn.Identity()
        elif use_conv:
            self.skip_connection = conv_nd(
                dims, channels, self.out_channels, 3, padding=1
            )
        else:
            self.skip_connection = conv_nd(dims, channels, self.out_channels, 1)

    def forward(self, x, emb):
        """
        Apply the block to a Tensor, conditioned on a timestep embedding.
        :param x: an [N x C x ...] Tensor of features.
        :param emb: an [N x emb_channels] Tensor of timestep embeddings.
        :return: an [N x C x ...] Tensor of outputs.
        """
        return checkpoint(
            self._forward, (x, emb), self.parameters(), self.use_checkpoint
        )

    def _forward(self, x, emb):
        if self.updown:
            # Resample between the norm/act and the conv of in_layers so both
            # the residual branch and the skip input change resolution.
            in_rest, in_conv = self.in_layers[:-1], self.in_layers[-1]
            h = in_rest(x)
            h = self.h_upd(h)
            x = self.x_upd(x)
            h = in_conv(h)
        else:
            h = self.in_layers(x)
        emb_out = self.emb_layers(emb).type(h.dtype)
        # Broadcast the embedding over all spatial dimensions.
        while len(emb_out.shape) < len(h.shape):
            emb_out = emb_out[..., None]
        if self.use_scale_shift_norm:
            out_norm, out_rest = self.out_layers[0], self.out_layers[1:]
            scale, shift = th.chunk(emb_out, 2, dim=1)
            h = out_norm(h) * (1 + scale) + shift
            h = out_rest(h)
        else:
            h = h + emb_out
            h = self.out_layers(h)
        return self.skip_connection(x) + h
class AttentionBlock(nn.Module):
    """
    An attention block that allows spatial positions to attend to each other.
    Originally ported from here, but adapted to the N-d case.
    https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/models/unet.py#L66.
    """

    def __init__(
        self,
        channels,
        num_heads=1,
        num_head_channels=-1,
        use_checkpoint=False,
        use_new_attention_order=False,
    ):
        super().__init__()
        self.channels = channels
        if num_head_channels == -1:
            self.num_heads = num_heads
        else:
            assert (
                channels % num_head_channels == 0
            ), f"q,k,v channels {channels} is not divisible by num_head_channels {num_head_channels}"
            self.num_heads = channels // num_head_channels
        self.use_checkpoint = use_checkpoint
        self.norm = normalization(channels)
        self.qkv = conv_nd(1, channels, channels * 3, 1)
        # New order splits q/k/v before heads; legacy splits heads first.
        attn_cls = QKVAttention if use_new_attention_order else QKVAttentionLegacy
        self.attention = attn_cls(self.num_heads)
        # Zero-initialized projection: the block starts as an identity map.
        self.proj_out = zero_module(conv_nd(1, channels, channels, 1))

    def forward(self, x):
        # Checkpointing flag is hard-wired to True for this block.
        return checkpoint(self._forward, (x,), self.parameters(), True)

    def _forward(self, x):
        b, c, *spatial = x.shape
        flat = x.reshape(b, c, -1)
        h = self.proj_out(self.attention(self.qkv(self.norm(flat))))
        return (flat + h).reshape(b, c, *spatial)
def count_flops_attn(model, _x, y):
    """
    A counter for the `thop` package to count the operations in an
    attention operation.
    Meant to be used like:
        macs, params = thop.profile(
            model,
            inputs=(inputs, timestamps),
            custom_ops={QKVAttention: QKVAttention.count_flops},
        )
    """
    batch, channels, *spatial = y[0].shape
    spatial_size = int(np.prod(spatial))
    # Two matmuls of identical cost: computing the attention weights, and
    # combining the value vectors.
    matmul_ops = 2 * batch * (spatial_size ** 2) * channels
    model.total_ops += th.DoubleTensor([matmul_ops])
class QKVAttentionLegacy(nn.Module):
    """
    A module which performs QKV attention. Matches legacy QKVAttention + input/ouput heads shaping
    """

    def __init__(self, n_heads):
        super().__init__()
        self.n_heads = n_heads

    def forward(self, qkv):
        """
        Apply QKV attention.
        :param qkv: an [N x (H * 3 * C) x T] tensor of Qs, Ks, and Vs.
        :return: an [N x (H * C) x T] tensor after attention.
        """
        bs, width, length = qkv.shape
        assert width % (3 * self.n_heads) == 0
        ch = width // (3 * self.n_heads)
        # Fold heads into the batch dimension first, then split q/k/v.
        q, k, v = qkv.reshape(bs * self.n_heads, ch * 3, length).split(ch, dim=1)
        norm = 1 / math.sqrt(math.sqrt(ch))
        attn = th.einsum(
            "bct,bcs->bts", q * norm, k * norm
        )  # More stable with f16 than dividing afterwards
        attn = th.softmax(attn.float(), dim=-1).type(attn.dtype)
        out = th.einsum("bts,bcs->bct", attn, v)
        return out.reshape(bs, -1, length)

    @staticmethod
    def count_flops(model, _x, y):
        return count_flops_attn(model, _x, y)
class QKVAttention(nn.Module):
    """
    A module which performs QKV attention and splits in a different order.
    """

    def __init__(self, n_heads):
        super().__init__()
        self.n_heads = n_heads

    def forward(self, qkv):
        """
        Apply QKV attention.
        :param qkv: an [N x (3 * H * C) x T] tensor of Qs, Ks, and Vs.
        :return: an [N x (H * C) x T] tensor after attention.
        """
        bs, width, length = qkv.shape
        assert width % (3 * self.n_heads) == 0
        ch = width // (3 * self.n_heads)
        # Split q/k/v first, then fold heads into the batch dimension.
        q, k, v = qkv.chunk(3, dim=1)
        norm = 1 / math.sqrt(math.sqrt(ch))
        attn = th.einsum(
            "bct,bcs->bts",
            (q * norm).view(bs * self.n_heads, ch, length),
            (k * norm).view(bs * self.n_heads, ch, length),
        )  # More stable with f16 than dividing afterwards
        attn = th.softmax(attn.float(), dim=-1).type(attn.dtype)
        out = th.einsum("bts,bcs->bct", attn, v.reshape(bs * self.n_heads, ch, length))
        return out.reshape(bs, -1, length)

    @staticmethod
    def count_flops(model, _x, y):
        return count_flops_attn(model, _x, y)
class UNetModel(nn.Module):
    """
    The full UNet model with attention and timestep embedding.
    :param in_channels: channels in the input Tensor.
    :param model_channels: base channel count for the model.
    :param out_channels: channels in the output Tensor.
    :param num_res_blocks: number of residual blocks per downsample.
    :param attention_resolutions: a collection of downsample rates at which
        attention will take place. May be a set, list, or tuple.
        For example, if this contains 4, then at 4x downsampling, attention
        will be used.
    :param dropout: the dropout probability.
    :param channel_mult: channel multiplier for each level of the UNet.
    :param conv_resample: if True, use learned convolutions for upsampling and
        downsampling.
    :param dims: determines if the signal is 1D, 2D, or 3D.
    :param num_classes: if specified (as an int), then this model will be
        class-conditional with `num_classes` classes.
    :param use_checkpoint: use gradient checkpointing to reduce memory usage.
    :param num_heads: the number of attention heads in each attention layer.
    :param num_heads_channels: if specified, ignore num_heads and instead use
                               a fixed channel width per attention head.
    :param num_heads_upsample: works with num_heads to set a different number
                               of heads for upsampling. Deprecated.
    :param use_scale_shift_norm: use a FiLM-like conditioning mechanism.
    :param resblock_updown: use residual blocks for up/downsampling.
    :param use_new_attention_order: use a different attention pattern for potentially
                                    increased efficiency.
    """

    def __init__(
        self,
        image_size,
        in_channels,
        model_channels,
        out_channels,
        num_res_blocks,
        attention_resolutions,
        dropout=0,
        channel_mult=(1, 2, 4, 8),
        conv_resample=True,
        dims=2,
        num_classes=None,
        use_checkpoint=False,
        use_fp16=False,
        num_heads=1,
        num_head_channels=-1,
        num_heads_upsample=-1,
        use_scale_shift_norm=False,
        resblock_updown=False,
        use_new_attention_order=False,
    ):
        super().__init__()

        if num_heads_upsample == -1:
            num_heads_upsample = num_heads

        self.image_size = image_size
        self.in_channels = in_channels
        self.model_channels = model_channels
        self.out_channels = out_channels
        self.num_res_blocks = num_res_blocks
        self.attention_resolutions = attention_resolutions
        self.dropout = dropout
        self.channel_mult = channel_mult
        self.conv_resample = conv_resample
        self.num_classes = num_classes
        self.use_checkpoint = use_checkpoint
        self.dtype = th.float16 if use_fp16 else th.float32
        self.num_heads = num_heads
        self.num_head_channels = num_head_channels
        self.num_heads_upsample = num_heads_upsample

        # Timestep embedding: model_channels sinusoidal features -> MLP.
        time_embed_dim = model_channels * 4
        self.time_embed = nn.Sequential(
            linear(model_channels, time_embed_dim),
            nn.SiLU(),
            linear(time_embed_dim, time_embed_dim),
        )

        if self.num_classes is not None:
            # Class conditioning is added to the timestep embedding.
            self.label_emb = nn.Embedding(num_classes, time_embed_dim)

        # ---- Encoder (downsampling) path ----
        ch = input_ch = int(channel_mult[0] * model_channels)
        self.input_blocks = nn.ModuleList(
            [TimestepEmbedSequential(conv_nd(dims, in_channels, ch, 3, padding=1))]
        )
        self._feature_size = ch
        # Channel count of every encoder block output, for skip connections.
        input_block_chans = [ch]
        ds = 1  # current downsample rate
        for level, mult in enumerate(channel_mult):
            for _ in range(num_res_blocks):
                layers = [
                    ResBlock(
                        ch,
                        time_embed_dim,
                        dropout,
                        out_channels=int(mult * model_channels),
                        dims=dims,
                        use_checkpoint=use_checkpoint,
                        use_scale_shift_norm=use_scale_shift_norm,
                    )
                ]
                ch = int(mult * model_channels)
                if ds in attention_resolutions:
                    layers.append(
                        AttentionBlock(
                            ch,
                            use_checkpoint=use_checkpoint,
                            num_heads=num_heads,
                            num_head_channels=num_head_channels,
                            use_new_attention_order=use_new_attention_order,
                        )
                    )
                self.input_blocks.append(TimestepEmbedSequential(*layers))
                self._feature_size += ch
                input_block_chans.append(ch)
            if level != len(channel_mult) - 1:
                # Downsample between levels (not after the last one).
                out_ch = ch
                self.input_blocks.append(
                    TimestepEmbedSequential(
                        ResBlock(
                            ch,
                            time_embed_dim,
                            dropout,
                            out_channels=out_ch,
                            dims=dims,
                            use_checkpoint=use_checkpoint,
                            use_scale_shift_norm=use_scale_shift_norm,
                            down=True,
                        )
                        if resblock_updown
                        else Downsample(
                            ch, conv_resample, dims=dims, out_channels=out_ch
                        )
                    )
                )
                ch = out_ch
                input_block_chans.append(ch)
                ds *= 2
                self._feature_size += ch

        # ---- Bottleneck: ResBlock -> Attention -> ResBlock ----
        self.middle_block = TimestepEmbedSequential(
            ResBlock(
                ch,
                time_embed_dim,
                dropout,
                dims=dims,
                use_checkpoint=use_checkpoint,
                use_scale_shift_norm=use_scale_shift_norm,
            ),
            AttentionBlock(
                ch,
                use_checkpoint=use_checkpoint,
                num_heads=num_heads,
                num_head_channels=num_head_channels,
                use_new_attention_order=use_new_attention_order,
            ),
            ResBlock(
                ch,
                time_embed_dim,
                dropout,
                dims=dims,
                use_checkpoint=use_checkpoint,
                use_scale_shift_norm=use_scale_shift_norm,
            ),
        )
        self._feature_size += ch

        # ---- Decoder (upsampling) path, mirroring the encoder ----
        self.output_blocks = nn.ModuleList([])
        for level, mult in list(enumerate(channel_mult))[::-1]:
            for i in range(num_res_blocks + 1):
                # Pop the matching skip-connection channel count.
                ich = input_block_chans.pop()
                layers = [
                    ResBlock(
                        ch + ich,
                        time_embed_dim,
                        dropout,
                        out_channels=int(model_channels * mult),
                        dims=dims,
                        use_checkpoint=use_checkpoint,
                        use_scale_shift_norm=use_scale_shift_norm,
                    )
                ]
                ch = int(model_channels * mult)
                if ds in attention_resolutions:
                    layers.append(
                        AttentionBlock(
                            ch,
                            use_checkpoint=use_checkpoint,
                            num_heads=num_heads_upsample,
                            num_head_channels=num_head_channels,
                            use_new_attention_order=use_new_attention_order,
                        )
                    )
                if level and i == num_res_blocks:
                    # Upsample at the end of each level except the outermost.
                    out_ch = ch
                    layers.append(
                        ResBlock(
                            ch,
                            time_embed_dim,
                            dropout,
                            out_channels=out_ch,
                            dims=dims,
                            use_checkpoint=use_checkpoint,
                            use_scale_shift_norm=use_scale_shift_norm,
                            up=True,
                        )
                        if resblock_updown
                        else Upsample(ch, conv_resample, dims=dims, out_channels=out_ch)
                    )
                    ds //= 2
                self.output_blocks.append(TimestepEmbedSequential(*layers))
                self._feature_size += ch

        # Final projection; by now ch has returned to input_ch
        # (the decoder ends back at channel_mult[0]).
        self.out = nn.Sequential(
            normalization(ch),
            nn.SiLU(),
            zero_module(conv_nd(dims, input_ch, out_channels, 3, padding=1)),
        )

    def convert_to_fp16(self):
        """
        Convert the torso of the model to float16.
        """
        self.input_blocks.apply(convert_module_to_f16)
        self.middle_block.apply(convert_module_to_f16)
        self.output_blocks.apply(convert_module_to_f16)

    def convert_to_fp32(self):
        """
        Convert the torso of the model to float32.
        """
        self.input_blocks.apply(convert_module_to_f32)
        self.middle_block.apply(convert_module_to_f32)
        self.output_blocks.apply(convert_module_to_f32)

    def forward(self, x, timesteps, y=None):
        """
        Apply the model to an input batch.
        :param x: an [N x C x ...] Tensor of inputs.
        :param timesteps: a 1-D batch of timesteps.
        :param y: an [N] Tensor of labels, if class-conditional.
        :return: an [N x C x ...] Tensor of outputs.
        """
        assert (y is not None) == (
            self.num_classes is not None
        ), "must specify y if and only if the model is class-conditional"

        hs = []
        emb = self.time_embed(timestep_embedding(timesteps, self.model_channels))

        if self.num_classes is not None:
            assert y.shape == (x.shape[0],)
            emb = emb + self.label_emb(y)

        h = x.type(self.dtype)
        # Encoder: record every block output for the skip connections.
        for module in self.input_blocks:
            h = module(h, emb)
            hs.append(h)
        h = self.middle_block(h, emb)
        # Decoder: concatenate the matching skip activation on channels.
        for module in self.output_blocks:
            h = th.cat([h, hs.pop()], dim=1)
            h = module(h, emb)
        h = h.type(x.dtype)
        return self.out(h)
class SuperResModel(UNetModel):
    """
    A UNetModel that performs super-resolution.
    Expects an extra kwarg `low_res` to condition on a low-resolution image.
    """

    def __init__(self, image_size, in_channels, *args, **kwargs):
        # Double the input channels: the model sees [x, upsampled low_res].
        super().__init__(image_size, in_channels * 2, *args, **kwargs)

    def forward(self, x, timesteps, low_res=None, **kwargs):
        _, _, new_height, new_width = x.shape
        # Bilinearly upsample the conditioning image to match x, then stack
        # it along the channel axis.
        upsampled = F.interpolate(low_res, (new_height, new_width), mode="bilinear")
        return super().forward(th.cat([x, upsampled], dim=1), timesteps, **kwargs)
class EncoderUNetModel(nn.Module):
    """
    The half UNet model with attention and timestep embedding.

    Only the downsampling trunk of the UNet is built (no decoder); a pooling
    head then maps the features to an [N x out_channels] output. The pooling
    strategy is selected by ``pool``: "adaptive", "attention", "spatial",
    or "spatial_v2".
    For usage, see UNet.
    """
    def __init__(
        self,
        image_size,
        in_channels,
        model_channels,
        out_channels,
        num_res_blocks,
        attention_resolutions,
        dropout=0,
        channel_mult=(1, 2, 4, 8),
        conv_resample=True,
        dims=2,
        use_checkpoint=False,
        use_fp16=False,
        num_heads=1,
        num_head_channels=-1,
        num_heads_upsample=-1,
        use_scale_shift_norm=False,
        resblock_updown=False,
        use_new_attention_order=False,
        pool="adaptive",
    ):
        super().__init__()
        if num_heads_upsample == -1:
            num_heads_upsample = num_heads
        self.in_channels = in_channels
        self.model_channels = model_channels
        self.out_channels = out_channels
        self.num_res_blocks = num_res_blocks
        self.attention_resolutions = attention_resolutions
        self.dropout = dropout
        self.channel_mult = channel_mult
        self.conv_resample = conv_resample
        self.use_checkpoint = use_checkpoint
        self.dtype = th.float16 if use_fp16 else th.float32
        self.num_heads = num_heads
        self.num_head_channels = num_head_channels
        self.num_heads_upsample = num_heads_upsample
        # MLP that embeds the scalar timestep into a 4x-wide vector.
        time_embed_dim = model_channels * 4
        self.time_embed = nn.Sequential(
            linear(model_channels, time_embed_dim),
            nn.SiLU(),
            linear(time_embed_dim, time_embed_dim),
        )
        ch = int(channel_mult[0] * model_channels)
        self.input_blocks = nn.ModuleList(
            [TimestepEmbedSequential(conv_nd(dims, in_channels, ch, 3, padding=1))]
        )
        self._feature_size = ch
        input_block_chans = [ch]
        # ds tracks the current downsampling factor, used to decide where
        # attention layers are inserted (via attention_resolutions).
        ds = 1
        for level, mult in enumerate(channel_mult):
            for _ in range(num_res_blocks):
                layers = [
                    ResBlock(
                        ch,
                        time_embed_dim,
                        dropout,
                        out_channels=int(mult * model_channels),
                        dims=dims,
                        use_checkpoint=use_checkpoint,
                        use_scale_shift_norm=use_scale_shift_norm,
                    )
                ]
                ch = int(mult * model_channels)
                if ds in attention_resolutions:
                    layers.append(
                        AttentionBlock(
                            ch,
                            use_checkpoint=use_checkpoint,
                            num_heads=num_heads,
                            num_head_channels=num_head_channels,
                            use_new_attention_order=use_new_attention_order,
                        )
                    )
                self.input_blocks.append(TimestepEmbedSequential(*layers))
                self._feature_size += ch
                input_block_chans.append(ch)
            # Downsample between resolution levels (not after the last one).
            if level != len(channel_mult) - 1:
                out_ch = ch
                self.input_blocks.append(
                    TimestepEmbedSequential(
                        ResBlock(
                            ch,
                            time_embed_dim,
                            dropout,
                            out_channels=out_ch,
                            dims=dims,
                            use_checkpoint=use_checkpoint,
                            use_scale_shift_norm=use_scale_shift_norm,
                            down=True,
                        )
                        if resblock_updown
                        else Downsample(
                            ch, conv_resample, dims=dims, out_channels=out_ch
                        )
                    )
                )
                ch = out_ch
                input_block_chans.append(ch)
                ds *= 2
                self._feature_size += ch
        self.middle_block = TimestepEmbedSequential(
            ResBlock(
                ch,
                time_embed_dim,
                dropout,
                dims=dims,
                use_checkpoint=use_checkpoint,
                use_scale_shift_norm=use_scale_shift_norm,
            ),
            AttentionBlock(
                ch,
                use_checkpoint=use_checkpoint,
                num_heads=num_heads,
                num_head_channels=num_head_channels,
                use_new_attention_order=use_new_attention_order,
            ),
            ResBlock(
                ch,
                time_embed_dim,
                dropout,
                dims=dims,
                use_checkpoint=use_checkpoint,
                use_scale_shift_norm=use_scale_shift_norm,
            ),
        )
        self._feature_size += ch
        # Pooling head: reduces spatial features to an [N x out_channels] output.
        self.pool = pool
        if pool == "adaptive":
            self.out = nn.Sequential(
                normalization(ch),
                nn.SiLU(),
                nn.AdaptiveAvgPool2d((1, 1)),
                zero_module(conv_nd(dims, ch, out_channels, 1)),
                nn.Flatten(),
            )
        elif pool == "attention":
            assert num_head_channels != -1
            self.out = nn.Sequential(
                normalization(ch),
                nn.SiLU(),
                AttentionPool2d(
                    (image_size // ds), ch, num_head_channels, out_channels
                ),
            )
        elif pool == "spatial":
            # Operates on spatially-averaged features collected across depths.
            self.out = nn.Sequential(
                nn.Linear(self._feature_size, 2048),
                nn.ReLU(),
                nn.Linear(2048, self.out_channels),
            )
        elif pool == "spatial_v2":
            self.out = nn.Sequential(
                nn.Linear(self._feature_size, 2048),
                normalization(2048),
                nn.SiLU(),
                nn.Linear(2048, self.out_channels),
            )
        else:
            raise NotImplementedError(f"Unexpected {pool} pooling")
    def convert_to_fp16(self):
        """
        Convert the torso of the model to float16.
        """
        self.input_blocks.apply(convert_module_to_f16)
        self.middle_block.apply(convert_module_to_f16)
    def convert_to_fp32(self):
        """
        Convert the torso of the model to float32.
        """
        self.input_blocks.apply(convert_module_to_f32)
        self.middle_block.apply(convert_module_to_f32)
    def forward(self, x, timesteps):
        """
        Apply the model to an input batch.
        :param x: an [N x C x ...] Tensor of inputs.
        :param timesteps: a 1-D batch of timesteps.
        :return: an [N x K] Tensor of outputs.
        """
        emb = self.time_embed(timestep_embedding(timesteps, self.model_channels))
        results = []
        h = x.type(self.dtype)
        for module in self.input_blocks:
            h = module(h, emb)
            if self.pool.startswith("spatial"):
                # Spatial pooling: average each stage's activations over H, W.
                results.append(h.type(x.dtype).mean(dim=(2, 3)))
        h = self.middle_block(h, emb)
        if self.pool.startswith("spatial"):
            results.append(h.type(x.dtype).mean(dim=(2, 3)))
            # Concatenate per-stage averages along the feature axis.
            h = th.cat(results, axis=-1)
            return self.out(h)
        else:
            h = h.type(x.dtype)
            return self.out(h)
| 31,605 | 34.001107 | 124 | py |
DiffPure | DiffPure-master/guided_diffusion/gaussian_diffusion.py | # ---------------------------------------------------------------
# Taken from the following link as is from:
# https://github.com/openai/guided-diffusion/blob/main/guided_diffusion/gaussian_diffusion.py
#
# The license for the original version of this file can be
# found in this directory (LICENSE_GUIDED_DIFFUSION).
# ---------------------------------------------------------------
"""
This code started out as a PyTorch port of Ho et al's diffusion models:
https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/diffusion_utils_2.py
Docstrings have been added, as well as DDIM sampling and a new collection of beta schedules.
"""
import enum
import math
import numpy as np
import torch as th
from .nn import mean_flat
from .losses import normal_kl, discretized_gaussian_log_likelihood
def get_named_beta_schedule(schedule_name, num_diffusion_timesteps):
    """
    Get a pre-defined beta schedule for the given name.

    The beta schedule library consists of beta schedules which remain similar
    in the limit of num_diffusion_timesteps. Beta schedules may be added, but
    should not be removed or changed once they are committed to maintain
    backwards compatibility.

    :param schedule_name: "linear" or "cosine".
    :param num_diffusion_timesteps: number of betas to produce.
    :return: a 1-D float64 numpy array of betas.
    :raises NotImplementedError: for an unknown schedule name.
    """
    if schedule_name == "linear":
        # Linear schedule from Ho et al, rescaled so the endpoints stay
        # comparable for any number of diffusion steps.
        scale = 1000 / num_diffusion_timesteps
        return np.linspace(
            scale * 0.0001, scale * 0.02, num_diffusion_timesteps, dtype=np.float64
        )
    if schedule_name == "cosine":
        return betas_for_alpha_bar(
            num_diffusion_timesteps,
            lambda t: math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2,
        )
    raise NotImplementedError(f"unknown beta schedule: {schedule_name}")
def betas_for_alpha_bar(num_diffusion_timesteps, alpha_bar, max_beta=0.999):
    """
    Create a beta schedule that discretizes the given alpha_t_bar function,
    which defines the cumulative product of (1-beta) over time from t = [0,1].

    :param num_diffusion_timesteps: the number of betas to produce.
    :param alpha_bar: a lambda that takes an argument t from 0 to 1 and
                      produces the cumulative product of (1-beta) up to that
                      part of the diffusion process.
    :param max_beta: the maximum beta to use; use values lower than 1 to
                     prevent singularities.
    :return: a 1-D numpy array of betas.
    """
    steps = num_diffusion_timesteps
    # beta_i = 1 - alpha_bar(t_{i+1}) / alpha_bar(t_i), clipped at max_beta.
    return np.array(
        [
            min(1 - alpha_bar((i + 1) / steps) / alpha_bar(i / steps), max_beta)
            for i in range(steps)
        ]
    )
class ModelMeanType(enum.Enum):
    """
    Which type of output the model predicts.

    The network may be trained to predict the previous latent x_{t-1},
    the clean data x_0, or the noise epsilon used to produce x_t.
    """
    PREVIOUS_X = enum.auto()  # the model predicts x_{t-1}
    START_X = enum.auto()  # the model predicts x_0
    EPSILON = enum.auto()  # the model predicts epsilon
class ModelVarType(enum.Enum):
    """
    What is used as the model's output variance.
    The LEARNED_RANGE option has been added to allow the model to predict
    values between FIXED_SMALL and FIXED_LARGE, making its job easier.
    """
    LEARNED = enum.auto()  # model output is used directly as the log-variance
    FIXED_SMALL = enum.auto()  # use the clipped posterior variance
    FIXED_LARGE = enum.auto()  # use beta_t as the variance
    LEARNED_RANGE = enum.auto()  # model output interpolates between small and large
class LossType(enum.Enum):
    """Training objectives supported for diffusion models."""

    MSE = enum.auto()  # use raw MSE loss (and KL when learning variances)
    RESCALED_MSE = enum.auto()  # use raw MSE loss (with RESCALED_KL when learning variances)
    KL = enum.auto()  # use the variational lower-bound
    RESCALED_KL = enum.auto()  # like KL, but rescale to estimate the full VLB

    def is_vb(self):
        # True only for the purely variational-bound objectives.
        return self in (LossType.KL, LossType.RESCALED_KL)
class GaussianDiffusion:
"""
Utilities for training and sampling diffusion models.
Ported directly from here, and then adapted over time to further experimentation.
https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/diffusion_utils_2.py#L42
:param betas: a 1-D numpy array of betas for each diffusion timestep,
starting at T and going to 1.
:param model_mean_type: a ModelMeanType determining what the model outputs.
:param model_var_type: a ModelVarType determining how variance is output.
:param loss_type: a LossType determining the loss function to use.
:param rescale_timesteps: if True, pass floating point timesteps into the
model so that they are always scaled like in the
original paper (0 to 1000).
"""
    def __init__(
        self,
        *,
        betas,
        model_mean_type,
        model_var_type,
        loss_type,
        rescale_timesteps=False,
    ):
        """Precompute all per-timestep constants for q(x_t|...) and p(x_{t-1}|x_t)."""
        self.model_mean_type = model_mean_type
        self.model_var_type = model_var_type
        self.loss_type = loss_type
        self.rescale_timesteps = rescale_timesteps
        # Use float64 for accuracy.
        betas = np.array(betas, dtype=np.float64)
        self.betas = betas
        assert len(betas.shape) == 1, "betas must be 1-D"
        assert (betas > 0).all() and (betas <= 1).all()
        self.num_timesteps = int(betas.shape[0])
        alphas = 1.0 - betas
        self.alphas_cumprod = np.cumprod(alphas, axis=0)
        # alpha_bar shifted by one step in each direction, padded at the ends.
        self.alphas_cumprod_prev = np.append(1.0, self.alphas_cumprod[:-1])
        self.alphas_cumprod_next = np.append(self.alphas_cumprod[1:], 0.0)
        assert self.alphas_cumprod_prev.shape == (self.num_timesteps,)
        # calculations for diffusion q(x_t | x_{t-1}) and others
        self.sqrt_alphas_cumprod = np.sqrt(self.alphas_cumprod)
        self.sqrt_one_minus_alphas_cumprod = np.sqrt(1.0 - self.alphas_cumprod)
        self.log_one_minus_alphas_cumprod = np.log(1.0 - self.alphas_cumprod)
        self.sqrt_recip_alphas_cumprod = np.sqrt(1.0 / self.alphas_cumprod)
        self.sqrt_recipm1_alphas_cumprod = np.sqrt(1.0 / self.alphas_cumprod - 1)
        # calculations for posterior q(x_{t-1} | x_t, x_0)
        self.posterior_variance = (
            betas * (1.0 - self.alphas_cumprod_prev) / (1.0 - self.alphas_cumprod)
        )
        # log calculation clipped because the posterior variance is 0 at the
        # beginning of the diffusion chain.
        self.posterior_log_variance_clipped = np.log(
            np.append(self.posterior_variance[1], self.posterior_variance[1:])
        )
        self.posterior_mean_coef1 = (
            betas * np.sqrt(self.alphas_cumprod_prev) / (1.0 - self.alphas_cumprod)
        )
        self.posterior_mean_coef2 = (
            (1.0 - self.alphas_cumprod_prev)
            * np.sqrt(alphas)
            / (1.0 - self.alphas_cumprod)
        )
    def q_mean_variance(self, x_start, t):
        """
        Get the distribution q(x_t | x_0).
        :param x_start: the [N x C x ...] tensor of noiseless inputs.
        :param t: the number of diffusion steps (minus 1). Here, 0 means one step.
        :return: A tuple (mean, variance, log_variance), all of x_start's shape.
        """
        # q(x_t | x_0) = N(sqrt(alpha_bar_t) * x_0, (1 - alpha_bar_t) * I)
        mean = (
            _extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start
        )
        variance = _extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape)
        log_variance = _extract_into_tensor(
            self.log_one_minus_alphas_cumprod, t, x_start.shape
        )
        return mean, variance, log_variance
    def q_sample(self, x_start, t, noise=None):
        """
        Diffuse the data for a given number of diffusion steps.
        In other words, sample from q(x_t | x_0).
        :param x_start: the initial data batch.
        :param t: the number of diffusion steps (minus 1). Here, 0 means one step.
        :param noise: if specified, the split-out normal noise.
        :return: A noisy version of x_start.
        """
        if noise is None:
            noise = th.randn_like(x_start)
        assert noise.shape == x_start.shape
        # x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps
        return (
            _extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start
            + _extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape)
            * noise
        )
    def q_posterior_mean_variance(self, x_start, x_t, t):
        """
        Compute the mean and variance of the diffusion posterior:
            q(x_{t-1} | x_t, x_0)

        :param x_start: the (possibly predicted) clean data x_0.
        :param x_t: the noisy data at step t, same shape as x_start.
        :param t: a 1-D batch of timesteps.
        :return: a tuple (posterior_mean, posterior_variance,
                 posterior_log_variance_clipped).
        """
        assert x_start.shape == x_t.shape
        # mean = coef1 * x_0 + coef2 * x_t (coefficients precomputed in __init__)
        posterior_mean = (
            _extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start
            + _extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t
        )
        posterior_variance = _extract_into_tensor(self.posterior_variance, t, x_t.shape)
        posterior_log_variance_clipped = _extract_into_tensor(
            self.posterior_log_variance_clipped, t, x_t.shape
        )
        assert (
            posterior_mean.shape[0]
            == posterior_variance.shape[0]
            == posterior_log_variance_clipped.shape[0]
            == x_start.shape[0]
        )
        return posterior_mean, posterior_variance, posterior_log_variance_clipped
    def p_mean_variance(
        self, model, x, t, clip_denoised=True, denoised_fn=None, model_kwargs=None
    ):
        """
        Apply the model to get p(x_{t-1} | x_t), as well as a prediction of
        the initial x, x_0.
        :param model: the model, which takes a signal and a batch of timesteps
                      as input.
        :param x: the [N x C x ...] tensor at time t.
        :param t: a 1-D Tensor of timesteps.
        :param clip_denoised: if True, clip the denoised signal into [-1, 1].
        :param denoised_fn: if not None, a function which applies to the
            x_start prediction before it is used to sample. Applies before
            clip_denoised.
        :param model_kwargs: if not None, a dict of extra keyword arguments to
            pass to the model. This can be used for conditioning.
        :return: a dict with the following keys:
                 - 'mean': the model mean output.
                 - 'variance': the model variance output.
                 - 'log_variance': the log of 'variance'.
                 - 'pred_xstart': the prediction for x_0.
        """
        if model_kwargs is None:
            model_kwargs = {}
        B, C = x.shape[:2]
        assert t.shape == (B,)
        model_output = model(x, self._scale_timesteps(t), **model_kwargs)
        # Resolve the variance according to self.model_var_type.
        if self.model_var_type in [ModelVarType.LEARNED, ModelVarType.LEARNED_RANGE]:
            # Learned variances double the output channels: [mean | var].
            assert model_output.shape == (B, C * 2, *x.shape[2:])
            model_output, model_var_values = th.split(model_output, C, dim=1)
            if self.model_var_type == ModelVarType.LEARNED:
                model_log_variance = model_var_values
                model_variance = th.exp(model_log_variance)
            else:
                min_log = _extract_into_tensor(
                    self.posterior_log_variance_clipped, t, x.shape
                )
                max_log = _extract_into_tensor(np.log(self.betas), t, x.shape)
                # The model_var_values is [-1, 1] for [min_var, max_var].
                frac = (model_var_values + 1) / 2
                model_log_variance = frac * max_log + (1 - frac) * min_log
                model_variance = th.exp(model_log_variance)
        else:
            model_variance, model_log_variance = {
                # for fixedlarge, we set the initial (log-)variance like so
                # to get a better decoder log likelihood.
                ModelVarType.FIXED_LARGE: (
                    np.append(self.posterior_variance[1], self.betas[1:]),
                    np.log(np.append(self.posterior_variance[1], self.betas[1:])),
                ),
                ModelVarType.FIXED_SMALL: (
                    self.posterior_variance,
                    self.posterior_log_variance_clipped,
                ),
            }[self.model_var_type]
            model_variance = _extract_into_tensor(model_variance, t, x.shape)
            model_log_variance = _extract_into_tensor(model_log_variance, t, x.shape)
        def process_xstart(x):
            # Optional user hook, then optional clamping of x_0 to [-1, 1].
            if denoised_fn is not None:
                x = denoised_fn(x)
            if clip_denoised:
                return x.clamp(-1, 1)
            return x
        # Resolve the mean according to self.model_mean_type.
        if self.model_mean_type == ModelMeanType.PREVIOUS_X:
            pred_xstart = process_xstart(
                self._predict_xstart_from_xprev(x_t=x, t=t, xprev=model_output)
            )
            model_mean = model_output
        elif self.model_mean_type in [ModelMeanType.START_X, ModelMeanType.EPSILON]:
            if self.model_mean_type == ModelMeanType.START_X:
                pred_xstart = process_xstart(model_output)
            else:
                pred_xstart = process_xstart(
                    self._predict_xstart_from_eps(x_t=x, t=t, eps=model_output)
                )
            model_mean, _, _ = self.q_posterior_mean_variance(
                x_start=pred_xstart, x_t=x, t=t
            )
        else:
            raise NotImplementedError(self.model_mean_type)
        assert (
            model_mean.shape == model_log_variance.shape == pred_xstart.shape == x.shape
        )
        return {
            "mean": model_mean,
            "variance": model_variance,
            "log_variance": model_log_variance,
            "pred_xstart": pred_xstart,
        }
def _predict_xstart_from_eps(self, x_t, t, eps):
assert x_t.shape == eps.shape
return (
_extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t
- _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * eps
)
    def _predict_xstart_from_xprev(self, x_t, t, xprev):
        # Invert the posterior mean xprev = coef1*x_0 + coef2*x_t for x_0.
        assert x_t.shape == xprev.shape
        return (  # (xprev - coef2*x_t) / coef1
            _extract_into_tensor(1.0 / self.posterior_mean_coef1, t, x_t.shape) * xprev
            - _extract_into_tensor(
                self.posterior_mean_coef2 / self.posterior_mean_coef1, t, x_t.shape
            )
            * x_t
        )
    def _predict_eps_from_xstart(self, x_t, t, pred_xstart):
        # Invert x_t = sqrt(ab)*x_0 + sqrt(1-ab)*eps for eps, given x_0.
        return (
            _extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t
            - pred_xstart
        ) / _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape)
def _scale_timesteps(self, t):
if self.rescale_timesteps:
return t.float() * (1000.0 / self.num_timesteps)
return t
    def condition_mean(self, cond_fn, p_mean_var, x, t, model_kwargs=None):
        """
        Compute the mean for the previous step, given a function cond_fn that
        computes the gradient of a conditional log probability with respect to
        x. In particular, cond_fn computes grad(log(p(y|x))), and we want to
        condition on y.
        This uses the conditioning strategy from Sohl-Dickstein et al. (2015).

        :return: the shifted mean: mean + variance * grad(log p(y|x)).
        """
        gradient = cond_fn(x, self._scale_timesteps(t), **model_kwargs)
        new_mean = (
            p_mean_var["mean"].float() + p_mean_var["variance"] * gradient.float()
        )
        return new_mean
    def condition_score(self, cond_fn, p_mean_var, x, t, model_kwargs=None):
        """
        Compute what the p_mean_variance output would have been, should the
        model's score function be conditioned by cond_fn.
        See condition_mean() for details on cond_fn.
        Unlike condition_mean(), this instead uses the conditioning strategy
        from Song et al (2020).

        :return: a copy of p_mean_var with 'pred_xstart' and 'mean' updated.
        """
        alpha_bar = _extract_into_tensor(self.alphas_cumprod, t, x.shape)
        # Shift the implied epsilon by the conditioning gradient.
        eps = self._predict_eps_from_xstart(x, t, p_mean_var["pred_xstart"])
        eps = eps - (1 - alpha_bar).sqrt() * cond_fn(
            x, self._scale_timesteps(t), **model_kwargs
        )
        out = p_mean_var.copy()
        out["pred_xstart"] = self._predict_xstart_from_eps(x, t, eps)
        out["mean"], _, _ = self.q_posterior_mean_variance(
            x_start=out["pred_xstart"], x_t=x, t=t
        )
        return out
    def p_sample(
        self,
        model,
        x,
        t,
        clip_denoised=True,
        denoised_fn=None,
        cond_fn=None,
        model_kwargs=None,
    ):
        """
        Sample x_{t-1} from the model at the given timestep.
        :param model: the model to sample from.
        :param x: the current tensor at x_{t-1}.
        :param t: the value of t, starting at 0 for the first diffusion step.
        :param clip_denoised: if True, clip the x_start prediction to [-1, 1].
        :param denoised_fn: if not None, a function which applies to the
            x_start prediction before it is used to sample.
        :param cond_fn: if not None, this is a gradient function that acts
                        similarly to the model.
        :param model_kwargs: if not None, a dict of extra keyword arguments to
            pass to the model. This can be used for conditioning.
        :return: a dict containing the following keys:
                 - 'sample': a random sample from the model.
                 - 'pred_xstart': a prediction of x_0.
        """
        out = self.p_mean_variance(
            model,
            x,
            t,
            clip_denoised=clip_denoised,
            denoised_fn=denoised_fn,
            model_kwargs=model_kwargs,
        )
        noise = th.randn_like(x)
        nonzero_mask = (
            (t != 0).float().view(-1, *([1] * (len(x.shape) - 1)))
        )  # no noise when t == 0
        if cond_fn is not None:
            out["mean"] = self.condition_mean(
                cond_fn, out, x, t, model_kwargs=model_kwargs
            )
        # Reparameterized sample: mean + sigma * noise (noise suppressed at t=0).
        sample = out["mean"] + nonzero_mask * th.exp(0.5 * out["log_variance"]) * noise
        return {"sample": sample, "pred_xstart": out["pred_xstart"]}
def p_sample_loop(
self,
model,
shape,
noise=None,
clip_denoised=True,
denoised_fn=None,
cond_fn=None,
model_kwargs=None,
device=None,
progress=False,
):
"""
Generate samples from the model.
:param model: the model module.
:param shape: the shape of the samples, (N, C, H, W).
:param noise: if specified, the noise from the encoder to sample.
Should be of the same shape as `shape`.
:param clip_denoised: if True, clip x_start predictions to [-1, 1].
:param denoised_fn: if not None, a function which applies to the
x_start prediction before it is used to sample.
:param cond_fn: if not None, this is a gradient function that acts
similarly to the model.
:param model_kwargs: if not None, a dict of extra keyword arguments to
pass to the model. This can be used for conditioning.
:param device: if specified, the device to create the samples on.
If not specified, use a model parameter's device.
:param progress: if True, show a tqdm progress bar.
:return: a non-differentiable batch of samples.
"""
final = None
for sample in self.p_sample_loop_progressive(
model,
shape,
noise=noise,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
cond_fn=cond_fn,
model_kwargs=model_kwargs,
device=device,
progress=progress,
):
final = sample
return final["sample"]
    def p_sample_loop_progressive(
        self,
        model,
        shape,
        noise=None,
        clip_denoised=True,
        denoised_fn=None,
        cond_fn=None,
        model_kwargs=None,
        device=None,
        progress=False,
    ):
        """
        Generate samples from the model and yield intermediate samples from
        each timestep of diffusion.
        Arguments are the same as p_sample_loop().
        Returns a generator over dicts, where each dict is the return value of
        p_sample().
        """
        if device is None:
            device = next(model.parameters()).device
        assert isinstance(shape, (tuple, list))
        if noise is not None:
            img = noise
        else:
            img = th.randn(*shape, device=device)
        # Reverse process: iterate t = T-1 down to 0.
        indices = list(range(self.num_timesteps))[::-1]
        if progress:
            # Lazy import so that we don't depend on tqdm.
            from tqdm.auto import tqdm
            indices = tqdm(indices)
        for i in indices:
            t = th.tensor([i] * shape[0], device=device)
            with th.no_grad():
                out = self.p_sample(
                    model,
                    img,
                    t,
                    clip_denoised=clip_denoised,
                    denoised_fn=denoised_fn,
                    cond_fn=cond_fn,
                    model_kwargs=model_kwargs,
                )
                yield out
                img = out["sample"]
    def ddim_sample(
        self,
        model,
        x,
        t,
        clip_denoised=True,
        denoised_fn=None,
        cond_fn=None,
        model_kwargs=None,
        eta=0.0,
    ):
        """
        Sample x_{t-1} from the model using DDIM.
        Same usage as p_sample().

        :param eta: DDIM stochasticity; 0 gives the deterministic DDIM path.
        """
        out = self.p_mean_variance(
            model,
            x,
            t,
            clip_denoised=clip_denoised,
            denoised_fn=denoised_fn,
            model_kwargs=model_kwargs,
        )
        if cond_fn is not None:
            out = self.condition_score(cond_fn, out, x, t, model_kwargs=model_kwargs)
        # Usually our model outputs epsilon, but we re-derive it
        # in case we used x_start or x_prev prediction.
        eps = self._predict_eps_from_xstart(x, t, out["pred_xstart"])
        alpha_bar = _extract_into_tensor(self.alphas_cumprod, t, x.shape)
        alpha_bar_prev = _extract_into_tensor(self.alphas_cumprod_prev, t, x.shape)
        sigma = (
            eta
            * th.sqrt((1 - alpha_bar_prev) / (1 - alpha_bar))
            * th.sqrt(1 - alpha_bar / alpha_bar_prev)
        )
        # Equation 12.
        noise = th.randn_like(x)
        mean_pred = (
            out["pred_xstart"] * th.sqrt(alpha_bar_prev)
            + th.sqrt(1 - alpha_bar_prev - sigma ** 2) * eps
        )
        nonzero_mask = (
            (t != 0).float().view(-1, *([1] * (len(x.shape) - 1)))
        )  # no noise when t == 0
        sample = mean_pred + nonzero_mask * sigma * noise
        return {"sample": sample, "pred_xstart": out["pred_xstart"]}
    def ddim_reverse_sample(
        self,
        model,
        x,
        t,
        clip_denoised=True,
        denoised_fn=None,
        model_kwargs=None,
        eta=0.0,
    ):
        """
        Sample x_{t+1} from the model using DDIM reverse ODE.

        Used to deterministically encode data into the latent space.
        """
        assert eta == 0.0, "Reverse ODE only for deterministic path"
        out = self.p_mean_variance(
            model,
            x,
            t,
            clip_denoised=clip_denoised,
            denoised_fn=denoised_fn,
            model_kwargs=model_kwargs,
        )
        # Usually our model outputs epsilon, but we re-derive it
        # in case we used x_start or x_prev prediction.
        eps = (
            _extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x.shape) * x
            - out["pred_xstart"]
        ) / _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x.shape)
        alpha_bar_next = _extract_into_tensor(self.alphas_cumprod_next, t, x.shape)
        # Equation 12. reversed
        mean_pred = (
            out["pred_xstart"] * th.sqrt(alpha_bar_next)
            + th.sqrt(1 - alpha_bar_next) * eps
        )
        return {"sample": mean_pred, "pred_xstart": out["pred_xstart"]}
def ddim_sample_loop(
self,
model,
shape,
noise=None,
clip_denoised=True,
denoised_fn=None,
cond_fn=None,
model_kwargs=None,
device=None,
progress=False,
eta=0.0,
):
"""
Generate samples from the model using DDIM.
Same usage as p_sample_loop().
"""
final = None
for sample in self.ddim_sample_loop_progressive(
model,
shape,
noise=noise,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
cond_fn=cond_fn,
model_kwargs=model_kwargs,
device=device,
progress=progress,
eta=eta,
):
final = sample
return final["sample"]
    def ddim_sample_loop_progressive(
        self,
        model,
        shape,
        noise=None,
        clip_denoised=True,
        denoised_fn=None,
        cond_fn=None,
        model_kwargs=None,
        device=None,
        progress=False,
        eta=0.0,
    ):
        """
        Use DDIM to sample from the model and yield intermediate samples from
        each timestep of DDIM.
        Same usage as p_sample_loop_progressive().
        """
        if device is None:
            device = next(model.parameters()).device
        assert isinstance(shape, (tuple, list))
        if noise is not None:
            img = noise
        else:
            img = th.randn(*shape, device=device)
        # Reverse process: iterate t = T-1 down to 0.
        indices = list(range(self.num_timesteps))[::-1]
        if progress:
            # Lazy import so that we don't depend on tqdm.
            from tqdm.auto import tqdm
            indices = tqdm(indices)
        for i in indices:
            t = th.tensor([i] * shape[0], device=device)
            with th.no_grad():
                out = self.ddim_sample(
                    model,
                    img,
                    t,
                    clip_denoised=clip_denoised,
                    denoised_fn=denoised_fn,
                    cond_fn=cond_fn,
                    model_kwargs=model_kwargs,
                    eta=eta,
                )
                yield out
                img = out["sample"]
    def _vb_terms_bpd(
        self, model, x_start, x_t, t, clip_denoised=True, model_kwargs=None
    ):
        """
        Get a term for the variational lower-bound.
        The resulting units are bits (rather than nats, as one might expect).
        This allows for comparison to other papers.
        :return: a dict with the following keys:
                 - 'output': a shape [N] tensor of NLLs or KLs.
                 - 'pred_xstart': the x_0 predictions.
        """
        true_mean, _, true_log_variance_clipped = self.q_posterior_mean_variance(
            x_start=x_start, x_t=x_t, t=t
        )
        out = self.p_mean_variance(
            model, x_t, t, clip_denoised=clip_denoised, model_kwargs=model_kwargs
        )
        # KL between the true posterior and the model's p(x_{t-1} | x_t).
        kl = normal_kl(
            true_mean, true_log_variance_clipped, out["mean"], out["log_variance"]
        )
        kl = mean_flat(kl) / np.log(2.0)
        decoder_nll = -discretized_gaussian_log_likelihood(
            x_start, means=out["mean"], log_scales=0.5 * out["log_variance"]
        )
        assert decoder_nll.shape == x_start.shape
        decoder_nll = mean_flat(decoder_nll) / np.log(2.0)
        # At the first timestep return the decoder NLL,
        # otherwise return KL(q(x_{t-1}|x_t,x_0) || p(x_{t-1}|x_t))
        output = th.where((t == 0), decoder_nll, kl)
        return {"output": output, "pred_xstart": out["pred_xstart"]}
    def training_losses(self, model, x_start, t, model_kwargs=None, noise=None):
        """
        Compute training losses for a single timestep.
        :param model: the model to evaluate loss on.
        :param x_start: the [N x C x ...] tensor of inputs.
        :param t: a batch of timestep indices.
        :param model_kwargs: if not None, a dict of extra keyword arguments to
            pass to the model. This can be used for conditioning.
        :param noise: if specified, the specific Gaussian noise to try to remove.
        :return: a dict with the key "loss" containing a tensor of shape [N].
                 Some mean or variance settings may also have other keys.
        """
        if model_kwargs is None:
            model_kwargs = {}
        if noise is None:
            noise = th.randn_like(x_start)
        x_t = self.q_sample(x_start, t, noise=noise)
        terms = {}
        if self.loss_type == LossType.KL or self.loss_type == LossType.RESCALED_KL:
            terms["loss"] = self._vb_terms_bpd(
                model=model,
                x_start=x_start,
                x_t=x_t,
                t=t,
                clip_denoised=False,
                model_kwargs=model_kwargs,
            )["output"]
            if self.loss_type == LossType.RESCALED_KL:
                terms["loss"] *= self.num_timesteps
        elif self.loss_type == LossType.MSE or self.loss_type == LossType.RESCALED_MSE:
            model_output = model(x_t, self._scale_timesteps(t), **model_kwargs)
            if self.model_var_type in [
                ModelVarType.LEARNED,
                ModelVarType.LEARNED_RANGE,
            ]:
                B, C = x_t.shape[:2]
                assert model_output.shape == (B, C * 2, *x_t.shape[2:])
                model_output, model_var_values = th.split(model_output, C, dim=1)
                # Learn the variance using the variational bound, but don't let
                # it affect our mean prediction.
                frozen_out = th.cat([model_output.detach(), model_var_values], dim=1)
                terms["vb"] = self._vb_terms_bpd(
                    model=lambda *args, r=frozen_out: r,
                    x_start=x_start,
                    x_t=x_t,
                    t=t,
                    clip_denoised=False,
                )["output"]
                if self.loss_type == LossType.RESCALED_MSE:
                    # Divide by 1000 for equivalence with initial implementation.
                    # Without a factor of 1/1000, the VB term hurts the MSE term.
                    terms["vb"] *= self.num_timesteps / 1000.0
            # MSE target depends on what the model is parameterized to predict.
            target = {
                ModelMeanType.PREVIOUS_X: self.q_posterior_mean_variance(
                    x_start=x_start, x_t=x_t, t=t
                )[0],
                ModelMeanType.START_X: x_start,
                ModelMeanType.EPSILON: noise,
            }[self.model_mean_type]
            assert model_output.shape == target.shape == x_start.shape
            terms["mse"] = mean_flat((target - model_output) ** 2)
            if "vb" in terms:
                terms["loss"] = terms["mse"] + terms["vb"]
            else:
                terms["loss"] = terms["mse"]
        else:
            raise NotImplementedError(self.loss_type)
        return terms
    def _prior_bpd(self, x_start):
        """
        Get the prior KL term for the variational lower-bound, measured in
        bits-per-dim.
        This term can't be optimized, as it only depends on the encoder.
        :param x_start: the [N x C x ...] tensor of inputs.
        :return: a batch of [N] KL values (in bits), one per batch element.
        """
        batch_size = x_start.shape[0]
        # KL between q(x_T | x_0) and the standard normal prior.
        t = th.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device)
        qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t)
        kl_prior = normal_kl(
            mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0
        )
        return mean_flat(kl_prior) / np.log(2.0)
    def calc_bpd_loop(self, model, x_start, clip_denoised=True, model_kwargs=None):
        """
        Compute the entire variational lower-bound, measured in bits-per-dim,
        as well as other related quantities.
        :param model: the model to evaluate loss on.
        :param x_start: the [N x C x ...] tensor of inputs.
        :param clip_denoised: if True, clip denoised samples.
        :param model_kwargs: if not None, a dict of extra keyword arguments to
            pass to the model. This can be used for conditioning.
        :return: a dict containing the following keys:
                 - total_bpd: the total variational lower-bound, per batch element.
                 - prior_bpd: the prior term in the lower-bound.
                 - vb: an [N x T] tensor of terms in the lower-bound.
                 - xstart_mse: an [N x T] tensor of x_0 MSEs for each timestep.
                 - mse: an [N x T] tensor of epsilon MSEs for each timestep.
        """
        device = x_start.device
        batch_size = x_start.shape[0]
        vb = []
        xstart_mse = []
        mse = []
        # Evaluate each timestep independently, from t = T-1 down to 0.
        for t in list(range(self.num_timesteps))[::-1]:
            t_batch = th.tensor([t] * batch_size, device=device)
            noise = th.randn_like(x_start)
            x_t = self.q_sample(x_start=x_start, t=t_batch, noise=noise)
            # Calculate VLB term at the current timestep
            with th.no_grad():
                out = self._vb_terms_bpd(
                    model,
                    x_start=x_start,
                    x_t=x_t,
                    t=t_batch,
                    clip_denoised=clip_denoised,
                    model_kwargs=model_kwargs,
                )
            vb.append(out["output"])
            xstart_mse.append(mean_flat((out["pred_xstart"] - x_start) ** 2))
            eps = self._predict_eps_from_xstart(x_t, t_batch, out["pred_xstart"])
            mse.append(mean_flat((eps - noise) ** 2))
        vb = th.stack(vb, dim=1)
        xstart_mse = th.stack(xstart_mse, dim=1)
        mse = th.stack(mse, dim=1)
        prior_bpd = self._prior_bpd(x_start)
        total_bpd = vb.sum(dim=1) + prior_bpd
        return {
            "total_bpd": total_bpd,
            "prior_bpd": prior_bpd,
            "vb": vb,
            "xstart_mse": xstart_mse,
            "mse": mse,
        }
def _extract_into_tensor(arr, timesteps, broadcast_shape):
"""
Extract values from a 1-D numpy array for a batch of indices.
:param arr: the 1-D numpy array.
:param timesteps: a tensor of indices into the array to extract.
:param broadcast_shape: a larger shape of K dimensions with the batch
dimension equal to the length of timesteps.
:return: a tensor of shape [batch_size, 1, ...] where the shape has K dims.
"""
res = th.from_numpy(arr).to(device=timesteps.device)[timesteps].float()
while len(res.shape) < len(broadcast_shape):
res = res[..., None]
return res.expand(broadcast_shape)
| 34,721 | 36.864776 | 129 | py |
DiffPure | DiffPure-master/guided_diffusion/train_util.py | # ---------------------------------------------------------------
# Taken from the following link as is from:
# https://github.com/openai/guided-diffusion/blob/main/guided_diffusion/train_util.py
#
# The license for the original version of this file can be
# found in this directory (LICENSE_GUIDED_DIFFUSION).
# ---------------------------------------------------------------
import copy
import functools
import os
import blobfile as bf
import torch as th
import torch.distributed as dist
from torch.nn.parallel.distributed import DistributedDataParallel as DDP
from torch.optim import AdamW
from . import dist_util, logger
from .fp16_util import MixedPrecisionTrainer
from .nn import update_ema
from .resample import LossAwareSampler, UniformSampler
# For ImageNet experiments, this was a good default value.
# We found that the lg_loss_scale quickly climbed to
# 20-21 within the first ~1K steps of training.
INITIAL_LOG_LOSS_SCALE = 20.0
class TrainLoop:
    """
    Generic training loop for diffusion models with mixed precision, EMA
    parameter tracking, gradient accumulation (microbatching), DDP, and
    checkpoint save/resume via blobfile.
    """
    def __init__(
        self,
        *,
        model,
        diffusion,
        data,
        batch_size,
        microbatch,
        lr,
        ema_rate,
        log_interval,
        save_interval,
        resume_checkpoint,
        use_fp16=False,
        fp16_scale_growth=1e-3,
        schedule_sampler=None,
        weight_decay=0.0,
        lr_anneal_steps=0,
    ):
        self.model = model
        self.diffusion = diffusion
        self.data = data
        self.batch_size = batch_size
        # microbatch <= 0 disables gradient accumulation.
        self.microbatch = microbatch if microbatch > 0 else batch_size
        self.lr = lr
        # ema_rate may be a single float or a comma-separated string of rates;
        # one EMA copy of the parameters is kept per rate.
        self.ema_rate = (
            [ema_rate]
            if isinstance(ema_rate, float)
            else [float(x) for x in ema_rate.split(",")]
        )
        self.log_interval = log_interval
        self.save_interval = save_interval
        self.resume_checkpoint = resume_checkpoint
        self.use_fp16 = use_fp16
        self.fp16_scale_growth = fp16_scale_growth
        self.schedule_sampler = schedule_sampler or UniformSampler(diffusion)
        self.weight_decay = weight_decay
        self.lr_anneal_steps = lr_anneal_steps
        self.step = 0
        self.resume_step = 0
        # Effective batch size across all distributed ranks.
        self.global_batch = self.batch_size * dist.get_world_size()
        self.sync_cuda = th.cuda.is_available()
        # Must happen before the optimizer/EMA are built so they see the
        # (possibly resumed) weights.
        self._load_and_sync_parameters()
        self.mp_trainer = MixedPrecisionTrainer(
            model=self.model,
            use_fp16=self.use_fp16,
            fp16_scale_growth=fp16_scale_growth,
        )
        self.opt = AdamW(
            self.mp_trainer.master_params, lr=self.lr, weight_decay=self.weight_decay
        )
        if self.resume_step:
            self._load_optimizer_state()
            # Model was resumed, either due to a restart or a checkpoint
            # being specified at the command line.
            self.ema_params = [
                self._load_ema_parameters(rate) for rate in self.ema_rate
            ]
        else:
            self.ema_params = [
                copy.deepcopy(self.mp_trainer.master_params)
                for _ in range(len(self.ema_rate))
            ]
        if th.cuda.is_available():
            self.use_ddp = True
            self.ddp_model = DDP(
                self.model,
                device_ids=[dist_util.dev()],
                output_device=dist_util.dev(),
                broadcast_buffers=False,
                bucket_cap_mb=128,
                find_unused_parameters=False,
            )
        else:
            if dist.get_world_size() > 1:
                logger.warn(
                    "Distributed training requires CUDA. "
                    "Gradients will not be synchronized properly!"
                )
            self.use_ddp = False
            self.ddp_model = self.model
    def _load_and_sync_parameters(self):
        """Load model weights from a resume checkpoint (if any) on rank 0
        and broadcast them to all ranks."""
        resume_checkpoint = find_resume_checkpoint() or self.resume_checkpoint
        if resume_checkpoint:
            self.resume_step = parse_resume_step_from_filename(resume_checkpoint)
            if dist.get_rank() == 0:
                logger.log(f"loading model from checkpoint: {resume_checkpoint}...")
                self.model.load_state_dict(
                    dist_util.load_state_dict(
                        resume_checkpoint, map_location=dist_util.dev()
                    )
                )
        dist_util.sync_params(self.model.parameters())
    def _load_ema_parameters(self, rate):
        """Load the EMA weights for `rate` from a sibling checkpoint, falling
        back to a copy of the current master params if none exists."""
        ema_params = copy.deepcopy(self.mp_trainer.master_params)
        main_checkpoint = find_resume_checkpoint() or self.resume_checkpoint
        ema_checkpoint = find_ema_checkpoint(main_checkpoint, self.resume_step, rate)
        if ema_checkpoint:
            if dist.get_rank() == 0:
                logger.log(f"loading EMA from checkpoint: {ema_checkpoint}...")
                state_dict = dist_util.load_state_dict(
                    ema_checkpoint, map_location=dist_util.dev()
                )
                ema_params = self.mp_trainer.state_dict_to_master_params(state_dict)
        dist_util.sync_params(ema_params)
        return ema_params
    def _load_optimizer_state(self):
        """Restore the AdamW state saved alongside the resume checkpoint."""
        main_checkpoint = find_resume_checkpoint() or self.resume_checkpoint
        opt_checkpoint = bf.join(
            bf.dirname(main_checkpoint), f"opt{self.resume_step:06}.pt"
        )
        if bf.exists(opt_checkpoint):
            logger.log(f"loading optimizer state from checkpoint: {opt_checkpoint}")
            state_dict = dist_util.load_state_dict(
                opt_checkpoint, map_location=dist_util.dev()
            )
            self.opt.load_state_dict(state_dict)
    def run_loop(self):
        """Train until lr_anneal_steps is reached (or forever if it is 0),
        logging and checkpointing at the configured intervals."""
        while (
            not self.lr_anneal_steps
            or self.step + self.resume_step < self.lr_anneal_steps
        ):
            batch, cond = next(self.data)
            self.run_step(batch, cond)
            if self.step % self.log_interval == 0:
                logger.dumpkvs()
            if self.step % self.save_interval == 0:
                self.save()
                # Run for a finite amount of time in integration tests.
                if os.environ.get("DIFFUSION_TRAINING_TEST", "") and self.step > 0:
                    return
            self.step += 1
        # Save the last checkpoint if it wasn't already saved.
        if (self.step - 1) % self.save_interval != 0:
            self.save()
    def run_step(self, batch, cond):
        """One optimization step: forward/backward, optimizer update, EMA."""
        self.forward_backward(batch, cond)
        took_step = self.mp_trainer.optimize(self.opt)
        # EMA only advances when the fp16 loss scale allowed a real step.
        if took_step:
            self._update_ema()
        self._anneal_lr()
        self.log_step()
    def forward_backward(self, batch, cond):
        """Accumulate gradients over microbatches of the given batch."""
        self.mp_trainer.zero_grad()
        for i in range(0, batch.shape[0], self.microbatch):
            micro = batch[i : i + self.microbatch].to(dist_util.dev())
            micro_cond = {
                k: v[i : i + self.microbatch].to(dist_util.dev())
                for k, v in cond.items()
            }
            last_batch = (i + self.microbatch) >= batch.shape[0]
            t, weights = self.schedule_sampler.sample(micro.shape[0], dist_util.dev())
            compute_losses = functools.partial(
                self.diffusion.training_losses,
                self.ddp_model,
                micro,
                t,
                model_kwargs=micro_cond,
            )
            # Only sync DDP gradients on the final microbatch; intermediate
            # microbatches accumulate locally under no_sync().
            if last_batch or not self.use_ddp:
                losses = compute_losses()
            else:
                with self.ddp_model.no_sync():
                    losses = compute_losses()
            if isinstance(self.schedule_sampler, LossAwareSampler):
                self.schedule_sampler.update_with_local_losses(
                    t, losses["loss"].detach()
                )
            loss = (losses["loss"] * weights).mean()
            log_loss_dict(
                self.diffusion, t, {k: v * weights for k, v in losses.items()}
            )
            self.mp_trainer.backward(loss)
    def _update_ema(self):
        """Blend current master params into each EMA copy at its rate."""
        for rate, params in zip(self.ema_rate, self.ema_params):
            update_ema(params, self.mp_trainer.master_params, rate=rate)
    def _anneal_lr(self):
        """Linearly decay the learning rate to 0 over lr_anneal_steps."""
        if not self.lr_anneal_steps:
            return
        frac_done = (self.step + self.resume_step) / self.lr_anneal_steps
        lr = self.lr * (1 - frac_done)
        for param_group in self.opt.param_groups:
            param_group["lr"] = lr
    def log_step(self):
        """Record the current step and total sample count."""
        logger.logkv("step", self.step + self.resume_step)
        logger.logkv("samples", (self.step + self.resume_step + 1) * self.global_batch)
    def save(self):
        """Save the model, every EMA copy, and (on rank 0) the optimizer."""
        def save_checkpoint(rate, params):
            state_dict = self.mp_trainer.master_params_to_state_dict(params)
            if dist.get_rank() == 0:
                logger.log(f"saving model {rate}...")
                # rate 0/None marks the raw (non-EMA) model weights.
                if not rate:
                    filename = f"model{(self.step+self.resume_step):06d}.pt"
                else:
                    filename = f"ema_{rate}_{(self.step+self.resume_step):06d}.pt"
                with bf.BlobFile(bf.join(get_blob_logdir(), filename), "wb") as f:
                    th.save(state_dict, f)
        save_checkpoint(0, self.mp_trainer.master_params)
        for rate, params in zip(self.ema_rate, self.ema_params):
            save_checkpoint(rate, params)
        if dist.get_rank() == 0:
            with bf.BlobFile(
                bf.join(get_blob_logdir(), f"opt{(self.step+self.resume_step):06d}.pt"),
                "wb",
            ) as f:
                th.save(self.opt.state_dict(), f)
        dist.barrier()
def parse_resume_step_from_filename(filename):
    """
    Parse filenames of the form path/to/modelNNNNNN.pt, where NNNNNN is the
    checkpoint's number of steps. Returns 0 if no step count can be parsed.
    """
    _, marker, tail = filename.rpartition("model")
    if not marker:
        # "model" does not appear anywhere in the path.
        return 0
    digits = tail.partition(".")[0]
    try:
        return int(digits)
    except ValueError:
        return 0
def get_blob_logdir():
    # You can change this to be a separate path to save checkpoints to
    # a blobstore or some external drive.
    # Defaults to the logger's current output directory.
    return logger.get_dir()
def find_resume_checkpoint():
    # On your infrastructure, you may want to override this to automatically
    # discover the latest checkpoint on your blob storage, etc.
    # Returning None means "no checkpoint discovered"; callers then fall back
    # to the explicitly configured resume_checkpoint (if any).
    return None
def find_ema_checkpoint(main_checkpoint, step, rate):
    """Return the path of the EMA checkpoint matching (step, rate) next to
    main_checkpoint, or None if it does not exist."""
    if main_checkpoint is None:
        return None
    candidate = bf.join(bf.dirname(main_checkpoint), f"ema_{rate}_{(step):06d}.pt")
    return candidate if bf.exists(candidate) else None
def log_loss_dict(diffusion, ts, losses):
    """Log the mean of each loss term, plus per-quartile means keyed by
    which quarter of the diffusion schedule each sample's timestep fell in."""
    num_steps = diffusion.num_timesteps
    for name, vals in losses.items():
        logger.logkv_mean(name, vals.mean().item())
        # Bucket each per-sample loss into one of four schedule quartiles.
        for sub_t, sub_loss in zip(ts.cpu().numpy(), vals.detach().cpu().numpy()):
            quartile = int(4 * sub_t / num_steps)
            logger.logkv_mean(f"{name}_q{quartile}", sub_loss)
| 10,982 | 34.429032 | 88 | py |
DiffPure | DiffPure-master/guided_diffusion/respace.py | # ---------------------------------------------------------------
# Taken from the following link as is from:
# https://github.com/openai/guided-diffusion/blob/main/guided_diffusion/respace.py
#
# The license for the original version of this file can be
# found in this directory (LICENSE_GUIDED_DIFFUSION).
# ---------------------------------------------------------------
import numpy as np
import torch as th
from .gaussian_diffusion import GaussianDiffusion
def space_timesteps(num_timesteps, section_counts):
    """
    Create a list of timesteps to use from an original diffusion process,
    given the number of timesteps we want to take from equally-sized portions
    of the original process.
    For example, if there's 300 timesteps and the section counts are [10,15,20]
    then the first 100 timesteps are strided to be 10 timesteps, the second 100
    are strided to be 15 timesteps, and the final 100 are strided to be 20.
    If the stride is a string starting with "ddim", then the fixed striding
    from the DDIM paper is used, and only one section is allowed.
    :param num_timesteps: the number of diffusion steps in the original
                          process to divide up.
    :param section_counts: either a list of numbers, or a string containing
                           comma-separated numbers, indicating the step count
                           per section. As a special case, use "ddimN" where N
                           is a number of steps to use the striding from the
                           DDIM paper.
    :return: a set of diffusion steps from the original process to use.
    :raises ValueError: if no integer stride yields exactly the requested
                        "ddimN" count, or a section is smaller than its count.
    """
    if isinstance(section_counts, str):
        if section_counts.startswith("ddim"):
            desired_count = int(section_counts[len("ddim") :])
            # Search for an integer stride producing exactly desired_count steps.
            for i in range(1, num_timesteps):
                if len(range(0, num_timesteps, i)) == desired_count:
                    return set(range(0, num_timesteps, i))
            # Bug fix: report the requested count (desired_count), not the
            # total number of timesteps, in the error message.
            raise ValueError(
                f"cannot create exactly {desired_count} steps with an integer stride"
            )
        section_counts = [int(x) for x in section_counts.split(",")]
    size_per = num_timesteps // len(section_counts)
    extra = num_timesteps % len(section_counts)
    start_idx = 0
    all_steps = []
    for i, section_count in enumerate(section_counts):
        # The remainder is distributed over the first `extra` sections.
        size = size_per + (1 if i < extra else 0)
        if size < section_count:
            raise ValueError(
                f"cannot divide section of {size} steps into {section_count}"
            )
        if section_count <= 1:
            frac_stride = 1
        else:
            # Fractional stride so the first and last steps of the section
            # are always included.
            frac_stride = (size - 1) / (section_count - 1)
        cur_idx = 0.0
        taken_steps = []
        for _ in range(section_count):
            taken_steps.append(start_idx + round(cur_idx))
            cur_idx += frac_stride
        all_steps += taken_steps
        start_idx += size
    return set(all_steps)
class SpacedDiffusion(GaussianDiffusion):
    """
    A diffusion process which can skip steps in a base diffusion process.
    :param use_timesteps: a collection (sequence or set) of timesteps from the
                          original diffusion process to retain.
    :param kwargs: the kwargs to create the base diffusion process.
    """
    def __init__(self, use_timesteps, **kwargs):
        self.use_timesteps = set(use_timesteps)
        self.timestep_map = []
        self.original_num_steps = len(kwargs["betas"])
        base_diffusion = GaussianDiffusion(**kwargs) # pylint: disable=missing-kwoa
        # Re-derive betas for the retained steps so the spaced process has the
        # same cumulative alpha products as the base process at those steps.
        last_alpha_cumprod = 1.0
        new_betas = []
        for i, alpha_cumprod in enumerate(base_diffusion.alphas_cumprod):
            if i in self.use_timesteps:
                new_betas.append(1 - alpha_cumprod / last_alpha_cumprod)
                last_alpha_cumprod = alpha_cumprod
                # timestep_map[k] = original-process index of spaced step k.
                self.timestep_map.append(i)
        kwargs["betas"] = np.array(new_betas)
        super().__init__(**kwargs)
    def p_mean_variance(
        self, model, *args, **kwargs
    ):  # pylint: disable=signature-differs
        # Wrap the model so it receives original-process timesteps.
        return super().p_mean_variance(self._wrap_model(model), *args, **kwargs)
    def training_losses(
        self, model, *args, **kwargs
    ):  # pylint: disable=signature-differs
        return super().training_losses(self._wrap_model(model), *args, **kwargs)
    def condition_mean(self, cond_fn, *args, **kwargs):
        return super().condition_mean(self._wrap_model(cond_fn), *args, **kwargs)
    def condition_score(self, cond_fn, *args, **kwargs):
        return super().condition_score(self._wrap_model(cond_fn), *args, **kwargs)
    def _wrap_model(self, model):
        # Idempotent: an already-wrapped model is returned unchanged.
        if isinstance(model, _WrappedModel):
            return model
        return _WrappedModel(
            model, self.timestep_map, self.rescale_timesteps, self.original_num_steps
        )
    def _scale_timesteps(self, t):
        # Scaling is done by the wrapped model.
        return t
class _WrappedModel:
def __init__(self, model, timestep_map, rescale_timesteps, original_num_steps):
self.model = model
self.timestep_map = timestep_map
self.rescale_timesteps = rescale_timesteps
self.original_num_steps = original_num_steps
def __call__(self, x, ts, **kwargs):
map_tensor = th.tensor(self.timestep_map, device=ts.device, dtype=ts.dtype)
new_ts = map_tensor[ts]
if self.rescale_timesteps:
new_ts = new_ts.float() * (1000.0 / self.original_num_steps)
return self.model(x, new_ts, **kwargs)
| 5,568 | 39.649635 | 85 | py |
DiffPure | DiffPure-master/guided_diffusion/dist_util.py | # ---------------------------------------------------------------
# Taken from the following link as is from:
# https://github.com/openai/guided-diffusion/blob/main/guided_diffusion/dist_util.py
#
# The license for the original version of this file can be
# found in this directory (LICENSE_GUIDED_DIFFUSION).
# ---------------------------------------------------------------
"""
Helpers for distributed training.
"""
import io
import os
import socket
import blobfile as bf
from mpi4py import MPI
import torch as th
import torch.distributed as dist
# Change this to reflect your cluster layout.
# The GPU for a given rank is (rank % GPUS_PER_NODE).
GPUS_PER_NODE = 8
SETUP_RETRY_COUNT = 3
def setup_dist():
    """
    Setup a distributed process group.

    Rendezvous details (master host/port, rank, world size) are agreed upon
    over MPI and exported via the standard torch.distributed environment
    variables before init_process_group is called.
    """
    if dist.is_initialized():
        return
    # Pin each rank to one GPU before any CUDA context is created.
    os.environ["CUDA_VISIBLE_DEVICES"] = f"{MPI.COMM_WORLD.Get_rank() % GPUS_PER_NODE}"
    comm = MPI.COMM_WORLD
    backend = "gloo" if not th.cuda.is_available() else "nccl"
    if backend == "gloo":
        hostname = "localhost"
    else:
        hostname = socket.gethostbyname(socket.getfqdn())
    # Rank 0's hostname and free port are broadcast to every rank.
    os.environ["MASTER_ADDR"] = comm.bcast(hostname, root=0)
    os.environ["RANK"] = str(comm.rank)
    os.environ["WORLD_SIZE"] = str(comm.size)
    port = comm.bcast(_find_free_port(), root=0)
    os.environ["MASTER_PORT"] = str(port)
    dist.init_process_group(backend=backend, init_method="env://")
def dev():
    """
    Get the device to use for torch.distributed.

    :return: a CUDA device when CUDA is available, otherwise the CPU device.
    """
    if th.cuda.is_available():
        # Fixed: plain string literal instead of a pointless f-string.
        return th.device("cuda")
    return th.device("cpu")
def load_state_dict(path, **kwargs):
    """
    Load a PyTorch file without redundant fetches across MPI ranks.

    Rank 0 reads the (possibly remote) file once and broadcasts the raw bytes
    to all other ranks in chunks; every rank then deserializes locally.
    """
    chunk_size = 2 ** 30  # MPI has a relatively small size limit
    if MPI.COMM_WORLD.Get_rank() == 0:
        with bf.BlobFile(path, "rb") as f:
            data = f.read()
        # Ceil-divide the payload into broadcastable chunks.
        num_chunks = len(data) // chunk_size
        if len(data) % chunk_size:
            num_chunks += 1
        MPI.COMM_WORLD.bcast(num_chunks)
        for i in range(0, len(data), chunk_size):
            MPI.COMM_WORLD.bcast(data[i : i + chunk_size])
    else:
        # Non-root ranks receive the chunk count, then reassemble the bytes
        # in the same broadcast order.
        num_chunks = MPI.COMM_WORLD.bcast(None)
        data = bytes()
        for _ in range(num_chunks):
            data += MPI.COMM_WORLD.bcast(None)
    return th.load(io.BytesIO(data), **kwargs)
def sync_params(params):
    """
    Synchronize a sequence of Tensors across ranks from rank 0.
    """
    # No gradient tracking is needed for parameter broadcast.
    with th.no_grad():
        for p in params:
            dist.broadcast(p, 0)
def _find_free_port():
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(("", 0))
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
return s.getsockname()[1]
finally:
s.close()
| 2,801 | 26.470588 | 87 | py |
DiffPure | DiffPure-master/classifiers/cifar10_resnet.py | # ---------------------------------------------------------------
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the NVIDIA Source Code License
# for DiffPure. To view a copy of this license, see the LICENSE file.
# ---------------------------------------------------------------
import math
import torch
import torch.nn.functional as F
import torch.nn as nn
# ---------------------------- ResNet ----------------------------
class Bottleneck(nn.Module):
    """ResNet bottleneck residual block: 1x1 reduce -> 3x3 -> 1x1 expand,
    with a projection shortcut when shape or stride changes."""
    expansion = 4

    def __init__(self, in_planes, planes, stride=1):
        super().__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, self.expansion * planes, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(self.expansion * planes)
        # Identity shortcut unless spatial size or channel count changes.
        if stride != 1 or in_planes != self.expansion * planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion * planes),
            )
        else:
            self.shortcut = nn.Sequential()

    def forward(self, x):
        h = F.relu(self.bn1(self.conv1(x)))
        h = F.relu(self.bn2(self.conv2(h)))
        h = self.bn3(self.conv3(h))
        # Residual add, then the final activation.
        return F.relu(h + self.shortcut(x))
class ResNet(nn.Module):
    """CIFAR-10 ResNet backbone with built-in input normalization.

    Input images are expected in [0, 1]; they are standardized with the
    CIFAR-10 channel mean/std before the first convolution.
    """
    def __init__(self, block, num_blocks, num_classes=10):
        super().__init__()
        self.in_planes = 64
        num_input_channels = 3
        mean = (0.4914, 0.4822, 0.4465)
        std = (0.2471, 0.2435, 0.2616)
        self.mean = torch.tensor(mean).view(num_input_channels, 1, 1)
        self.std = torch.tensor(std).view(num_input_channels, 1, 1)
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        self.linear = nn.Linear(512 * block.expansion, num_classes)

    def _make_layer(self, block, planes, num_blocks, stride):
        """Stack num_blocks blocks; only the first may downsample."""
        blocks = []
        for s in [stride] + [1] * (num_blocks - 1):
            blocks.append(block(self.in_planes, planes, s))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*blocks)

    def forward(self, x):
        # Standardize, stem, four stages, global average pool, classifier.
        h = (x - self.mean.to(x.device)) / self.std.to(x.device)
        h = F.relu(self.bn1(self.conv1(h)))
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            h = stage(h)
        h = F.avg_pool2d(h, 4)
        return self.linear(h.view(h.size(0), -1))
def ResNet50():
    """ResNet-50 for CIFAR-10: bottleneck blocks in a 3-4-6-3 stage layout."""
    return ResNet(Bottleneck, [3, 4, 6, 3])
# ---------------------------- ResNet ----------------------------
# ---------------------------- WideResNet ----------------------------
class BasicBlock(nn.Module):
    """Wide-ResNet pre-activation basic block: BN-ReLU-Conv twice, with
    optional dropout between the convolutions and a 1x1 projection shortcut
    when the channel counts differ."""
    def __init__(self, in_planes, out_planes, stride, dropRate=0.0):
        super(BasicBlock, self).__init__()
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.relu1 = nn.ReLU(inplace=True)
        self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(out_planes)
        self.relu2 = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(out_planes, out_planes, kernel_size=3, stride=1,
                               padding=1, bias=False)
        self.droprate = dropRate
        self.equalInOut = (in_planes == out_planes)
        # Projection shortcut only when channels change; otherwise None.
        self.convShortcut = (not self.equalInOut) and nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride,
                                                                padding=0, bias=False) or None
    def forward(self, x):
        # When channels differ, the pre-activation output is rebound to `x`
        # so it feeds both the residual branch and the projection shortcut.
        if not self.equalInOut:
            x = self.relu1(self.bn1(x))
        else:
            out = self.relu1(self.bn1(x))
        # NOTE: if equalInOut is False, `out` is intentionally never assigned
        # above; conv1 consumes the rebound `x` instead.
        out = self.relu2(self.bn2(self.conv1(out if self.equalInOut else x)))
        if self.droprate > 0:
            out = F.dropout(out, p=self.droprate, training=self.training)
        out = self.conv2(out)
        return torch.add(x if self.equalInOut else self.convShortcut(x), out)
class NetworkBlock(nn.Module):
    """A stage of nb_layers stacked blocks; only the first block may change
    channel count or stride, the rest operate at out_planes with stride 1."""
    def __init__(self, nb_layers, in_planes, out_planes, block, stride, dropRate=0.0):
        super().__init__()
        self.layer = self._make_layer(block, in_planes, out_planes, nb_layers, stride, dropRate)

    def _make_layer(self, block, in_planes, out_planes, nb_layers, stride, dropRate):
        blocks = [
            block(in_planes if i == 0 else out_planes,
                  out_planes,
                  stride if i == 0 else 1,
                  dropRate)
            for i in range(int(nb_layers))
        ]
        return nn.Sequential(*blocks)

    def forward(self, x):
        return self.layer(x)
class WideResNet(nn.Module):
    """ Based on code from https://github.com/yaodongyu/TRADES

    Wide-ResNet (WRN-depth-widen_factor) for CIFAR-10 with built-in input
    normalization; inputs are expected in [0, 1].
    """
    def __init__(self, depth=28, num_classes=10, widen_factor=10, sub_block1=False, dropRate=0.0, bias_last=True):
        super(WideResNet, self).__init__()
        num_input_channels = 3
        mean = (0.4914, 0.4822, 0.4465)
        std = (0.2471, 0.2435, 0.2616)
        self.mean = torch.tensor(mean).view(num_input_channels, 1, 1)
        self.std = torch.tensor(std).view(num_input_channels, 1, 1)
        nChannels = [16, 16 * widen_factor, 32 * widen_factor, 64 * widen_factor]
        # Depth must be 6n+4: three stages of n BasicBlocks plus 4 fixed layers.
        assert ((depth - 4) % 6 == 0)
        n = (depth - 4) / 6
        block = BasicBlock
        # 1st conv before any network block
        self.conv1 = nn.Conv2d(3, nChannels[0], kernel_size=3, stride=1,
                               padding=1, bias=False)
        # 1st block
        self.block1 = NetworkBlock(n, nChannels[0], nChannels[1], block, 1, dropRate)
        if sub_block1:
            # 1st sub-block
            # Extra (unused in forward) copy kept for checkpoint compatibility
            # with models trained with this flag.
            self.sub_block1 = NetworkBlock(n, nChannels[0], nChannels[1], block, 1, dropRate)
        # 2nd block
        self.block2 = NetworkBlock(n, nChannels[1], nChannels[2], block, 2, dropRate)
        # 3rd block
        self.block3 = NetworkBlock(n, nChannels[2], nChannels[3], block, 2, dropRate)
        # global average pooling and classifier
        self.bn1 = nn.BatchNorm2d(nChannels[3])
        self.relu = nn.ReLU(inplace=True)
        self.fc = nn.Linear(nChannels[3], num_classes, bias=bias_last)
        self.nChannels = nChannels[3]
        # He-style initialization for convs; unit-gain BN; zero linear bias.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear) and not m.bias is None:
                m.bias.data.zero_()
    def forward(self, x):
        # Standardize, stem conv, three stages, BN-ReLU, 8x8 pool, classifier.
        out = (x - self.mean.to(x.device)) / self.std.to(x.device)
        out = self.conv1(out)
        out = self.block1(out)
        out = self.block2(out)
        out = self.block3(out)
        out = self.relu(self.bn1(out))
        out = F.avg_pool2d(out, 8)
        out = out.view(-1, self.nChannels)
        return self.fc(out)
def WideResNet_70_16():
    """WRN-70-16 without dropout."""
    return WideResNet(depth=70, widen_factor=16, dropRate=0.0)
def WideResNet_70_16_dropout():
    """WRN-70-16 with 0.3 dropout between the convolutions of each block."""
    return WideResNet(depth=70, widen_factor=16, dropRate=0.3)
# ---------------------------- WideResNet ----------------------------
| 7,977 | 38.89 | 116 | py |
DiffPure | DiffPure-master/classifiers/attribute_classifier.py | # ---------------------------------------------------------------
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the NVIDIA Source Code License
# for DiffPure. To view a copy of this license, see the LICENSE file.
# ---------------------------------------------------------------
import torch
import os
from . import attribute_net
softmax = torch.nn.Softmax(dim=1)
def downsample(images, size=256):
    """Average-pool images (N, C, H, W) down to (N, C, size, size).

    The attribute classifiers were built for 256x256 inputs; follows
    https://github.com/NVlabs/stylegan/blob/master/metrics/linear_separability.py#L127

    :param images: tensor of shape (N, C, H, W) with H, W integer multiples
                   of `size` (or already equal to `size`).
    :param size: target spatial resolution.
    :return: tensor of shape (N, C, size, size).
    """
    if images.shape[2] > size:
        factor = images.shape[2] // size
        # Only exact integer downscale factors are supported.
        assert (factor * size == images.shape[2])
        images = images.view(
            [-1, images.shape[1], images.shape[2] // factor, factor, images.shape[3] // factor, factor])
        # Mean over each factor x factor patch == average pooling.
        images = images.mean(dim=[3, 5])
        return images
    else:
        # Bug fix: validate against the requested `size`, not hard-coded 256,
        # so non-default sizes pass through correctly.
        assert (images.shape[-1] == size)
        return images
def get_logit(net, im):
    """Downsample im to 256x256 and return the classifier's raw logit."""
    return net(downsample(im))
def get_softmaxed(net, im):
    """Return (two-class logits, positive-class probability) for im.

    The single-logit output is expanded to [logit, -logit] so a softmax over
    the pair yields a two-class distribution; index 1 is the positive class.
    Fixed: the concatenation is computed once and reused instead of being
    rebuilt for the softmax.
    """
    logit = get_logit(net, im)
    logits = torch.cat([logit, -logit], dim=1)
    softmaxed = softmax(logits)[:, 1]
    return logits, softmaxed
def load_attribute_classifier(attribute, ckpt_path=None):
    """Load a pretrained CelebA-HQ attribute classifier in eval mode on CUDA.

    When ckpt_path is None, the checkpoint is read from
    pretrained/celebahq/<attribute>/net_best.pth.
    """
    if ckpt_path is None:
        base_path = 'pretrained/celebahq'
        attribute_pkl = os.path.join(base_path, attribute, 'net_best.pth')
        ckpt = torch.load(attribute_pkl)
    else:
        ckpt = torch.load(ckpt_path)
    print("Using classifier at epoch: %d" % ckpt['epoch'])
    if 'valacc' in ckpt.keys():
        print("Validation acc on raw images: %0.5f" % ckpt['valacc'])
    # Rebuild the network architecture from the checkpoint's state dict shape.
    detector = attribute_net.from_state_dict(
        ckpt['state_dict'], fixed_size=True, use_mbstd=False).cuda().eval()
    return detector
class ClassifierWrapper(torch.nn.Module):
    """Module wrapper around a pretrained attribute classifier: rescales
    [0, 1] images to [-1, 1] and returns the two-class logits."""
    def __init__(self, classifier_name, ckpt_path=None, device='cuda'):
        super(ClassifierWrapper, self).__init__()
        self.net = load_attribute_classifier(classifier_name, ckpt_path).eval().to(device)
    def forward(self, ims):
        # Inputs are expected in [0, 1]; the classifier was trained on [-1, 1].
        out = (ims - 0.5) / 0.5
        return get_softmaxed(self.net, out)[0]
| 2,276 | 33.5 | 104 | py |
DiffPure | DiffPure-master/classifiers/attribute_net.py | # ---------------------------------------------------------------
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the NVIDIA Source Code License
# for DiffPure. To view a copy of this license, see the LICENSE file.
# ---------------------------------------------------------------
import torch
import torch.nn as nn
import numpy as np
def lerp_clip(a, b, t):
    """Linearly interpolate from a to b with t clamped to [0, 1]."""
    weight = torch.clamp(t, 0.0, 1.0)
    return a + (b - a) * weight
class WScaleLayer(nn.Module):
    """Equalized learning-rate layer (ProGAN/StyleGAN "wscale"): multiplies
    activations by gain/sqrt(fan_in) and adds an optional learned bias."""
    def __init__(self, size, fan_in, gain=np.sqrt(2), bias=True):
        super(WScaleLayer, self).__init__()
        self.scale = gain / np.sqrt(fan_in) # No longer a parameter
        if bias:
            self.b = nn.Parameter(torch.randn(size))
        else:
            self.b = 0
        self.size = size
    def forward(self, x):
        x_size = x.size()
        x = x * self.scale
        # modified to remove warning
        # Bias is broadcast per-channel for 4D (conv) or 2D (linear) inputs;
        # the type() check skips the add when bias was disabled (self.b == 0).
        if type(self.b) == nn.Parameter and len(x_size) == 4:
            x = x + self.b.view(1, -1, 1, 1).expand(
                x_size[0], self.size, x_size[2], x_size[3])
        if type(self.b) == nn.Parameter and len(x_size) == 2:
            x = x + self.b.view(1, -1).expand(
                x_size[0], self.size)
        return x
class WScaleConv2d(nn.Module):
    """Conv2d with equalized learning rate: a bias-free convolution followed
    by a WScaleLayer that applies the gain/sqrt(fan_in) scale and bias."""
    def __init__(self, in_channels, out_channels, kernel_size, padding=0,
                 bias=True, gain=np.sqrt(2)):
        super().__init__()
        self.conv = nn.Conv2d(in_channels, out_channels,
                              kernel_size=kernel_size,
                              padding=padding,
                              bias=False)
        # fan_in of a conv filter: in_channels * kH * kW (square kernel here).
        fan_in = in_channels * kernel_size * kernel_size
        self.wscale = WScaleLayer(out_channels, fan_in, gain=gain, bias=bias)
    def forward(self, x):
        return self.wscale(self.conv(x))
class WScaleLinear(nn.Module):
    """Linear layer with equalized learning rate: bias-free matmul followed
    by a WScaleLayer applying the gain/sqrt(fan_in) scale and bias."""
    def __init__(self, in_channels, out_channels, bias=True, gain=np.sqrt(2)):
        super().__init__()
        self.linear = nn.Linear(in_channels, out_channels, bias=False)
        self.wscale = WScaleLayer(out_channels, in_channels, gain=gain,
                                  bias=bias)
    def forward(self, x):
        return self.wscale(self.linear(x))
class FromRGB(nn.Module):
    """Projects an RGB image into feature space with a (typically 1x1)
    wscale convolution followed by an activation."""
    def __init__(self, in_channels, out_channels, kernel_size,
                 act=nn.LeakyReLU(0.2), bias=True):
        super().__init__()
        self.conv = WScaleConv2d(in_channels, out_channels, kernel_size,
                                 padding=0, bias=bias)
        self.act = act
    def forward(self, x):
        return self.act(self.conv(x))
class Downscale2d(nn.Module):
    """Downscales feature maps spatially by average pooling with the given
    integer factor (factor=2 halves height and width)."""
    def __init__(self, factor=2):
        super().__init__()
        self.downsample = nn.AvgPool2d(kernel_size=factor, stride=factor)
    def forward(self, x):
        return self.downsample(x)
class DownscaleConvBlock(nn.Module):
    """Discriminator block: two wscale convolutions with a 2x downscale,
    matching the ProGAN conv2d_downscale2d layer ordering."""
    def __init__(self, in_channels, conv0_channels, conv1_channels,
                 kernel_size, padding, bias=True, act=nn.LeakyReLU(0.2)):
        super().__init__()
        self.downscale = Downscale2d()
        self.conv0 = WScaleConv2d(in_channels, conv0_channels,
                                  kernel_size=kernel_size,
                                  padding=padding,
                                  bias=bias)
        self.conv1 = WScaleConv2d(conv0_channels, conv1_channels,
                                  kernel_size=kernel_size,
                                  padding=padding,
                                  bias=bias)
        self.act = act
    def forward(self, x):
        x = self.act(self.conv0(x))
        # conv2d_downscale2d applies downscaling before activation
        # the order matters here! has to be conv -> bias -> downscale -> act
        x = self.conv1(x)
        x = self.downscale(x)
        x = self.act(x)
        return x
class MinibatchStdLayer(nn.Module):
    """Minibatch standard-deviation layer (ProGAN): appends one feature map
    containing the std of activations across each minibatch group, letting
    the discriminator detect low sample diversity."""
    def __init__(self, group_size=4):
        super().__init__()
        self.group_size = group_size
    def forward(self, x):
        # Use a smaller group if the batch is smaller than group_size.
        group_size = min(self.group_size, x.shape[0])
        s = x.shape
        # [G, M, C, H, W]: split the batch into G-sized groups.
        y = x.view([group_size, -1, s[1], s[2], s[3]])
        y = y.float()
        # Per-group standard deviation (epsilon for numerical stability)...
        y = y - torch.mean(y, dim=0, keepdim=True)
        y = torch.mean(y * y, dim=0)
        y = torch.sqrt(y + 1e-8)
        # ...averaged over channels and spatial dims to a single scalar.
        y = torch.mean(torch.mean(torch.mean(y, dim=3, keepdim=True),
                                  dim=2, keepdim=True), dim=1, keepdim=True)
        y = y.type(x.type())
        # Broadcast the scalar back to a full [N, 1, H, W] feature map.
        y = y.repeat(group_size, 1, s[2], s[3])
        return torch.cat([x, y], dim=1)
class PredictionBlock(nn.Module):
    """Final discriminator head: optional minibatch-std, a 3x3 wscale conv,
    then two wscale linear layers producing the output score(s)."""
    def __init__(self, in_channels, dense0_feat, dense1_feat, out_feat,
                 pool_size=2, act=nn.LeakyReLU(0.2), use_mbstd=True):
        super().__init__()
        self.use_mbstd = use_mbstd  # attribute classifiers don't have this
        if self.use_mbstd:
            self.mbstd_layer = MinibatchStdLayer()
        # MinibatchStdLayer adds an additional feature dimension
        self.conv = WScaleConv2d(in_channels + int(self.use_mbstd),
                                 dense0_feat, kernel_size=3, padding=1)
        self.dense0 = WScaleLinear(dense0_feat * pool_size * pool_size, dense1_feat)
        # Final layer uses gain=1 (no extra activation scaling on the output).
        self.dense1 = WScaleLinear(dense1_feat, out_feat, gain=1)
        self.act = act
    def forward(self, x):
        if self.use_mbstd:
            x = self.mbstd_layer(x)
        x = self.act(self.conv(x))
        # Flatten to [N, dense0_feat * pool_size * pool_size].
        x = x.view([x.shape[0], -1])
        x = self.act(self.dense0(x))
        x = self.dense1(x)
        return x
class D(nn.Module):
    """
    Progressive-GAN style discriminator built from per-resolution
    DownscaleConvBlocks plus a PredictionBlock head. When fixed_size is
    False, per-resolution fromrgb layers and the `lod_in` buffer implement
    the progressive fade-in between resolutions.
    """
    def __init__(
        self,
        num_channels=3, # Number of input color channels. Overridden based on dataset.
        resolution=128, # Input resolution. Overridden based on dataset.
        fmap_base=8192, # Overall multiplier for the number of feature maps.
        fmap_decay=1.0, # log2 feature map reduction when doubling the resolution.
        fmap_max=512, # Maximum number of feature maps in any layer.
        fixed_size=False, # True = load fromrgb_lod0 weights only
        use_mbstd=True, # False = no mbstd layer in PredictionBlock
        **kwargs): # Ignore unrecognized keyword args.
        super().__init__()
        self.resolution_log2 = resolution_log2 = int(np.log2(resolution))
        # Resolution must be a power of two and at least 4.
        assert resolution == 2 ** resolution_log2 and resolution >= 4
        def nf(stage):
            # Feature-map count for a given stage, capped at fmap_max.
            return min(int(fmap_base / (2.0 ** (stage * fmap_decay))), fmap_max)
        # Level-of-detail input for progressive fade-in (0 = full resolution).
        self.register_buffer('lod_in', torch.from_numpy(np.array(0.0)))
        res = resolution_log2
        setattr(self, 'fromrgb_lod0', FromRGB(num_channels, nf(res - 1), 1))
        # One downscale block per resolution from full size down to 8x8.
        for i, res in enumerate(range(resolution_log2, 2, -1), 1):
            lod = resolution_log2 - res
            block = DownscaleConvBlock(nf(res - 1), nf(res - 1), nf(res - 2),
                                       kernel_size=3, padding=1)
            setattr(self, '%dx%d' % (2 ** res, 2 ** res), block)
            fromrgb = FromRGB(3, nf(res - 2), 1)
            if not fixed_size:
                setattr(self, 'fromrgb_lod%d' % i, fromrgb)
        # 4x4 prediction head.
        res = 2
        pool_size = 2 ** res
        block = PredictionBlock(nf(res + 1 - 2), nf(res - 1), nf(res - 2), 1,
                                pool_size, use_mbstd=use_mbstd)
        setattr(self, '%dx%d' % (pool_size, pool_size), block)
        self.downscale = Downscale2d()
        self.fixed_size = fixed_size
    def forward(self, img):
        x = self.fromrgb_lod0(img)
        for i, res in enumerate(range(self.resolution_log2, 2, -1), 1):
            lod = self.resolution_log2 - res
            x = getattr(self, '%dx%d' % (2 ** res, 2 ** res))(x)
            if not self.fixed_size:
                # Progressive fade-in: blend the block output with a direct
                # projection of the downscaled image, weighted by lod_in.
                img = self.downscale(img)
                y = getattr(self, 'fromrgb_lod%d' % i)(img)
                x = lerp_clip(x, y, self.lod_in - lod)
        res = 2
        pool_size = 2 ** res
        out = getattr(self, '%dx%d' % (pool_size, pool_size))(x)
        return out
def max_res_from_state_dict(state_dict):
    """Infer the discriminator input resolution from the largest
    '<R>x<R>.conv0.conv.weight' key present in state_dict (minimum 4,
    capped at 1024 like the original scan)."""
    first_missing = 11
    for i in range(3, 12):
        if '%dx%d.conv0.conv.weight' % (2 ** i, 2 ** i) not in state_dict:
            first_missing = i
            break
    return 2 ** (first_missing - 1)
def from_state_dict(state_dict, fixed_size=False, use_mbstd=True):
    """Construct a D sized to match state_dict and load the weights into it."""
    res = max_res_from_state_dict(state_dict)
    print(f'res: {res}')
    model = D(num_channels=3, resolution=res, fixed_size=fixed_size,
              use_mbstd=use_mbstd)
    model.load_state_dict(state_dict)
    return model
| 8,507 | 36.315789 | 91 | py |
DiffPure | DiffPure-master/data/datasets.py | # ---------------------------------------------------------------
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the NVIDIA Source Code License
# for DiffPure. To view a copy of this license, see the LICENSE file.
# ---------------------------------------------------------------
import os, sys
import io
import lmdb
import pandas as pd
import numpy as np
from PIL import Image
import torch
import torchvision
from torch.utils.data import Dataset, Subset
import torchvision.transforms as transforms
from torchvision.datasets.vision import VisionDataset
from torchvision.datasets import folder, ImageFolder
# ---------------------------------------------------------------------------------------------------
def remove_prefix(s, prefix):
    """Return s with prefix stripped from the front, if present."""
    return s[len(prefix):] if s.startswith(prefix) else s
class ImageDataset(VisionDataset):
    """
    modified from: https://pytorch.org/docs/stable/_modules/torchvision/datasets/folder.html#ImageFolder
    uses cached directory listing if available rather than walking directory

    The listing cache lives next to the dataset root as '<root>.txt', one
    'relative_path;class_index' entry per line; it is created automatically
    the first time the directory is walked.

    Attributes:
        classes (list): List of the class names.
        class_to_idx (dict): Dict with items (class_name, class_index).
        samples (list): List of (sample path, class_index) tuples
    """
    def __init__(self, root, loader=folder.default_loader,
                 extensions=folder.IMG_EXTENSIONS, transform=None,
                 target_transform=None, is_valid_file=None, return_path=False):
        super(ImageDataset, self).__init__(root, transform=transform,
                                           target_transform=target_transform)
        classes, class_to_idx = self._find_classes(self.root)
        # cached directory listing sits next to the root directory as '<root>.txt'
        cache = self.root.rstrip('/') + '.txt'
        if os.path.isfile(cache):
            print("Using directory list at: %s" % cache)
            with open(cache) as f:
                samples = []
                for line in f:
                    # each cache line is '<path relative to root>;<class index>'
                    (path, idx) = line.strip().split(';')
                    samples.append((os.path.join(self.root, path), int(idx)))
        else:
            print("Walking directory: %s" % self.root)
            samples = folder.make_dataset(self.root, class_to_idx, extensions, is_valid_file)
            # write the cache with paths stored relative to root, so the
            # dataset directory can be relocated without invalidating it
            with open(cache, 'w') as f:
                for line in samples:
                    path, label = line
                    f.write('%s;%d\n' % (remove_prefix(path, self.root).lstrip('/'), label))
        if len(samples) == 0:
            raise (RuntimeError(
                "Found 0 files in subfolders of: " + self.root + "\nSupported extensions are: " + ",".join(extensions)))
        self.loader = loader
        self.classes = classes
        self.class_to_idx = class_to_idx
        self.samples = samples
        # when True, __getitem__ additionally returns the sample's file path
        self.return_path = return_path
    def _find_classes(self, dir):
        """
        Finds the class folders in a dataset.

        Returns (classes, class_to_idx): the sorted list of immediate
        subdirectory names and a mapping from each name to its index.

        Ensures:
            No class is a subdirectory of another.
        """
        if sys.version_info >= (3, 5):
            # Faster and available in Python 3.5 and above
            classes = [d.name for d in os.scandir(dir) if d.is_dir()]
        else:
            classes = [d for d in os.listdir(dir) if os.path.isdir(os.path.join(dir, d))]
        classes.sort()
        class_to_idx = {classes[i]: i for i in range(len(classes))}
        return classes, class_to_idx
    def __getitem__(self, index):
        # Returns (sample, target) or (sample, target, path) when return_path is set.
        path, target = self.samples[index]
        sample = self.loader(path)
        if self.transform is not None:
            sample = self.transform(sample)
        if self.target_transform is not None:
            target = self.target_transform(target)
        if self.return_path:
            return sample, target, path
        return sample, target
    def __len__(self):
        return len(self.samples)
# ---------------------------------------------------------------------------------------------------
# get the attributes from celebahq subset
def make_table(root):
    """Load CelebA attribute annotations restricted to the CelebA-HQ subset.

    Returns a DataFrame indexed by image filename with the binary attribute
    columns (values 1/0) plus a 'partition' column (0=train, 1=val, 2=test).
    """
    image_names = []
    for fname in sorted(os.listdir(f'{root}/images')):
        base = os.path.basename(fname)
        # .png files are renamed to .jpg so they match list_attr_celeba.txt entries
        image_names.append(base.replace('png', 'jpg') if fname.endswith('png') else base)
    full_attrs = pd.read_csv(f'{root}/list_attr_celeba.txt',
                             skiprows=1, delim_whitespace=True, index_col=0)
    # keep only the HQ subset; convert -1/+1 attribute encoding to 0/1
    table = full_attrs.reindex(index=image_names).replace(-1, 0)
    partition_of = {}
    with open(f'{root}/list_eval_partition.txt') as f:
        for line in f:
            name, part = line.strip().split(' ')
            partition_of[name] = int(part)
    table['partition'] = [partition_of[name] for name in table.index]
    return table
###### dataset functions ######
class CelebAHQDataset(Dataset):
    """CelebA-HQ images paired with a single binary attribute label.

    Wraps an ImageDataset over `root`, restricted to one partition
    ('train'/'val'/'test'), optionally subsampled either randomly by
    percentage (`fraction`, seeded via `data_seed`) or as a contiguous chunk
    (`chunk_length` + `chunk_idx`). `attribute` names the column of the
    CelebA attribute table to use as the label. __getitem__ returns
    (image, attribute_label, *extra) where `extra` carries the file path
    when the underlying ImageDataset was built with return_path=True.
    """
    def __init__(self, partition, attribute, root=None, fraction=None, data_seed=1,
                 chunk_length=None, chunk_idx=-1, **kwargs):
        if root is None:
            root = './dataset/celebahq'
        self.fraction = fraction
        self.dset = ImageDataset(root, **kwargs)
        # attribute table indexed by filename, with a 'partition' column
        attr_celebahq = make_table(root)
        # convert from train/val/test to partition numbers
        part_to_int = dict(train=0, val=1, test=2)
        def get_partition_indices(part):
            return np.where(attr_celebahq['partition'] == part_to_int[part])[0]
        partition_idx = get_partition_indices(partition)
        # if we want to further subsample the dataset, just subsample
        # partition_idx and Subset() once
        if fraction is not None:
            print("Using a fraction of the original dataset")
            print("The original dataset has length %d" % len(partition_idx))
            new_length = int(fraction / 100 * len(partition_idx))
            rng = np.random.RandomState(data_seed)
            new_indices = rng.choice(partition_idx, new_length, replace=False)
            partition_idx = new_indices
            print("The subsetted dataset has length %d" % len(partition_idx))
        elif chunk_length is not None and chunk_idx > 0:
            # NOTE(review): chunk_idx must be > 0 here, so chunk 0 cannot be
            # selected this way — confirm whether that is intentional.
            print(f"Using a fraction of the original dataset with chunk_length: {chunk_length}, chunk_idx: {chunk_idx}")
            print("The original dataset has length %d" % len(partition_idx))
            new_indices = partition_idx[chunk_length * chunk_idx: chunk_length * (chunk_idx + 1)]
            partition_idx = new_indices
            print("The subsetted dataset has length %d" % len(partition_idx))
        self.dset = Subset(self.dset, partition_idx)
        attr_subset = attr_celebahq.iloc[partition_idx]
        # filename-indexed pandas Series of 0/1 labels, positionally aligned
        # with self.dset
        self.attr_subset = attr_subset[attribute]
        print('attribute freq: %0.4f (%d / %d)' % (self.attr_subset.mean(),
                                                   self.attr_subset.sum(),
                                                   len(self.attr_subset)))
    def __len__(self):
        return len(self.dset)
    def __getitem__(self, idx):
        data = self.dset[idx]
        # First element is the ImageDataset class index; replace it with the
        # attribute label. attr_subset is indexed by filename, so positional
        # access must use .iloc — integer `[]` on a label-indexed Series is a
        # deprecated positional fallback (removed in pandas 3.0).
        label = self.attr_subset.iloc[idx]
        return (data[0], label, *data[2:])
###### transformation functions ######
def get_transform(dataset, transform_type, base_size=256):
    """Build the torchvision preprocessing pipeline.

    `dataset` is 'celebahq' (base_size must be 256) or any name containing
    'imagenet' (base_size must be 224); `transform_type` selects the variant.
    Unknown dataset/transform_type combinations raise NotImplementedError.
    """
    name = dataset.lower()
    if name == "celebahq":
        assert base_size == 256, base_size
        half_normalize = transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
        if transform_type == 'imtrain':
            return transforms.Compose([
                transforms.Resize(base_size),
                transforms.RandomHorizontalFlip(p=0.5),
                transforms.ToTensor(),
                half_normalize,
            ])
        if transform_type == 'imval':
            # validation: deterministic (no flip), unnormalized tensor in [0, 1]
            return transforms.Compose([
                transforms.Resize(base_size),
                transforms.ToTensor(),
            ])
        if transform_type == 'imcolor':
            return transforms.Compose([
                transforms.Resize(base_size),
                transforms.RandomHorizontalFlip(p=0.5),
                transforms.ColorJitter(brightness=.05, contrast=.05,
                                       saturation=.05, hue=.05),
                transforms.ToTensor(),
                half_normalize,
            ])
        if transform_type == 'imcrop':
            return transforms.Compose([
                # upscale to 1.03125x (256 + 8, or 1024 + 32) before random crop
                transforms.Resize(int(1.03125 * base_size)),
                transforms.RandomCrop(base_size),
                transforms.RandomHorizontalFlip(p=0.5),
                transforms.ToTensor(),
                half_normalize,
            ])
        if transform_type == 'tensorbase':
            # identity transform for compatibility with other datasets
            return transforms.Lambda(lambda x: x)
        raise NotImplementedError
    if "imagenet" in name:
        assert base_size == 224, base_size
        if transform_type in ('imtrain', 'imval'):
            # both share Resize + CenterCrop; training adds a random flip.
            # Output stays unnormalized in [0, 1].
            steps = [transforms.Resize(256), transforms.CenterCrop(base_size)]
            if transform_type == 'imtrain':
                steps.append(transforms.RandomHorizontalFlip(p=0.5))
            steps.append(transforms.ToTensor())
            return transforms.Compose(steps)
        raise NotImplementedError
    raise NotImplementedError
################################################################################
# ImageNet - LMDB
###############################################################################
def lmdb_loader(path, lmdb_data):
    """Fetch the image stored under key `path` in an open LMDB env, decoded as RGB."""
    # read the raw encoded bytes without copying out of the LMDB buffers
    with lmdb_data.begin(write=False, buffers=True) as txn:
        raw = txn.get(path.encode('ascii'))
    return Image.open(io.BytesIO(raw)).convert('RGB')
def imagenet_lmdb_dataset(
        root, transform=None, target_transform=None,
        loader=lmdb_loader):
    """
    ImageFolder-compatible dataset backed by an LMDB cache of the raw image
    bytes plus a pickled ImageFolder index ('<root>_faster_imagefolder.lmdb'
    and '.lmdb.pt'). Both caches are built on first use.

    You can create this dataloader using:
    train_data = imagenet_lmdb_dataset(traindir, transform=train_transform)
    valid_data = imagenet_lmdb_dataset(validdir, transform=val_transform)
    """
    if root.endswith('/'):
        root = root[:-1]
    pt_path = os.path.join(
        root + '_faster_imagefolder.lmdb.pt')
    lmdb_path = os.path.join(
        root + '_faster_imagefolder.lmdb')
    if os.path.isfile(pt_path) and os.path.isdir(lmdb_path):
        print('Loading pt {} and lmdb {}'.format(pt_path, lmdb_path))
        # NOTE(review): torch.load unpickles an ImageFolder object — only
        # safe if the cache file is trusted.
        data_set = torch.load(pt_path)
    else:
        # build the index once via a regular directory walk, then cache it
        data_set = ImageFolder(
            root, None, None, None)
        torch.save(data_set, pt_path, pickle_protocol=4)
        print('Saving pt to {}'.format(pt_path))
        print('Building lmdb to {}'.format(lmdb_path))
        # NOTE(review): map_size is passed as the float 1e12 and the write
        # env is never closed before the read-only env below is opened —
        # confirm both are acceptable to the lmdb binding in use.
        env = lmdb.open(lmdb_path, map_size=1e12)
        with env.begin(write=True) as txn:
            for path, class_index in data_set.imgs:
                with open(path, 'rb') as f:
                    data = f.read()
                # key = absolute image path, value = raw encoded bytes
                txn.put(path.encode('ascii'), data)
    data_set.lmdb_data = lmdb.open(
        lmdb_path, readonly=True, max_readers=1, lock=False, readahead=False,
        meminit=False)
    # reset transform and target_transform
    data_set.samples = data_set.imgs
    data_set.transform = transform
    data_set.target_transform = target_transform
    # route all sample loading through the LMDB env instead of the filesystem
    data_set.loader = lambda path: loader(path, data_set.lmdb_data)
    return data_set
def imagenet_lmdb_dataset_sub(
        root, transform=None, target_transform=None,
        loader=lmdb_loader, num_sub=-1, data_seed=0):
    """LMDB-backed ImageNet set, optionally restricted to `num_sub` random examples."""
    full_set = imagenet_lmdb_dataset(
        root, transform=transform, target_transform=target_transform,
        loader=loader)
    if num_sub <= 0:
        return full_set
    # deterministic subsample without replacement
    rng = np.random.RandomState(data_seed)
    keep = rng.choice(len(full_set), num_sub, replace=False)
    return Subset(full_set, keep)
################################################################################
# CIFAR-10
###############################################################################
def cifar10_dataset_sub(root, transform=None, num_sub=-1, data_seed=0):
    """CIFAR-10 test split, optionally subsampled to `num_sub` random examples."""
    test_set = torchvision.datasets.CIFAR10(root=root, transform=transform, download=True, train=False)
    if num_sub <= 0:
        return test_set
    # deterministic subsample without replacement
    rng = np.random.RandomState(data_seed)
    picked = rng.choice(len(test_set), num_sub, replace=False)
    return Subset(test_set, picked)
| 13,203 | 38.181009 | 120 | py |
Tencent_wsdm_cup2023 | Tencent_wsdm_cup2023-main/pytorch_unbias/pretrain/dataset.py | # -*- coding: utf-8 -*-
import sys,os
import random
import collections
from models.utils import SPECIAL_TOKENS
import logging
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from dataclasses import dataclass
from typing import List, Dict, Any
logger = logging.getLogger(__name__)
MaskedLmInstance = collections.namedtuple("MaskedLmInstance", ["index", "label"])
class MaskingOp():
    """Applies BERT-style whole-token masking to a token-id sequence.

    masked_lm_prob: fraction of candidate tokens to mask.
    max_predictions_per_seq: hard cap on masked positions per sequence.
    vocab_list: ids to draw from for the 10% random-replacement case.
    """
    def __init__(self, masked_lm_prob, max_predictions_per_seq, vocab_list):
        self.masked_lm_prob = masked_lm_prob
        self.max_predictions_per_seq = max_predictions_per_seq
        self.vocab_list = vocab_list
    def create_masked_lm_predictions(self, tokens):
        """Creates the predictions for the masked LM objective. This is mostly copied from the Google BERT repo, but
        with several refactors to clean it up and remove a lot of unnecessary variables.

        Mutates `tokens` in place and returns
        (tokens, masked_token_labels, mask_indices) with indices sorted
        ascending. Only tokens appearing AFTER the first [SEP] are candidates
        (i.e. the leading query part is never masked), and [CLS]/[SEP]
        themselves are always excluded."""
        cand_indices = []
        START_FROM_DOC = False
        for (i, token) in enumerate(tokens): # token_ids
            if token == SPECIAL_TOKENS['SEP']: # SEP: marks start of the doc part; never a candidate
                START_FROM_DOC = True
                continue
            if token == SPECIAL_TOKENS['CLS']: # CLS: never a candidate
                # if token in SPECIAL_TOKENS.values():
                continue
            if not START_FROM_DOC:
                continue
            cand_indices.append([i])
        # at least 1 mask, at most max_predictions_per_seq
        num_to_mask = min(self.max_predictions_per_seq, max(1, int(round(len(cand_indices) * self.masked_lm_prob))))
        random.shuffle(cand_indices)
        masked_lms = []
        covered_indexes = set()
        for index_set in cand_indices:
            if len(masked_lms) >= num_to_mask:
                break
            # If adding a whole-word mask would exceed the maximum number of
            # predictions, then just skip this candidate.
            if len(masked_lms) + len(index_set) > num_to_mask:
                continue
            is_any_index_covered = False
            for index in index_set:
                if index in covered_indexes:
                    is_any_index_covered = True
                    break
            if is_any_index_covered:
                continue
            for index in index_set:
                covered_indexes.add(index)
            # NOTE(review): masking below uses `index` from the loop above, so
            # only the LAST index of index_set is replaced — equivalent here
            # because every index_set is a singleton [i].
            # 80% of the time, replace with [MASK]
            if random.random() < 0.8:
                masked_token = SPECIAL_TOKENS['MASK'] #103
            else:
                # 10% of the time, keep original
                if random.random() < 0.5:
                    masked_token = tokens[index]
                # 10% of the time, replace with random word
                else:
                    masked_token = random.choice(self.vocab_list)
            masked_lms.append(MaskedLmInstance(index=index, label=tokens[index]))
            tokens[index] = masked_token
        assert len(masked_lms) <= num_to_mask
        masked_lms = sorted(masked_lms, key=lambda x: x.index)
        mask_indices = [p.index for p in masked_lms]
        masked_token_labels = [p.label for p in masked_lms]
        return tokens, masked_token_labels, mask_indices
def process_data(query, title, content, max_seq_len, masking_obj=None):
    """Encode [query, title, content] byte-strings into fixed-length id sequences.

    Layout: [CLS] + query + [SEP] + title + [SEP] + content + [SEP], padded
    with [PAD] or truncated to `max_seq_len`. Input fields are b'\\x01'-separated
    integer tokens; each raw id is shifted by +10 to keep ids 0-9 free for
    special tokens. Segment ids are 0 for the query part and 1 for
    title/content (and for padding positions).

    Returns (token_ids, segment_ids), or when `masking_obj` is given,
    (masked_token_ids, segment_ids, lm_label_array) where lm_label_array
    holds the original id at each masked position and -1 elsewhere.
    """
    data = [SPECIAL_TOKENS['CLS']]
    segment = [0]
    data = data + [int(item) + 10 for item in query.split(b'\x01')]  # query tokens
    data = data + [SPECIAL_TOKENS['SEP']]
    segment = segment + [0] * (len(query.split(b'\x01')) + 1)
    data = data + [int(item) + 10 for item in title.split(b'\x01')]  # title tokens
    data = data + [SPECIAL_TOKENS['SEP']]
    segment = segment + [1] * (len(title.split(b'\x01')) + 1)
    data = data + [int(item) + 10 for item in content.split(b'\x01')]  # content tokens
    data = data + [SPECIAL_TOKENS['SEP']]
    segment = segment + [1] * (len(content.split(b'\x01')) + 1)
    # pad / truncate token ids to exactly max_seq_len
    if len(data) < max_seq_len:
        data += [SPECIAL_TOKENS['PAD']] * (max_seq_len - len(data))
    else:
        data = data[:max_seq_len]
    # pad / truncate segment ids (padding positions get segment 1)
    if len(segment) < max_seq_len:
        segment += [1] * (max_seq_len - len(segment))
    else:
        segment = segment[:max_seq_len]
    if masking_obj is not None:
        token_ids, masked_lm_ids, masked_lm_positions = masking_obj.create_masked_lm_predictions(data)
        # `np.int` was removed in NumPy 1.24; plain `int` is the exact
        # equivalent of the old alias.
        lm_label_array = np.full(max_seq_len, dtype=int, fill_value=-1)
        lm_label_array[masked_lm_positions] = masked_lm_ids
        return token_ids, segment, lm_label_array
    else:
        return data, segment
class TrainDatasetBase(IterableDataset):
    """Streams training groups from the 'part-*' click-log files in `directory_path`.

    Each dataloader worker parses a disjoint subset of files. Lines belonging
    to one query are buffered, and when the next query header is reached the
    buffer is handed to `yield_data` (implemented by subclasses).

    fea_d: discrete bias features — presumably name -> (pad/bucket count,
           value-to-index mapping, ..., column index); verify against config.
    fea_c: continuous bias features — indexed below as (bucket count, min,
           max, bucket width, ..., column index).
    """
    def __init__(self, directory_path, fea_d, fea_c, args):
        self.directory_path = directory_path
        self.files = [f for f in os.listdir(self.directory_path) if f.startswith('part')]
        random.shuffle(self.files)
        self.cur_query = "#"  # placeholder until the first query header is read
        self.max_seq_len = args.max_seq_len#128
        self.num_candidates = args.num_candidates # -1 for no debias
        self.vocab_size = args.vocab_size#22000
        self.vocab_list = list(range(self.vocab_size))
        self.masking_obj = MaskingOp(args.masked_lm_prob, args.max_predictions_per_seq, self.vocab_list)
        self.fea_d = fea_d
        self.fea_c = fea_c
        if self.num_candidates > -1:
            # debiasing requires the rank position feature
            assert "rank_pos" in self.fea_d
    def get_normal_feature(self, partition_num, partition_l, min_v, max_v, value):
        # TODO
        # Bucketize a continuous value: 0 for below-range, partition_num - 1
        # for at/above max, else 1 + floor((value - min) / bucket width).
        # NOTE(review): depending on partition_l, the middle branch can reach
        # partition_num - 1 as well — confirm the bucket layout is intended.
        if value < min_v:
            return 0
        elif value >= max_v:
            return partition_num - 1
        else:
            return int((value - min_v) / partition_l) + 1
    def __iter__(self):
        buffer_per_query = []
        #sample_count = 0
        info = torch.utils.data.get_worker_info()
        if info is None:
            worker_num = 1
            worker_id = 0
        else:
            worker_num = info.num_workers
            worker_id = info.id
        ## each worker parses one file
        local_files = [f for i, f in enumerate(self.files) if i % worker_num == worker_id]
        for i, file in enumerate(local_files):
            logger.info(f'load file: {file}')
            if file == 'part-00000': # part-00000.gz is for evaluation
                continue
            for line in open(os.path.join(self.directory_path, file), 'rb'):
                line_list = line.strip(b'\n').split(b'\t')
                if len(line_list) == 3: # new query
                    self.cur_query = line_list[1]
                    # flush the previous query's buffered documents
                    if len(buffer_per_query) > 0:
                        if self.num_candidates > 0:
                            buffer_per_query = buffer_per_query[:self.num_candidates]
                        data = self.yield_data(buffer_per_query, self.num_candidates)
                        if data is not None:
                            yield data
                        buffer_per_query = []
                elif len(line_list) > 6: # urls
                    position, title, content, click_label = line_list[0], line_list[2], line_list[3], line_list[5]
                    if self.num_candidates > 0:
                        # debiasing only models the top-10 positions
                        if int(position) > 10:
                            continue
                    try:
                        src_input, segment, masked_lm_labels = process_data(self.cur_query, title, content, self.max_seq_len, self.masking_obj)
                        sample = {'src_input': src_input, 'segment': segment,
                                  'masked_lm_labels': masked_lm_labels, 'click_label': float(click_label),
                                  'rank_pos': int(position) - 1,
                                  }
                    except:
                        # NOTE(review): bare except silently skips malformed
                        # lines — consider narrowing to `except Exception`.
                        continue
                    # discrete bias features: map raw value to embedding index
                    for fea_name in self.fea_d:
                        if fea_name == 'rank_pos':
                            continue
                        fea = line_list[self.fea_d[fea_name][-1]]
                        emb_idx = self.fea_d[fea_name][1][fea]
                        sample.update({fea_name: emb_idx})
                    # continuous bias features: bucketize into embedding index
                    for fea_name in self.fea_c:
                        fea = float(line_list[self.fea_c[fea_name][-1]])
                        min_v = self.fea_c[fea_name][1]
                        max_v = self.fea_c[fea_name][2]
                        partition_l = self.fea_c[fea_name][3]
                        partition_num = self.fea_c[fea_name][0]
                        emb_idx = self.get_normal_feature(partition_num, partition_l, min_v, max_v, fea)
                        sample.update({fea_name: emb_idx})
                    buffer_per_query.append(sample)
    def yield_data(self, buffer_per_query, num_candidates):
        # Subclass hook: turn one query's buffered samples into a training item
        # (return None to skip the query).
        pass
class PreTrainDatasetGroupwise(TrainDatasetBase):
    """Groupwise variant: per query, yields one clicked (positive) document
    plus `train_group_size - 1` unclicked (negative) documents, together with
    the query's full candidate list padded to `num_candidates` (consumed by
    the propensity network)."""
    def __init__(self, directory_path, train_group_size, fea_d, fea_c, args):
        super(PreTrainDatasetGroupwise, self).__init__(directory_path, fea_d, fea_c, args)
        self.train_group_size = train_group_size
    def yield_data(self, buffer_per_query, num_candidates):
        # pad item to num candidates
        # keep the original (rank-ordered) list before shuffling for sampling
        buffer_out = buffer_per_query.copy()
        random.shuffle(buffer_per_query)
        pos_buffer, neg_buffer = [], []
        for record in buffer_per_query:
            if record['click_label'] > 0:
                pos_buffer.append(record)
            else:
                neg_buffer.append(record)
        # queries without at least one positive AND one negative are skipped
        if len(pos_buffer) == 0 or len(neg_buffer) == 0:
            return None
        pos_record = random.choice(pos_buffer)
        if len(neg_buffer) < self.train_group_size - 1:
            # too few negatives: sample with replacement
            negs = random.choices(neg_buffer, k=self.train_group_size - 1)
        else:
            negs = random.sample(neg_buffer, k=self.train_group_size - 1)
        group = [pos_record] + negs
        # pad the candidate list with dummy samples so every query has exactly
        # num_candidates entries; pad values come from the feature configs
        # (presumably the designated pad bucket index — verify against config)
        pad_size = num_candidates - len(buffer_out)
        for _ in range(pad_size):
            sample_pad = {}
            for name in self.fea_d:
                sample_pad.update({name: self.fea_d[name][0]})
            for name in self.fea_c:
                sample_pad.update({name: self.fea_c[name][0]})
            buffer_out.append(sample_pad)
        return group, buffer_out
@dataclass
class DataCollator:
    """Collates dataset samples into model-ready tensor batches.

    Two layouts are supported:
    - tuple features, as produced by PreTrainDatasetGroupwise: each item is
      (group, padded_candidate_list); the groups are flattened into the main
      batch and the candidate lists become the 'debias_fea' sub-dict.
    - dict features (evaluation): plain per-sample dicts, with optional
      qid/label/freq fields surfaced as 'qids'/'labels'/'freqs' tensors.
    """
    def __call__(self, features) -> Dict[str, Any]:
        if isinstance(features[0], tuple):
            # flatten the list-of-groups into one list of sample dicts
            relative_fea = [f[0] for f in features]
            relative_fea = sum(relative_fea, [])
            token_ids = torch.LongTensor([f['src_input'] for f in relative_fea])
            segment = torch.LongTensor([f['segment'] for f in relative_fea])
            batch_data = {
                'input_ids': token_ids,
                'attention_mask': (token_ids > 0).float(), # torch.FloatTensor, 0 is PAD
                'token_type_ids': segment
            }
            # every remaining per-sample field becomes a LongTensor column
            for name in relative_fea[0]:
                if name in ["qid", "label", "freq", "src_input", "segment"]:
                    continue
                else:
                    fea = torch.LongTensor([f[name] for f in relative_fea])
                    batch_data.update({name: fea})
            # flatten the padded candidate lists for the propensity network
            debias_fea = [f[1] for f in features]
            debias_fea = sum(debias_fea, [])
            debias_dict = collections.OrderedDict()
            # (the duplicated "src_input"/"segment" entries below are harmless)
            for name in debias_fea[0]:
                if name in ["qid", "label", "freq", "src_input", "segment", "src_input", "segment", "masked_lm_labels", "click_label"]:
                    continue
                else:
                    fea = torch.LongTensor([f[name] for f in debias_fea])
                    debias_dict.update({name: fea})
            batch_data.update({"debias_fea": debias_dict})
        else:
            token_ids = torch.LongTensor([f['src_input'] for f in features])
            segment = torch.LongTensor([f['segment'] for f in features])
            batch_data = {
                'input_ids': token_ids,
                'attention_mask': (token_ids > 0).float(), # torch.FloatTensor, 0 is PAD
                'token_type_ids': segment
            }
            for name in features[0]:
                if name in ["qid", "label", "freq", "src_input", "segment"]:
                    continue
                else:
                    fea = torch.LongTensor([f[name] for f in features])
                    batch_data.update({name: fea})
            # evaluation-only fields, exposed under the plural names the
            # trainer's label_names expect
            if 'qid' in features[0]:
                qids = torch.LongTensor([f['qid'] for f in features])
                batch_data.update({'qids': qids})
            if 'label' in features[0]:
                labels = torch.LongTensor([f['label'] for f in features])
                batch_data.update({'labels': labels})
            if 'freq' in features[0]:
                freqs = torch.LongTensor([f['freq'] for f in features])
                batch_data.update({'freqs': freqs})
        return batch_data
###################Test dataset################################
class TestDataset(Dataset):
    """Fixed-size evaluation dataset.

    data_type='annotate': human-annotated relevance file, one example per line
    with (qid, query, title, content, label, freq); the raw query frequency is
    bucketed into 0=head, 1=mid, 2=tail.
    data_type='click': logged click data in the same layout as the training
    part files, capped at `buffer_size` records.
    """
    def __init__(self, fpath, max_seq_len, data_type, buffer_size=300000):
        self.buffer_size = buffer_size
        self.max_seq_len = max_seq_len
        self.data_type = data_type
        if data_type == 'annotate':
            self.buffer = self.load_annotate_data(fpath)
        elif data_type == 'click':
            self.buffer = self.load_click_data(fpath)
        else:
            # previously fell through silently, leaving self.buffer unset
            raise ValueError(f'unknown data_type: {data_type}')
    def __len__(self):
        return len(self.buffer)
    def __getitem__(self, index):
        record = self.buffer[index]
        if len(record) == 4:  # click data: no freq field
            src_input, segment, qid, label = record
            sample = {'src_input': src_input, 'segment': segment,
                      'qid': qid, 'label': label}
        elif len(record) == 5:  # annotated data: includes the freq bucket
            src_input, segment, qid, label, freq = record
            sample = {'src_input': src_input, 'segment': segment,
                      'qid': qid, 'label': label, 'freq': freq}
        else:
            # previously raised UnboundLocalError on `sample`
            raise ValueError(f'unexpected record length: {len(record)}')
        return sample
    def load_annotate_data(self, fpath):
        """Parse the annotation file into [src_input, segment, qid, label, freq] rows."""
        logger.info(f'load annotated data from {fpath}')
        buffer = []
        for line in open(fpath, 'rb'):
            line_list = line.strip(b'\n').split(b'\t')
            qid, query, title, content, label, freq = line_list
            # bucket the raw query frequency into head/mid/tail
            # NOTE(review): a freq outside [0, inf) would pass through as raw
            # bytes — assumed not to occur in this data.
            if 0 <= int(freq) <= 2: # high freq
                freq = 0
            elif 3 <= int(freq) <= 6: # mid freq
                freq = 1
            elif 7 <= int(freq): # tail
                freq = 2
            src_input, src_segment = process_data(query, title, content, self.max_seq_len)
            buffer.append([src_input, src_segment, int(qid), int(label), freq])
        return buffer
    def load_click_data(self, fpath):
        """Parse logged click data into [src_input, segment, qid, click_label] rows,
        stopping after `buffer_size` records."""
        logger.info(f'load logged click data from {fpath}')
        buffer = []
        cur_qids = 0
        for line in open(fpath, 'rb'):
            line_list = line.strip(b'\n').split(b'\t')
            if len(line_list) == 3: # new query
                self.cur_query = line_list[1]
                cur_qids += 1
            elif len(line_list) > 6: # urls
                position, title, content, click_label = line_list[0], line_list[2], line_list[3], line_list[5]
                try:
                    src_input, src_segment = process_data(self.cur_query, title, content,
                                                          self.max_seq_len)
                    buffer.append([src_input, src_segment, cur_qids, int(click_label)])
                except Exception:
                    # best-effort: skip malformed lines (bare `except:` would
                    # also have swallowed KeyboardInterrupt/SystemExit)
                    pass
            if len(buffer) >= self.buffer_size: # we use 300,000 click records for test
                break
        return buffer
| 15,976 | 41.155673 | 143 | py |
Tencent_wsdm_cup2023 | Tencent_wsdm_cup2023-main/pytorch_unbias/pretrain/trainer.py | # -*- coding: utf-8 -*-
import os
from typing import Dict, List, Tuple, Optional, Any, Union
import torch
from transformers.trainer import Trainer
from transformers.trainer_pt_utils import nested_detach
import logging
logger = logging.getLogger(__name__)
class Pretrainer(Trainer):
    """HuggingFace Trainer specialized for joint MLM + CTR pretraining.

    The wrapped model's forward returns (scores, mlm_loss, ctr_loss); the two
    losses are summed. Evaluation supports a dict of eval datasets, switching
    `label_names` per dataset ('click' has no freq field).
    """
    def compute_loss(self, model, inputs):
        # model returns (prediction_scores, mlm_loss, ctr_loss)
        outputs, mlm_loss, ctr_loss = model(**inputs)
        # log the individual loss components at the configured interval
        if self.state.global_step % self.args.logging_steps == 0:
            self.log({'MLM Loss':mlm_loss.item(), 'CTR Loss': ctr_loss.item()})
        # loss = mlm_loss + ctr_loss
        loss = mlm_loss + ctr_loss
        return loss
    def _maybe_log_save_evaluate(self, tr_loss, model, trial, epoch, ignore_keys_for_eval):
        # Override of Trainer._maybe_log_save_evaluate: identical logging and
        # saving, but evaluation iterates over a dict of eval datasets with
        # per-dataset label_names and metric prefixes.
        if self.control.should_log:
            '''
            if is_torch_tpu_available():
                xm.mark_step()
            '''
            logs: Dict[str, float] = {}
            # all_gather + mean() to get average loss over all processes
            tr_loss_scalar = self._nested_gather(tr_loss).mean().item()
            # reset tr_loss to zero
            tr_loss -= tr_loss
            logs["loss"] = round(tr_loss_scalar / (self.state.global_step - self._globalstep_last_logged), 4)
            logs["learning_rate"] = self._get_learning_rate()
            self._total_loss_scalar += tr_loss_scalar
            self._globalstep_last_logged = self.state.global_step
            self.store_flos()
            self.log(logs)
        metrics = {}
        if self.control.should_evaluate:
            if isinstance(self.eval_dataset, dict):
                for eval_dataset_name, eval_dataset in self.eval_dataset.items():
                    # click logs carry no frequency bucket; annotated data does
                    if eval_dataset_name == 'click':
                        self.label_names = ['qids', 'labels']
                    else:
                        self.label_names = ['qids', 'labels', 'freqs']
                    local_metrics = self.evaluate(
                        eval_dataset=eval_dataset,
                        ignore_keys=ignore_keys_for_eval,
                        metric_key_prefix=f"eval_{eval_dataset_name}",
                    )
                    #logger.info(local_metrics)
                    metrics.update(local_metrics)
            else:
                metrics = self.evaluate(ignore_keys=ignore_keys_for_eval)
                #logger.info(metrics)
            self._report_to_hp_search(trial, epoch, metrics)#self.state.global_step
        #
        '''
        if self.control.should_evaluate:
            metrics = self.evaluate(ignore_keys=ignore_keys_for_eval)
            self._report_to_hp_search(trial, epoch, metrics)
        '''
        if self.control.should_save:
            self._save_checkpoint(model, trial, metrics=metrics)
            self.control = self.callback_handler.on_save(self.args, self.state, self.control)
    def prediction_step(
        self,
        model,
        inputs: Dict[str, Union[torch.Tensor, Any]],
        prediction_loss_only: bool,
        ignore_keys: Optional[List[str]] = None,
    ) -> Tuple[Optional[torch.Tensor], Optional[torch.Tensor], Optional[torch.Tensor]]:
        # Evaluation step: runs the model without labels and returns raw
        # logits; loss is never computed here (loss is always None).
        assert prediction_loss_only == False
        assert ignore_keys is None
        #inputs = self._prepare_inputs(inputs)
        has_labels = all(inputs.get(k) is not None for k in self.label_names)
        inputs = self._prepare_inputs(inputs)
        # labels may be popped when computing the loss (label smoothing for instance) so we grab them first.
        if has_labels:
            labels = nested_detach(tuple(inputs.get(name) for name in self.label_names))
            if len(labels) == 1:
                labels = labels[0]
        else:
            labels = None
        with torch.no_grad():
            loss = None
            with self.autocast_smart_context_manager():
                logits = model(**inputs).detach()
        return (loss, logits, labels)
| 3,918 | 36.32381 | 109 | py |
Tencent_wsdm_cup2023 | Tencent_wsdm_cup2023-main/pytorch_unbias/models/modeling.py | # -*- coding: utf-8 -*-
import torch
from torch import nn
import torch.distributed as dist
import torch.nn.functional as F
from transformers import BertModel, BertPreTrainedModel
from torch.nn import CrossEntropyLoss
import logging
logger = logging.getLogger(__name__)
from transformers.activations import ACT2FN
from models.debias_model import DenoisingNetMultiFeature
class BertPredictionHeadTransform(nn.Module):
    """Dense -> activation -> LayerNorm projection applied before the MLM decoder."""
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        # config.hidden_act is either a string key into ACT2FN or a callable
        act = config.hidden_act
        self.transform_act_fn = ACT2FN[act] if isinstance(act, str) else act
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
    def forward(self, hidden_states):
        """Project, apply the activation, and layer-normalize `hidden_states`."""
        return self.LayerNorm(self.transform_act_fn(self.dense(hidden_states)))
class BertLMPredictionHead(nn.Module):
    """Masked-LM head: transform hidden states, then project to vocab logits."""
    def __init__(self, config):
        super().__init__()
        self.transform = BertPredictionHeadTransform(config)
        # The output weights are the same as the input embeddings, but there is
        # an output-only bias for each token. Keeping the bias as a standalone
        # parameter links it to the decoder so it is correctly resized together
        # with `resize_token_embeddings`.
        self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        self.bias = nn.Parameter(torch.zeros(config.vocab_size))
        self.decoder.bias = self.bias
    def forward(self, hidden_states):
        """Return per-token vocabulary logits for `hidden_states`."""
        return self.decoder(self.transform(hidden_states))
class CTRPretrainingModel(BertPreTrainedModel):
    """BERT-based CTR pretraining model.

    Jointly optimizes a masked-LM loss and a groupwise click (CTR) loss; when
    `data_args.num_candidates >= 0`, the click logits are reweighted by a
    learned propensity network (dual-learning-algorithm style debiasing).
    """
    def __init__(
            self,
            config,
            model_args, data_args, training_args, fea_d=None, fea_c=None
    ):
        super(CTRPretrainingModel, self).__init__(config)
        self.bert = BertModel(config)
        self.num_candidates = data_args.num_candidates
        if self.num_candidates >= 0:
            # bias features parsed from a comma-separated "name-..." spec list
            fea_list = data_args.feature_list.strip().split(",")
            self.fea_name = [fea.strip().split("-") for fea in fea_list]
            self.emb_size = training_args.emb_size
            self.propensity_net = DenoisingNetMultiFeature(self.fea_name, self.emb_size, self.num_candidates,
                                                           training_args.per_device_train_batch_size,
                                                           training_args.train_group_size,
                                                           fea_d, fea_c)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.domlp = model_args.do_maxmeanmlp
        # single-logit relevance head
        self.cls = nn.Linear(config.hidden_size, 1)
        self.predictions = BertLMPredictionHead(config) # self.bert.embeddings.word_embeddings.weight
        self.config = config
        self.model_args, self.data_args, self.training_args = model_args, data_args, training_args
        self.mlm_loss_fct = CrossEntropyLoss(ignore_index=-1)
        self.cross_entropy = nn.CrossEntropyLoss(reduction='mean')
        self.logits_to_prob = nn.Softmax(dim=-1)
        self.init_weights()
        if training_args.local_rank >= 0:
            self.world_size = dist.get_world_size()

    def get_group_data(self, inputs):
        # NOTE(review): the reshaped tensor is neither assigned nor returned,
        # so this method is a no-op — it appears to be unused/dead code.
        inputs.view(
            self.training_args.per_device_train_batch_size,
            self.training_args.train_group_size
        )

    def forward(self, input_ids, attention_mask, token_type_ids, masked_lm_labels=None, click_labels=None,
                **kwargs):
        """Score (query, doc) inputs.

        Returns (prediction_scores, mlm_loss, ctr_loss) during pretraining
        (i.e. when masked_lm_labels is given), otherwise just the scores.
        NOTE(review): the extracted source had a dangling `else:` with no
        matching `if` at its indentation; the loss-computation branch has been
        re-nested under `masked_lm_labels is not None`, the only structure
        consistent with both return paths.
        """
        outputs = self.bert(input_ids, attention_mask, token_type_ids, return_dict=True)
        sequence_output = outputs.last_hidden_state  # (bs, seq_len, dim)
        if self.domlp:
            # max+mean pooling over per-token relevance logits
            # NOTE(review): .squeeze() drops ALL size-1 dims and misbehaves
            # for batch size 1 — confirm bs > 1 is guaranteed here.
            out = self.cls(sequence_output).squeeze()
            prediction_scores = out.max(dim=1).values + out.mean(dim=1)
        else:
            pooler_output = outputs.pooler_output  # (bs, dim)
            prediction_scores = self.cls(pooler_output)  # (bs, 1)
        mlm_loss = 0
        if masked_lm_labels is not None:
            lm_prediction_scores = self.predictions(sequence_output)
            mlm_loss += self.mlm_loss_fct(lm_prediction_scores.view(-1, self.config.vocab_size),
                                          masked_lm_labels.view(-1))  # if config.MLM else 0.
            if click_labels is not None:
                # pointwise (NOTE(review): pointwise_ctr_loss is still a TODO
                # stub that returns None)
                ctr_loss = self.pointwise_ctr_loss(prediction_scores, click_labels)
            else:
                # pairwise / groupwise
                if self.num_candidates >= 0:
                    # use propensity score
                    select_pos = kwargs['rank_pos']
                    propensity_scores = self.propensity_net(kwargs["debias_fea"], select_pos)  # (bs, 1)
                    ctr_loss = self.groupwise_ctr_loss_with_dla(prediction_scores, propensity_scores)
                else:
                    ctr_loss = self.groupwise_ctr_loss(prediction_scores)
            return prediction_scores, mlm_loss, ctr_loss
        else:
            # inference: only relevance scores
            return prediction_scores

    def pointwise_ctr_loss(self, prediction_scores, click_labels):
        # TODO
        return None

    def groupwise_ctr_loss(self, prediction_scores):
        """Softmax cross-entropy over each group; the positive is always slot 0."""
        logits = prediction_scores.view(-1, 2)
        if self.training_args.temperature is not None:
            assert self.training_args.temperature > 0
            logits = logits / self.training_args.temperature
        logits = logits.view(
            self.training_args.per_device_train_batch_size,
            self.training_args.train_group_size
        )
        # the clicked document is placed first in every group by the dataset
        target_label = torch.zeros(self.training_args.per_device_train_batch_size,
                                   dtype=torch.long,
                                   device=logits.device)
        loss = self.cross_entropy(logits, target_label)
        return loss

    def groupwise_ctr_loss_with_dla(self, prediction_scores, propensity_scores_pos):
        """Groupwise loss with DLA-style debiasing: logits are multiplied by
        propensity weights before the softmax cross-entropy."""
        logits = prediction_scores.view(-1, 2)
        if self.training_args.temperature is not None:
            assert self.training_args.temperature > 0
            logits = logits / self.training_args.temperature
        logits = logits.view(
            self.training_args.per_device_train_batch_size,
            self.training_args.train_group_size
        )
        propensity_scores_pos = propensity_scores_pos.view(
            self.training_args.per_device_train_batch_size,
            self.training_args.train_group_size
        )
        # only the positive (slot 0) keeps its learned propensity; negatives
        # get a fixed weight of 0.1 (heuristic — confirm against the paper)
        propensity_scores_pos[:, 1:] = 0.1
        propensity_scores = propensity_scores_pos
        logits = logits * propensity_scores
        target_label = torch.zeros(self.training_args.per_device_train_batch_size,
                                   dtype=torch.long,
                                   device=logits.device)
        loss = self.cross_entropy(logits, target_label)
        return loss
Tencent_wsdm_cup2023 | Tencent_wsdm_cup2023-main/pytorch_unbias/models/debias_model.py | # -*- coding: utf-8 -*-
import torch
from torch import nn
from torch.nn import BatchNorm1d
class DenoisingNetMultiFeature(nn.Module):
    """Propensity (examination-bias) network over multiple bias features.

    Embeds each discrete/bucketized bias feature, runs an MLP over the
    concatenation to get one bias logit per candidate, softmax-normalizes the
    `num_candidates` logits per query, and gathers the scores at the
    positions the training group actually selected.
    """
    def __init__(self, fea_name, emb_size, num_candidates, per_device_train_batch_size, train_group_size, fea_d, fea_c):
        super(DenoisingNetMultiFeature, self).__init__()
        input_vec_size = len(fea_name) * emb_size
        self.num_candidates = num_candidates
        self.per_device_train_batch_size = per_device_train_batch_size
        self.train_group_size = train_group_size
        ######################### Create MLP model #########################
        self.relu_layer = nn.ReLU()
        self.norm = BatchNorm1d(input_vec_size)
        self.dropout = nn.Dropout(0.1)
        self.linear_layer_1 = nn.Linear(input_vec_size, input_vec_size)
        self.linear_layer_2 = nn.Linear(input_vec_size, input_vec_size)
        self.linear_layer_3 = nn.Linear(input_vec_size, int(input_vec_size / 2))
        self.linear_layer_4 = nn.Linear(int(input_vec_size / 2), int(input_vec_size / 2))
        self.linear_layer_5 = nn.Linear(int(input_vec_size / 2), 1)
        self.propensity_net = nn.Sequential(
            self.linear_layer_1, BatchNorm1d(input_vec_size), self.relu_layer,
            self.linear_layer_2, BatchNorm1d(input_vec_size), self.relu_layer,
            self.linear_layer_3, BatchNorm1d(int(input_vec_size / 2)), self.relu_layer,
            self.linear_layer_4, BatchNorm1d(int(input_vec_size / 2)), self.relu_layer,
            self.linear_layer_5,
        ).cuda()
        ############# Create Embedding for discrete feature ################
        # FIX: use nn.ModuleDict instead of a plain dict so the embeddings are
        # registered as submodules — a plain dict leaves their weights out of
        # parameters() (never optimized) and state_dict() (never checkpointed).
        # NOTE: this adds 'fea_emb.*' keys to the state_dict relative to the
        # old (broken) checkpoints.
        self.fea_emb = nn.ModuleDict()
        for name in fea_d:
            self.fea_emb[name] = torch.nn.Embedding(fea_d[name][0]+1, emb_size).cuda()
        for name in fea_c:
            self.fea_emb[name] = torch.nn.Embedding(fea_c[name][0]+1, emb_size).cuda()
        self.logits_to_prob = nn.Softmax(dim=1)
    def forward(self, debias_fea, select_pos):
        """debias_fea: ordered dict of LongTensors (one per bias feature) over
        the flattened (batch * num_candidates) candidate list; select_pos:
        the per-group candidate positions to gather scores for."""
        # get embedding for discrete feature
        fea = []
        for name in debias_fea:
            fea.append(self.fea_emb[name](debias_fea[name]))
        # concat all features
        fea = torch.cat(fea, dim=1)
        fea = self.norm(fea)
        # cal bias score: one logit per candidate, softmaxed per query
        bias_scores = self.propensity_net(fea)
        bias_scores = bias_scores.view(
            self.per_device_train_batch_size,
            self.num_candidates
        )
        bias_scores = self.logits_to_prob(bias_scores)
        # gather the propensities at the positions used in each training group
        select_pos = select_pos.view(
            self.per_device_train_batch_size,
            self.train_group_size
        )
        select_bias_scores = []
        for bs in range(self.per_device_train_batch_size):
            select_bias_scores.append(bias_scores[bs].index_select(dim=0, index=select_pos[bs]))
        select_bias_scores = torch.cat(select_bias_scores)
        return select_bias_scores
| 2,884 | 44.078125 | 120 | py |
Tencent_wsdm_cup2023 | Tencent_wsdm_cup2023-main/paddle_pretrain/convert/convert-onnx.py | # -*- coding: utf-8 -*-
# @Time : 2023/1/3 23:32
# @Author : Xiangsheng Li
# @File : convert-onnx.py
import sys
import numpy as np
sys.path.append('../../pytorch_pretrain')
from transformers import AutoConfig
from models.modeling import CTRPretrainingModel
import torch  # FIX: torch is used below (torch.tensor / torch.onnx.export) but was never imported

# Names and dummy inputs used for tracing the model during ONNX export.
input_names = ["input_ids","attention_mask","token_type_ids"]
output_names = ["output_0"]
input_ids = torch.tensor(np.random.rand(1, 128).astype("int64"))
attention_mask = torch.tensor(np.random.rand(1, 128).astype("float32"))
token_type_ids = torch.tensor(np.random.rand(1, 128).astype("int64"))
#Path to torch checkpoint
bert_name_path = 'DATA_ROOT/outputs/finetune/large_group2_wwm/checkpoint-590000/checkpoint-4180'
torch_config = AutoConfig.from_pretrained(bert_name_path)
bert_model = CTRPretrainingModel.from_pretrained(bert_name_path,config=torch_config,model_args=None,data_args=None,training_args=None)
# Export with a dynamic batch dimension (axis 0) on all inputs and the output.
torch.onnx.export(bert_model, (input_ids,
                               attention_mask,
                               token_type_ids), 'model.onnx', opset_version=11, input_names=input_names,
                  output_names=output_names, dynamic_axes={'input_ids': [0],'attention_mask': [0],'token_type_ids': [0], 'output_0': [0]})
#python -m onnxsim model.onnx model_sim.onnx
#x2paddle --framework=onnx --model=model_sim.onnx --save_dir=pd_model
| 1,335 | 40.75 | 138 | py |
Tencent_wsdm_cup2023 | Tencent_wsdm_cup2023-main/paddle_pretrain/finetune/dataset.py | # -*- coding: utf-8 -*-
# @Time : 2022/12/28 23:20
# @Author : Xiangsheng Li
# @File : dataset.py
import sys,os
import random
import collections
from models.utils import SPECIAL_TOKENS
import logging
import numpy as np
import paddle
from paddle.io import Dataset, IterableDataset
from dataclasses import dataclass
from typing import List, Dict, Any
from collections import defaultdict
from tqdm.auto import tqdm
from pretrain.dataset import (
process_data,
TestDataset
)
class PairwiseFinetuneDataset(Dataset):
    """Map-style dataset that samples (query, relevant doc, less-relevant doc) pairs.

    One item per query with at least one relevant document; documents are
    deduplicated by their (title, content) bytes.
    """
    def __init__(self, fpath):
        # qid (bytes) -> query (bytes)
        self.queries = {}
        # docid (int) -> (title, content) byte tuple
        self.docs = dict()
        # qid -> [(docid, label)] with label >= 2 (treated as relevant)
        self.q_rels = defaultdict(list)
        # qid -> [(docid, label)] with label < 2
        self.q_irrels = defaultdict(list)
        self._load_annotated_data(fpath)
        # Dataset index -> qid; only queries that have at least one relevant doc.
        self.idx2qids = {i:qid for i,qid in enumerate(self.q_rels.keys())}
        self.all_docids = list(self.docs.keys())
    def _load_annotated_data(self, fpath):
        """Parse the tab-separated annotation file and fill the lookup tables."""
        #buffer = []
        doc2docid = {}
        for line in tqdm(open(fpath, 'rb'),
                         desc='load annotated data'):
            line_list = line.strip(b'\n').split(b'\t')
            qid, query, title, content, label, freq = line_list
            if qid not in self.queries:
                self.queries[qid] = query
            # Deduplicate documents by (title, content).
            if (title, content) not in doc2docid:
                doc2docid[(title, content)] = len(doc2docid)
            docid = doc2docid[(title, content)]
            if docid not in self.docs:
                self.docs[docid] = (title, content)
            label = int(label)
            if label >= 2: #2,1
                self.q_rels[qid].append((docid, label))
            else:
                self.q_irrels[qid].append((docid, label))
    def __len__(self):
        # Number of queries with at least one relevant document.
        return len(self.q_rels)
    def __getitem__(self, index):
        """Sample one (relevant, strictly-lower-label) document pair for a query.

        Falls back to a random document from the whole corpus when the query
        has no document with a strictly lower label than the sampled one.
        """
        qid = self.idx2qids[index]
        query = self.queries[qid]
        rel_doc_id, rel_label = random.choice(self.q_rels[qid])
        all_doc_list = self.q_rels[qid] + self.q_irrels[qid]
        # Candidates: any other doc of this query with a strictly lower label.
        rest_docids = [docid for (docid, label) in all_doc_list if (rel_doc_id != docid and rel_label > label)]
        if len(rest_docids) == 0:
            irrel_doc_id = random.choice(self.all_docids)
        else:
            irrel_doc_id = random.choice(rest_docids)
        return {'query':query, 'rel_doc': self.docs[rel_doc_id], 'irrel_doc': self.docs[irrel_doc_id]}
@dataclass
class PairwiseDataCollator:
    """Collate pairwise samples into paddle tensors.

    Each sample contributes two consecutive rows — the relevant document
    first, then the irrelevant one — so downstream code can reshape the
    scores to (-1, 2) for pairwise ranking.
    """
    def __init__(self, max_seq_len):
        self.max_seq_len = max_seq_len
    def __call__(self, features: Dict[str, Any]) -> Dict[str, Any]:
        token_rows, segment_rows = [], []
        for sample in features:
            query = sample['query']
            # Positive document first, its negative immediately after.
            for doc_key in ('rel_doc', 'irrel_doc'):
                title, content = sample[doc_key][0], sample[doc_key][1]
                tokens, segments = process_data(query, title, content, self.max_seq_len)
                token_rows.append(tokens)
                segment_rows.append(segments)
        token_arr = np.array(token_rows, dtype=np.int64)
        segment_arr = np.array(segment_rows, dtype=np.int64)
        return {
            'input_ids': paddle.to_tensor(token_arr, dtype="int64"),
            # Token id 0 is PAD, so any positive id gets attention weight 1.
            'attention_mask': paddle.to_tensor((token_arr > 0).astype('float32'), dtype='float32'),
            'token_type_ids': paddle.to_tensor(segment_arr, dtype="int64"),
        }
| 3,711 | 35.038835 | 136 | py |
Tencent_wsdm_cup2023 | Tencent_wsdm_cup2023-main/paddle_pretrain/finetune/trainer.py | # -*- coding: utf-8 -*-
# @Time : 2022/12/28 23:27
# @Author : Xiangsheng Li
# @File : trainer.py
import os
from typing import Dict, List, Tuple, Optional, Any, Union
'''
import torch
from torch.utils.data import DataLoader
from torch.nn import Softmax, MarginRankingLoss
'''
import paddle
from paddle.io import DataLoader
from paddle.nn import Softmax, MarginRankingLoss
from pretrain.trainer import Pretrainer as Trainer
from pretrain.dataset import DataCollator
import logging
logger = logging.getLogger(__name__)
class Finetuner(Trainer):
    """Pairwise fine-tuning trainer built on top of the pre-training Trainer."""
    def __init__(self, *args, **kwargs):
        super(Finetuner, self).__init__(*args, **kwargs)
        self.softmax = Softmax(axis=1)
    def compute_loss(self, model, inputs):
        # Fine-tuning uses only the pairwise margin-ranking objective.
        return self.compute_pair_loss(model, inputs)
    def compute_pair_loss(self, model, inputs):
        """Margin ranking loss over softmaxed (positive, negative) score pairs."""
        scores = model(**inputs)
        # Rows alternate pos/neg, so fold them into (-1, 2) pairs.
        paired = scores.reshape((-1, 2))
        temperature = self.args.temperature
        if temperature is not None:
            assert temperature > 0
            paired = paired / temperature
        probs = self.softmax(paired)
        pos_scores = probs[:, 0]
        neg_scores = probs[:, 1]
        ranking_loss = MarginRankingLoss(margin=1.0)
        target = paddle.ones_like(pos_scores)
        return ranking_loss(pos_scores, neg_scores, target)
    def get_eval_dataloader(self, eval_dataset) -> DataLoader:
        """Build the eval DataLoader using the pre-training DataCollator."""
        if eval_dataset is None:
            eval_dataset = self.eval_dataset
        return DataLoader(
            eval_dataset,
            batch_sampler=self._get_eval_sampler(eval_dataset),
            collate_fn=DataCollator(),
            num_workers=self.args.dataloader_num_workers,
        )
| 1,876 | 26.202899 | 86 | py |
Tencent_wsdm_cup2023 | Tencent_wsdm_cup2023-main/paddle_pretrain/models/modeling.py | # -*- coding: utf-8 -*-
# @Time : 2022/10/25 12:02
# @Author : Xiangsheng Li
# @File : modeling.py
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddle.nn import Layer
from paddlenlp.transformers import (
BertPretrainedModel as BertPreTrainedModel,
BertModel,
ACT2FN
)
from paddle.nn import CrossEntropyLoss
import logging
logger = logging.getLogger(__name__)
class BertPredictionHeadTransform(Layer):
    """Dense -> activation -> LayerNorm transform applied before the LM decoder."""
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        # config.hidden_act is either an activation name or a callable.
        self.transform_act_fn = (
            ACT2FN[config.hidden_act]
            if isinstance(config.hidden_act, str)
            else config.hidden_act
        )
        self.LayerNorm = nn.LayerNorm(config.hidden_size, epsilon=config.layer_norm_eps)
    def forward(self, hidden_states):
        projected = self.dense(hidden_states)
        activated = self.transform_act_fn(projected)
        return self.LayerNorm(activated)
class BertLMPredictionHead(Layer):
    """Masked-LM head: transform the sequence output, then project to vocab logits.

    NOTE(review): despite the inherited comment below, the decoder weight is
    NOT tied to the input embeddings here — nn.Linear allocates its own
    (hidden_size, vocab_size) weight; confirm whether tying was intended.
    """
    def __init__(self, config):
        super().__init__()
        self.transform = BertPredictionHeadTransform(config)
        # The output weights are the same as the input embeddings, but there is
        # an output-only bias for each token.
        # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
        #self.bias = nn.Parameter(torch.zeros(config.vocab_size))
        #self.decoder.bias = self.bias
        # Zero-initialized bias via ParamAttr (paddle's analogue of the torch
        # code commented out above).
        # NOTE(review): ParamAttr name="bias" gives the parameter a fixed
        # global name — presumably safe with a single head; verify no naming
        # collision if the model ever holds two such heads.
        bias_attr = paddle.ParamAttr(
            name="bias",
            initializer=paddle.nn.initializer.Constant(value=0.0))
        self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias_attr=bias_attr) # , bias=False
    def forward(self, hidden_states):
        # (bs, seq_len, hidden) -> (bs, seq_len, vocab_size)
        hidden_states = self.transform(hidden_states)
        hidden_states = self.decoder(hidden_states)
        return hidden_states
class CTRPretrainingModel(BertPreTrainedModel):
    """BERT-based CTR pre-training model with a joint MLM + groupwise CTR objective.

    forward() returns:
      * (prediction_scores, mlm_loss, ctr_loss) when `masked_lm_labels` is given;
      * prediction_scores alone otherwise (inference / export).
    """
    def __init__(
            self,
            config,
            model_args, data_args, training_args
    ):
        super(CTRPretrainingModel, self).__init__(config)
        self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # Pointwise CTR head on top of the pooled [CLS] representation.
        self.cls = nn.Linear(config.hidden_size, 1)
        self.predictions = BertLMPredictionHead(config)
        self.config = config
        self.model_args, self.data_args, self.training_args = model_args, data_args, training_args
        # -1 marks unmasked positions and is ignored by the MLM loss.
        self.mlm_loss_fct = CrossEntropyLoss(ignore_index=-1)
        self.cross_entropy = nn.CrossEntropyLoss(reduction='mean')

    def forward(self, input_ids, attention_mask, token_type_ids, masked_lm_labels=None):
        outputs = self.bert(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, return_dict=True)
        sequence_output = outputs.last_hidden_state  # (bs, seq_len, dim)
        pooler_output = outputs.pooler_output  # (bs, dim)
        prediction_scores = self.cls(pooler_output)  # (bs, 1)
        if masked_lm_labels is not None:
            lm_prediction_scores = self.predictions(sequence_output)
            mlm_loss = self.mlm_loss_fct(lm_prediction_scores.reshape((-1, self.config.vocab_size)),
                                         masked_lm_labels.reshape((-1,))
                                         )
            ctr_loss = self.groupwise_ctr_loss(prediction_scores)
            # FIX: the previous `if mlm_loss and ctr_loss:` relied on tensor
            # truthiness and silently returned only the scores whenever either
            # loss happened to be exactly 0.0; both losses are always computed
            # in this branch, so return the full triple unconditionally.
            return prediction_scores, mlm_loss, ctr_loss
        return prediction_scores

    def groupwise_ctr_loss(self, prediction_scores):
        """Listwise softmax cross-entropy; the clicked doc sits at index 0 of each group."""
        logits = prediction_scores
        if self.training_args.temperature is not None:
            assert self.training_args.temperature > 0
            logits = logits / self.training_args.temperature
        logits = logits.reshape(
            (self.training_args.per_device_train_batch_size,
             self.training_args.train_group_size)
        )
        # Target 0: the positive document is always first within a group.
        target_label = paddle.zeros((self.training_args.per_device_train_batch_size,),
                                    dtype='int64')
        loss = self.cross_entropy(logits, target_label)
        return loss
| 4,428 | 36.218487 | 118 | py |
Tencent_wsdm_cup2023 | Tencent_wsdm_cup2023-main/pytorch_pretrain/mt_pretrain/dataset.py | # -*- coding: utf-8 -*-
# @Time : 2022/10/25 21:07
# @Author : Xiangsheng Li
# @File : dataset.py
import sys,os
import random
import collections
from models.utils import SPECIAL_TOKENS
import logging
import numpy as np
import torch
from torch.utils.data import Dataset,IterableDataset
from dataclasses import dataclass
from typing import List, Dict, Any
logger = logging.getLogger(__name__)
MaskedLmInstance = collections.namedtuple("MaskedLmInstance", ["index", "label"])
class MaskingOp():
    """Whole-word(-unigram) masking for the MLM objective.

    Only tokens after the first [SEP] (i.e. the document side) are mask
    candidates. When `unigram_set` is given, the longest token span present
    in the set is treated as one maskable unit (greedy longest-match-first);
    otherwise single tokens are used.
    """
    def __init__(self, masked_lm_prob, max_predictions_per_seq, vocab_list, unigram_set=None):
        # masked_lm_prob: fraction of candidate units to mask.
        # max_predictions_per_seq: hard cap on masked positions per sequence.
        # vocab_list: pool of token ids for the 10% random-replacement case.
        # unigram_set: optional set of token-id tuples treated as whole words.
        self.masked_lm_prob = masked_lm_prob
        self.max_predictions_per_seq = max_predictions_per_seq
        self.vocab_list = vocab_list
        self.unigram_set = unigram_set
    def create_masked_lm_predictions(self, tokens):
        """Creates the predictions for the masked LM objective. This is mostly copied from the Google BERT repo, but
        with several refactors to clean it up and remove a lot of unnecessary variables.

        Mutates `tokens` in place. Returns (tokens, masked_token_labels,
        mask_indices) with mask_indices sorted ascending and
        masked_token_labels holding the original token ids.
        """
        cand_indices = []
        START_FROM_DOC = False
        start = 0
        #for (i, token) in enumerate(tokens): # token_ids
        while start < len(tokens):
            token = tokens[start]
            if token == SPECIAL_TOKENS['SEP']: # SEP
                # Everything from the first SEP onward is document text.
                START_FROM_DOC = True
                start += 1
                continue
            if token in SPECIAL_TOKENS.values(): # CLS
                start += 1
                continue
            if not START_FROM_DOC:
                # Skip query tokens: only document tokens are masked.
                start += 1
                continue
            if self.unigram_set is not None:
                # Greedy longest-match: shrink `end` until the span is a known
                # unigram or degenerates to a single token.
                end = len(tokens)
                while start < end:
                    if tuple(tokens[start:end]) in self.unigram_set or (start + 1 == end):
                        cand_indices.append(list(range(start,end)))
                        break
                    end -= 1
                start = end
            else:
                cand_indices.append([start])
                start += 1
        num_to_mask = min(self.max_predictions_per_seq, max(1, int(round(len(cand_indices) * self.masked_lm_prob))))
        random.shuffle(cand_indices)
        masked_lms = []
        covered_indexes = set()
        for index_set in cand_indices:
            if len(masked_lms) >= num_to_mask:
                break
            # If adding a whole-word mask would exceed the maximum number of
            # predictions, then just skip this candidate.
            if len(masked_lms) + len(index_set) > num_to_mask:
                continue
            is_any_index_covered = False
            for index in index_set:
                if index in covered_indexes:
                    is_any_index_covered = True
                    break
            if is_any_index_covered:
                continue
            for index in index_set:
                covered_indexes.add(index)
                # 80% of the time, replace with [MASK]
                if random.random() < 0.8:
                    masked_token = SPECIAL_TOKENS['MASK']#103
                else:
                    # 10% of the time, keep original
                    if random.random() < 0.5:
                        masked_token = tokens[index]
                    # 10% of the time, replace with random word
                    else:
                        masked_token = random.choice(self.vocab_list)
                masked_lms.append(MaskedLmInstance(index=index, label=tokens[index]))
                tokens[index] = masked_token
        assert len(masked_lms) <= num_to_mask
        masked_lms = sorted(masked_lms, key=lambda x: x.index)
        mask_indices = [p.index for p in masked_lms]
        masked_token_labels = [p.label for p in masked_lms]
        return tokens, masked_token_labels, mask_indices
def process_data(query, title, content, max_seq_len, masking_obj=None):
    """ process [query, title, content] into a tensor
    [CLS] + query + [SEP] + title + [SEP] + content + [SEP] + [PAD]

    query/title/content are b'\\x01'-separated byte strings of token ids;
    every id is shifted by +10 to make room for the special tokens.
    Returns (token_ids, segment_ids), or — when `masking_obj` is given —
    (masked_token_ids, segment_ids, lm_label_array) where lm_label_array
    holds the original id at each masked position and -1 elsewhere.
    """
    data = [SPECIAL_TOKENS['CLS']]
    segment = [0]
    query_splits = [item for item in query.split(b'\x01') if len(item.strip()) > 0]
    data = data + [int(item) + 10 for item in query_splits]  # query
    data = data + [SPECIAL_TOKENS['SEP']]
    segment = segment + [0] * (len(query_splits) + 1)
    title_splits = [item for item in title.split(b'\x01') if len(item.strip()) > 0]
    data = data + [int(item) + 10 for item in title_splits]  # title
    data = data + [SPECIAL_TOKENS['SEP']]
    segment = segment + [1] * (len(title_splits) + 1)
    content_splits = [item for item in content.split(b'\x01') if len(item.strip()) > 0]
    data = data + [int(item) + 10 for item in content_splits]  # content
    data = data + [SPECIAL_TOKENS['SEP']]
    segment = segment + [1] * (len(content_splits) + 1)
    # Pad / truncate token ids to exactly max_seq_len.
    if len(data) < max_seq_len:
        data += [SPECIAL_TOKENS['PAD']] * (max_seq_len - len(data))
    else:
        data = data[:max_seq_len]
    # Pad / truncate segment ids to exactly max_seq_len.
    if len(segment) < max_seq_len:
        segment += [1] * (max_seq_len - len(segment))
    else:
        segment = segment[:max_seq_len]
    if masking_obj is not None:
        token_ids, masked_lm_ids, masked_lm_positions = masking_obj.create_masked_lm_predictions(data)
        # FIX: `np.int` was removed in NumPy 1.24; int64 matches the
        # torch.LongTensor conversion applied by the collators downstream.
        lm_label_array = np.full(max_seq_len, dtype=np.int64, fill_value=-1)
        lm_label_array[masked_lm_positions] = masked_lm_ids
        return token_ids, segment, lm_label_array
    else:
        return data, segment
class TrainDatasetBase(IterableDataset):
    """Base iterable dataset that streams click-log shards (part-*) from a directory.

    Rows belonging to one query are buffered; when the next query row is seen,
    `yield_data` (implemented by subclasses) turns the buffer into training
    samples. Files are sharded across DataLoader workers by index.
    """
    def __init__(self, directory_path, args):
        self.directory_path = directory_path
        self.files = [f for f in os.listdir(self.directory_path) if f.startswith('part')]
        random.shuffle(self.files)
        self.cur_query = "#"
        self.max_seq_len = args.max_seq_len#128
        self.vocab_size = args.vocab_size#22000
        self.vocab_list = list(range(self.vocab_size))
        #self.unigram_set = set([tuple(map(lambda t:int(t) + 10, line.strip().split('\x01'))) for line in open(args.unigram_dict_addr) if len(line.strip()) > 0])
        # Optional whole-word dict; ids are shifted by +10 like in process_data.
        if args.unigram_dict_addr is not None:
            self.unigram_set = set(
                [tuple(map(lambda t:int(t) + 10, line.strip().split('\x01')))
                for line in open(args.unigram_dict_addr) if len(line.strip()) > 0]
            )
        else:
            self.unigram_set = None
        self.masking_obj = MaskingOp(args.masked_lm_prob, args.max_predictions_per_seq, self.vocab_list, self.unigram_set)
    def __iter__(self):
        """Yield groupwise samples and dwell-time positive pairs per query."""
        buffer_per_query = []
        #sample_count = 0
        info = torch.utils.data.get_worker_info()
        if info is None:
            worker_num = 1
            worker_id = 0
        else:
            worker_num = info.num_workers
            worker_id = info.id
        ## each worker parses one file
        local_files = [f for i, f in enumerate(self.files) if i % worker_num == worker_id]
        for i, file in enumerate(local_files):
            logger.info(f'load file: {file}')
            if file == 'part-00000': # part-00000.gz is for evaluation
                continue
            for line in open(os.path.join(self.directory_path, file), 'rb'):
                line_list = line.strip(b'\n').split(b'\t')
                if len(line_list) == 3: # new query
                    self.cur_query = line_list[1]
                    #print("line_list: ", buffer_per_query)
                    if len(buffer_per_query) > 0:
                        # Subclass decides how the buffered rows become samples.
                        (norm_pair, pos_pair) = self.yield_data(buffer_per_query)
                        if norm_pair is not None:
                            yield norm_pair
                        if pos_pair is not None:
                            yield pos_pair
                        buffer_per_query = []
                elif len(line_list) > 6: # urls
                    position, title, content, click_label = line_list[0], line_list[2], line_list[3], line_list[5]
                    dwell_time = float(line_list[16])
                    try:
                        src_input, segment, masked_lm_labels = process_data(self.cur_query, title, content,
                                                                            self.max_seq_len, self.masking_obj)
                        sample = {'src_input': src_input, 'segment': segment,
                                  'masked_lm_labels': masked_lm_labels, 'click_label': float(click_label),
                                  'dwell_time':dwell_time}
                        buffer_per_query.append(sample)
                    except Exception as e:
                        # Best-effort: malformed rows are skipped silently.
                        #print(e)
                        pass
    def yield_data(self,buffer_per_query):
        # Template method: subclasses return (group_sample, pos_pair_or_None).
        pass
class PreTrainDatasetGroupwise(TrainDatasetBase):
    """Groupwise dataset: one clicked doc + (train_group_size - 1) negatives,
    plus an optional dwell-time-ordered pair of clicked docs."""
    def __init__(self, directory_path, train_group_size, args):
        super(PreTrainDatasetGroupwise, self).__init__(directory_path, args)
        self.train_group_size = train_group_size
    def yield_data(self, buffer_per_query):
        random.shuffle(buffer_per_query)
        clicked = [r for r in buffer_per_query if r['click_label'] > 0]
        unclicked = [r for r in buffer_per_query if not (r['click_label'] > 0)]
        if not clicked or not unclicked:
            # Need at least one positive and one negative to form a group.
            return None, None
        anchor = random.choice(clicked)
        n_negs = self.train_group_size - 1
        if len(unclicked) < n_negs:
            # Not enough distinct negatives: sample with replacement.
            negatives = random.choices(unclicked, k=n_negs)
        else:
            negatives = random.sample(unclicked, k=n_negs)
        group = [anchor] + negatives
        pos_pair = None
        if len(clicked) >= 2:
            pos_pair = random.choices(clicked, k=2)
            gap = pos_pair[0]['dwell_time'] - pos_pair[1]['dwell_time']
            if abs(gap) < 8:  # dwell times too close to define a preference
                pos_pair = None
            elif gap < 0:
                # Keep the longer-dwell document first.
                pos_pair = [pos_pair[1], pos_pair[0]]
        return (group, pos_pair)
@dataclass
class DataCollator:
    """Collate (possibly groupwise-nested) feature dicts into torch batches."""
    def __call__(self, features) -> Dict[str, Any]:
        # Groupwise datasets yield lists of groups; flatten to one flat list.
        if isinstance(features[0], list):
            features = sum(features, [])
        input_ids = torch.LongTensor([row['src_input'] for row in features])
        token_types = torch.LongTensor([row['segment'] for row in features])
        batch = {
            'input_ids': input_ids,
            # Token id 0 is PAD, so nonzero ids attend.
            'attention_mask': (input_ids > 0).float(),
            'token_type_ids': token_types,
        }
        # Training batches (pointwise or groupwise) also carry labels.
        first = features[0]
        if 'masked_lm_labels' in first:
            batch['masked_lm_labels'] = torch.LongTensor(
                [row['masked_lm_labels'] for row in features])
        if 'click_label' in first:
            batch['click_labels'] = torch.LongTensor(
                [row['click_label'] for row in features])
        return batch
@dataclass
class TestDataCollator:
    """Collate evaluation feature dicts (plus optional qid/label/freq) into torch batches."""
    def __call__(self, features) -> Dict[str, Any]:
        input_ids = torch.LongTensor([row['src_input'] for row in features])
        token_types = torch.LongTensor([row['segment'] for row in features])
        batch = {
            'input_ids': input_ids,
            'attention_mask': (input_ids > 0).float(),  # 0 is the PAD id
            'token_type_ids': token_types,
        }
        # Evaluation metadata is optional; each key is added only if present.
        first = features[0]
        for key, batch_key in (('qid', 'qids'), ('label', 'labels'), ('freq', 'freqs')):
            if key in first:
                batch[batch_key] = torch.LongTensor([row[key] for row in features])
        return batch
###################Test dataset################################
class TestDataset(Dataset):
    """Map-style evaluation dataset over annotated or logged-click data.

    'annotate' rows yield 5-tuples (tokens, segments, qid, label, freq bucket);
    'click' rows yield 4-tuples (tokens, segments, qid, click_label), capped
    at `buffer_size` records.
    """
    def __init__(self, fpath, max_seq_len, data_type, buffer_size=300000):
        # buffer_size: cap on the number of click records loaded for testing.
        self.buffer_size = buffer_size
        self.max_seq_len = max_seq_len
        self.data_type = data_type
        if data_type == 'annotate':
            self.buffer = self.load_annotate_data(fpath)
            #self.buffer = self.buffer[:10000]
        elif data_type == 'click':
            self.buffer = self.load_click_data(fpath)
        #self.total_freqs = None
    def __len__(self):
        return len(self.buffer)
    def __getitem__(self, index):
        """Return one record as a dict; click records have no 'freq' key."""
        #return self.buffer[index]
        if len(self.buffer[index]) == 4:
            src_input, segment, qid, label = self.buffer[index]
            sample = {'src_input': src_input, 'segment': segment,
                      'qid': qid, 'label': label}
        else:
            src_input, segment, qid, label, freq = self.buffer[index]
            sample = {'src_input': src_input, 'segment': segment,
                      'qid':qid, 'label':label, 'freq':freq}
        return sample
    def load_annotate_data(self, fpath):
        """Parse the annotated TSV and bucket query frequency into {0,1,2}."""
        logger.info(f'load annotated data from {fpath}')
        #total_qids = []
        buffer = []
        #total_labels = []
        #total_freqs = []
        for line in open(fpath, 'rb'):
            line_list = line.strip(b'\n').split(b'\t')
            qid, query, title, content, label, freq = line_list
            # Bucket the raw frequency rank into high / mid / tail.
            if 0 <= int(freq) <= 2: # high freq
                freq = 0
            elif 3 <= int(freq) <= 6: # mid freq
                freq = 1
            elif 7 <= int(freq): # tail
                freq = 2
            #total_qids.append(int(qid))
            #total_labels.append(int(label))
            #total_freqs.append(freq)
            src_input, src_segment = process_data(query, title, content, self.max_seq_len)
            buffer.append([src_input, src_segment, int(qid), int(label), freq])
        return buffer#, total_qids, total_labels, total_freqs
    def load_click_data(self, fpath):
        """Parse a click-log shard; queries are numbered by order of appearance."""
        logger.info(f'load logged click data from {fpath}')
        buffer = []
        #total_qids = []
        #total_labels = []
        cur_qids = 0
        for line in open(fpath, 'rb'):
            line_list = line.strip(b'\n').split(b'\t')
            if len(line_list) == 3: # new query
                self.cur_query = line_list[1]
                cur_qids += 1
            elif len(line_list) > 6: # urls
                position, title, content, click_label = line_list[0], line_list[2], line_list[3], line_list[5]
                try:
                    src_input, src_segment = process_data(self.cur_query, title, content,
                                                          self.max_seq_len)
                    buffer.append([src_input, src_segment, cur_qids, int(click_label)])
                    #total_qids.append(cur_qids)
                    #total_labels.append(int(click_label))
                except:
                    # Best-effort: skip malformed rows silently.
                    pass
            if len(buffer) >= self.buffer_size: # we use 300,000 click records for test
                break
        return buffer#, total_qids, total_labels
| 15,597 | 39.201031 | 161 | py |
Tencent_wsdm_cup2023 | Tencent_wsdm_cup2023-main/pytorch_pretrain/mt_pretrain/trainer.py | # -*- coding: utf-8 -*-
# @Time : 2022/10/26 16:51
# @Author : Xiangsheng Li
# @File : trainer.py
import os
from typing import Dict, List, Tuple, Optional, Any, Union
import torch
import torch.distributed as dist
from torch import nn, Tensor
from torch.cuda.amp import autocast
import torch.nn.functional as F
from transformers.trainer import Trainer
from transformers.trainer_utils import EvalLoopOutput, has_length
from transformers.trainer_pt_utils import nested_concat, nested_numpify, nested_detach
from torch.utils.data import DataLoader
from mt_pretrain.dataset import TestDataCollator
import logging
from tqdm.auto import tqdm
from models.metrics import evaluate_all_metric
logger = logging.getLogger(__name__)
class Pretrainer(Trainer):
    """HuggingFace Trainer subclass for joint MLM + CTR pre-training.

    Overrides loss computation (sum of the two model losses), per-dataset
    evaluation with dataset-specific label names, prediction without loss,
    and an eval dataloader that uses TestDataCollator.
    """
    def compute_loss(self,model, inputs):
        """Sum the model's MLM and CTR losses; log both at logging_steps intervals."""
        outputs, mlm_loss, ctr_loss = model(**inputs)
        if self.state.global_step % self.args.logging_steps == 0:
            self.log({'MLM Loss':mlm_loss.item(), 'CTR Loss': ctr_loss.item()})
        loss = mlm_loss + ctr_loss
        return loss
    def _maybe_log_save_evaluate(self, tr_loss, model, trial, epoch, ignore_keys_for_eval):
        """Copy of the Trainer hook, extended to evaluate a dict of eval datasets.

        For the 'click' dataset only (qids, labels) are collected; annotated
        datasets additionally collect per-query frequency buckets ('freqs').
        """
        if self.control.should_log:
            logs: Dict[str, float] = {}
            # all_gather + mean() to get average loss over all processes
            tr_loss_scalar = self._nested_gather(tr_loss).mean().item()
            # reset tr_loss to zero
            tr_loss -= tr_loss
            logs["loss"] = round(tr_loss_scalar / (self.state.global_step - self._globalstep_last_logged), 4)
            logs["learning_rate"] = self._get_learning_rate()
            self._total_loss_scalar += tr_loss_scalar
            self._globalstep_last_logged = self.state.global_step
            self.store_flos()
            self.log(logs)
        metrics = {}
        if self.control.should_evaluate:
            if isinstance(self.eval_dataset, dict):
                for eval_dataset_name, eval_dataset in self.eval_dataset.items():
                    # label_names steers which batch keys prediction_step collects.
                    if eval_dataset_name == 'click':
                        self.label_names = ['qids', 'labels']
                    else:
                        self.label_names = ['qids', 'labels', 'freqs']
                    local_metrics = self.evaluate(
                        eval_dataset=eval_dataset,
                        ignore_keys=ignore_keys_for_eval,
                        metric_key_prefix=f"eval_{eval_dataset_name}",
                    )
                    #logger.info(local_metrics)
                    metrics.update(local_metrics)
            else:
                metrics = self.evaluate(ignore_keys=ignore_keys_for_eval)
                #logger.info(metrics)
            self._report_to_hp_search(trial, epoch, metrics)#self.state.global_step
        if self.control.should_save:
            self._save_checkpoint(model, trial, metrics=metrics)
            self.control = self.callback_handler.on_save(self.args, self.state, self.control)
    def prediction_step(
        self,
        model,
        inputs: Dict[str, Union[torch.Tensor, Any]],
        prediction_loss_only: bool,
        ignore_keys: Optional[List[str]] = None,
    ) -> Tuple[Optional[torch.Tensor], Optional[torch.Tensor], Optional[torch.Tensor]]:
        """Forward pass without loss: returns (None, logits, labels)."""
        assert prediction_loss_only == False
        assert ignore_keys is None
        #inputs = self._prepare_inputs(inputs)
        has_labels = all(inputs.get(k) is not None for k in self.label_names)
        inputs = self._prepare_inputs(inputs)
        # labels may be popped when computing the loss (label smoothing for instance) so we grab them first.
        if has_labels:
            labels = nested_detach(tuple(inputs.get(name) for name in self.label_names))
            if len(labels) == 1:
                labels = labels[0]
        else:
            labels = None
        with torch.no_grad():
            loss = None
            with self.autocast_smart_context_manager():
                logits = model(**inputs).detach().contiguous()
        return (loss, logits, labels)
    def get_eval_dataloader(self, eval_dataset) -> DataLoader:
        """Eval dataloader built with TestDataCollator (keeps qid/label/freq keys)."""
        # use DataCollator for eval dataset
        eval_dataset = eval_dataset if eval_dataset is not None else self.eval_dataset
        data_collator = TestDataCollator()
        eval_sampler = self._get_eval_sampler(eval_dataset)
        return DataLoader(
            eval_dataset,
            sampler=eval_sampler,
            batch_size=self.args.eval_batch_size,
            collate_fn=data_collator,
            drop_last=self.args.dataloader_drop_last,
            num_workers=self.args.dataloader_num_workers,
            pin_memory=self.args.dataloader_pin_memory,
        )
| 4,701 | 37.540984 | 109 | py |
Tencent_wsdm_cup2023 | Tencent_wsdm_cup2023-main/pytorch_pretrain/pretrain/dataset.py | # -*- coding: utf-8 -*-
# @Time : 2022/10/25 21:07
# @Author : Xiangsheng Li
# @File : dataset.py
import sys,os
import random
import collections
from models.utils import SPECIAL_TOKENS
import logging
import numpy as np
import torch
from torch.utils.data import Dataset,IterableDataset
from dataclasses import dataclass
from typing import List, Dict, Any
logger = logging.getLogger(__name__)
MaskedLmInstance = collections.namedtuple("MaskedLmInstance", ["index", "label"])
class MaskingOp():
    """Whole-word(-unigram) masking for the MLM objective (pretrain variant).

    Only tokens after the first [SEP] (the document side) are candidates.
    With `unigram_set`, the longest span present in the set is masked as one
    unit (greedy longest-match-first); otherwise single tokens are used.
    """
    def __init__(self, masked_lm_prob, max_predictions_per_seq, vocab_list, unigram_set=None):
        # masked_lm_prob: fraction of candidate units to mask.
        # max_predictions_per_seq: hard cap on masked positions per sequence.
        # vocab_list: pool of token ids for the 10% random-replacement case.
        # unigram_set: optional set of token-id tuples treated as whole words.
        self.masked_lm_prob = masked_lm_prob
        self.max_predictions_per_seq = max_predictions_per_seq
        self.vocab_list = vocab_list
        self.unigram_set = unigram_set
    def create_masked_lm_predictions(self, tokens):
        """Creates the predictions for the masked LM objective. This is mostly copied from the Google BERT repo, but
        with several refactors to clean it up and remove a lot of unnecessary variables.

        Mutates `tokens` in place. Returns (tokens, masked_token_labels,
        mask_indices) with mask_indices sorted ascending and
        masked_token_labels holding the original token ids.
        """
        cand_indices = []
        START_FROM_DOC = False
        start = 0
        #for (i, token) in enumerate(tokens): # token_ids
        while start < len(tokens):
            token = tokens[start]
            if token == SPECIAL_TOKENS['SEP']: # SEP
                # Everything from the first SEP onward is document text.
                START_FROM_DOC = True
                start += 1
                continue
            if token in SPECIAL_TOKENS.values(): # CLS
                start += 1
                continue
            if not START_FROM_DOC:
                # Skip query tokens: only document tokens are masked.
                start += 1
                continue
            if self.unigram_set is not None:
                # Greedy longest-match: shrink `end` until the span is a known
                # unigram or degenerates to a single token.
                end = len(tokens)
                while start < end:
                    if tuple(tokens[start:end]) in self.unigram_set or (start + 1 == end):
                        cand_indices.append(list(range(start,end)))
                        break
                    end -= 1
                start = end
            else:
                cand_indices.append([start])
                start += 1
        num_to_mask = min(self.max_predictions_per_seq, max(1, int(round(len(cand_indices) * self.masked_lm_prob))))
        random.shuffle(cand_indices)
        masked_lms = []
        covered_indexes = set()
        for index_set in cand_indices:
            if len(masked_lms) >= num_to_mask:
                break
            # If adding a whole-word mask would exceed the maximum number of
            # predictions, then just skip this candidate.
            if len(masked_lms) + len(index_set) > num_to_mask:
                continue
            is_any_index_covered = False
            for index in index_set:
                if index in covered_indexes:
                    is_any_index_covered = True
                    break
            if is_any_index_covered:
                continue
            for index in index_set:
                covered_indexes.add(index)
                # 80% of the time, replace with [MASK]
                if random.random() < 0.8:
                    masked_token = SPECIAL_TOKENS['MASK']#103
                else:
                    # 10% of the time, keep original
                    if random.random() < 0.5:
                        masked_token = tokens[index]
                    # 10% of the time, replace with random word
                    else:
                        masked_token = random.choice(self.vocab_list)
                masked_lms.append(MaskedLmInstance(index=index, label=tokens[index]))
                tokens[index] = masked_token
        assert len(masked_lms) <= num_to_mask
        masked_lms = sorted(masked_lms, key=lambda x: x.index)
        mask_indices = [p.index for p in masked_lms]
        masked_token_labels = [p.label for p in masked_lms]
        return tokens, masked_token_labels, mask_indices
def process_data(query, title, content, max_seq_len, masking_obj=None):
    """ process [query, title, content] into a tensor
    [CLS] + query + [SEP] + title + [SEP] + content + [SEP] + [PAD]

    query/title/content are b'\\x01'-separated byte strings of token ids;
    every id is shifted by +10 to make room for the special tokens.
    Returns (token_ids, segment_ids), or — when `masking_obj` is given —
    (masked_token_ids, segment_ids, lm_label_array) where lm_label_array
    holds the original id at each masked position and -1 elsewhere.
    """
    data = [SPECIAL_TOKENS['CLS']]
    segment = [0]
    query_splits = [item for item in query.split(b'\x01') if len(item.strip()) > 0]
    data = data + [int(item) + 10 for item in query_splits]  # query
    data = data + [SPECIAL_TOKENS['SEP']]
    segment = segment + [0] * (len(query_splits) + 1)
    title_splits = [item for item in title.split(b'\x01') if len(item.strip()) > 0]
    data = data + [int(item) + 10 for item in title_splits]  # title
    data = data + [SPECIAL_TOKENS['SEP']]
    segment = segment + [1] * (len(title_splits) + 1)
    content_splits = [item for item in content.split(b'\x01') if len(item.strip()) > 0]
    data = data + [int(item) + 10 for item in content_splits]  # content
    data = data + [SPECIAL_TOKENS['SEP']]
    segment = segment + [1] * (len(content_splits) + 1)
    # Pad / truncate token ids to exactly max_seq_len.
    if len(data) < max_seq_len:
        data += [SPECIAL_TOKENS['PAD']] * (max_seq_len - len(data))
    else:
        data = data[:max_seq_len]
    # Pad / truncate segment ids to exactly max_seq_len.
    if len(segment) < max_seq_len:
        segment += [1] * (max_seq_len - len(segment))
    else:
        segment = segment[:max_seq_len]
    if masking_obj is not None:
        token_ids, masked_lm_ids, masked_lm_positions = masking_obj.create_masked_lm_predictions(data)
        # FIX: `np.int` was removed in NumPy 1.24; int64 matches the
        # torch.LongTensor conversion applied by the collators downstream.
        lm_label_array = np.full(max_seq_len, dtype=np.int64, fill_value=-1)
        lm_label_array[masked_lm_positions] = masked_lm_ids
        return token_ids, segment, lm_label_array
    else:
        return data, segment
class TrainDatasetBase(IterableDataset):
    """Base iterable dataset that streams click-log shards (part-*) from a directory.

    Rows belonging to one query are buffered; when the next query row is seen,
    `yield_data` (implemented by subclasses) turns the buffer into a training
    sample. Files are sharded across DataLoader workers by index.
    """
    def __init__(self, directory_path, args):
        self.directory_path = directory_path
        self.files = [f for f in os.listdir(self.directory_path) if f.startswith('part')]
        random.shuffle(self.files)
        self.cur_query = "#"
        self.max_seq_len = args.max_seq_len  # e.g. 128
        self.vocab_size = args.vocab_size  # e.g. 22000
        self.vocab_list = list(range(self.vocab_size))
        # FIX: tolerate args.unigram_dict_addr being None (whole-word masking
        # disabled), matching mt_pretrain.dataset.TrainDatasetBase; previously
        # open(None) raised TypeError. Ids are shifted by +10 like process_data.
        if args.unigram_dict_addr is not None:
            self.unigram_set = set(
                tuple(map(lambda t: int(t) + 10, line.strip().split('\x01')))
                for line in open(args.unigram_dict_addr) if len(line.strip()) > 0
            )
        else:
            self.unigram_set = None
        self.masking_obj = MaskingOp(args.masked_lm_prob, args.max_predictions_per_seq, self.vocab_list, self.unigram_set)

    def __iter__(self):
        """Yield training samples, grouping click-log rows per query."""
        buffer_per_query = []
        info = torch.utils.data.get_worker_info()
        if info is None:
            worker_num = 1
            worker_id = 0
        else:
            worker_num = info.num_workers
            worker_id = info.id
        # Shard files across DataLoader workers so each file is read once.
        local_files = [f for i, f in enumerate(self.files) if i % worker_num == worker_id]
        for i, file in enumerate(local_files):
            logger.info(f'load file: {file}')
            if file == 'part-00000':  # part-00000.gz is held out for evaluation
                continue
            for line in open(os.path.join(self.directory_path, file), 'rb'):
                line_list = line.strip(b'\n').split(b'\t')
                if len(line_list) == 3:  # a query row starts a new group
                    self.cur_query = line_list[1]
                    if len(buffer_per_query) > 0:
                        data = self.yield_data(buffer_per_query)
                        if data is not None:
                            yield data
                        buffer_per_query = []
                elif len(line_list) > 6:  # a url (document) row
                    position, title, content, click_label = line_list[0], line_list[2], line_list[3], line_list[5]
                    try:
                        src_input, segment, masked_lm_labels = process_data(self.cur_query, title, content,
                                                                            self.max_seq_len, self.masking_obj)
                        sample = {'src_input': src_input, 'segment': segment,
                                  'masked_lm_labels': masked_lm_labels, 'click_label': float(click_label)}
                        buffer_per_query.append(sample)
                    except Exception as e:
                        # Best-effort: malformed rows are skipped silently.
                        pass

    def yield_data(self, buffer_per_query):
        """Template method: subclasses build one sample from a query's rows."""
        pass
class PreTrainDatasetGroupwise(TrainDatasetBase):
    """Groupwise pre-training dataset: each yielded item is a list of
    `train_group_size` samples for one query — one clicked result first,
    followed by `train_group_size - 1` non-clicked results."""

    def __init__(self, directory_path, train_group_size, args):
        super(PreTrainDatasetGroupwise, self).__init__(directory_path, args)
        self.train_group_size = train_group_size

    def yield_data(self, buffer_per_query):
        """Build one (positive, negatives...) group, or None if the query has
        no clicked or no non-clicked results."""
        random.shuffle(buffer_per_query)
        clicked = [rec for rec in buffer_per_query if rec['click_label'] > 0]
        skipped = [rec for rec in buffer_per_query if not (rec['click_label'] > 0)]
        if not clicked or not skipped:
            return None
        anchor = random.choice(clicked)
        n_negatives = self.train_group_size - 1
        if len(skipped) < n_negatives:
            negatives = random.choices(skipped, k=n_negatives)  # with replacement
        else:
            negatives = random.sample(skipped, k=n_negatives)   # without replacement
        return [anchor] + negatives
@dataclass
class DataCollator:
    """Collate a list of sample dicts into batched LongTensors.

    Groupwise datasets yield a list of groups (list of lists); these are
    flattened so every group member becomes one batch row. Optional fields
    (masked_lm_labels, qid, label, freq) are batched only when present on
    the first sample.
    """

    def __call__(self, features) -> Dict[str, Any]:
        # Flatten groupwise input: [[s1, s2], [s3, s4]] -> [s1, s2, s3, s4].
        if isinstance(features[0], list):
            features = sum(features, [])
        token_ids = torch.LongTensor([sample['src_input'] for sample in features])
        batch_data = {
            'input_ids': token_ids,
            # Token id 0 is PAD, so any positive id is a real token.
            'attention_mask': (token_ids > 0).float(),
            'token_type_ids': torch.LongTensor([sample['segment'] for sample in features]),
        }
        # Optional per-sample fields: (key on the sample, key in the batch).
        optional_fields = (('masked_lm_labels', 'masked_lm_labels'),
                           ('qid', 'qids'),
                           ('label', 'labels'),
                           ('freq', 'freqs'))
        for src_key, dst_key in optional_fields:
            if src_key in features[0]:
                batch_data[dst_key] = torch.LongTensor([sample[src_key] for sample in features])
        return batch_data
###################Test dataset################################
class TestDataset(Dataset):
    """Map-style evaluation dataset for the ranking model.

    Two file formats are supported:
      * 'annotate': TSV rows (qid, query, title, content, label, freq) with
        human relevance labels; raw freq is bucketed into high/mid/tail.
      * 'click': logged search sessions (same layout as the training logs);
        only the first `buffer_size` click records are kept.
    """

    def __init__(self, fpath, max_seq_len, data_type, buffer_size=300000):
        self.buffer_size = buffer_size
        self.max_seq_len = max_seq_len
        self.data_type = data_type
        if data_type == 'annotate':
            self.buffer = self.load_annotate_data(fpath)
        elif data_type == 'click':
            self.buffer = self.load_click_data(fpath)
        else:
            # Fail fast: previously an unknown data_type left self.buffer
            # unset and surfaced later as a confusing AttributeError.
            raise ValueError(f"unknown data_type: {data_type!r} (expected 'annotate' or 'click')")

    def __len__(self):
        return len(self.buffer)

    def __getitem__(self, index):
        # Click records have 4 fields; annotated records carry an extra freq bucket.
        if len(self.buffer[index]) == 4:
            src_input, segment, qid, label = self.buffer[index]
            sample = {'src_input': src_input, 'segment': segment,
                      'qid': qid, 'label': label}
        else:
            src_input, segment, qid, label, freq = self.buffer[index]
            sample = {'src_input': src_input, 'segment': segment,
                      'qid': qid, 'label': label, 'freq': freq}
        return sample

    def load_annotate_data(self, fpath):
        """Parse the annotated TSV into [src_input, segment, qid, label, freq] rows."""
        logger.info(f'load annotated data from {fpath}')
        buffer = []
        for line in open(fpath, 'rb'):
            line_list = line.strip(b'\n').split(b'\t')
            qid, query, title, content, label, freq = line_list
            # Bucket raw query frequency: high (0), mid (1), tail (2).
            if 0 <= int(freq) <= 2:
                freq = 0
            elif 3 <= int(freq) <= 6:
                freq = 1
            elif 7 <= int(freq):
                freq = 2
            src_input, src_segment = process_data(query, title, content, self.max_seq_len)
            buffer.append([src_input, src_segment, int(qid), int(label), freq])
        return buffer

    def load_click_data(self, fpath):
        """Parse logged click data into [src_input, segment, qid, click_label] rows."""
        logger.info(f'load logged click data from {fpath}')
        buffer = []
        cur_qids = 0
        for line in open(fpath, 'rb'):
            line_list = line.strip(b'\n').split(b'\t')
            if len(line_list) == 3:  # query header row
                self.cur_query = line_list[1]
                cur_qids += 1
            elif len(line_list) > 6:  # url/result row
                position, title, content, click_label = line_list[0], line_list[2], line_list[3], line_list[5]
                try:
                    src_input, src_segment = process_data(self.cur_query, title, content,
                                                          self.max_seq_len)
                    buffer.append([src_input, src_segment, cur_qids, int(click_label)])
                except Exception:
                    # Skip malformed rows. Was a bare `except:`, which also
                    # swallowed SystemExit/KeyboardInterrupt.
                    pass
            if len(buffer) >= self.buffer_size:  # we use 300,000 click records for test
                break
        return buffer
| 13,414 | 39.044776 | 160 | py |
Tencent_wsdm_cup2023 | Tencent_wsdm_cup2023-main/pytorch_pretrain/pretrain/trainer.py | # -*- coding: utf-8 -*-
# @Time : 2022/10/26 16:51
# @Author : Xiangsheng Li
# @File : trainer.py
import os
from typing import Dict, List, Tuple, Optional, Any, Union
import torch
import torch.distributed as dist
from torch import nn, Tensor
from torch.cuda.amp import autocast
import torch.nn.functional as F
from transformers.trainer import Trainer
from transformers.trainer_utils import EvalLoopOutput, has_length
from transformers.trainer_pt_utils import nested_concat, nested_numpify, nested_detach
from torch.utils.data import DataLoader
import logging
from tqdm.auto import tqdm
from models.metrics import evaluate_all_metric
logger = logging.getLogger(__name__)
class Pretrainer(Trainer):
    """Hugging Face Trainer subclass for joint MLM + CTR pre-training.

    Differences from the stock Trainer:
      * `compute_loss` sums the model's MLM and CTR losses and logs both.
      * evaluation accepts a dict of eval datasets, switching `label_names`
        per dataset ('click' logs have no freq buckets).
      * `prediction_step` returns raw logits with no loss (metrics are
        ranking metrics computed elsewhere).
    """

    def compute_loss(self,model, inputs):
        """Total loss = MLM loss + CTR loss; components logged at the logging cadence."""
        outputs, mlm_loss, ctr_loss = model(**inputs)
        if self.state.global_step % self.args.logging_steps == 0:
            self.log({'MLM Loss':mlm_loss.item(), 'CTR Loss': ctr_loss.item()})
        loss = mlm_loss + ctr_loss
        return loss

    def _maybe_log_save_evaluate(self, tr_loss, model, trial, epoch, ignore_keys_for_eval):
        # Override of the Trainer hook: adds per-eval-dataset label_names handling.
        if self.control.should_log:
            logs: Dict[str, float] = {}
            # all_gather + mean() to get average loss over all processes
            tr_loss_scalar = self._nested_gather(tr_loss).mean().item()
            # reset tr_loss to zero
            tr_loss -= tr_loss
            logs["loss"] = round(tr_loss_scalar / (self.state.global_step - self._globalstep_last_logged), 4)
            logs["learning_rate"] = self._get_learning_rate()
            self._total_loss_scalar += tr_loss_scalar
            self._globalstep_last_logged = self.state.global_step
            self.store_flos()
            self.log(logs)
        metrics = {}
        if self.control.should_evaluate:
            if isinstance(self.eval_dataset, dict):
                for eval_dataset_name, eval_dataset in self.eval_dataset.items():
                    # Click logs carry (qid, label); annotated data also has freq buckets.
                    if eval_dataset_name == 'click':
                        self.label_names = ['qids', 'labels']
                    else:
                        self.label_names = ['qids', 'labels', 'freqs']
                    local_metrics = self.evaluate(
                        eval_dataset=eval_dataset,
                        ignore_keys=ignore_keys_for_eval,
                        metric_key_prefix=f"eval_{eval_dataset_name}",
                    )
                    metrics.update(local_metrics)
            else:
                metrics = self.evaluate(ignore_keys=ignore_keys_for_eval)
            self._report_to_hp_search(trial, epoch, metrics)#self.state.global_step
        if self.control.should_save:
            self._save_checkpoint(model, trial, metrics=metrics)
            self.control = self.callback_handler.on_save(self.args, self.state, self.control)

    def prediction_step(
        self,
        model,
        inputs: Dict[str, Union[torch.Tensor, Any]],
        prediction_loss_only: bool,
        ignore_keys: Optional[List[str]] = None,
    ) -> Tuple[Optional[torch.Tensor], Optional[torch.Tensor], Optional[torch.Tensor]]:
        """Return (None, logits, labels); loss is never computed at eval time."""
        assert prediction_loss_only == False
        assert ignore_keys is None
        has_labels = all(inputs.get(k) is not None for k in self.label_names)
        inputs = self._prepare_inputs(inputs)
        # labels may be popped when computing the loss (label smoothing for instance) so we grab them first.
        if has_labels:
            labels = nested_detach(tuple(inputs.get(name) for name in self.label_names))
            if len(labels) == 1:
                labels = labels[0]
        else:
            labels = None
        # The model's eval path only needs raw inputs (no masked_lm_labels).
        eval_input_keys = ['input_ids', 'attention_mask', 'token_type_ids']
        eval_inputs = {k: inputs[k] for k in eval_input_keys}
        with torch.no_grad():
            loss = None
            with self.autocast_smart_context_manager():
                logits = model(**eval_inputs).detach().contiguous()
        return (loss, logits, labels)
| 4,014 | 37.605769 | 109 | py |
Tencent_wsdm_cup2023 | Tencent_wsdm_cup2023-main/pytorch_pretrain/finetune/dataset.py | # -*- coding: utf-8 -*-
# @Time : 2022/11/1 16:22
# @Author : Xiangsheng Li
# @File : dataset.py
import sys,os
import random
import collections
from models.utils import SPECIAL_TOKENS
import logging
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from dataclasses import dataclass
from typing import List, Dict, Any
from collections import defaultdict
from tqdm.auto import tqdm
from pretrain.dataset import (
process_data,
TestDataset
)
class PairwiseFinetuneDataset(Dataset):
    """Pairwise fine-tuning dataset over human-annotated relevance data.

    One item per query with at least one relevant (label >= 2) document:
    a randomly chosen relevant doc plus a lower-graded doc for the same
    query (or, when none exists, a random doc from the whole corpus).
    """

    def __init__(self, fpath):
        self.queries = {}
        self.docs = {}
        self.q_rels = defaultdict(list)    # qid -> [(docid, grade)] with grade >= 2
        self.q_irrels = defaultdict(list)  # qid -> [(docid, grade)] with grade < 2
        self._load_annotated_data(fpath)
        self.idx2qids = dict(enumerate(self.q_rels.keys()))
        self.all_docids = list(self.docs.keys())

    def _load_annotated_data(self, fpath):
        """Read the TSV, dedupe documents by (title, content) and split
        each query's judgments into relevant / non-relevant pools."""
        doc2docid = {}
        for raw_line in tqdm(open(fpath, 'rb'), desc='load annotated data'):
            qid, query, title, content, label, freq = raw_line.strip(b'\n').split(b'\t')
            self.queries.setdefault(qid, query)
            key = (title, content)
            if key not in doc2docid:
                doc2docid[key] = len(doc2docid)
            docid = doc2docid[key]
            self.docs.setdefault(docid, key)
            grade = int(label)
            target = self.q_rels if grade >= 2 else self.q_irrels
            target[qid].append((docid, grade))

    def __len__(self):
        return len(self.q_rels)

    def __getitem__(self, index):
        qid = self.idx2qids[index]
        pos_docid, pos_grade = random.choice(self.q_rels[qid])
        # Negatives: any other judged doc for this query with a strictly lower grade.
        candidates = [
            docid
            for docid, grade in self.q_rels[qid] + self.q_irrels[qid]
            if docid != pos_docid and grade < pos_grade
        ]
        neg_pool = candidates if candidates else self.all_docids
        neg_docid = random.choice(neg_pool)
        return {'query': self.queries[qid],
                'rel_doc': self.docs[pos_docid],
                'irrel_doc': self.docs[neg_docid]}
@dataclass
class PairwiseDataCollator:
    """Collate pairwise samples into one batch: rows are interleaved so even
    rows hold the relevant document and odd rows its irrelevant partner."""

    def __init__(self, max_seq_len):
        self.max_seq_len = max_seq_len

    def __call__(self, features: Dict[str, Any]) -> Dict[str, Any]:
        token_rows, segment_rows = [], []
        for sample in features:
            # Positive first, then negative — the loss relies on this order.
            for doc_key in ('rel_doc', 'irrel_doc'):
                title, content = sample[doc_key][0], sample[doc_key][1]
                tokens, segments = process_data(sample['query'], title, content,
                                                self.max_seq_len)
                token_rows.append(tokens)
                segment_rows.append(segments)
        input_ids = torch.LongTensor(token_rows)
        return {
            'input_ids': input_ids,
            'attention_mask': (input_ids > 0).float(),  # token id 0 is PAD
            'token_type_ids': torch.LongTensor(segment_rows),
        }
| 3,483 | 34.191919 | 111 | py |
Tencent_wsdm_cup2023 | Tencent_wsdm_cup2023-main/pytorch_pretrain/finetune/trainer.py | # -*- coding: utf-8 -*-
# @Time : 2022/11/2 14:38
# @Author : Xiangsheng Li
# @File : trainer.py
import os
from typing import Dict, List, Tuple, Optional, Any, Union
import torch
from torch import nn, Tensor
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torch.nn import Softmax, MarginRankingLoss
from pretrain.trainer import Pretrainer as Trainer
from pretrain.dataset import DataCollator
import logging
logger = logging.getLogger(__name__)
class Finetuner(Trainer):
    """Pairwise fine-tuning trainer: margin ranking loss over interleaved
    (relevant, irrelevant) score pairs; evaluation reuses the plain collator."""

    def __init__(self, *args, **kwargs):
        super(Finetuner, self).__init__(*args, **kwargs)
        self.softmax = Softmax(dim=1)

    def compute_loss(self, model, inputs):
        return self.compute_pair_loss(model, inputs)

    def compute_pair_loss(self, model, inputs):
        """Margin ranking loss; batch rows come as (positive, negative) pairs."""
        scores = model(**inputs).view(-1, 2)
        temperature = self.args.temperature
        if temperature is not None:
            assert temperature > 0
            scores = scores / temperature
        probs = self.softmax(scores)
        rank_loss_fn = MarginRankingLoss(margin=1.0)
        target = torch.ones_like(probs[:, 0])  # positive should outrank negative
        return rank_loss_fn(probs[:, 0], probs[:, 1], target)

    def get_eval_dataloader(self, eval_dataset) -> DataLoader:
        """Evaluation uses the non-pairwise DataCollator."""
        dataset = eval_dataset if eval_dataset is not None else self.eval_dataset
        return DataLoader(
            dataset,
            sampler=self._get_eval_sampler(dataset),
            batch_size=self.args.eval_batch_size,
            collate_fn=DataCollator(),
            drop_last=self.args.dataloader_drop_last,
            num_workers=self.args.dataloader_num_workers,
            pin_memory=self.args.dataloader_pin_memory,
        )
| 1,976 | 28.954545 | 86 | py |
Tencent_wsdm_cup2023 | Tencent_wsdm_cup2023-main/pytorch_pretrain/models/modeling.py | # -*- coding: utf-8 -*-
# @Time : 2022/10/25 12:02
# @Author : Xiangsheng Li
# @File : modeling.py
import torch
from torch import nn, Tensor
import torch.distributed as dist
import torch.nn.functional as F
from transformers import BertModel, BertPreTrainedModel
from transformers.modeling_outputs import MaskedLMOutput
from torch.nn import CrossEntropyLoss
import logging
logger = logging.getLogger(__name__)
from transformers.activations import ACT2FN
class BertPredictionHeadTransform(nn.Module):
    """Per-token transform before the MLM decoder: dense -> activation -> LayerNorm."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        # String activation names resolve through the HF registry; callables pass through.
        self.transform_act_fn = (ACT2FN[config.hidden_act]
                                 if isinstance(config.hidden_act, str)
                                 else config.hidden_act)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    def forward(self, hidden_states):
        return self.LayerNorm(self.transform_act_fn(self.dense(hidden_states)))
class BertLMPredictionHead(nn.Module):
    """Masked-LM output head: transform then project to vocab size.

    The decoder carries no built-in bias; a separate per-token bias parameter
    is attached to it so `resize_token_embeddings` keeps the two in sync.
    """

    def __init__(self, config):
        super().__init__()
        self.transform = BertPredictionHeadTransform(config)
        self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        self.bias = nn.Parameter(torch.zeros(config.vocab_size))
        # Link the two so resizing the vocab also resizes the bias.
        self.decoder.bias = self.bias

    def forward(self, hidden_states):
        return self.decoder(self.transform(hidden_states))
class CTRPretrainingModel(BertPreTrainedModel):
    """BERT-based pre-training model with a joint MLM + CTR objective.

    forward() returns (scores, mlm_loss, ctr_loss) when `masked_lm_labels`
    is provided, otherwise just the raw click scores of shape (bs, 1).
    """

    def __init__(
            self,
            config,
            model_args, data_args, training_args
    ):
        super(CTRPretrainingModel, self).__init__(config)
        self.bert = BertModel(config)
        self.cls = nn.Linear(config.hidden_size, 1)      # click-score head
        self.predictions = BertLMPredictionHead(config)  # MLM head
        self.config = config
        self.model_args, self.data_args, self.training_args = model_args, data_args, training_args
        self.mlm_loss_fct = CrossEntropyLoss(ignore_index=-1)  # -1 marks unmasked positions
        self.cross_entropy = nn.CrossEntropyLoss(reduction='mean')
        self.init_weights()

    def forward(self, input_ids, attention_mask, token_type_ids, masked_lm_labels=None):
        outputs = self.bert(input_ids, attention_mask, token_type_ids, return_dict=True)
        sequence_output = outputs.last_hidden_state  # (bs, seq_len, dim)
        pooler_output = outputs.pooler_output        # (bs, dim)
        prediction_scores = self.cls(pooler_output)  # (bs, 1)
        mlm_loss, ctr_loss = None, None
        if masked_lm_labels is not None:
            lm_prediction_scores = self.predictions(sequence_output)
            mlm_loss = self.mlm_loss_fct(lm_prediction_scores.view(-1, self.config.vocab_size),
                                         masked_lm_labels.view(-1))
            ctr_loss = self.groupwise_ctr_loss(prediction_scores)
        # BUGFIX: was `if mlm_loss and ctr_loss:`. Truthiness of a 0-d tensor is
        # its value, so a legitimately zero loss silently dropped the training
        # return tuple. Compare against None instead.
        if mlm_loss is not None and ctr_loss is not None:
            return prediction_scores, mlm_loss, ctr_loss
        else:
            return prediction_scores

    def groupwise_ctr_loss(self, prediction_scores):
        """Listwise softmax CE: position 0 of each group is the clicked doc."""
        logits = prediction_scores
        if self.training_args.temperature is not None:
            assert self.training_args.temperature > 0
            logits = logits / self.training_args.temperature
        logits = logits.view(
            self.training_args.per_device_train_batch_size,
            self.training_args.train_group_size
        )
        # Only the first column is positive; all other group members are negatives.
        target_label = torch.zeros(self.training_args.per_device_train_batch_size,
                                   dtype=torch.long,
                                   device=logits.device)
        loss = self.cross_entropy(logits, target_label)
        return loss

    def pointwise_ctr_loss(self, prediction_scores, click_labels):
        """Binary cross-entropy between raw scores and 0/1 click labels."""
        click_loss_fct = nn.BCEWithLogitsLoss()
        prediction_scores = torch.squeeze(prediction_scores, dim=-1)
        point_ctr_loss = click_loss_fct(prediction_scores, click_labels)
        return point_ctr_loss

    def pairwise_ctr_loss(self, prediction_scores):
        """Margin ranking loss over interleaved (positive, negative) score pairs."""
        logits = prediction_scores.view(-1, 2)
        if self.training_args.temperature is not None:
            assert self.training_args.temperature > 0
            logits = logits / self.training_args.temperature
        # BUGFIX: Softmax / MarginRankingLoss were referenced as bare names but
        # are not imported in this module — calling this method raised NameError.
        softmax = nn.Softmax(dim=1)
        logits = softmax(logits)
        pos_logits = logits[:, 0]
        neg_logits = logits[:, 1]
        marginloss = nn.MarginRankingLoss(margin=1.0)
        pair_label = torch.ones_like(pos_logits)
        pair_loss = marginloss(pos_logits, neg_logits, pair_label)
        return pair_loss
| 5,193 | 37.474074 | 116 | py |
POMO | POMO-master/OLD_ipynb_ver/POMO_TSP/TORCH_OBJECTS.py |
"""
The MIT License
Copyright (c) Yeong-Dae Kwon
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import torch

# Pick the tensor backend once at import time; the rest of the project uses
# these aliases so tensors land on the GPU automatically when one is present.
# (Force CPU by hard-coding use_cuda = False; pin a device with torch.cuda.set_device.)
use_cuda = torch.cuda.is_available()

_backend = torch.cuda if use_cuda else torch
FloatTensor = _backend.FloatTensor
LongTensor = _backend.LongTensor
ByteTensor = _backend.ByteTensor
BoolTensor = _backend.BoolTensor
Tensor = FloatTensor

device = torch.device("cuda" if use_cuda else "cpu")
| 1,570 | 32.425532 | 77 | py |
POMO | POMO-master/OLD_ipynb_ver/POMO_TSP/source/utilities.py |
"""
The MIT License
Copyright (c) 2020 Yeong-Dae Kwon
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import logging
import os
import time
import datetime
import pytz
import re
import numpy as np
from HYPER_PARAMS import *
from TORCH_OBJECTS import *
########################################
# Get_Logger
########################################
# Fixed timezone used to stamp all log records (KST).
tz = pytz.timezone("Asia/Seoul")

def timetz(*args):
    """`logging.Formatter.converter` hook: current KST time as a struct_time.

    The positional args passed by the logging module (the record timestamp)
    are accepted and deliberately ignored — "now" in KST is always used.
    """
    return datetime.datetime.now(tz).timetuple()
def Get_Logger(SAVE_FOLDER_NAME):
    """Create a unique timestamped result folder and a logger writing to
    both console and `<folder>/log.txt`.

    Returns:
        (logger, result_folder_path)
    """
    # Build ./result/<KST timestamp>__<name>, appending "(k)" until unused.
    stamp = datetime.datetime.now(pytz.timezone("Asia/Seoul")).strftime("%Y%m%d_%H%M__")
    base_path = "./result/{}".format(stamp + SAVE_FOLDER_NAME)
    result_folder_path = base_path
    suffix = 0
    while os.path.exists(result_folder_path):
        suffix += 1
        result_folder_path = "{}({})".format(base_path, suffix)
    os.makedirs(result_folder_path)

    # One logger per folder; both handlers share the KST-converted format.
    logger = logging.getLogger(result_folder_path)
    formatter = logging.Formatter("[%(asctime)s] %(message)s", "%Y-%m-%d %H:%M:%S")
    formatter.converter = timetz
    for handler in (logging.StreamHandler(),
                    logging.FileHandler('{}/log.txt'.format(result_folder_path))):
        handler.setFormatter(formatter)
        logger.addHandler(handler)
    logger.setLevel(level=logging.INFO)
    return logger, result_folder_path
def Extract_from_LogFile(result_folder_path, variable_name):
    """Scan `<result_folder_path>/log.txt` bottom-up for the most recent line
    mentioning `variable_name` and return that line's text from the name onward.

    Args:
        result_folder_path: folder containing `log.txt` (as created by Get_Logger).
        variable_name: literal text to look for (treated literally, not as a regex).

    Returns:
        The matched substring, or the sentinel string
        "Print(No such variable found !!)" when the name never appears.
    """
    logfile_path = '{}/log.txt'.format(result_folder_path)
    with open(logfile_path) as f:
        datafile = f.readlines()
    # BUGFIX: variable_name was interpolated into the regex unescaped (metachars
    # like '[' broke the search), and the '[^\n]+' tail required at least one
    # trailing char, so a name at end-of-line crashed on m.group(0) with m=None.
    pattern = re.compile(re.escape(variable_name) + '[^\n]*')
    # Search from the end so the most recent occurrence wins.
    for line in reversed(datafile):
        m = pattern.search(line)
        if m:
            return m.group(0)
    return "Print(No such variable found !!)"
########################################
# Average_Meter
########################################
class Average_Meter:
    """Running average of tensor values (e.g. losses) accumulated on `device`."""

    def __init__(self):
        self.sum = None
        self.count = None
        self.reset()

    def reset(self):
        """Zero the accumulator."""
        self.sum = torch.tensor(0.).to(device)
        self.count = 0

    def push(self, some_tensor, n_for_rank_0_tensor=None):
        # Tensors carrying grad history would leak memory if accumulated here.
        assert not some_tensor.requires_grad
        if len(some_tensor.shape) == 0:
            # A scalar is assumed to already be an average over n samples.
            self.sum += some_tensor * n_for_rank_0_tensor
            self.count += n_for_rank_0_tensor
        else:
            self.sum += some_tensor.sum()
            self.count += some_tensor.numel()

    def peek(self):
        """Current average without resetting."""
        return (self.sum / self.count).tolist()

    def result(self):
        """Current average, then reset the accumulator."""
        average = self.peek()
        self.reset()
        return average
########################################
# View NN Parameters
########################################
def get_n_params1(model):
    """Print each parameter's element count and shape, then the grand total."""
    total = 0
    for param in model.parameters():
        n_elem = param.numel()
        total += n_elem
        print(n_elem)
        print(param.shape)
    print("Total: {:d}".format(total))
def get_n_params2(model):
    """Print the total number of trainable parameter elements (numpy-based count)."""
    trainable = (p for p in model.parameters() if p.requires_grad)
    print(sum(np.prod(p.size()) for p in trainable))
def get_n_params3(model):
    """Print the total number of trainable parameter elements (torch-native count)."""
    total = sum(p.numel() for p in model.parameters() if p.requires_grad)
    print(total)
def get_structure(model):
    """Print the model's module hierarchy (the nn.Module repr)."""
    print(model)
########################################
# Augment xy data
########################################
def augment_xy_data_by_8_fold(xy_data):
    """Return the 8 symmetries of 2-D coordinates in the unit square.

    Args:
        xy_data: (batch, problem, 2) coordinates in [0, 1].

    Returns:
        (8*batch, problem, 2) tensor, concatenated along the batch axis in the
        order (x,y), (1-x,y), (x,1-y), (1-x,1-y), (y,x), (1-y,x), (y,1-x), (1-y,1-x).
    """
    x = xy_data[:, :, [0]]
    y = xy_data[:, :, [1]]
    # All 8 compositions of coordinate flips and the axis swap
    # (the dihedral symmetry group of the unit square).
    variants = [(x, y), (1 - x, y), (x, 1 - y), (1 - x, 1 - y),
                (y, x), (1 - y, x), (y, 1 - x), (1 - y, 1 - x)]
    return torch.cat([torch.cat(pair, dim=2) for pair in variants], dim=0)
| 5,647 | 26.686275 | 106 | py |
POMO | POMO-master/OLD_ipynb_ver/POMO_TSP/source/travelling_saleman_problem.py |
"""
The MIT License
Copyright (c) 2020 Yeong-Dae Kwon
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
####################################
# EXTERNAL LIBRARY
####################################
import torch
import numpy as np
# For debugging
from IPython.core.debugger import set_trace
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
####################################
# PROJECT VARIABLES
####################################
from HYPER_PARAMS import *
from TORCH_OBJECTS import *
####################################
# DATA
####################################
def TSP_DATA_LOADER__RANDOM(num_sample, num_nodes, batch_size):
    """Build a DataLoader over freshly sampled random TSP instances."""
    random_dataset = TSP_Dataset__Random(num_sample=num_sample, num_nodes=num_nodes)
    return DataLoader(dataset=random_dataset,
                      batch_size=batch_size,
                      shuffle=False,       # data is random anyway
                      num_workers=0,
                      collate_fn=TSP_collate_fn)
class TSP_Dataset__Random(Dataset):
    """Map-style dataset that draws a fresh uniform-random TSP instance on
    every access; `num_sample` only fixes the nominal epoch length."""

    def __init__(self, num_sample, num_nodes):
        self.num_sample = num_sample
        self.num_nodes = num_nodes

    def __getitem__(self, index):
        # A brand-new sample each call; `index` is intentionally ignored.
        return np.random.rand(self.num_nodes, 2)

    def __len__(self):
        return self.num_sample
def TSP_collate_fn(batch):
    # Stack the list of per-instance (num_nodes, 2) arrays into one float
    # tensor via the module-level `Tensor` alias (presumably the CUDA/CPU
    # FloatTensor from TORCH_OBJECTS — confirm the star import).
    node_xy = Tensor(batch)
    return node_xy
####################################
# STATE
####################################
class STATE:
    """Rollout state for a single (non-grouped) TSP trajectory per batch entry."""

    def __init__(self, seq):
        # seq: node coordinates, shape (batch_s, TSP_SIZE, 2)
        self.seq = seq
        self.batch_s = seq.size(0)
        self.current_node = None  # last selected node index per batch entry

        # History
        ####################################
        self.selected_count = 0
        # True where the node has not been visited yet.
        self.available_mask = BoolTensor(np.ones((self.batch_s, TSP_SIZE)))
        # 0 for available nodes, -inf for visited ones (added to logits before softmax).
        self.ninf_mask = Tensor(np.zeros((self.batch_s, TSP_SIZE)))
        # shape = (batch_s, TSP_SIZE)
        self.selected_node_list = LongTensor(np.zeros((self.batch_s, 0)))
        # shape = (batch_s, selected_count)

    def move_to(self, selected_node_idx):
        """Record visiting `selected_node_idx` (shape (batch,)) and update masks in place."""
        self.current_node = selected_node_idx

        # History
        ####################################
        self.selected_count += 1
        self.available_mask[torch.arange(self.batch_s), selected_node_idx] = False
        self.ninf_mask[torch.arange(self.batch_s), selected_node_idx] = -np.inf
        self.selected_node_list = torch.cat((self.selected_node_list, selected_node_idx[:, None]), dim=1)
class GROUP_STATE:
    """Rollout state shared by `group_s` parallel trajectories per instance (POMO)."""

    def __init__(self, group_size, data):
        # data: node coordinates, shape (batch, TSP_SIZE, 2)
        # (passed through unchanged from GROUP_ENVIRONMENT)
        self.batch_s = data.size(0)
        self.group_s = group_size
        self.data = data

        # History
        ####################################
        self.selected_count = 0
        self.current_node = None
        # shape = (batch, group)
        self.selected_node_list = LongTensor(np.zeros((self.batch_s, group_size, 0)))
        # shape = (batch, group, selected_count)

        # Status
        ####################################
        # 0 for unvisited nodes, -inf for visited ones (added to decoder logits).
        self.ninf_mask = Tensor(np.zeros((self.batch_s, group_size, TSP_SIZE)))
        # shape = (batch, group, TSP_SIZE)

    def move_to(self, selected_idx_mat):
        """Record one selection per (batch, group) pair and mask it in place."""
        # selected_idx_mat.shape = (batch, group)

        # History
        ####################################
        self.selected_count += 1
        self.current_node = selected_idx_mat
        self.selected_node_list = torch.cat((self.selected_node_list, selected_idx_mat[:, :, None]), dim=2)

        # Status
        ####################################
        # Advanced indexing: for every (batch, group) pair, set the entry of its
        # newly visited node to -inf.
        batch_idx_mat = torch.arange(self.batch_s)[:, None].expand(self.batch_s, self.group_s)
        group_idx_mat = torch.arange(self.group_s)[None, :].expand(self.batch_s, self.group_s)
        self.ninf_mask[batch_idx_mat, group_idx_mat, selected_idx_mat] = -np.inf
####################################
# ENVIRONMENT
####################################
class ENVIRONMENT:
    """Single-rollout TSP environment over a batch of instances."""

    def __init__(self, seq):
        # seq: (batch, TSP_SIZE, 2) node coordinates
        self.seq = seq
        self.batch_s = seq.size(0)
        self.state = None

    def reset(self):
        """Start a fresh rollout; returns (state, reward=None, done=False)."""
        self.state = STATE(self.seq)
        return self.state, None, False

    def step(self, selected_node_idx):
        """Visit one node per batch entry; selected_node_idx: (batch,)."""
        self.state.move_to(selected_node_idx)
        done = self.state.selected_count == TSP_SIZE
        # Minus sign: shorter tours receive higher reward.
        reward = -self._get_travel_distance() if done else None
        return self.state, reward, done

    def _get_travel_distance(self):
        """Closed-tour length per batch entry, shape (batch,)."""
        order = self.state.selected_node_list.unsqueeze(2).expand(-1, TSP_SIZE, 2)
        tour = self.seq.gather(dim=1, index=order)         # (batch, TSP_SIZE, 2)
        next_stop = tour.roll(dims=1, shifts=-1)           # wraps back to the start
        leg_len = ((tour - next_stop) ** 2).sum(2).sqrt()  # (batch, TSP_SIZE)
        return leg_len.sum(1)
class GROUP_ENVIRONMENT:
    """TSP environment rolling out `group_s` trajectories per instance (POMO)."""

    def __init__(self, data):
        # data: (batch, TSP_SIZE, 2) node coordinates
        self.data = data
        self.batch_s = data.size(0)
        self.group_s = None
        self.group_state = None

    def reset(self, group_size):
        """Start a fresh grouped rollout; returns (state, reward=None, done=False)."""
        self.group_s = group_size
        self.group_state = GROUP_STATE(group_size=group_size, data=self.data)
        return self.group_state, None, False

    def step(self, selected_idx_mat):
        """Visit one node per (batch, group) pair; selected_idx_mat: (batch, group)."""
        self.group_state.move_to(selected_idx_mat)
        done = (self.group_state.selected_count == TSP_SIZE)
        # Minus sign: shorter tours receive higher reward.
        reward = -self._get_group_travel_distance() if done else None
        return self.group_state, reward, done

    def _get_group_travel_distance(self):
        """Closed-tour length per trajectory, shape (batch, group)."""
        gather_idx = self.group_state.selected_node_list.unsqueeze(3).expand(self.batch_s, -1, TSP_SIZE, 2)
        # (batch, group, TSP_SIZE, 2)
        all_coords = self.data[:, None, :, :].expand(self.batch_s, self.group_s, TSP_SIZE, 2)
        tour = all_coords.gather(dim=2, index=gather_idx)
        next_stop = tour.roll(dims=2, shifts=-1)           # wraps back to the start
        leg_len = ((tour - next_stop) ** 2).sum(3).sqrt()  # (batch, group, TSP_SIZE)
        return leg_len.sum(2)
| 7,808 | 30.873469 | 112 | py |
POMO | POMO-master/OLD_ipynb_ver/POMO_TSP/source/MODEL__Actor/grouped_actors.py |
"""
The MIT License
Copyright (c) 2020 Yeong-Dae Kwon
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
# For debugging
from IPython.core.debugger import set_trace
# Hyper Parameters
from HYPER_PARAMS import *
from TORCH_OBJECTS import *
########################################
# ACTOR
########################################
class ACTOR(nn.Module):
    """Policy network for grouped TSP rollouts.

    Encodes the instance once per batch (reset), then repeatedly produces
    next-node selection probabilities for every group member (update).
    """

    def __init__(self):
        super().__init__()
        self.encoder = Encoder()
        self.node_prob_calculator = Next_Node_Probability_Calculator_for_group()
        # Filled in by reset()/update().
        self.batch_s = None
        self.encoded_nodes = None
        self.box_select_probabilities = None  # shape = (batch, group, TSP_SIZE)

    def reset(self, group_state):
        """Encode a fresh batch of instances and re-arm the decoder."""
        self.batch_s = group_state.data.size(0)
        self.encoded_nodes = self.encoder(group_state.data)
        # shape = (batch_s, TSP_SIZE, EMBEDDING_DIM)
        self.node_prob_calculator.reset(self.encoded_nodes, group_ninf_mask=group_state.ninf_mask)

    def soft_reset(self, group_state):
        """Re-arm the decoder while reusing the cached node encodings."""
        self.node_prob_calculator.reset(self.encoded_nodes, group_ninf_mask=group_state.ninf_mask)

    def update(self, group_state):
        """Recompute selection probabilities from each group's current node."""
        last_nodes = pick_nodes_for_each_group(self.encoded_nodes, group_state.current_node)
        # shape = (batch_s, group, EMBEDDING_DIM)
        self.box_select_probabilities = self.node_prob_calculator(last_nodes)
        # shape = (batch_s, group, TSP_SIZE)

    def get_action_probabilities(self):
        return self.box_select_probabilities
########################################
# ACTOR_SUB_NN : ENCODER
########################################
class Encoder(nn.Module):
    """Embeds 2-D node coordinates and refines them through a stack of
    attention-based encoder layers."""

    def __init__(self):
        super().__init__()
        self.embedding = nn.Linear(2, EMBEDDING_DIM)
        self.layers = nn.ModuleList([Encoder_Layer() for _ in range(ENCODER_LAYER_NUM)])

    def forward(self, data):
        # data.shape = (batch_s, TSP_SIZE, 2)
        out = self.embedding(data)
        # shape = (batch_s, TSP_SIZE, EMBEDDING_DIM)
        for layer in self.layers:
            out = layer(out)
        return out
class Encoder_Layer(nn.Module):
    """One transformer-style encoder block: multi-head self-attention,
    then a position-wise feed-forward net, each followed by an
    add-&-normalize step."""

    def __init__(self):
        super().__init__()
        self.Wq = nn.Linear(EMBEDDING_DIM, HEAD_NUM * KEY_DIM, bias=False)
        self.Wk = nn.Linear(EMBEDDING_DIM, HEAD_NUM * KEY_DIM, bias=False)
        self.Wv = nn.Linear(EMBEDDING_DIM, HEAD_NUM * KEY_DIM, bias=False)
        self.multi_head_combine = nn.Linear(HEAD_NUM * KEY_DIM, EMBEDDING_DIM)
        self.addAndNormalization1 = Add_And_Normalization_Module()
        self.feedForward = Feed_Forward_Module()
        self.addAndNormalization2 = Add_And_Normalization_Module()

    def forward(self, input1):
        # input1.shape = (batch_s, TSP_SIZE, EMBEDDING_DIM)
        q = reshape_by_heads(self.Wq(input1), head_num=HEAD_NUM)
        k = reshape_by_heads(self.Wk(input1), head_num=HEAD_NUM)
        v = reshape_by_heads(self.Wv(input1), head_num=HEAD_NUM)
        # q shape = (batch_s, HEAD_NUM, TSP_SIZE, KEY_DIM)

        attention_out = multi_head_attention(q, k, v)
        # shape = (batch_s, TSP_SIZE, HEAD_NUM*KEY_DIM)
        combined = self.multi_head_combine(attention_out)
        # shape = (batch_s, TSP_SIZE, EMBEDDING_DIM)

        # Residual + norm around attention, then around the feed-forward.
        normalized = self.addAndNormalization1(input1, combined)
        return self.addAndNormalization2(normalized, self.feedForward(normalized))
########################################
# ACTOR_SUB_NN : Next_Node_Probability_Calculator
########################################
class Next_Node_Probability_Calculator_for_group(nn.Module):
    """Decoder that turns each group's last-visited node into a probability
    distribution over the next node to visit.

    Stateful by design: reset() caches per-instance keys/values and the
    graph query; forward() lazily caches the first-node query on its first
    call, then combines graph/first/last queries for attention.
    """
    def __init__(self):
        super().__init__()
        self.Wq_graph = nn.Linear(EMBEDDING_DIM, HEAD_NUM * KEY_DIM, bias=False)
        self.Wq_first = nn.Linear(EMBEDDING_DIM, HEAD_NUM * KEY_DIM, bias=False)
        self.Wq_last = nn.Linear(EMBEDDING_DIM, HEAD_NUM * KEY_DIM, bias=False)
        self.Wk = nn.Linear(EMBEDDING_DIM, HEAD_NUM * KEY_DIM, bias=False)
        self.Wv = nn.Linear(EMBEDDING_DIM, HEAD_NUM * KEY_DIM, bias=False)
        self.multi_head_combine = nn.Linear(HEAD_NUM * KEY_DIM, EMBEDDING_DIM)
        # Per-episode caches, populated by reset()/forward().
        self.q_graph = None  # saved q1, for multi-head attention
        self.q_first = None  # saved q2, for multi-head attention
        self.k = None  # saved key, for multi-head attention
        self.v = None  # saved value, for multi-head_attention
        self.single_head_key = None  # saved, for single-head attention
        self.group_ninf_mask = None  # reference to ninf_mask owned by state
    def reset(self, encoded_nodes, group_ninf_mask):
        """Cache everything that stays fixed during one decoding episode.

        encoded_nodes.shape = (batch_s, TSP_SIZE, EMBEDDING_DIM).
        group_ninf_mask is stored by reference, so later in-place updates
        by the environment state are visible here.
        """
        # encoded_nodes.shape = (batch_s, TSP_SIZE, EMBEDDING_DIM)
        # Graph embedding = mean over all node embeddings.
        encoded_graph = encoded_nodes.mean(dim=1, keepdim=True)
        # shape = (batch_s, 1, EMBEDDING_DIM)
        self.q_graph = reshape_by_heads(self.Wq_graph(encoded_graph), head_num=HEAD_NUM)
        # shape = (batch_s, HEAD_NUM, 1, KEY_DIM)
        # q_first is re-cached lazily on the first forward() of the episode.
        self.q_first = None
        # shape = (batch_s, HEAD_NUM, group, KEY_DIM)
        self.k = reshape_by_heads(self.Wk(encoded_nodes), head_num=HEAD_NUM)
        self.v = reshape_by_heads(self.Wv(encoded_nodes), head_num=HEAD_NUM)
        # shape = (batch_s, HEAD_NUM, TSP_SIZE, KEY_DIM)
        self.single_head_key = encoded_nodes.transpose(1, 2)
        # shape = (batch_s, EMBEDDING_DIM, TSP_SIZE)
        self.group_ninf_mask = group_ninf_mask
        # shape = (batch_s, group, TSP_SIZE)
    def forward(self, encoded_LAST_NODE):
        """Return next-node probabilities for each group member.

        encoded_LAST_NODE.shape = (batch_s, group, EMBEDDING_DIM).
        Returns probabilities of shape (batch_s, group, TSP_SIZE).
        """
        # encoded_LAST_NODE.shape = (batch_s, group, EMBEDDING_DIM)
        # On the first decoding step, the "last" node is also the first one;
        # its query is cached for the rest of the episode.
        if self.q_first is None:
            self.q_first = reshape_by_heads(self.Wq_first(encoded_LAST_NODE), head_num=HEAD_NUM)
        # shape = (batch_s, HEAD_NUM, group, KEY_DIM)
        #  Multi-Head Attention
        #######################################################
        q_last = reshape_by_heads(self.Wq_last(encoded_LAST_NODE), head_num=HEAD_NUM)
        # shape = (batch_s, HEAD_NUM, group, KEY_DIM)
        # Combined query = graph context + first node + last node.
        q = self.q_graph + self.q_first + q_last
        # shape = (batch_s, HEAD_NUM, group, KEY_DIM)
        out_concat = multi_head_attention(q, self.k, self.v, group_ninf_mask=self.group_ninf_mask)
        # shape = (batch_s, group, HEAD_NUM*KEY_DIM)
        mh_atten_out = self.multi_head_combine(out_concat)
        # shape = (batch_s, group, EMBEDDING_DIM)
        #  Single-Head Attention, for probability calculation
        #######################################################
        score = torch.matmul(mh_atten_out, self.single_head_key)
        # shape = (batch_s, group, TSP_SIZE)
        score_scaled = score / np.sqrt(EMBEDDING_DIM)
        # shape = (batch_s, group, TSP_SIZE)
        # Clip logits to [-C, C] before masking (tanh saturation trick).
        score_clipped = LOGIT_CLIPPING * torch.tanh(score_scaled)
        score_masked = score_clipped + self.group_ninf_mask.clone()
        probs = F.softmax(score_masked, dim=2)
        # shape = (batch_s, group, TSP_SIZE)
        return probs
########################################
# NN SUB CLASS / FUNCTIONS
########################################
def pick_nodes_for_each_group(encoded_nodes, node_index_to_pick):
    """Gather one encoded node per group member.

    Args:
        encoded_nodes: (batch_s, problem, embedding_dim) node encodings.
        node_index_to_pick: (batch_s, group_s) integer node indices.

    Returns:
        Tensor of shape (batch_s, group_s, embedding_dim) where entry
        [b, g] is encoded_nodes[b, node_index_to_pick[b, g]].
    """
    batch_s = node_index_to_pick.size(0)
    group_s = node_index_to_pick.size(1)
    # Read the embedding width from the tensor instead of the global
    # EMBEDDING_DIM, so the helper works for any encoding size.
    embedding_dim = encoded_nodes.size(2)

    gathering_index = node_index_to_pick[:, :, None].expand(batch_s, group_s, embedding_dim)
    # shape = (batch_s, group, embedding_dim)
    picked_nodes = encoded_nodes.gather(dim=1, index=gathering_index)
    # shape = (batch_s, group, embedding_dim)
    return picked_nodes
def reshape_by_heads(qkv, head_num):
    """Split the last dimension into attention heads.

    (batch, C, head_num*key_dim) -> (batch, head_num, C, key_dim),
    where C is the number of positions (1, group, or problem size).
    """
    batch_s = qkv.size(0)
    C = qkv.size(1)
    split = qkv.reshape(batch_s, C, head_num, -1)
    # shape = (batch, C, head_num, key_dim); swap heads in front of positions
    return split.transpose(1, 2)
def multi_head_attention(q, k, v, ninf_mask=None, group_ninf_mask=None):
    """Scaled dot-product attention over all heads at once.

    Args:
        q: (batch_s, head_num, n, key_dim); n can be 1, group, or problem size.
        k, v: (batch_s, head_num, input_s, key_dim).
        ninf_mask: optional (batch_s, input_s) additive -inf mask.
        group_ninf_mask: optional (batch_s, n, input_s) additive -inf mask.

    Returns:
        (batch_s, n, head_num*key_dim) concatenated per-head outputs.
    """
    batch_s = q.size(0)
    head_num = q.size(1)
    n = q.size(2)
    key_dim = q.size(3)
    # Read the key/value count from k instead of the global TSP_SIZE, so the
    # function works for any problem size (not just the configured one).
    input_s = k.size(2)

    score = torch.matmul(q, k.transpose(2, 3))
    # shape = (batch_s, head_num, n, input_s)
    score_scaled = score / np.sqrt(key_dim)
    if ninf_mask is not None:
        score_scaled = score_scaled + ninf_mask[:, None, None, :].expand(batch_s, head_num, n, input_s)
    if group_ninf_mask is not None:
        score_scaled = score_scaled + group_ninf_mask[:, None, :, :].expand(batch_s, head_num, n, input_s)

    weights = F.softmax(score_scaled, dim=3)
    # shape = (batch_s, head_num, n, input_s)
    out = torch.matmul(weights, v)
    # shape = (batch_s, head_num, n, key_dim)
    out_transposed = out.transpose(1, 2)
    # shape = (batch_s, n, head_num, key_dim)
    out_concat = out_transposed.reshape(batch_s, n, head_num * key_dim)
    # shape = (batch_s, n, head_num*key_dim)
    return out_concat
class Add_And_Normalization_Module(nn.Module):
    """Residual add followed by BatchNorm1d over the embedding dimension.

    'Funny' batch-norm: batch and sequence dimensions are flattened
    together, so statistics are computed per embedding channel.
    """

    def __init__(self):
        super().__init__()
        self.norm_by_EMB = nn.BatchNorm1d(EMBEDDING_DIM, affine=True)

    def forward(self, input1, input2):
        # input1, input2 shape = (batch_s, seq_len, EMBEDDING_DIM)
        batch_s = input1.size(0)
        # Read the sequence length from the input instead of the global
        # TSP_SIZE, so the module works for any sequence length.
        seq_len = input1.size(1)
        emb_dim = input1.size(2)
        added = input1 + input2
        normalized = self.norm_by_EMB(added.reshape(batch_s * seq_len, emb_dim))
        return normalized.reshape(batch_s, seq_len, emb_dim)
class Feed_Forward_Module(nn.Module):
    """Position-wise two-layer MLP with a ReLU in between."""

    def __init__(self):
        super().__init__()
        self.W1 = nn.Linear(EMBEDDING_DIM, FF_HIDDEN_DIM)
        self.W2 = nn.Linear(FF_HIDDEN_DIM, EMBEDDING_DIM)

    def forward(self, input1):
        # input1.shape = (batch_s, TSP_SIZE, EMBEDDING_DIM)
        hidden = F.relu(self.W1(input1))
        return self.W2(hidden)
| 11,175 | 34.592357 | 107 | py |
POMO | POMO-master/OLD_ipynb_ver/POMO_TSP/source/TRAIN_N_EVAL/Train_Grouped_Actors.py |
"""
The MIT License
Copyright (c) 2020 Yeong-Dae Kwon
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import numpy as np
import time
# For debugging
from IPython.core.debugger import set_trace
# Hyper Parameters
from HYPER_PARAMS import *
from TORCH_OBJECTS import *
from source.utilities import Average_Meter
from source.travelling_saleman_problem import TSP_DATA_LOADER__RANDOM, GROUP_ENVIRONMENT
########################################
# TRAIN
########################################
def TRAIN(actor_group, epoch, timer_start, logger):
    """Run one training epoch of POMO REINFORCE on random TSP instances.

    For each batch, every group member starts from a distinct first node,
    rolls out a full tour by sampling from the actor's probabilities, and
    the shared-baseline advantage (reward minus group mean) drives the
    policy-gradient update.

    Args:
        actor_group: ACTOR module exposing reset/update/get_action_probabilities
            plus .optimizer and .lr_stepper.
        epoch: current epoch number (for logging only).
        timer_start: wall-clock start time (for elapsed-time logging).
        logger: logger used for periodic progress lines.
    """
    actor_group.train()
    distance_AM = Average_Meter()
    actor_loss_AM = Average_Meter()
    train_loader = TSP_DATA_LOADER__RANDOM(num_sample=TRAIN_DATASET_SIZE, num_nodes=TSP_SIZE, batch_size=TRAIN_BATCH_SIZE)
    logger_start = time.time()
    episode = 0
    for data in train_loader:
        # data.shape = (batch_s, TSP_SIZE, 2)
        batch_s = data.size(0)
        episode = episode + batch_s
        # Actor Group Move
        ###############################################
        env = GROUP_ENVIRONMENT(data)
        group_s = TSP_SIZE
        group_state, reward, done = env.reset(group_size=group_s)
        actor_group.reset(group_state)
        # First Move is given
        # (each group member g deterministically starts at node g — POMO's
        # multiple-starting-points trick; no probability is recorded for it)
        first_action = LongTensor(np.arange(group_s))[None, :].expand(batch_s, group_s)
        group_state, reward, done = env.step(first_action)
        # Per-step chosen-action probabilities, concatenated along dim 2.
        group_prob_list = Tensor(np.zeros((batch_s, group_s, 0)))
        while not done:
            actor_group.update(group_state)
            action_probs = actor_group.get_action_probabilities()
            # shape = (batch, group, TSP_SIZE)
            # Sample one next node per (batch, group) trajectory.
            action = action_probs.reshape(batch_s*group_s, -1).multinomial(1).squeeze(dim=1).reshape(batch_s, group_s)
            # shape = (batch, group)
            group_state, reward, done = env.step(action)
            batch_idx_mat = torch.arange(batch_s)[:, None].expand(batch_s, group_s)
            group_idx_mat = torch.arange(group_s)[None, :].expand(batch_s, group_s)
            chosen_action_prob = action_probs[batch_idx_mat, group_idx_mat, action].reshape(batch_s, group_s)
            # shape = (batch, group)
            group_prob_list = torch.cat((group_prob_list, chosen_action_prob[:, :, None]), dim=2)
        # LEARNING - Actor
        ###############################################
        group_reward = reward
        # Log-likelihood of each full tour = sum of per-step log-probs.
        group_log_prob = group_prob_list.log().sum(dim=2)
        # shape = (batch, group)
        # Shared baseline: mean reward over the group of the same instance.
        group_advantage = group_reward - group_reward.mean(dim=1, keepdim=True)
        group_loss = -group_advantage * group_log_prob
        # shape = (batch, group)
        loss = group_loss.mean()
        actor_group.optimizer.zero_grad()
        loss.backward()
        actor_group.optimizer.step()
        # RECORDING
        ###############################################
        max_reward, _ = group_reward.max(dim=1)
        distance_AM.push(-max_reward) # reward was given as negative dist
        actor_loss_AM.push(group_loss.detach().reshape(-1))
        # LOGGING
        ###############################################
        if (time.time()-logger_start > LOG_PERIOD_SEC) or (episode == TRAIN_DATASET_SIZE):
            timestr = time.strftime("%H:%M:%S", time.gmtime(time.time()-timer_start))
            log_str = 'Ep:{:03d}-{:07d}({:5.1f}%) T:{:s} ALoss:{:+5f} CLoss:{:5f} Avg.dist:{:5f}' \
                .format(epoch, episode, episode/TRAIN_DATASET_SIZE*100,
                timestr, actor_loss_AM.result(), 0,
                distance_AM.result())
            logger.info(log_str)
            logger_start = time.time()
    # LR STEP, after each epoch
    actor_group.lr_stepper.step()
| 4,697 | 36.584 | 122 | py |
POMO | POMO-master/OLD_ipynb_ver/POMO_TSP/source/TRAIN_N_EVAL/Evaluate_Grouped_Actors.py |
"""
The MIT License
Copyright (c) 2020 Yeong-Dae Kwon
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import numpy as np
# For debugging
from IPython.core.debugger import set_trace
# Hyper Parameters
from HYPER_PARAMS import *
from TORCH_OBJECTS import *
from source.utilities import Average_Meter
from source.travelling_saleman_problem import TSP_DATA_LOADER__RANDOM, GROUP_ENVIRONMENT
########################################
# EVAL
########################################
eval_result = []
def update_eval_result(old_result):
    """Replace the module-level eval_result history so that subsequent
    EVAL() calls append to `old_result` instead of an empty list."""
    global eval_result
    eval_result = old_result
def EVAL(actor_group, epoch, timer_start, logger):
    """Evaluate the actor greedily on random TSP instances.

    Rolls out all group trajectories with argmax action selection (no
    sampling, no gradients), keeps the best tour per instance, appends the
    average distance to the module-level eval_result list, and logs it.

    Args:
        actor_group: ACTOR module exposing reset/update/get_action_probabilities.
        epoch: epoch number just finished (for logging only).
        timer_start: wall-clock start time (currently unused here).
        logger: logger for the summary lines.
    """
    global eval_result
    actor_group.eval()
    eval_dist_AM = Average_Meter()
    # Small fixed-size benchmarks are not wired up; only the random loader
    # path is implemented.
    if TSP_SIZE == 5:
        raise NotImplementedError
    elif TSP_SIZE == 10:
        raise NotImplementedError
    else:
        test_loader = TSP_DATA_LOADER__RANDOM(num_sample=TEST_DATASET_SIZE, num_nodes=TSP_SIZE, batch_size=TEST_BATCH_SIZE)
    for data in test_loader:
        # data.shape = (batch_s, TSP_SIZE, 2)
        batch_s = data.size(0)
        with torch.no_grad():
            env = GROUP_ENVIRONMENT(data)
            group_s = TSP_SIZE
            group_state, reward, done = env.reset(group_size=group_s)
            actor_group.reset(group_state)
            # First Move is given
            # (each group member starts from a distinct node, as in TRAIN)
            first_action = LongTensor(np.arange(group_s))[None, :].expand(batch_s, group_s)
            group_state, reward, done = env.step(first_action)
            while not done:
                actor_group.update(group_state)
                action_probs = actor_group.get_action_probabilities()
                # shape = (batch, group, TSP_SIZE)
                # Greedy decoding: always pick the most probable next node.
                action = action_probs.argmax(dim=2)
                # shape = (batch, group)
                group_state, reward, done = env.step(action)
        # Best tour over the group counts for each instance.
        max_reward, _ = reward.max(dim=1)
        eval_dist_AM.push(-max_reward) # reward was given as negative dist
    # LOGGING
    dist_avg = eval_dist_AM.result()
    eval_result.append(dist_avg)
    logger.info('--------------------------------------------------------------------------')
    log_str = ' <<< EVAL after Epoch:{:03d} >>> Avg.dist:{:f}'.format(epoch, dist_avg)
    logger.info(log_str)
    logger.info('--------------------------------------------------------------------------')
    logger.info('eval_result = {}'.format(eval_result))
    logger.info('--------------------------------------------------------------------------')
    logger.info('--------------------------------------------------------------------------')
    logger.info('--------------------------------------------------------------------------')
| 3,672 | 34.317308 | 123 | py |
POMO | POMO-master/OLD_ipynb_ver/POMO_KP/TORCH_OBJECTS.py |
"""
The MIT License
Copyright (c) 2020 Yeong-Dae Kwon
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import torch
# Device selection: first CUDA GPU when available, CPU otherwise.
use_cuda = torch.cuda.is_available()
# use_cuda = False

if use_cuda:
    # Only touch the CUDA API when a GPU is actually present; the previous
    # unconditional torch.cuda.set_device(0) crashed on CPU-only machines.
    torch.cuda.set_device(0)
    device = torch.device("cuda")
else:
    device = torch.device("cpu")

# Tensor constructor aliases that place new tensors on the chosen device.
FloatTensor = torch.cuda.FloatTensor if use_cuda else torch.FloatTensor
LongTensor = torch.cuda.LongTensor if use_cuda else torch.LongTensor
ByteTensor = torch.cuda.ByteTensor if use_cuda else torch.ByteTensor
BoolTensor = torch.cuda.BoolTensor if use_cuda else torch.BoolTensor
Tensor = FloatTensor
| 1,573 | 32.489362 | 77 | py |
POMO | POMO-master/OLD_ipynb_ver/POMO_KP/source/utilities.py |
"""
The MIT License
Copyright (c) 2020 Yeong-Dae Kwon
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import logging
import os
import time
import datetime
import pytz
import re
import numpy as np
from HYPER_PARAMS import *
from TORCH_OBJECTS import *
########################################
# Get_Logger
########################################
tz = pytz.timezone("Asia/Seoul")
def timetz(*args):
    """logging.Formatter.converter hook: render timestamps in the
    module-level `tz` timezone (Asia/Seoul) instead of local time."""
    return datetime.datetime.now(tz).timetuple()
def Get_Logger(SAVE_FOLDER_NAME):
    """Create a timestamped result folder and a logger that writes both to
    stdout and to <folder>/log.txt.

    Returns:
        (logger, result_folder_path) tuple.
    """
    # make_dir
    #######################################################
    # Folder name: ./result/<YYYYmmdd_HHMM__NAME>, with "(k)" appended
    # if a folder of that name already exists.
    stamp = datetime.datetime.now(pytz.timezone("Asia/Seoul")).strftime("%Y%m%d_%H%M__")
    base_path = "./result/{}".format(stamp + SAVE_FOLDER_NAME)
    result_folder_path = base_path
    suffix_idx = 0
    while os.path.exists(result_folder_path):
        suffix_idx += 1
        result_folder_path = base_path + "({})".format(suffix_idx)
    os.makedirs(result_folder_path)

    # Logger
    #######################################################
    # Note: getLogger returns the same logger object for the same name, so
    # a unique folder path keeps loggers of separate runs apart.
    logger = logging.getLogger(result_folder_path)
    stream_handler = logging.StreamHandler()
    file_handler = logging.FileHandler('{}/log.txt'.format(result_folder_path))
    fmt = logging.Formatter("[%(asctime)s] %(message)s", "%Y-%m-%d %H:%M:%S")
    fmt.converter = timetz
    stream_handler.setFormatter(fmt)
    file_handler.setFormatter(fmt)
    logger.addHandler(stream_handler)
    logger.addHandler(file_handler)
    logger.setLevel(level=logging.INFO)

    return logger, result_folder_path
def Extract_from_LogFile(result_folder_path, variable_name):
    """Return the most recent log line fragment starting at `variable_name`.

    Scans <result_folder_path>/log.txt from the end and returns the text
    from the variable name to the end of that line (e.g. "eval_result = [...]").

    Returns the legacy fallback string "Print(No such variable found !!)"
    when the variable never appears, preserved for callers that exec or
    inspect the returned text.

    Fixes over the previous version: the variable name is regex-escaped
    (it was interpolated into the pattern verbatim), and a failed regex
    match no longer crashes on an unbound/None match object.
    """
    logfile_path = '{}/log.txt'.format(result_folder_path)
    with open(logfile_path) as f:
        datafile = f.readlines()

    # Search backwards so the most recently logged value wins.
    for line in reversed(datafile):
        if variable_name in line:
            m = re.search(re.escape(variable_name) + r'[^\n]+', line)
            if m is not None:
                return m.group(0)
    return "Print(No such variable found !!)"
########################################
# Average_Meter
########################################
class Average_Meter:
    """Accumulates tensor values (on the configured `device`) and reports
    their running mean."""

    def __init__(self):
        self.sum = None
        self.count = None
        self.reset()

    def reset(self):
        """Clear the running sum and element count."""
        self.sum = torch.tensor(0.).to(device)
        self.count = 0

    def push(self, some_tensor, n_for_rank_0_tensor=None):
        """Accumulate values.

        A rank-0 tensor is treated as an already-averaged value, so the
        caller must pass the element count it represents.
        """
        # Tensors carrying grad history would leak memory if stored here.
        assert not some_tensor.requires_grad
        if len(some_tensor.shape) == 0:
            self.sum += some_tensor * n_for_rank_0_tensor
            self.count += n_for_rank_0_tensor
        else:
            self.sum += some_tensor.sum()
            self.count += some_tensor.numel()

    def peek(self):
        """Mean of everything pushed so far, without clearing."""
        return (self.sum / self.count).tolist()

    def result(self):
        """Mean of everything pushed so far, then reset the meter."""
        mean = (self.sum / self.count).tolist()
        self.reset()
        return mean
########################################
# View NN Parameters
########################################
def get_n_params1(model):
    """Print each parameter tensor's element count and shape, followed by
    the model's total parameter count."""
    total = 0
    for param in model.parameters():
        n = 1
        for dim in param.size():
            n = n * dim
        total += n
        print(n)
        print(param.shape)
    print("Total: {:d}".format(total))
def get_n_params2(model):
    """Print the number of trainable parameters (computed via numpy)."""
    trainable = (p for p in model.parameters() if p.requires_grad)
    params = sum(np.prod(p.size()) for p in trainable)
    print(params)
def get_n_params3(model):
    """Print the total element count of all trainable parameters."""
    total = sum(p.numel() for p in model.parameters() if p.requires_grad)
    print(total)
def get_structure(model):
    """Print the model's module hierarchy (its repr)."""
    print(model)
| 4,907 | 27.045714 | 106 | py |
POMO | POMO-master/OLD_ipynb_ver/POMO_KP/source/knapsack_problem.py |
"""
The MIT License
Copyright (c) 2020 Yeong-Dae Kwon
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
####################################
# EXTERNAL LIBRARY
####################################
import torch
import numpy as np
# For debugging
from IPython.core.debugger import set_trace
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
####################################
# PROJECT VARIABLES
####################################
from HYPER_PARAMS import *
from TORCH_OBJECTS import *
####################################
# DATA
####################################
def KNAPSACK_DATA_LOADER__RANDOM(num_sample, num_items, batch_size):
    """Build a DataLoader over `num_sample` random knapsack instances,
    each holding `num_items` (weight, value) pairs drawn uniformly in [0, 1)."""
    random_dataset = KnapSack_Dataset__Random(num_sample=num_sample, num_items=num_items)
    return DataLoader(dataset=random_dataset,
                      batch_size=batch_size,
                      shuffle=False,
                      num_workers=0,
                      collate_fn=knapsack_collate_fn)
class KnapSack_Dataset__Random(Dataset):
    """Dataset yielding freshly sampled (num_items, 2) arrays of uniform
    random item (weight, value) pairs."""

    def __init__(self, num_sample, num_items):
        self.num_sample = num_sample
        self.num_items = num_items

    def __getitem__(self, index):
        # A new random instance is drawn on every access; `index` only
        # serves the Dataset protocol.
        return np.random.rand(self.num_items, 2)

    def __len__(self):
        return self.num_sample
def knapsack_collate_fn(batch):
    """Stack a list of (num_items, 2) numpy arrays into one float tensor,
    using the device-aware Tensor alias from TORCH_OBJECTS."""
    return Tensor(batch)
####################################
# STATE
####################################
class STATE:
    """Single-trajectory knapsack state: one packing sequence per batch entry.

    Holds the item data with an extra all-zero "dummy" row at index
    PROBLEM_SIZE (selecting it adds 0 weight and 0 value), plus masks,
    remaining capacity, and accumulated value. Note that `item_data` and
    `ninf_mask` are views into the *_w_dummy buffers, so in-place writes to
    the buffers are visible through them.
    """
    def __init__(self, item_data, capacity):
        self.batch_s = item_data.size(0)
        # Buffer with one extra zero row per batch entry (the dummy item).
        self.items_and_a_dummy = Tensor(np.zeros((self.batch_s, PROBLEM_SIZE+1, 2)))
        self.items_and_a_dummy[:, :PROBLEM_SIZE, :] = item_data
        # View excluding the dummy row.
        self.item_data = self.items_and_a_dummy[:, :PROBLEM_SIZE, :]
        # shape = (batch, problem, 2)
        # History
        ####################################
        self.current_node = None
        self.selected_count = 0
        self.selected_node_list = LongTensor(np.zeros((self.batch_s, 0)))
        # shape = (batch, selected_count)
        # Status
        ####################################
        self.accumulated_value = Tensor(np.zeros((self.batch_s,)))
        # shape = (batch,)
        self.capacity = Tensor(np.ones((self.batch_s,))) * capacity
        # shape = (batch,)
        # Additive -inf mask over items; `ninf_mask` is a view into the
        # dummy-extended buffer, so writes below flow through.
        self.ninf_mask_w_dummy = Tensor(np.zeros((self.batch_s, PROBLEM_SIZE+1)))
        self.ninf_mask = self.ninf_mask_w_dummy[:, :PROBLEM_SIZE]
        # shape = (batch, problem)
        # ninf_mask plus -inf on items that no longer fit; rebuilt each step.
        self.fit_ninf_mask = None
        self.finished = BoolTensor(np.zeros((self.batch_s,)))
        # shape = (batch,)
    def move_to(self, selected_item_idx):
        """Pack one item per batch entry and refresh value/capacity/masks.

        selected_item_idx.shape = (batch,). Index PROBLEM_SIZE selects the
        zero-weight, zero-value dummy item.
        """
        # selected_item_idx.shape = (batch,)
        # History
        ####################################
        self.current_node = selected_item_idx
        self.selected_count += 1
        self.selected_node_list = torch.cat((self.selected_node_list, selected_item_idx[:, None]), dim=1)
        # Status
        ####################################
        gathering_index = selected_item_idx[:, None, None].expand(self.batch_s, 1, 2)
        selected_item = self.items_and_a_dummy.gather(dim=1, index=gathering_index).squeeze(dim=1)
        # shape = (batch, 2); column 0 = weight, column 1 = value
        self.accumulated_value += selected_item[:, 1]
        self.capacity -= selected_item[:, 0]
        # Permanently mask the chosen item (write-through to ninf_mask for
        # non-dummy indices).
        self.ninf_mask_w_dummy[torch.arange(self.batch_s), selected_item_idx] = -np.inf
        # Items heavier than the remaining capacity cannot be chosen now.
        unfit_bool = (self.capacity[:, None] - self.item_data[:, :, 0]) < 0
        # shape = (batch, problem)
        self.fit_ninf_mask = self.ninf_mask.clone()
        self.fit_ninf_mask[unfit_bool] = -np.inf
        # An episode is finished when no remaining item fits.
        self.finished = (self.fit_ninf_mask == -np.inf).all(dim=1)
        # shape = (batch,)
        # NOTE(review): clearing the mask for finished episodes avoids an
        # all--inf row (which would break a downstream softmax).
        self.fit_ninf_mask[self.finished[:, None].expand(self.batch_s, PROBLEM_SIZE)] = 0 # do not mask finished epi.
class GROUP_STATE:
    """Grouped knapsack state: `group_s` parallel packing sequences per
    batch entry.

    Same layout as STATE with an extra group dimension. The item buffer has
    an all-zero "dummy" row at index PROBLEM_SIZE, and `item_data` /
    `ninf_mask` are views into the *_w_dummy buffers (in-place writes to the
    buffers are visible through them).
    """
    def __init__(self, group_size, item_data, capacity):
        self.batch_s = item_data.size(0)
        self.group_s = group_size
        # Buffer with one extra zero row per batch entry (the dummy item).
        self.items_and_a_dummy = Tensor(np.zeros((self.batch_s, PROBLEM_SIZE+1, 2)))
        self.items_and_a_dummy[:, :PROBLEM_SIZE, :] = item_data
        # View excluding the dummy row.
        self.item_data = self.items_and_a_dummy[:, :PROBLEM_SIZE, :]
        # shape = (batch, problem, 2)
        # History
        ####################################
        self.current_node = None
        # shape = (batch_s, group)
        self.selected_count = 0
        self.selected_node_list = LongTensor(np.zeros((self.batch_s, self.group_s, 0)))
        # shape = (batch_s, group, selected_count)
        # Status
        ####################################
        self.accumulated_value = Tensor(np.zeros((self.batch_s, self.group_s)))
        # shape = (batch, group)
        self.capacity = Tensor(np.ones((self.batch_s, self.group_s))) * capacity
        # shape = (batch, group)
        # Additive -inf mask over items; `ninf_mask` is a view into the
        # dummy-extended buffer, so writes below flow through.
        self.ninf_mask_w_dummy = Tensor(np.zeros((self.batch_s, self.group_s, PROBLEM_SIZE+1)))
        self.ninf_mask = self.ninf_mask_w_dummy[:, :, :PROBLEM_SIZE]
        # shape = (batch, group, problem)
        # ninf_mask plus -inf on items that no longer fit; rebuilt each step.
        self.fit_ninf_mask = None
        self.finished = BoolTensor(np.zeros((self.batch_s, self.group_s)))
        # shape = (batch, group)
    def move_to(self, selected_idx_mat):
        """Pack one item per (batch, group) trajectory and refresh
        value/capacity/masks.

        selected_idx_mat.shape = (batch, group). Index PROBLEM_SIZE selects
        the zero-weight, zero-value dummy item.
        """
        # selected_idx_mat.shape = (batch, group)
        # History
        ####################################
        self.current_node = selected_idx_mat
        self.selected_count += 1
        self.selected_node_list = torch.cat((self.selected_node_list, selected_idx_mat[:, :, None]), dim=2)
        # Status
        ####################################
        items_mat = self.items_and_a_dummy[:, None, :, :].expand(self.batch_s, self.group_s, PROBLEM_SIZE+1, 2)
        gathering_index = selected_idx_mat[:, :, None, None].expand(self.batch_s, self.group_s, 1, 2)
        selected_item = items_mat.gather(dim=2, index=gathering_index).squeeze(dim=2)
        # shape = (batch, group, 2); column 0 = weight, column 1 = value
        self.accumulated_value += selected_item[:, :, 1]
        self.capacity -= selected_item[:, :, 0]
        # Permanently mask the chosen item for each (batch, group) pair
        # (write-through to ninf_mask for non-dummy indices).
        batch_idx_mat = torch.arange(self.batch_s)[:, None].expand(self.batch_s, self.group_s)
        group_idx_mat = torch.arange(self.group_s)[None, :].expand(self.batch_s, self.group_s)
        self.ninf_mask_w_dummy[batch_idx_mat, group_idx_mat, selected_idx_mat] = -np.inf
        # Items heavier than the remaining capacity cannot be chosen now.
        unfit_bool = (self.capacity[:, :, None] - self.item_data[:, None, :, 0]) < 0
        # shape = (batch, group, problem)
        self.fit_ninf_mask = self.ninf_mask.clone()
        self.fit_ninf_mask[unfit_bool] = -np.inf
        # A trajectory is finished when no remaining item fits.
        self.finished = (self.fit_ninf_mask == -np.inf).all(dim=2)
        # shape = (batch, group)
        # NOTE(review): clearing the mask for finished trajectories avoids an
        # all--inf row (which would break a downstream softmax).
        self.fit_ninf_mask[self.finished[:, :, None].expand(self.batch_s, self.group_s, PROBLEM_SIZE)] = 0
        # do not mask finished episode
####################################
# ENVIRONMENT
####################################
class ENVIRONMENT:
    """Single-trajectory knapsack environment with a gym-like reset/step API."""

    def __init__(self, item_data):
        # item_data.shape = (batch, problem, 2)
        self.item_data = item_data
        self.batch_s = item_data.size(0)
        self.state = None

    def reset(self):
        """Start a fresh episode. Returns (state, reward=None, done=False)."""
        # Knapsack capacity for the supported problem sizes.
        capacities = {50: 12.5, 100: 25, 200: 25}
        if PROBLEM_SIZE not in capacities:
            raise NotImplementedError
        self.state = STATE(item_data=self.item_data, capacity=capacities[PROBLEM_SIZE])
        return self.state, None, False

    def step(self, selected_item_idx):
        """Pack one item per batch entry.

        selected_item_idx.shape = (batch,). The reward (accumulated value)
        is only returned once every episode in the batch has finished.
        """
        self.state.move_to(selected_item_idx)
        done = self.state.finished.all()
        reward = self.state.accumulated_value if done else None
        return self.state, reward, done
class GROUP_ENVIRONMENT:
    """Knapsack environment that steps a whole group of trajectories at once."""

    def __init__(self, item_data):
        # item_data.shape = (batch, problem, 2)
        self.item_data = item_data
        self.batch_s = item_data.size(0)
        self.group_state = None

    def reset(self, group_size):
        """Start a fresh group episode. Returns (state, reward=None, done=False)."""
        # Knapsack capacity for the supported problem sizes.
        capacities = {50: 12.5, 100: 25, 200: 25}
        if PROBLEM_SIZE not in capacities:
            raise NotImplementedError
        self.group_state = GROUP_STATE(group_size=group_size,
                                       item_data=self.item_data,
                                       capacity=capacities[PROBLEM_SIZE])
        return self.group_state, None, False

    def step(self, selected_idx_mat):
        """Pack one item per (batch, group) trajectory.

        selected_idx_mat.shape = (batch, group). The reward (accumulated
        value) is only returned once every trajectory has finished.
        """
        self.group_state.move_to(selected_idx_mat)
        done = self.group_state.finished.all()  # finished.shape = (batch, group)
        reward = self.group_state.accumulated_value if done else None
        return self.group_state, reward, done
| 10,073 | 33.033784 | 118 | py |
POMO | POMO-master/OLD_ipynb_ver/POMO_KP/source/MODEL__Actor/grouped_actors.py |
"""
The MIT License
Copyright (c) 2020 Yeong-Dae Kwon
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
# For debugging
from IPython.core.debugger import set_trace
# Hyper Parameters
from HYPER_PARAMS import *
from TORCH_OBJECTS import *
########################################
# ACTOR
########################################
class ACTOR(nn.Module):
    """Policy network: encodes the item set once per episode, then scores
    next-item picks for every group member via the probability calculator."""

    def __init__(self):
        super().__init__()
        self.encoder = Encoder()
        self.node_prob_calculator = Next_Node_Probability_Calculator_for_group()
        self.batch_s = None
        self.encoded_nodes_and_dummy = None  # (batch, problem+1, EMBEDDING_DIM); last slot stays zero (dummy item)
        self.encoded_nodes = None            # view of the first `problem` slots of the buffer above
        self.encoded_graph = None            # (batch, 1, EMBEDDING_DIM) mean-pooled embedding

    def reset(self, group_state):
        """Encode this episode's items and cache decoder keys/values.

        Must be called once per new environment before get_action_probabilities.
        """
        self.batch_s = group_state.item_data.size(0)
        self.encoded_nodes_and_dummy = Tensor(np.zeros((self.batch_s, PROBLEM_SIZE+1, EMBEDDING_DIM)))
        self.encoded_nodes_and_dummy[:, :PROBLEM_SIZE, :] = self.encoder(group_state.item_data)
        # NOTE: this is a view into encoded_nodes_and_dummy, not a copy.
        self.encoded_nodes = self.encoded_nodes_and_dummy[:, :PROBLEM_SIZE, :]
        # shape = (batch, problem, EMBEDDING_DIM)
        self.encoded_graph = self.encoded_nodes.mean(dim=1, keepdim=True)
        # shape = (batch, 1, EMBEDDING_DIM)
        self.node_prob_calculator.reset(self.encoded_nodes)

    def get_action_probabilities(self, group_state):
        """Return per-member selection probabilities over all items.

        Infeasible items receive ~0 probability through the additive -inf
        mask supplied by the environment (group_state.fit_ninf_mask).
        """
        probs = self.node_prob_calculator(graph=self.encoded_graph, capacity=group_state.capacity,
                                          ninf_mask=group_state.fit_ninf_mask)
        # shape = (batch, group, problem)
        return probs
########################################
# ACTOR_SUB_NN : ENCODER
########################################
class Encoder(nn.Module):
    """Embeds (weight, value) item pairs, then refines them with a stack of
    attention-based encoder layers."""

    def __init__(self):
        super().__init__()
        self.embedding = nn.Linear(2, EMBEDDING_DIM)
        self.layers = nn.ModuleList(Encoder_Layer() for _ in range(ENCODER_LAYER_NUM))

    def forward(self, item_data):
        # item_data shape: (batch, problem, 2)
        embeddings = self.embedding(item_data)
        # shape: (batch, problem, EMBEDDING_DIM)
        for encoder_layer in self.layers:
            embeddings = encoder_layer(embeddings)
        return embeddings
class Encoder_Layer(nn.Module):
    """One transformer-style encoder block: multi-head self-attention followed
    by a feed-forward net, each wrapped in add & normalize."""

    def __init__(self):
        super().__init__()
        self.Wq = nn.Linear(EMBEDDING_DIM, HEAD_NUM * KEY_DIM, bias=False)
        self.Wk = nn.Linear(EMBEDDING_DIM, HEAD_NUM * KEY_DIM, bias=False)
        self.Wv = nn.Linear(EMBEDDING_DIM, HEAD_NUM * KEY_DIM, bias=False)
        self.multi_head_combine = nn.Linear(HEAD_NUM * KEY_DIM, EMBEDDING_DIM)
        self.addAndNormalization1 = Add_And_Normalization_Module()
        self.feedForward = Feed_Forward_Module()
        self.addAndNormalization2 = Add_And_Normalization_Module()

    def forward(self, input1):
        # input1 shape: (batch, problem, EMBEDDING_DIM)
        queries = reshape_by_heads(self.Wq(input1), head_num=HEAD_NUM)
        keys = reshape_by_heads(self.Wk(input1), head_num=HEAD_NUM)
        values = reshape_by_heads(self.Wv(input1), head_num=HEAD_NUM)
        # each shape: (batch, HEAD_NUM, problem, KEY_DIM)

        attention_out = multi_head_attention(queries, keys, values)
        # shape: (batch, problem, HEAD_NUM*KEY_DIM)
        combined = self.multi_head_combine(attention_out)
        # shape: (batch, problem, EMBEDDING_DIM)

        residual1 = self.addAndNormalization1(input1, combined)
        ff_out = self.feedForward(residual1)
        return self.addAndNormalization2(residual1, ff_out)
########################################
# ACTOR_SUB_NN : Next_Node_Probability_Calculator
########################################
class Next_Node_Probability_Calculator_for_group(nn.Module):
    """Decoder: attends from each group member's query (graph embedding plus
    remaining capacity) to the encoded items and yields a probability
    distribution over the next item to pick."""

    def __init__(self):
        super().__init__()
        # Query input is [graph embedding ; capacity scalar] -> 1+EMBEDDING_DIM.
        self.Wq = nn.Linear(1+EMBEDDING_DIM, HEAD_NUM * KEY_DIM, bias=False)
        self.Wk = nn.Linear(EMBEDDING_DIM, HEAD_NUM * KEY_DIM, bias=False)
        self.Wv = nn.Linear(EMBEDDING_DIM, HEAD_NUM * KEY_DIM, bias=False)
        self.multi_head_combine = nn.Linear(HEAD_NUM * KEY_DIM, EMBEDDING_DIM)
        self.k = None  # saved key, for multi-head attention
        self.v = None  # saved value, for multi-head_attention
        self.single_head_key = None  # saved, for single-head attention

    def reset(self, encoded_nodes):
        """Precompute keys/values once per episode (queries change every step)."""
        # encoded_nodes.shape = (batch, problem, EMBEDDING_DIM)
        self.k = reshape_by_heads(self.Wk(encoded_nodes), head_num=HEAD_NUM)
        self.v = reshape_by_heads(self.Wv(encoded_nodes), head_num=HEAD_NUM)
        # shape = (batch, HEAD_NUM, problem, KEY_DIM)
        self.single_head_key = encoded_nodes.transpose(1, 2)
        # shape = (batch, EMBEDDING_DIM, problem)

    def forward(self, graph, capacity, ninf_mask=None):
        """Return selection probabilities of shape (batch, group, problem).

        graph.shape = (batch, 1, EMBEDDING_DIM)
        capacity.shape = (batch, group)
        ninf_mask.shape = (batch, group, problem); additive -inf mask
        """
        batch_s = capacity.size(0)
        group_s = capacity.size(1)

        #  Multi-Head Attention
        #######################################################
        input1 = graph.expand(batch_s, group_s, EMBEDDING_DIM)
        input2 = capacity[:, :, None]
        input_cat = torch.cat((input1, input2), dim=2)
        # shape = (batch, group, 1+EMBEDDING_DIM)
        q = reshape_by_heads(self.Wq(input_cat), head_num=HEAD_NUM)
        # shape = (batch, HEAD_NUM, group, KEY_DIM)
        out_concat = multi_head_attention(q, self.k, self.v, ninf_mask=ninf_mask)
        # shape = (batch, group, HEAD_NUM*KEY_DIM)
        mh_atten_out = self.multi_head_combine(out_concat)
        # shape = (batch, group, EMBEDDING_DIM)

        #  Single-Head Attention, for probability calculation
        #######################################################
        score = torch.matmul(mh_atten_out, self.single_head_key)
        # shape = (batch, group, problem)
        score_scaled = score / np.sqrt(EMBEDDING_DIM)
        # shape = (batch, group, problem)
        # Clip logits to [-LOGIT_CLIPPING, +LOGIT_CLIPPING] via tanh, then
        # apply the -inf mask so infeasible items get ~0 probability.
        score_clipped = LOGIT_CLIPPING * torch.tanh(score_scaled)
        if ninf_mask is None:
            score_masked = score_clipped
        else:
            score_masked = score_clipped + ninf_mask
        probs = F.softmax(score_masked, dim=2)
        # shape = (batch, group, problem)
        return probs
########################################
# NN SUB CLASS / FUNCTIONS
########################################
def pick_nodes_for_each_group(encoded_nodes, node_index_to_pick):
    """Gather one item embedding per group member.

    encoded_nodes: (batch, problem, EMBEDDING_DIM)
    node_index_to_pick: (batch, group)
    returns: (batch, group, EMBEDDING_DIM)
    """
    batch_s, group_s = node_index_to_pick.size()
    expanded_idx = node_index_to_pick.unsqueeze(2).expand(batch_s, group_s, EMBEDDING_DIM)
    # shape: (batch, group, EMBEDDING_DIM)
    return encoded_nodes.gather(dim=1, index=expanded_idx)
def reshape_by_heads(qkv, head_num):
    """Split the last dimension into heads and move the head axis forward.

    qkv: (batch, C, head_num*key_dim) -> returns (batch, head_num, C, key_dim)
    """
    batch_s, C = qkv.size(0), qkv.size(1)
    per_head = qkv.reshape(batch_s, C, head_num, -1)
    # shape: (batch, C, head_num, key_dim)
    return per_head.permute(0, 2, 1, 3)
def multi_head_attention(q, k, v, ninf_mask=None):
# q shape = (batch, head_num, n, key_dim) : n can be either 1 or group
# k,v shape = (batch, head_num, problem, key_dim)
# ninf_mask.shape = (batch, group, problem)
batch_s = q.size(0)
head_num = q.size(1)
n = q.size(2)
key_dim = q.size(3)
problem_s = k.size(2)
score = torch.matmul(q, k.transpose(2, 3))
# shape = (batch, head_num, n, TSP_SIZE)
score_scaled = score / np.sqrt(key_dim)
if ninf_mask is not None:
score_scaled = score_scaled + ninf_mask[:, None, :, :].expand(batch_s, head_num, n, problem_s)
weights = nn.Softmax(dim=3)(score_scaled)
# shape = (batch, head_num, n, problem)
out = torch.matmul(weights, v)
# shape = (batch, head_num, n, key_dim)
out_transposed = out.transpose(1, 2)
# shape = (batch, n, head_num, key_dim)
out_concat = out_transposed.reshape(batch_s, n, head_num * key_dim)
# shape = (batch, n, head_num*key_dim)
return out_concat
class Add_And_Normalization_Module(nn.Module):
    """Residual add followed by BatchNorm1d over the embedding dimension."""

    def __init__(self):
        super().__init__()
        # 'Funny' Batch_Norm: the (batch, problem) axes are flattened so the
        # statistics are taken per EMBEDDING_DIM feature.
        self.norm_by_EMB = nn.BatchNorm1d(EMBEDDING_DIM, affine=True)

    def forward(self, input1, input2):
        # inputs shape: (batch, problem, EMBEDDING_DIM)
        batch_s, problem_s = input1.size(0), input1.size(1)
        summed = input1 + input2
        flat = summed.reshape(batch_s * problem_s, EMBEDDING_DIM)
        return self.norm_by_EMB(flat).reshape(batch_s, problem_s, EMBEDDING_DIM)
class Feed_Forward_Module(nn.Module):
    """Position-wise two-layer feed-forward network with ReLU in between."""

    def __init__(self):
        super().__init__()
        self.W1 = nn.Linear(EMBEDDING_DIM, FF_HIDDEN_DIM)
        self.W2 = nn.Linear(FF_HIDDEN_DIM, EMBEDDING_DIM)

    def forward(self, input1):
        # input1 shape: (batch, problem, EMBEDDING_DIM)
        hidden = F.relu(self.W1(input1))
        return self.W2(hidden)
| 10,279 | 32.594771 | 102 | py |
POMO | POMO-master/OLD_ipynb_ver/POMO_KP/source/TRAIN_N_EVAL/Train_Grouped_Actors.py |
"""
The MIT License
Copyright (c) 2020 Yeong-Dae Kwon
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import numpy as np
# For Logging
import time
# For debugging
from IPython.core.debugger import set_trace
# Hyper Parameters
from HYPER_PARAMS import *
from TORCH_OBJECTS import *
from source.utilities import Average_Meter
from source.knapsack_problem import KNAPSACK_DATA_LOADER__RANDOM, GROUP_ENVIRONMENT
########################################
# TRAIN
########################################
def TRAIN(grouped_actor, epoch, timer_start, logger):
    """Run one POMO training epoch over freshly sampled knapsack instances.

    grouped_actor: ACTOR with .optimizer and .lr_stepper attached.
    epoch: 1-based epoch index (used for logging only).
    timer_start: wall-clock reference for elapsed-time logging.
    logger: logging.Logger receiving periodic progress lines.
    """
    grouped_actor.train()

    score_AM = Average_Meter()
    actor_loss_AM = Average_Meter()

    train_loader = KNAPSACK_DATA_LOADER__RANDOM(num_sample=TRAIN_DATASET_SIZE,
                                                num_items=PROBLEM_SIZE,
                                                batch_size=BATCH_SIZE)

    logger_start = time.time()
    episode = 0
    for item_data in train_loader:
        # item_data.shape = (batch, problem, 2)

        batch_s = item_data.size(0)
        episode = episode + batch_s

        # Actor Group Move
        ###############################################
        env = GROUP_ENVIRONMENT(item_data)
        group_s = PROBLEM_SIZE
        group_state, reward, done = env.reset(group_size=group_s)
        grouped_actor.reset(group_state)

        # First Move is given: group member g starts by taking item g (POMO rollouts).
        first_action = LongTensor(np.arange(group_s))[None, :].expand(batch_s, group_s)
        group_state, reward, done = env.step(first_action)

        group_prob_list = Tensor(np.zeros((batch_s, group_s, 0)))
        while not done:
            action_probs = grouped_actor.get_action_probabilities(group_state)
            # shape = (batch, group, problem)
            action = action_probs.reshape(batch_s*group_s, PROBLEM_SIZE).multinomial(1).squeeze(dim=1).reshape(batch_s, group_s)
            # shape = (batch, group)
            # Fixed misspelling ("action_w_finisehd") for consistency with EVAL.
            action_w_finished = action.clone()
            action_w_finished[group_state.finished] = PROBLEM_SIZE  # dummy item
            group_state, reward, done = env.step(action_w_finished)

            batch_idx_mat = torch.arange(batch_s)[:, None].expand(batch_s, group_s)
            group_idx_mat = torch.arange(group_s)[None, :].expand(batch_s, group_s)
            chosen_action_prob = action_probs[batch_idx_mat, group_idx_mat, action].reshape(batch_s, group_s)
            # shape = (batch, group)
            chosen_action_prob[group_state.finished] = 1  # done episode will gain no more probability
            group_prob_list = torch.cat((group_prob_list, chosen_action_prob[:, :, None]), dim=2)
            # shape = (batch, group, x)

        # LEARNING - Actor
        ###############################################
        group_reward = reward
        # shape = (batch, group)
        group_log_prob = group_prob_list.log().sum(dim=2)
        # shape = (batch, group)

        # Shared baseline: the group-mean reward (POMO advantage).
        group_advantage = group_reward - group_reward.mean(dim=1, keepdim=True)

        group_loss = -group_advantage * group_log_prob
        # shape = (batch, group)
        loss = group_loss.mean()

        grouped_actor.optimizer.zero_grad()
        loss.backward()
        grouped_actor.optimizer.step()

        # RECORDING
        ###############################################
        max_reward, _ = group_reward.max(dim=1)
        score_AM.push(max_reward)
        actor_loss_AM.push(group_loss.detach())

        # LOGGING
        ###############################################
        if (time.time()-logger_start > LOG_PERIOD_SEC) or (episode == TRAIN_DATASET_SIZE):
            timestr = time.strftime("%H:%M:%S", time.gmtime(time.time()-timer_start))
            log_str = 'Ep:{:03d}-{:07d}({:5.1f}%) T:{:s} ALoss:{:+5f} Avg.dist:{:5f}' \
                .format(epoch, episode, episode/TRAIN_DATASET_SIZE*100,
                        timestr, actor_loss_AM.result(),
                        score_AM.result())
            logger.info(log_str)
            logger_start = time.time()

    # LR STEP, after each epoch
    grouped_actor.lr_stepper.step()
| 5,058 | 36.753731 | 128 | py |
POMO | POMO-master/OLD_ipynb_ver/POMO_KP/source/TRAIN_N_EVAL/Evaluate_Grouped_Actors.py |
"""
The MIT License
Copyright (c) 2020 Yeong-Dae Kwon
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import numpy as np
# For debugging
from IPython.core.debugger import set_trace
# Hyper Parameters
from HYPER_PARAMS import *
from TORCH_OBJECTS import *
from source.utilities import Average_Meter
from source.knapsack_problem import KNAPSACK_DATA_LOADER__RANDOM, GROUP_ENVIRONMENT
########################################
# EVAL
########################################
eval_result = []
def EVAL(grouped_actor, epoch, timer_start, logger):
    """Greedy evaluation of the actor on a fresh random test set.

    Runs argmax (no-sampling) rollouts, keeps the best group member per
    instance, and appends the averaged score to the module-level
    ``eval_result`` list. ``timer_start`` is accepted for signature symmetry
    with TRAIN but is not used here.
    """
    global eval_result

    grouped_actor.eval()

    eval_AM = Average_Meter()
    test_loader = KNAPSACK_DATA_LOADER__RANDOM(num_sample=TEST_DATASET_SIZE,
                                               num_items=PROBLEM_SIZE,
                                               batch_size=TEST_BATCH_SIZE)

    with torch.no_grad():
        for item_data in test_loader:
            # item_data.shape = (batch, problem, 2)
            batch_s = item_data.size(0)

            env = GROUP_ENVIRONMENT(item_data)
            group_s = PROBLEM_SIZE
            group_state, reward, done = env.reset(group_size=group_s)
            grouped_actor.reset(group_state)

            # First Move is given: member g starts with item g (POMO rollouts).
            first_action = LongTensor(np.arange(group_s))[None, :].expand(batch_s, group_s)
            group_state, reward, done = env.step(first_action)

            while not done:
                action_probs = grouped_actor.get_action_probabilities(group_state)
                # shape = (batch, group, problem)
                action = action_probs.argmax(dim=2)  # greedy decoding
                # shape = (batch, group)
                action_w_finished = action.clone()
                action_w_finished[group_state.finished] = PROBLEM_SIZE  # this is dummy item with 0 size 0 value
                group_state, reward, done = env.step(action_w_finished)

            # Best-of-group score per instance.
            max_reward, _ = group_state.accumulated_value.max(dim=1)
            eval_AM.push(max_reward)

    # LOGGING
    score_avg = eval_AM.result()
    eval_result.append(score_avg)

    logger.info('--------------------------------------------------------------------------')
    log_str = ' <<< EVAL after Epoch:{:03d} >>> Avg.score:{:f}'.format(epoch, score_avg)
    logger.info(log_str)
    logger.info('--------------------------------------------------------------------------')
    logger.info('eval_result = {}'.format(eval_result))
    logger.info('--------------------------------------------------------------------------')
    logger.info('--------------------------------------------------------------------------')
    logger.info('--------------------------------------------------------------------------')
| 3,710 | 36.11 | 112 | py |
POMO | POMO-master/OLD_ipynb_ver/POMO_CVRP/TORCH_OBJECTS.py |
"""
The MIT License
Copyright (c) 2020 Yeong-Dae Kwon
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import torch
use_cuda = torch.cuda.is_available()
# use_cuda = False
# torch.cuda.set_device(1)

# Tensor-constructor aliases that transparently target the GPU when one is
# available, so the rest of the codebase can stay device-agnostic.
FloatTensor = torch.cuda.FloatTensor if use_cuda else torch.FloatTensor
LongTensor = torch.cuda.LongTensor if use_cuda else torch.LongTensor
ByteTensor = torch.cuda.ByteTensor if use_cuda else torch.ByteTensor
BoolTensor = torch.cuda.BoolTensor if use_cuda else torch.BoolTensor
Tensor = FloatTensor  # default float tensor alias used throughout the project

if use_cuda:
    device = torch.device("cuda")
else:
    device = torch.device("cpu")
| 1,575 | 32.531915 | 77 | py |
POMO | POMO-master/OLD_ipynb_ver/POMO_CVRP/source/cvrp.py |
"""
The MIT License
Copyright (c) 2020 Yeong-Dae Kwon
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
####################################
# EXTERNAL LIBRARY
####################################
import torch
import numpy as np
# For debugging
from IPython.core.debugger import set_trace
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
####################################
# PROJECT VARIABLES
####################################
from HYPER_PARAMS import *
from TORCH_OBJECTS import *
####################################
# DATA
####################################
def CVRP_DATA_LOADER__RANDOM(num_sample, num_nodes, batch_size):
    """Build a DataLoader that serves randomly generated CVRP instances."""
    random_dataset = CVRP_Dataset__Random(num_sample=num_sample, num_nodes=num_nodes)
    return DataLoader(dataset=random_dataset,
                      batch_size=batch_size,
                      shuffle=False,
                      num_workers=0,
                      collate_fn=CVRP_collate_fn)
class CVRP_Dataset__Random(Dataset):
    """Torch Dataset that samples random CVRP instances on the fly.

    Each item is (depot_xy, node_xy, node_demand) as numpy arrays; demands
    are integers 1..9 divided by a size-dependent scaler.
    """

    # Demand normalizer for each supported problem size.
    _DEMAND_SCALERS = {20: 30, 50: 40, 100: 50}

    def __init__(self, num_sample, num_nodes):
        self.num_sample = num_sample
        self.num_nodes = num_nodes
        if num_nodes not in self._DEMAND_SCALERS:
            raise NotImplementedError
        self.demand_scaler = self._DEMAND_SCALERS[num_nodes]

    def __getitem__(self, index):
        depot_xy_data = np.random.rand(1, 2)
        node_xy_data = np.random.rand(self.num_nodes, 2)
        node_demand_data = np.random.randint(1, 10, self.num_nodes) / self.demand_scaler
        return depot_xy_data, node_xy_data, node_demand_data

    def __len__(self):
        return self.num_sample
def CVRP_collate_fn(batch):
    """Stack a list of (depot, nodes, demand) samples into batched Tensors."""
    depots, nodes, demands = zip(*batch)
    depot_xy = Tensor(depots)
    node_xy = Tensor(nodes)
    # unsqueeze demand to (batch, problem, 1) so its shape matches node_xy
    node_demand = Tensor(demands)[:, :, None]
    return depot_xy, node_xy, node_demand
####################################
# STATE
####################################
class STATE:
    """Per-instance rollout state for single-trajectory CVRP decoding.

    Tracks visit history, remaining vehicle load, and the additive -inf masks
    used to forbid infeasible next nodes. Node index 0 is the depot.
    """

    def __init__(self, data):
        # data.shape = (batch, problem+1, 3); columns are (x, y, demand)
        self.batch_s = data.size(0)
        self.data = data

        # History
        ####################################
        self.selected_count = 0
        self.current_node = None
        # shape = (batch,)
        self.selected_node_list = LongTensor(np.zeros((self.batch_s, 0)))
        # shape = (batch, selected_count)

        # Status
        ####################################
        self.at_the_depot = None
        # shape = (batch,)
        self.loaded = Tensor(np.ones((self.batch_s,)))  # remaining capacity, starts full (=1)
        # shape = (batch,)
        self.visited_ninf_flag = Tensor(np.zeros((self.batch_s, PROBLEM_SIZE+1)))  # -inf where already visited
        # shape = (batch, problem+1)
        self.ninf_mask = Tensor(np.zeros((self.batch_s, PROBLEM_SIZE+1)))  # visited + demand-infeasible mask
        # shape = (batch, problem+1)
        self.finished = BoolTensor(np.zeros((self.batch_s,)))
        # shape = (batch,)

    def move_to(self, selected_node_idx):
        """Advance the tour by one node and refresh load, visit flags and masks."""
        # selected_node_idx.shape = (batch,)

        # History
        ####################################
        self.selected_count += 1
        self.current_node = selected_node_idx
        self.selected_node_list = torch.cat((self.selected_node_list, selected_node_idx[:, None]), dim=1)

        # Status
        ####################################
        self.at_the_depot = (selected_node_idx == 0)
        demand_list = self.data[:, :, 2]
        # shape = (batch, problem+1)
        gathering_index = selected_node_idx[:, None]
        selected_demand = demand_list.gather(dim=1, index=gathering_index).squeeze(dim=1)
        # shape = (batch,)
        self.loaded -= selected_demand
        self.loaded[self.at_the_depot] = 1  # refill loaded at the depot
        self.visited_ninf_flag[torch.arange(self.batch_s), selected_node_idx] = -np.inf
        # Episode is finished once every node (incl. depot) carries -inf.
        self.finished = self.finished + (self.visited_ninf_flag == -np.inf).all(dim=1)
        # shape = (batch,)

        # Status Edit
        ####################################
        self.visited_ninf_flag[:, 0][~self.at_the_depot] = 0  # allow car to visit depot anytime
        # NOTE(review): unlike GROUP_STATE.move_to, no rounding epsilon is
        # applied to this comparison — confirm whether that is intended.
        demand_too_large = self.loaded[:, None] < demand_list
        # shape = (batch, problem+1)
        self.ninf_mask = self.visited_ninf_flag.clone()
        self.ninf_mask[demand_too_large] = -np.inf

        self.ninf_mask[self.finished[:, None].expand(self.batch_s, PROBLEM_SIZE+1)] = 0  # do not mask finished episode
class GROUP_STATE:
    """Rollout state for a whole POMO group of parallel CVRP trajectories.

    Same bookkeeping as STATE but with an extra group dimension; node index 0
    is the depot.
    """

    def __init__(self, group_size, data):
        # data.shape = (batch, problem+1, 3); columns are (x, y, demand)
        self.batch_s = data.size(0)
        self.group_s = group_size
        self.data = data

        # History
        ####################################
        self.selected_count = 0
        self.current_node = None
        # shape = (batch, group)
        self.selected_node_list = LongTensor(np.zeros((self.batch_s, self.group_s, 0)))
        # shape = (batch, group, selected_count)

        # Status
        ####################################
        self.at_the_depot = None
        # shape = (batch, group)
        self.loaded = Tensor(np.ones((self.batch_s, self.group_s)))  # remaining capacity, starts full (=1)
        # shape = (batch, group)
        self.visited_ninf_flag = Tensor(np.zeros((self.batch_s, self.group_s, PROBLEM_SIZE+1)))
        # shape = (batch, group, problem+1)
        self.ninf_mask = Tensor(np.zeros((self.batch_s, self.group_s, PROBLEM_SIZE+1)))
        # shape = (batch, group, problem+1)
        self.finished = BoolTensor(np.zeros((self.batch_s, self.group_s)))
        # shape = (batch, group)

    def move_to(self, selected_idx_mat):
        """Advance every group member by one node; refresh loads and masks."""
        # selected_idx_mat.shape = (batch, group)

        # History
        ####################################
        self.selected_count += 1
        self.current_node = selected_idx_mat
        self.selected_node_list = torch.cat((self.selected_node_list, selected_idx_mat[:, :, None]), dim=2)

        # Status
        ####################################
        self.at_the_depot = (selected_idx_mat == 0)
        demand_list = self.data[:, None, :, 2].expand(self.batch_s, self.group_s, -1)
        # shape = (batch, group, problem+1)
        gathering_index = selected_idx_mat[:, :, None]
        selected_demand = demand_list.gather(dim=2, index=gathering_index).squeeze(dim=2)
        # shape = (batch, group)
        self.loaded -= selected_demand
        self.loaded[self.at_the_depot] = 1  # refill loaded at the depot
        batch_idx_mat = torch.arange(self.batch_s)[:, None].expand(self.batch_s, self.group_s)
        group_idx_mat = torch.arange(self.group_s)[None, :].expand(self.batch_s, self.group_s)
        self.visited_ninf_flag[batch_idx_mat, group_idx_mat, selected_idx_mat] = -np.inf
        # A member is finished once every node (incl. depot) carries -inf.
        self.finished = self.finished + (self.visited_ninf_flag == -np.inf).all(dim=2)
        # shape = (batch, group)

        # Status Edit
        ####################################
        self.visited_ninf_flag[:, :, 0][~self.at_the_depot] = 0  # allow car to visit depot anytime
        # Small epsilon absorbs float rounding from the repeated load subtractions.
        round_error_epsilon = 0.000001
        demand_too_large = self.loaded[:, :, None] + round_error_epsilon < demand_list
        # shape = (batch, group, problem+1)
        self.ninf_mask = self.visited_ninf_flag.clone()
        self.ninf_mask[demand_too_large] = -np.inf

        self.ninf_mask[self.finished[:, :, None].expand(self.batch_s, self.group_s, PROBLEM_SIZE+1)] = 0
        # do not mask finished episode
####################################
# ENVIRONMENT
####################################
class ENVIRONMENT:
    """Single-trajectory CVRP environment with a gym-like reset/step API.

    Reward is the negative total tour length, returned only once the episode
    is finished.
    """

    def __init__(self, depot_xy, node_xy, node_demand):
        # depot_xy.shape = (batch, 1, 2)
        # node_xy.shape = (batch, problem, 2)
        # node_demand.shape = (batch, problem, 1)
        self.batch_s = depot_xy.size(0)
        self.state = None

        # Prepend the depot (demand 0) so index 0 always refers to it.
        all_node_xy = torch.cat((depot_xy, node_xy), dim=1)
        # shape = (batch, problem+1, 2)
        depot_demand = Tensor(np.zeros((self.batch_s, 1, 1)))
        all_node_demand = torch.cat((depot_demand, node_demand), dim=1)
        # shape = (batch, problem+1, 1)
        self.data = torch.cat((all_node_xy, all_node_demand), dim=2)
        # shape = (batch, problem+1, 3)

    def reset(self):
        """Start a new episode; returns (state, reward=None, done=False)."""
        self.state = STATE(self.data)
        reward = None
        done = False
        return self.state, reward, done

    def step(self, selected_node_idx):
        """Move to the selected node; reward is filled in only when done."""
        # selected_node_idx.shape = (batch,)

        # move state
        self.state.move_to(selected_node_idx)

        # returning values
        done = self.state.finished.all()
        if done:
            reward = -self._get_travel_distance()  # note the minus sign!
        else:
            reward = None
        return self.state, reward, done

    def _get_travel_distance(self):
        """Total closed-tour length of the visit sequence per instance."""
        all_node_xy = self.data[:, :, 0:2]
        # shape = (batch, problem+1, 2)
        gathering_index = self.state.selected_node_list.unsqueeze(2).expand(-1, -1, 2)
        # shape = (batch, selected_count, 2)
        ordered_seq = all_node_xy.gather(dim=1, index=gathering_index)
        # shape = (batch, selected_count, 2)

        # roll(-1) pairs each node with its successor (wrapping to the start).
        rolled_seq = ordered_seq.roll(dims=1, shifts=-1)
        segment_lengths = ((ordered_seq-rolled_seq)**2).sum(2).sqrt()
        # size = (batch, selected_count)
        travel_distances = segment_lengths.sum(1)
        # size = (batch,)
        return travel_distances
class GROUP_ENVIRONMENT:
    """POMO group CVRP environment: many trajectories per instance in parallel.

    Reward is the negative tour length per group member, returned only once
    every member has finished.
    """

    def __init__(self, depot_xy, node_xy, node_demand):
        # depot_xy.shape = (batch, 1, 2)
        # node_xy.shape = (batch, problem, 2)
        # node_demand.shape = (batch, problem, 1)
        self.batch_s = depot_xy.size(0)
        self.group_s = None
        self.group_state = None

        # Prepend the depot (demand 0) so index 0 always refers to it.
        all_node_xy = torch.cat((depot_xy, node_xy), dim=1)
        # shape = (batch, problem+1, 2)
        depot_demand = Tensor(np.zeros((self.batch_s, 1, 1)))
        all_node_demand = torch.cat((depot_demand, node_demand), dim=1)
        # shape = (batch, problem+1, 1)
        self.data = torch.cat((all_node_xy, all_node_demand), dim=2)
        # shape = (batch, problem+1, 3)

    def reset(self, group_size):
        """Start a new episode with ``group_size`` parallel members."""
        self.group_s = group_size
        self.group_state = GROUP_STATE(group_size=group_size, data=self.data)
        reward = None
        done = False
        return self.group_state, reward, done

    def step(self, selected_idx_mat):
        """Move every member to its selected node; reward filled in when done."""
        # selected_idx_mat.shape = (batch, group)

        # move state
        self.group_state.move_to(selected_idx_mat)

        # returning values
        done = self.group_state.finished.all()  # state.finished.shape = (batch, group)
        if done:
            reward = -self._get_travel_distance()  # note the minus sign!
        else:
            reward = None
        return self.group_state, reward, done

    def _get_travel_distance(self):
        """Total closed-tour length per (instance, group member)."""
        all_node_xy = self.data[:, None, :, 0:2].expand(self.batch_s, self.group_s, -1, 2)
        # shape = (batch, group, problem+1, 2)
        gathering_index = self.group_state.selected_node_list[:, :, :, None].expand(-1, -1, -1, 2)
        # shape = (batch, group, selected_count, 2)
        ordered_seq = all_node_xy.gather(dim=2, index=gathering_index)
        # shape = (batch, group, selected_count, 2)

        # roll(-1) pairs each node with its successor (wrapping to the start).
        rolled_seq = ordered_seq.roll(dims=2, shifts=-1)
        segment_lengths = ((ordered_seq-rolled_seq)**2).sum(3).sqrt()
        # size = (batch, group, selected_count)
        travel_distances = segment_lengths.sum(2)
        # size = (batch, group)
        return travel_distances
| 12,715 | 35.645533 | 119 | py |
POMO | POMO-master/OLD_ipynb_ver/POMO_CVRP/source/utilities.py |
"""
The MIT License
Copyright (c) 2020 Yeong-Dae Kwon
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import logging
import os
import time
import datetime
import pytz
import re
import numpy as np
from HYPER_PARAMS import *
from TORCH_OBJECTS import *
########################################
# Get_Logger
########################################
tz = pytz.timezone("Asia/Seoul")
def timetz(*args):
    """Time converter for logging.Formatter: report times in the module `tz`."""
    localized_now = datetime.datetime.now(tz)
    return localized_now.timetuple()
def Get_Logger(SAVE_FOLDER_NAME):
    """Create a timestamped result folder and a logger writing to it.

    Returns (logger, result_folder_path). The folder name is prefixed with
    the current Seoul-time timestamp; a "(k)" suffix is appended if the path
    already exists. The logger emits to both stdout and <folder>/log.txt.
    """
    # make_dir
    #######################################################
    prefix = datetime.datetime.now(pytz.timezone("Asia/Seoul")).strftime("%Y%m%d_%H%M__")
    result_folder_no_postfix = "./result/{}".format(prefix + SAVE_FOLDER_NAME)

    result_folder_path = result_folder_no_postfix
    folder_idx = 0
    while os.path.exists(result_folder_path):
        folder_idx += 1
        result_folder_path = result_folder_no_postfix + "({})".format(folder_idx)

    os.makedirs(result_folder_path)

    # Logger
    #######################################################
    logger = logging.getLogger(result_folder_path)  # this already includes streamHandler??

    streamHandler = logging.StreamHandler()
    fileHandler = logging.FileHandler('{}/log.txt'.format(result_folder_path))

    formatter = logging.Formatter("[%(asctime)s] %(message)s", "%Y-%m-%d %H:%M:%S")
    formatter.converter = timetz  # log timestamps in the configured timezone

    streamHandler.setFormatter(formatter)
    fileHandler.setFormatter(formatter)

    logger.addHandler(streamHandler)
    logger.addHandler(fileHandler)
    logger.setLevel(level=logging.INFO)

    return logger, result_folder_path
def Extract_from_LogFile(result_folder_path, variable_name):
    """Scan <result_folder_path>/log.txt bottom-up for ``variable_name``.

    Returns the "variable_name = ..." text from the most recent line that
    yields a usable match, or the legacy fallback string when none exists.
    """
    logfile_path = '{}/log.txt'.format(result_folder_path)
    with open(logfile_path) as f:
        datafile = f.readlines()

    m = None
    for line in reversed(datafile):  # newest entries are at the bottom
        if variable_name in line:
            # BUG FIX: if variable_name sat at the very end of a line, the
            # regex matched nothing while the old `found` flag stayed True,
            # crashing later on m.group(0). Now we only stop on a real match.
            # NOTE(review): variable_name is interpolated unescaped — confirm
            # callers never pass regex metacharacters.
            m = re.search(variable_name + '[^\n]+', line)
            if m is not None:
                break

    if m is not None:
        return m.group(0)
    # Legacy fallback return value, kept verbatim for caller compatibility.
    return "Print(No such variable found !!)"
########################################
# Average_Meter
########################################
class Average_Meter:
    """Running sum/count accumulator for torch tensors; averages come back as
    plain Python numbers."""

    def __init__(self):
        self.sum = None
        self.count = None
        self.reset()

    def reset(self):
        """Clear the accumulator."""
        self.sum = torch.tensor(0.).to(device)
        self.count = 0

    def push(self, some_tensor, n_for_rank_0_tensor=None):
        """Accumulate a batch of values, or a pre-averaged scalar with its count."""
        # Keeping grad history here would leak memory across iterations.
        assert not some_tensor.requires_grad
        if some_tensor.dim() == 0:  # assuming "already averaged" Tensor was pushed
            self.sum += some_tensor * n_for_rank_0_tensor
            self.count += n_for_rank_0_tensor
        else:
            self.sum += some_tensor.sum()
            self.count += some_tensor.numel()

    def peek(self):
        """Current average without clearing."""
        return (self.sum / self.count).tolist()

    def result(self):
        """Current average; the accumulator is cleared afterwards."""
        average = (self.sum / self.count).tolist()
        self.reset()
        return average
########################################
# View NN Parameters
########################################
def get_n_params1(model):
    """Print each parameter tensor's element count and shape, then the total."""
    total = 0
    for param in model.parameters():
        numel = param.numel()
        total += numel
        print(numel)
        print(param.shape)
    print("Total: {:d}".format(total))
def get_n_params2(model):
    """Print the total number of trainable parameters (numpy-product variant)."""
    trainable = [p for p in model.parameters() if p.requires_grad]
    print(sum(np.prod(p.size()) for p in trainable))
def get_n_params3(model):
    """Print the total number of trainable parameters."""
    n_trainable = sum(param.numel() for param in model.parameters() if param.requires_grad)
    print(n_trainable)
def get_structure(model):
    """Print the module hierarchy of *model* (delegates to its __str__)."""
    print(str(model))
########################################
# Augment xy data
########################################
def augment_xy_data_by_8_fold(xy_data):
    """Return the 8 symmetry images (reflections + axis swap) of unit-square points.

    xy_data shape: (batch, problem, 2); result shape: (8*batch, problem, 2),
    in the fixed order (x,y), (1-x,y), (x,1-y), (1-x,1-y), (y,x), (1-y,x),
    (y,1-x), (1-y,1-x).
    """
    x = xy_data[:, :, [0]]
    y = xy_data[:, :, [1]]
    # x, y shape = (batch, problem, 1)

    pieces = []
    for first, second in ((x, y), (y, x)):       # identity, then axis swap
        for b in (second, 1 - second):           # reflect the 2nd coordinate
            for a in (first, 1 - first):         # reflect the 1st coordinate
                pieces.append(torch.cat((a, b), dim=2))
    return torch.cat(pieces, dim=0)
    # shape = (8*batch, problem, 2)
| 5,645 | 26.950495 | 106 | py |
POMO | POMO-master/OLD_ipynb_ver/POMO_CVRP/source/MODEL__Actor/grouped_actors.py |
"""
The MIT License
Copyright (c) 2020 Yeong-Dae Kwon
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
# For debugging
from IPython.core.debugger import set_trace
# Hyper Parameters
from HYPER_PARAMS import *
from TORCH_OBJECTS import *
########################################
# ACTOR
########################################
class ACTOR(nn.Module):
    """Grouped POMO actor: a shared encoder plus a node-probability decoder."""

    def __init__(self):
        super().__init__()
        self.encoder = Encoder()
        self.node_prob_calculator = Next_Node_Probability_Calculator_for_group()
        # Filled in by reset() for the current batch.
        self.batch_s = None
        self.encoded_nodes = None
        self.encoded_graph = None

    def reset(self, group_state):
        """Encode a fresh batch of instances and prime the decoder's caches."""
        self.batch_s = group_state.data.size(0)
        self.encoded_nodes = self.encoder(group_state.data)
        # shape = (batch, problem+1, EMBEDDING_DIM)
        self.encoded_graph = self.encoded_nodes.mean(dim=1, keepdim=True)
        # shape = (batch, 1, EMBEDDING_DIM)
        self.node_prob_calculator.reset(self.encoded_nodes)

    def get_action_probabilities(self, group_state):
        """Return per-group selection probabilities over all problem+1 nodes."""
        encoded_LAST_NODES = pick_nodes_for_each_group(self.encoded_nodes, group_state.current_node)
        # shape = (batch, group, EMBEDDING_DIM)
        remaining_loaded = group_state.loaded[:, :, None]
        # shape = (batch, group, 1)
        probs = self.node_prob_calculator(self.encoded_graph, encoded_LAST_NODES,
                                          remaining_loaded, ninf_mask=group_state.ninf_mask)
        # shape = (batch, group, problem+1)
        return probs
########################################
# ACTOR_SUB_NN : ENCODER
########################################
class Encoder(nn.Module):
    """Embeds depot (xy) and customers (xy+demand), then stacks attention layers."""

    def __init__(self):
        super().__init__()
        self.embedding_depot = nn.Linear(2, EMBEDDING_DIM)
        self.embedding_node = nn.Linear(3, EMBEDDING_DIM)
        self.layers = nn.ModuleList(Encoder_Layer() for _ in range(ENCODER_LAYER_NUM))

    def forward(self, data):
        # data.shape = (batch, problem+1, 3); row 0 is the depot.
        depot_embedded = self.embedding_depot(data[:, [0], 0:2])
        # shape = (batch, 1, EMBEDDING_DIM)
        node_embedded = self.embedding_node(data[:, 1:, 0:3])
        # shape = (batch, problem, EMBEDDING_DIM)
        out = torch.cat((depot_embedded, node_embedded), dim=1)
        # shape = (batch, problem+1, EMBEDDING_DIM)
        for layer in self.layers:
            out = layer(out)
        return out
class Encoder_Layer(nn.Module):
    """One transformer-style encoder block: MHA -> add&norm -> FF -> add&norm."""

    def __init__(self):
        super().__init__()
        self.Wq = nn.Linear(EMBEDDING_DIM, HEAD_NUM * KEY_DIM, bias=False)
        self.Wk = nn.Linear(EMBEDDING_DIM, HEAD_NUM * KEY_DIM, bias=False)
        self.Wv = nn.Linear(EMBEDDING_DIM, HEAD_NUM * KEY_DIM, bias=False)
        self.multi_head_combine = nn.Linear(HEAD_NUM * KEY_DIM, EMBEDDING_DIM)
        self.addAndNormalization1 = Add_And_Normalization_Module()
        self.feedForward = Feed_Forward_Module()
        self.addAndNormalization2 = Add_And_Normalization_Module()

    def forward(self, input1):
        # input1.shape = (batch, problem, EMBEDDING_DIM)
        q, k, v = (reshape_by_heads(proj(input1), head_num=HEAD_NUM)
                   for proj in (self.Wq, self.Wk, self.Wv))
        # each shape = (batch, HEAD_NUM, problem, KEY_DIM)
        attention_out = multi_head_attention(q, k, v)
        # shape = (batch, problem, HEAD_NUM*KEY_DIM)
        combined = self.multi_head_combine(attention_out)
        # shape = (batch, problem, EMBEDDING_DIM)
        normed = self.addAndNormalization1(input1, combined)
        fed = self.feedForward(normed)
        return self.addAndNormalization2(normed, fed)
########################################
# ACTOR_SUB_NN : Next_Node_Probability_Calculator
########################################
class Next_Node_Probability_Calculator_for_group(nn.Module):
    """Decoder head: turns per-group context into probabilities over problem+1 nodes.

    Usage: call ``reset(encoded_nodes)`` once per batch to cache the attention
    keys/values, then call the module once per decoding step.
    """
    def __init__(self):
        super().__init__()
        # Query input is [graph embedding | last-node embedding | remaining load],
        # hence the 2*EMBEDDING_DIM+1 input width.
        self.Wq = nn.Linear(2*EMBEDDING_DIM+1, HEAD_NUM * KEY_DIM, bias=False)
        self.Wk = nn.Linear(EMBEDDING_DIM, HEAD_NUM * KEY_DIM, bias=False)
        self.Wv = nn.Linear(EMBEDDING_DIM, HEAD_NUM * KEY_DIM, bias=False)
        self.multi_head_combine = nn.Linear(HEAD_NUM * KEY_DIM, EMBEDDING_DIM)
        self.k = None  # saved key, for multi-head attention
        self.v = None  # saved value, for multi-head_attention
        self.single_head_key = None  # saved, for single-head attention
    def reset(self, encoded_nodes):
        """Cache K/V (multi-head) and the raw embeddings (single-head key)."""
        # encoded_nodes.shape = (batch, problem+1, EMBEDDING_DIM)
        self.k = reshape_by_heads(self.Wk(encoded_nodes), head_num=HEAD_NUM)
        self.v = reshape_by_heads(self.Wv(encoded_nodes), head_num=HEAD_NUM)
        # shape = (batch, HEAD_NUM, problem+1, KEY_DIM)
        self.single_head_key = encoded_nodes.transpose(1, 2)
        # shape = (batch, EMBEDDING_DIM, problem+1)
    def forward(self, input1, input2, remaining_loaded, ninf_mask=None):
        """Return selection probabilities of shape (batch, group, problem+1).

        input1: graph embedding, input2: last-node embeddings per group member,
        remaining_loaded: remaining vehicle capacity; ninf_mask adds -inf to
        the logits of forbidden nodes.
        """
        # input1.shape = (batch, 1, EMBEDDING_DIM)
        # input2.shape = (batch, group, EMBEDDING_DIM)
        # remaining_loaded.shape = (batch, group, 1)
        # ninf_mask.shape = (batch, group, problem+1)
        group_s = input2.size(1)
        #  Multi-Head Attention
        #######################################################
        input_cat = torch.cat((input1.expand(-1, group_s, -1), input2, remaining_loaded), dim=2)
        # shape = (batch, group, 2*EMBEDDING_DIM+1)
        q = reshape_by_heads(self.Wq(input_cat), head_num=HEAD_NUM)
        # shape = (batch, HEAD_NUM, group, KEY_DIM)
        out_concat = multi_head_attention(q, self.k, self.v, ninf_mask=ninf_mask)
        # shape = (batch, n, HEAD_NUM*KEY_DIM)
        mh_atten_out = self.multi_head_combine(out_concat)
        # shape = (batch, n, EMBEDDING_DIM)
        #  Single-Head Attention, for probability calculation
        #######################################################
        score = torch.matmul(mh_atten_out, self.single_head_key)
        # shape = (batch, n, problem+1)
        score_scaled = score / np.sqrt(EMBEDDING_DIM)
        # shape = (batch_s, group, problem+1)
        # tanh clipping keeps logits in [-C, C] (C = LOGIT_CLIPPING) before softmax.
        score_clipped = LOGIT_CLIPPING * torch.tanh(score_scaled)
        if ninf_mask is None:
            score_masked = score_clipped
        else:
            score_masked = score_clipped + ninf_mask
        probs = F.softmax(score_masked, dim=2)
        # shape = (batch, group, problem+1)
        return probs
########################################
# NN SUB CLASS / FUNCTIONS
########################################
def pick_nodes_for_each_group(encoded_nodes, node_index_to_pick):
    """Gather, for every group member, the embedding of its chosen node.

    encoded_nodes shape: (batch, problem, EMBEDDING_DIM)
    node_index_to_pick shape: (batch, group)
    returns shape: (batch, group, EMBEDDING_DIM)
    """
    gather_idx = node_index_to_pick.unsqueeze(2).expand(-1, -1, EMBEDDING_DIM)
    return encoded_nodes.gather(dim=1, index=gather_idx)
def reshape_by_heads(qkv, head_num):
    """Split the last dim into heads.

    (batch, C, head_num*key_dim) -> (batch, head_num, C, key_dim)
    """
    batch_s, C = qkv.size(0), qkv.size(1)
    per_head = qkv.reshape(batch_s, C, head_num, -1)
    # shape = (batch, C, head_num, key_dim)
    return per_head.transpose(1, 2)
    # shape = (batch, head_num, C, key_dim)
def multi_head_attention(q, k, v, ninf_mask=None):
    """Scaled dot-product attention over all heads, concatenated back per query.

    q shape: (batch, head_num, n, key_dim) where n is 1 or the group size
    k, v shape: (batch, head_num, problem, key_dim)
    ninf_mask: optional (batch, n, problem) additive mask (-inf blocks a node)
    returns shape: (batch, n, head_num*key_dim)
    """
    batch_s, head_num, n, key_dim = q.size()
    problem_s = k.size(2)

    scaled_score = torch.matmul(q, k.transpose(2, 3)) / np.sqrt(key_dim)
    # shape = (batch, head_num, n, problem)
    if ninf_mask is not None:
        # Broadcast the mask over heads before adding it to the scores.
        scaled_score = scaled_score + ninf_mask[:, None, :, :].expand(batch_s, head_num, n, problem_s)

    weights = F.softmax(scaled_score, dim=3)
    # shape = (batch, head_num, n, problem)
    out = torch.matmul(weights, v)
    # shape = (batch, head_num, n, key_dim)
    return out.transpose(1, 2).reshape(batch_s, n, head_num * key_dim)
    # shape = (batch, n, head_num*key_dim)
class Add_And_Normalization_Module(nn.Module):
    """Residual add followed by BatchNorm1d over the embedding dimension."""

    def __init__(self):
        super().__init__()
        # 'Funny' Batch_Norm: inputs are flattened to (batch*problem, EMBEDDING_DIM)
        # so the statistics are computed per embedding channel.
        self.norm_by_EMB = nn.BatchNorm1d(EMBEDDING_DIM, affine=True)

    def forward(self, input1, input2):
        # input shapes = (batch, problem, EMBEDDING_DIM)
        batch_s, problem_s = input1.size(0), input1.size(1)
        residual = input1 + input2
        flat = residual.reshape(batch_s * problem_s, EMBEDDING_DIM)
        return self.norm_by_EMB(flat).reshape(batch_s, problem_s, EMBEDDING_DIM)
class Feed_Forward_Module(nn.Module):
    """Two-layer position-wise feed-forward block with a ReLU in between."""

    def __init__(self):
        super().__init__()
        self.W1 = nn.Linear(EMBEDDING_DIM, FF_HIDDEN_DIM)
        self.W2 = nn.Linear(FF_HIDDEN_DIM, EMBEDDING_DIM)

    def forward(self, input1):
        # input1.shape = (batch, problem, EMBEDDING_DIM)
        hidden = F.relu(self.W1(input1))
        return self.W2(hidden)
| 10,612 | 33.016026 | 112 | py |
POMO | POMO-master/OLD_ipynb_ver/POMO_CVRP/source/TRAIN_N_EVAL/Evaluate__Grouped_Actors.py |
"""
The MIT License
Copyright (c) 2020 Yeong-Dae Kwon
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import numpy as np
# For debugging
from IPython.core.debugger import set_trace
# Hyper Parameters
from HYPER_PARAMS import *
from TORCH_OBJECTS import *
from source.utilities import Average_Meter, augment_xy_data_by_8_fold
from source.cvrp import CVRP_DATA_LOADER__RANDOM, GROUP_ENVIRONMENT
########################################
# EVAL
########################################
eval_result = []

def EVAL(grouped_actor, epoch, timer_start, logger):
    """Greedy-rollout evaluation of the grouped actor on fresh random CVRP data.

    Appends the batch-averaged best tour length to the module-level
    ``eval_result`` list and logs it. ``reward`` is the negative tour
    distance, hence ``-max_reward`` below.
    """
    global eval_result
    grouped_actor.eval()
    eval_AM = Average_Meter()
    test_loader = CVRP_DATA_LOADER__RANDOM(num_sample=TEST_DATASET_SIZE,
                                           num_nodes=PROBLEM_SIZE,
                                           batch_size=TEST_BATCH_SIZE)
    with torch.no_grad():
        for depot_xy, node_xy, node_demand in test_loader:
            # depot_xy.shape = (batch, 1, 2)
            # node_xy.shape = (batch, problem, 2)
            # node_demand.shape = (batch, problem, 1)
            batch_s = depot_xy.size(0)
            env = GROUP_ENVIRONMENT(depot_xy, node_xy, node_demand)
            group_s = PROBLEM_SIZE
            group_state, reward, done = env.reset(group_size=group_s)
            grouped_actor.reset(group_state)
            # First Move is given: every group member starts at node_0 (depot)
            first_action = LongTensor(np.zeros((batch_s, group_s)))
            group_state, reward, done = env.step(first_action)
            # Second Move is given: member g visits customer g+1 (POMO diversification).
            # BUGFIX: was np.arange(group_s), which sent member 0 back to the
            # depot instead of a customer; TRAIN already uses arange(group_s)+1.
            second_action = LongTensor(np.arange(group_s)+1)[None, :].expand(batch_s, group_s)
            group_state, reward, done = env.step(second_action)
            while not done:
                action_probs = grouped_actor.get_action_probabilities(group_state)
                # shape = (batch, group, problem+1)
                action = action_probs.argmax(dim=2)  # greedy decoding at eval time
                # shape = (batch, group)
                action[group_state.finished] = 0  # stay at depot, if you are finished
                group_state, reward, done = env.step(action)
            max_reward, _ = reward.max(dim=1)  # best rollout within each group
            eval_AM.push(-max_reward)  # reward was given as negative dist
    # LOGGING
    dist_avg = eval_AM.result()
    eval_result.append(dist_avg)
    logger.info('--------------------------------------------------------------------------')
    log_str = ' <<< EVAL after Epoch:{:03d} >>> Avg.dist:{:f}'.format(epoch, dist_avg)
    logger.info(log_str)
    logger.info('--------------------------------------------------------------------------')
    logger.info('eval_result = {}'.format(eval_result))
    logger.info('--------------------------------------------------------------------------')
    logger.info('--------------------------------------------------------------------------')
    logger.info('--------------------------------------------------------------------------')
| 3,969 | 36.45283 | 94 | py |
POMO | POMO-master/OLD_ipynb_ver/POMO_CVRP/source/TRAIN_N_EVAL/Train_Grouped_Actors.py |
"""
The MIT License
Copyright (c) 2020 Yeong-Dae Kwon
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import numpy as np
# For Logging
import time
# For debugging
from IPython.core.debugger import set_trace
# Hyper Parameters
from HYPER_PARAMS import *
from TORCH_OBJECTS import *
from source.utilities import Average_Meter
from source.cvrp import CVRP_DATA_LOADER__RANDOM, GROUP_ENVIRONMENT
########################################
# TRAIN
########################################
def TRAIN(grouped_actor, epoch, timer_start, logger):
    """One training epoch of POMO REINFORCE with a shared (group-mean) baseline.

    Samples random CVRP instances, rolls out all group members in parallel,
    and updates the actor with advantage = reward - group mean reward.
    ``reward`` is the negative tour distance throughout.
    """
    grouped_actor.train()
    dist_AM = Average_Meter()
    actor_loss_AM = Average_Meter()
    train_loader = CVRP_DATA_LOADER__RANDOM(num_sample=TRAIN_DATASET_SIZE,
                                            num_nodes=PROBLEM_SIZE,
                                            batch_size=BATCH_SIZE)
    logger_start = time.time()
    episode = 0
    for depot_xy, node_xy, node_demand in train_loader:
        # depot_xy.shape = (batch, 1, 2)
        # node_xy.shape = (batch, problem, 2)
        # node_demand.shape = (batch, problem, 1)
        batch_s = depot_xy.size(0)
        episode = episode + batch_s
        # Actor Group Move
        ###############################################
        env = GROUP_ENVIRONMENT(depot_xy, node_xy, node_demand)
        group_s = PROBLEM_SIZE
        group_state, reward, done = env.reset(group_size=group_s)
        grouped_actor.reset(group_state)
        # First Move is given
        first_action = LongTensor(np.zeros((batch_s, group_s))) # start from node_0-depot
        group_state, reward, done = env.step(first_action)
        # Second Move is given
        # POMO diversification: group member g is forced to visit customer g+1 first.
        second_action = LongTensor(np.arange(group_s)+1)[None, :].expand(batch_s, group_s)
        group_state, reward, done = env.step(second_action)
        # Per-step chosen-action probabilities, accumulated along dim 2.
        group_prob_list = Tensor(np.zeros((batch_s, group_s, 0)))
        while not done:
            action_probs = grouped_actor.get_action_probabilities(group_state)
            # shape = (batch, group, problem+1)
            # Sample the next node for every (batch, group) pair.
            action = action_probs.reshape(batch_s*group_s, -1).multinomial(1)\
                .squeeze(dim=1).reshape(batch_s, group_s)
            # shape = (batch, group)
            action[group_state.finished] = 0 # stay at depot, if you are finished
            group_state, reward, done = env.step(action)
            batch_idx_mat = torch.arange(batch_s)[:, None].expand(batch_s, group_s)
            group_idx_mat = torch.arange(group_s)[None, :].expand(batch_s, group_s)
            chosen_action_prob = action_probs[batch_idx_mat, group_idx_mat, action].reshape(batch_s, group_s)
            # shape = (batch, group)
            # Probability 1 => log-prob 0, so finished rollouts stop contributing.
            chosen_action_prob[group_state.finished] = 1 # done episode will gain no more probability
            group_prob_list = torch.cat((group_prob_list, chosen_action_prob[:, :, None]), dim=2)
            # shape = (batch, group, x)
        # LEARNING - Actor
        ###############################################
        group_reward = reward
        # shape = (batch, group)
        group_log_prob = group_prob_list.log().sum(dim=2)
        # shape = (batch, group)
        # Shared baseline: mean reward over the group (POMO baseline).
        group_advantage = group_reward - group_reward.mean(dim=1, keepdim=True)
        group_loss = -group_advantage * group_log_prob
        # shape = (batch, group)
        loss = group_loss.mean()
        grouped_actor.optimizer.zero_grad()
        loss.backward()
        grouped_actor.optimizer.step()
        # RECORDING
        ###############################################
        max_reward, _ = group_reward.max(dim=1)
        dist_AM.push(-max_reward) # reward was given as negative dist
        actor_loss_AM.push(group_loss.detach())
        # LOGGING
        ###############################################
        if (time.time()-logger_start > LOG_PERIOD_SEC) or (episode == TRAIN_DATASET_SIZE):
            timestr = time.strftime("%H:%M:%S", time.gmtime(time.time()-timer_start))
            log_str = 'Ep:{:03d}-{:07d}({:5.1f}%) T:{:s} ALoss:{:+5f} Avg.dist:{:5f}' \
                .format(epoch, episode, episode/TRAIN_DATASET_SIZE*100,
                        timestr, actor_loss_AM.result(), dist_AM.result())
            logger.info(log_str)
            logger_start = time.time()
    # LR STEP, after each epoch
    grouped_actor.lr_stepper.step()
| 5,310 | 37.485507 | 109 | py |
POMO | POMO-master/NEW_py_ver/CVRP/CVRProblemDef.py |
import torch
import numpy as np
def get_random_problems(batch_size, problem_size):
depot_xy = torch.rand(size=(batch_size, 1, 2))
# shape: (batch, 1, 2)
node_xy = torch.rand(size=(batch_size, problem_size, 2))
# shape: (batch, problem, 2)
if problem_size == 20:
demand_scaler = 30
elif problem_size == 50:
demand_scaler = 40
elif problem_size == 100:
demand_scaler = 50
else:
raise NotImplementedError
node_demand = torch.randint(1, 10, size=(batch_size, problem_size)) / float(demand_scaler)
# shape: (batch, problem)
return depot_xy, node_xy, node_demand
def augment_xy_data_by_8_fold(xy_data):
    """Return all 8 dihedral symmetry images of points in the unit square.

    xy_data shape: (batch, N, 2) -> result shape: (8*batch, N, 2), in the
    fixed order (x,y), (1-x,y), (x,1-y), (1-x,1-y), (y,x), (1-y,x),
    (y,1-x), (1-y,1-x).
    """
    x = xy_data[:, :, [0]]
    y = xy_data[:, :, [1]]
    # x, y shape: (batch, N, 1)

    coordinate_pairs = [
        (x, y), (1 - x, y), (x, 1 - y), (1 - x, 1 - y),
        (y, x), (1 - y, x), (y, 1 - x), (1 - y, 1 - x),
    ]
    variants = [torch.cat(pair, dim=2) for pair in coordinate_pairs]
    return torch.cat(variants, dim=0)
    # shape: (8*batch, N, 2)
POMO | POMO-master/NEW_py_ver/CVRP/POMO/CVRPEnv.py |
from dataclasses import dataclass
import torch
from CVRProblemDef import get_random_problems, augment_xy_data_by_8_fold
@dataclass
class Reset_State:
    """Static problem description handed to the model once per batch (at reset)."""
    depot_xy: torch.Tensor = None
    # shape: (batch, 1, 2)
    node_xy: torch.Tensor = None
    # shape: (batch, problem, 2)
    node_demand: torch.Tensor = None
    # shape: (batch, problem)
@dataclass
class Step_State:
    """Mutable per-step rollout state shared between CVRPEnv and the model."""
    BATCH_IDX: torch.Tensor = None
    POMO_IDX: torch.Tensor = None
    # shape: (batch, pomo)
    selected_count: int = None
    # number of env.step() calls so far
    load: torch.Tensor = None
    # shape: (batch, pomo) -- remaining vehicle capacity
    current_node: torch.Tensor = None
    # shape: (batch, pomo)
    ninf_mask: torch.Tensor = None
    # shape: (batch, pomo, problem+1) -- -inf marks forbidden nodes
    finished: torch.Tensor = None
    # shape: (batch, pomo)
class CVRPEnv:
    """Batched, POMO-parallel CVRP rollout environment.

    Node index 0 is the depot; indices 1..problem are customers. Rewards are
    returned only when every rollout is finished, as negative tour distance.
    """
    def __init__(self, **env_params):
        # Const @INIT
        ####################################
        self.env_params = env_params
        self.problem_size = env_params['problem_size']
        self.pomo_size = env_params['pomo_size']
        self.FLAG__use_saved_problems = False
        self.saved_depot_xy = None
        self.saved_node_xy = None
        self.saved_node_demand = None
        self.saved_index = None
        # Const @Load_Problem
        ####################################
        self.batch_size = None
        self.BATCH_IDX = None
        self.POMO_IDX = None
        # IDX.shape: (batch, pomo)
        self.depot_node_xy = None
        # shape: (batch, problem+1, 2)
        self.depot_node_demand = None
        # shape: (batch, problem+1)
        # Dynamic-1
        ####################################
        self.selected_count = None
        self.current_node = None
        # shape: (batch, pomo)
        self.selected_node_list = None
        # shape: (batch, pomo, 0~)
        # Dynamic-2
        ####################################
        self.at_the_depot = None
        # shape: (batch, pomo)
        self.load = None
        # shape: (batch, pomo)
        self.visited_ninf_flag = None
        # shape: (batch, pomo, problem+1)
        self.ninf_mask = None
        # shape: (batch, pomo, problem+1)
        self.finished = None
        # shape: (batch, pomo)
        # states to return
        ####################################
        self.reset_state = Reset_State()
        self.step_state = Step_State()
    def use_saved_problems(self, filename, device):
        """Switch to serving problems from a saved file instead of sampling."""
        self.FLAG__use_saved_problems = True
        loaded_dict = torch.load(filename, map_location=device)
        self.saved_depot_xy = loaded_dict['depot_xy']
        self.saved_node_xy = loaded_dict['node_xy']
        self.saved_node_demand = loaded_dict['node_demand']
        self.saved_index = 0
    def load_problems(self, batch_size, aug_factor=1):
        """Load (or sample) a batch; aug_factor=8 applies 8-fold symmetry augmentation."""
        self.batch_size = batch_size
        if not self.FLAG__use_saved_problems:
            depot_xy, node_xy, node_demand = get_random_problems(batch_size, self.problem_size)
        else:
            depot_xy = self.saved_depot_xy[self.saved_index:self.saved_index+batch_size]
            node_xy = self.saved_node_xy[self.saved_index:self.saved_index+batch_size]
            node_demand = self.saved_node_demand[self.saved_index:self.saved_index+batch_size]
            self.saved_index += batch_size
        if aug_factor > 1:
            if aug_factor == 8:
                self.batch_size = self.batch_size * 8
                depot_xy = augment_xy_data_by_8_fold(depot_xy)
                node_xy = augment_xy_data_by_8_fold(node_xy)
                node_demand = node_demand.repeat(8, 1)
            else:
                raise NotImplementedError
        self.depot_node_xy = torch.cat((depot_xy, node_xy), dim=1)
        # shape: (batch, problem+1, 2)
        depot_demand = torch.zeros(size=(self.batch_size, 1))
        # shape: (batch, 1)
        self.depot_node_demand = torch.cat((depot_demand, node_demand), dim=1)
        # shape: (batch, problem+1)
        self.BATCH_IDX = torch.arange(self.batch_size)[:, None].expand(self.batch_size, self.pomo_size)
        self.POMO_IDX = torch.arange(self.pomo_size)[None, :].expand(self.batch_size, self.pomo_size)
        self.reset_state.depot_xy = depot_xy
        self.reset_state.node_xy = node_xy
        self.reset_state.node_demand = node_demand
        self.step_state.BATCH_IDX = self.BATCH_IDX
        self.step_state.POMO_IDX = self.POMO_IDX
    def reset(self):
        """Reset all dynamic rollout state; returns (reset_state, None, False)."""
        self.selected_count = 0
        self.current_node = None
        # shape: (batch, pomo)
        self.selected_node_list = torch.zeros((self.batch_size, self.pomo_size, 0), dtype=torch.long)
        # shape: (batch, pomo, 0~)
        self.at_the_depot = torch.ones(size=(self.batch_size, self.pomo_size), dtype=torch.bool)
        # shape: (batch, pomo)
        self.load = torch.ones(size=(self.batch_size, self.pomo_size))
        # shape: (batch, pomo) -- full capacity is normalized to 1
        self.visited_ninf_flag = torch.zeros(size=(self.batch_size, self.pomo_size, self.problem_size+1))
        # shape: (batch, pomo, problem+1)
        self.ninf_mask = torch.zeros(size=(self.batch_size, self.pomo_size, self.problem_size+1))
        # shape: (batch, pomo, problem+1)
        self.finished = torch.zeros(size=(self.batch_size, self.pomo_size), dtype=torch.bool)
        # shape: (batch, pomo)
        reward = None
        done = False
        return self.reset_state, reward, done
    def pre_step(self):
        """Sync step_state with the (freshly reset) dynamic state, before the first move."""
        self.step_state.selected_count = self.selected_count
        self.step_state.load = self.load
        self.step_state.current_node = self.current_node
        self.step_state.ninf_mask = self.ninf_mask
        self.step_state.finished = self.finished
        reward = None
        done = False
        return self.step_state, reward, done
    def step(self, selected):
        """Advance every rollout by one node selection.

        selected.shape: (batch, pomo); entry 0 means "go to the depot".
        Returns (step_state, reward, done); reward stays None until done.
        """
        # selected.shape: (batch, pomo)
        # Dynamic-1
        ####################################
        self.selected_count += 1
        self.current_node = selected
        # shape: (batch, pomo)
        self.selected_node_list = torch.cat((self.selected_node_list, self.current_node[:, :, None]), dim=2)
        # shape: (batch, pomo, 0~)
        # Dynamic-2
        ####################################
        self.at_the_depot = (selected == 0)
        demand_list = self.depot_node_demand[:, None, :].expand(self.batch_size, self.pomo_size, -1)
        # shape: (batch, pomo, problem+1)
        gathering_index = selected[:, :, None]
        # shape: (batch, pomo, 1)
        selected_demand = demand_list.gather(dim=2, index=gathering_index).squeeze(dim=2)
        # shape: (batch, pomo)
        self.load -= selected_demand
        self.load[self.at_the_depot] = 1 # refill loaded at the depot
        self.visited_ninf_flag[self.BATCH_IDX, self.POMO_IDX, selected] = float('-inf')
        # shape: (batch, pomo, problem+1)
        self.visited_ninf_flag[:, :, 0][~self.at_the_depot] = 0 # depot is considered unvisited, unless you are AT the depot
        self.ninf_mask = self.visited_ninf_flag.clone()
        # Mask customers whose demand exceeds the remaining load (with a small
        # epsilon to absorb floating-point round-off).
        round_error_epsilon = 0.00001
        demand_too_large = self.load[:, :, None] + round_error_epsilon < demand_list
        # shape: (batch, pomo, problem+1)
        self.ninf_mask[demand_too_large] = float('-inf')
        # shape: (batch, pomo, problem+1)
        newly_finished = (self.visited_ninf_flag == float('-inf')).all(dim=2)
        # shape: (batch, pomo)
        # bool + bool acts as logical OR: once finished, always finished.
        self.finished = self.finished + newly_finished
        # shape: (batch, pomo)
        # do not mask depot for finished episode.
        self.ninf_mask[:, :, 0][self.finished] = 0
        self.step_state.selected_count = self.selected_count
        self.step_state.load = self.load
        self.step_state.current_node = self.current_node
        self.step_state.ninf_mask = self.ninf_mask
        self.step_state.finished = self.finished
        # returning values
        done = self.finished.all()
        if done:
            reward = -self._get_travel_distance() # note the minus sign!
        else:
            reward = None
        return self.step_state, reward, done
    def _get_travel_distance(self):
        """Total closed-tour Euclidean length of each rollout's visit sequence."""
        gathering_index = self.selected_node_list[:, :, :, None].expand(-1, -1, -1, 2)
        # shape: (batch, pomo, selected_list_length, 2)
        all_xy = self.depot_node_xy[:, None, :, :].expand(-1, self.pomo_size, -1, -1)
        # shape: (batch, pomo, problem+1, 2)
        ordered_seq = all_xy.gather(dim=2, index=gathering_index)
        # shape: (batch, pomo, selected_list_length, 2)
        # roll(-1) pairs each node with its successor (wrapping back to the start).
        rolled_seq = ordered_seq.roll(dims=2, shifts=-1)
        segment_lengths = ((ordered_seq-rolled_seq)**2).sum(3).sqrt()
        # shape: (batch, pomo, selected_list_length)
        travel_distances = segment_lengths.sum(2)
        # shape: (batch, pomo)
        return travel_distances
| 8,799 | 35.514523 | 125 | py |
POMO | POMO-master/NEW_py_ver/CVRP/POMO/CVRPModel.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
class CVRPModel(nn.Module):
    """POMO policy network for CVRP: attention encoder plus autoregressive decoder.

    Call ``pre_forward`` once per batch (encodes nodes, caches decoder K/V),
    then ``forward`` once per decoding step.
    """
    def __init__(self, **model_params):
        super().__init__()
        self.model_params = model_params
        self.encoder = CVRP_Encoder(**model_params)
        self.decoder = CVRP_Decoder(**model_params)
        self.encoded_nodes = None
        # shape: (batch, problem+1, EMBEDDING_DIM)
    def pre_forward(self, reset_state):
        """Encode the problem instances and prime the decoder caches."""
        depot_xy = reset_state.depot_xy
        # shape: (batch, 1, 2)
        node_xy = reset_state.node_xy
        # shape: (batch, problem, 2)
        node_demand = reset_state.node_demand
        # shape: (batch, problem)
        node_xy_demand = torch.cat((node_xy, node_demand[:, :, None]), dim=2)
        # shape: (batch, problem, 3)
        self.encoded_nodes = self.encoder(depot_xy, node_xy_demand)
        # shape: (batch, problem+1, embedding)
        self.decoder.set_kv(self.encoded_nodes)
    def forward(self, state):
        """Select the next node for every (batch, pomo) rollout.

        Step 0 forces the depot; step 1 forces customer g+1 for rollout g
        (POMO diversification); afterwards the decoder output is sampled
        (training / eval_type 'softmax') or taken greedily via argmax.
        Returns (selected, prob); prob is None in the argmax branch.
        """
        batch_size = state.BATCH_IDX.size(0)
        pomo_size = state.BATCH_IDX.size(1)
        if state.selected_count == 0: # First Move, depot
            selected = torch.zeros(size=(batch_size, pomo_size), dtype=torch.long)
            prob = torch.ones(size=(batch_size, pomo_size))
            # # Use Averaged encoded nodes for decoder input_1
            # encoded_nodes_mean = self.encoded_nodes.mean(dim=1, keepdim=True)
            # # shape: (batch, 1, embedding)
            # self.decoder.set_q1(encoded_nodes_mean)
            # # Use encoded_depot for decoder input_2
            # encoded_first_node = self.encoded_nodes[:, [0], :]
            # # shape: (batch, 1, embedding)
            # self.decoder.set_q2(encoded_first_node)
        elif state.selected_count == 1: # Second Move, POMO
            selected = torch.arange(start=1, end=pomo_size+1)[None, :].expand(batch_size, pomo_size)
            prob = torch.ones(size=(batch_size, pomo_size))
        else:
            encoded_last_node = _get_encoding(self.encoded_nodes, state.current_node)
            # shape: (batch, pomo, embedding)
            probs = self.decoder(encoded_last_node, state.load, ninf_mask=state.ninf_mask)
            # shape: (batch, pomo, problem+1)
            if self.training or self.model_params['eval_type'] == 'softmax':
                # Retry loop: torch.multinomial can (rarely) pick elements with
                # probability 0; resample until every pick is valid.
                while True: # to fix pytorch.multinomial bug on selecting 0 probability elements
                    with torch.no_grad():
                        selected = probs.reshape(batch_size * pomo_size, -1).multinomial(1) \
                            .squeeze(dim=1).reshape(batch_size, pomo_size)
                    # shape: (batch, pomo)
                    prob = probs[state.BATCH_IDX, state.POMO_IDX, selected].reshape(batch_size, pomo_size)
                    # shape: (batch, pomo)
                    if (prob != 0).all():
                        break
            else:
                selected = probs.argmax(dim=2)
                # shape: (batch, pomo)
                prob = None # value not needed. Can be anything.
        return selected, prob
def _get_encoding(encoded_nodes, node_index_to_pick):
    """Gather the embedding of each rollout's chosen node.

    encoded_nodes shape: (batch, problem, embedding)
    node_index_to_pick shape: (batch, pomo)
    returns shape: (batch, pomo, embedding)
    """
    embedding_dim = encoded_nodes.size(2)
    batch_size, pomo_size = node_index_to_pick.size()
    gather_index = node_index_to_pick.unsqueeze(2).expand(batch_size, pomo_size, embedding_dim)
    return encoded_nodes.gather(dim=1, index=gather_index)
########################################
# ENCODER
########################################
class CVRP_Encoder(nn.Module):
    """Embeds depot and customers separately, then runs stacked attention layers."""

    def __init__(self, **model_params):
        super().__init__()
        self.model_params = model_params
        embedding_dim = model_params['embedding_dim']
        encoder_layer_num = model_params['encoder_layer_num']

        self.embedding_depot = nn.Linear(2, embedding_dim)
        self.embedding_node = nn.Linear(3, embedding_dim)
        self.layers = nn.ModuleList(EncoderLayer(**model_params) for _ in range(encoder_layer_num))

    def forward(self, depot_xy, node_xy_demand):
        # depot_xy.shape: (batch, 1, 2)
        # node_xy_demand.shape: (batch, problem, 3)
        embedded_depot = self.embedding_depot(depot_xy)
        # shape: (batch, 1, embedding)
        embedded_node = self.embedding_node(node_xy_demand)
        # shape: (batch, problem, embedding)
        out = torch.cat((embedded_depot, embedded_node), dim=1)
        # shape: (batch, problem+1, embedding)
        for layer in self.layers:
            out = layer(out)
        return out
        # shape: (batch, problem+1, embedding)
# shape: (batch, problem+1, embedding)
class EncoderLayer(nn.Module):
    """One transformer-style encoder block: MHA -> add&norm -> FF -> add&norm.

    Relies on module-level helpers ``reshape_by_heads``, ``multi_head_attention``,
    ``AddAndInstanceNormalization`` and ``FeedForward`` defined elsewhere in the file.
    """
    def __init__(self, **model_params):
        super().__init__()
        self.model_params = model_params
        embedding_dim = self.model_params['embedding_dim']
        head_num = self.model_params['head_num']
        qkv_dim = self.model_params['qkv_dim']
        self.Wq = nn.Linear(embedding_dim, head_num * qkv_dim, bias=False)
        self.Wk = nn.Linear(embedding_dim, head_num * qkv_dim, bias=False)
        self.Wv = nn.Linear(embedding_dim, head_num * qkv_dim, bias=False)
        self.multi_head_combine = nn.Linear(head_num * qkv_dim, embedding_dim)
        self.add_n_normalization_1 = AddAndInstanceNormalization(**model_params)
        self.feed_forward = FeedForward(**model_params)
        self.add_n_normalization_2 = AddAndInstanceNormalization(**model_params)
    def forward(self, input1):
        """Self-attention over all nodes, with residual + normalization twice."""
        # input1.shape: (batch, problem+1, embedding)
        head_num = self.model_params['head_num']
        q = reshape_by_heads(self.Wq(input1), head_num=head_num)
        k = reshape_by_heads(self.Wk(input1), head_num=head_num)
        v = reshape_by_heads(self.Wv(input1), head_num=head_num)
        # qkv shape: (batch, head_num, problem, qkv_dim)
        out_concat = multi_head_attention(q, k, v)
        # shape: (batch, problem, head_num*qkv_dim)
        multi_head_out = self.multi_head_combine(out_concat)
        # shape: (batch, problem, embedding)
        out1 = self.add_n_normalization_1(input1, multi_head_out)
        out2 = self.feed_forward(out1)
        out3 = self.add_n_normalization_2(out1, out2)
        return out3
        # shape: (batch, problem, embedding)
# shape: (batch, problem, embedding)
########################################
# DECODER
########################################
class CVRP_Decoder(nn.Module):
    """Autoregressive decoder: attends from the last visited node (augmented with
    the remaining vehicle load) over all encoded nodes, then converts a clipped
    single-head attention score into per-node selection probabilities."""

    def __init__(self, **model_params):
        super().__init__()
        self.model_params = model_params
        emb = model_params['embedding_dim']
        heads = model_params['head_num']
        qkv = model_params['qkv_dim']

        # self.Wq_1 = nn.Linear(emb, heads * qkv, bias=False)
        # self.Wq_2 = nn.Linear(emb, heads * qkv, bias=False)
        # +1 input feature: the remaining load is appended to the node embedding.
        self.Wq_last = nn.Linear(emb + 1, heads * qkv, bias=False)
        self.Wk = nn.Linear(emb, heads * qkv, bias=False)
        self.Wv = nn.Linear(emb, heads * qkv, bias=False)
        self.multi_head_combine = nn.Linear(heads * qkv, emb)

        self.k = None                # cached keys, for multi-head attention
        self.v = None                # cached values, for multi-head attention
        self.single_head_key = None  # cached, for the final single-head scoring
        # self.q1 = None  # saved q1, for multi-head attention
        # self.q2 = None  # saved q2, for multi-head attention

    def set_kv(self, encoded_nodes):
        # encoded_nodes.shape: (batch, problem+1, embedding)
        heads = self.model_params['head_num']
        self.k = reshape_by_heads(self.Wk(encoded_nodes), head_num=heads)
        self.v = reshape_by_heads(self.Wv(encoded_nodes), head_num=heads)
        # k/v shape: (batch, head_num, problem+1, qkv_dim)
        self.single_head_key = encoded_nodes.transpose(1, 2)
        # shape: (batch, embedding, problem+1)

    def set_q1(self, encoded_q1):
        # encoded_q1.shape: (batch, n, embedding) -- n can be 1 or pomo
        # NOTE(review): relies on self.Wq_1, which is commented out in __init__;
        # only callable if that projection is restored.
        heads = self.model_params['head_num']
        self.q1 = reshape_by_heads(self.Wq_1(encoded_q1), head_num=heads)
        # shape: (batch, head_num, n, qkv_dim)

    def set_q2(self, encoded_q2):
        # encoded_q2.shape: (batch, n, embedding) -- n can be 1 or pomo
        # NOTE(review): relies on self.Wq_2, which is commented out in __init__.
        heads = self.model_params['head_num']
        self.q2 = reshape_by_heads(self.Wq_2(encoded_q2), head_num=heads)
        # shape: (batch, head_num, n, qkv_dim)

    def forward(self, encoded_last_node, load, ninf_mask):
        # encoded_last_node.shape: (batch, pomo, embedding)
        # load.shape: (batch, pomo)
        # ninf_mask.shape: (batch, pomo, problem)
        heads = self.model_params['head_num']

        # --- multi-head attention over all nodes --------------------------
        query_input = torch.cat((encoded_last_node, load[:, :, None]), dim=2)
        # shape: (batch, pomo, embedding+1)
        q = reshape_by_heads(self.Wq_last(query_input), head_num=heads)
        # shape: (batch, head_num, pomo, qkv_dim)
        attn = multi_head_attention(q, self.k, self.v, rank3_ninf_mask=ninf_mask)
        # shape: (batch, pomo, head_num*qkv_dim)
        glimpse = self.multi_head_combine(attn)
        # shape: (batch, pomo, embedding)

        # --- single-head attention: logits -> probabilities ---------------
        score = torch.matmul(glimpse, self.single_head_key)
        # shape: (batch, pomo, problem)
        score = score / self.model_params['sqrt_embedding_dim']
        score = self.model_params['logit_clipping'] * torch.tanh(score)
        score = score + ninf_mask  # masked nodes get probability 0
        return F.softmax(score, dim=2)
        # shape: (batch, pomo, problem)
########################################
# NN SUB CLASS / FUNCTIONS
########################################
def reshape_by_heads(qkv, head_num):
    """Split the last dim of a (batch, n, head_num*key_dim) projection into
    heads, returning shape (batch, head_num, n, key_dim)."""
    batch_s, n = qkv.size(0), qkv.size(1)
    per_head = qkv.reshape(batch_s, n, head_num, -1)  # (batch, n, head_num, key_dim)
    return per_head.transpose(1, 2)                   # (batch, head_num, n, key_dim)
def multi_head_attention(q, k, v, rank2_ninf_mask=None, rank3_ninf_mask=None):
    """Scaled dot-product attention over heads, with optional additive -inf masks.

    q: (batch, head_num, n, key_dim); k, v: (batch, head_num, problem, key_dim).
    rank2_ninf_mask: (batch, problem); rank3_ninf_mask: (batch, n, problem).
    Returns the head-concatenated output of shape (batch, n, head_num*key_dim)."""
    batch_s, head_num, n, key_dim = q.shape
    problem = k.size(2)

    scores = torch.matmul(q, k.transpose(2, 3)) / torch.sqrt(torch.tensor(key_dim, dtype=torch.float))
    # shape: (batch, head_num, n, problem)
    if rank2_ninf_mask is not None:
        scores = scores + rank2_ninf_mask[:, None, None, :].expand(batch_s, head_num, n, problem)
    if rank3_ninf_mask is not None:
        scores = scores + rank3_ninf_mask[:, None, :, :].expand(batch_s, head_num, n, problem)

    weights = nn.Softmax(dim=3)(scores)  # attention weights per query position
    mixed = torch.matmul(weights, v)     # (batch, head_num, n, key_dim)
    return mixed.transpose(1, 2).reshape(batch_s, n, head_num * key_dim)
class AddAndInstanceNormalization(nn.Module):
    """Residual add followed by InstanceNorm1d over the embedding channels."""

    def __init__(self, **model_params):
        super().__init__()
        self.norm = nn.InstanceNorm1d(model_params['embedding_dim'],
                                      affine=True, track_running_stats=False)

    def forward(self, input1, input2):
        # both inputs: (batch, problem, embedding)
        summed = input1 + input2
        # InstanceNorm1d expects channels second: (batch, embedding, problem)
        normed = self.norm(summed.transpose(1, 2))
        return normed.transpose(1, 2)  # back to (batch, problem, embedding)
class AddAndBatchNormalization(nn.Module):
    """Residual add followed by BatchNorm1d over the embedding features.

    'Funny' batch-norm: batch and problem dimensions are flattened together,
    so statistics are taken over every node of every instance."""

    def __init__(self, **model_params):
        super().__init__()
        self.norm_by_EMB = nn.BatchNorm1d(model_params['embedding_dim'], affine=True)

    def forward(self, input1, input2):
        # both inputs: (batch, problem, embedding)
        b, p, e = input1.size(0), input1.size(1), input1.size(2)
        flat = (input1 + input2).reshape(b * p, e)
        return self.norm_by_EMB(flat).reshape(b, p, e)
class FeedForward(nn.Module):
    """Position-wise two-layer MLP with ReLU, applied to each node embedding."""
    def __init__(self, **model_params):
        super().__init__()
        embedding_dim = model_params['embedding_dim']
        ff_hidden_dim = model_params['ff_hidden_dim']
        self.W1 = nn.Linear(embedding_dim, ff_hidden_dim)
        self.W2 = nn.Linear(ff_hidden_dim, embedding_dim)
    def forward(self, input1):
        # input.shape: (batch, problem, embedding)
        # NOTE(review): the trailing " | 14,042 | ..." tokens look like dataset
        # metadata fused onto this line during extraction -- confirm against the
        # original source file before running this code.
        return self.W2(F.relu(self.W1(input1))) | 14,042 | 36.150794 | 109 | py
POMO | POMO-master/NEW_py_ver/CVRP/POMO/CVRPTrainer.py |
import torch
from logging import getLogger
from CVRPEnv import CVRPEnv as Env
from CVRPModel import CVRPModel as Model
from torch.optim import Adam as Optimizer
from torch.optim.lr_scheduler import MultiStepLR as Scheduler
from utils.utils import *
class CVRPTrainer:
    """REINFORCE trainer for the POMO CVRP model.

    Rolls out `pomo_size` trajectories per instance and uses their mean reward
    as a shared baseline; periodically saves log images and checkpoints with
    model/optimizer/scheduler state so training can be resumed.
    """
    def __init__(self,
                 env_params,
                 model_params,
                 optimizer_params,
                 trainer_params):
        # save arguments
        self.env_params = env_params
        self.model_params = model_params
        self.optimizer_params = optimizer_params
        self.trainer_params = trainer_params
        # result folder, logger
        self.logger = getLogger(name='trainer')
        self.result_folder = get_result_folder()
        self.result_log = LogData()
        # cuda
        USE_CUDA = self.trainer_params['use_cuda']
        if USE_CUDA:
            cuda_device_num = self.trainer_params['cuda_device_num']
            torch.cuda.set_device(cuda_device_num)
            device = torch.device('cuda', cuda_device_num)
            # all tensors created after this default to the selected GPU
            torch.set_default_tensor_type('torch.cuda.FloatTensor')
        else:
            device = torch.device('cpu')
            torch.set_default_tensor_type('torch.FloatTensor')
        # Main Components
        self.model = Model(**self.model_params)
        self.env = Env(**self.env_params)
        self.optimizer = Optimizer(self.model.parameters(), **self.optimizer_params['optimizer'])
        self.scheduler = Scheduler(self.optimizer, **self.optimizer_params['scheduler'])
        # Restore
        self.start_epoch = 1
        model_load = trainer_params['model_load']
        if model_load['enable']:
            checkpoint_fullname = '{path}/checkpoint-{epoch}.pt'.format(**model_load)
            checkpoint = torch.load(checkpoint_fullname, map_location=device)
            self.model.load_state_dict(checkpoint['model_state_dict'])
            self.start_epoch = 1 + model_load['epoch']
            self.result_log.set_raw_data(checkpoint['result_log'])
            self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
            # set to epoch-1 so scheduler.step() at the top of run()'s next
            # iteration advances the LR schedule to the saved epoch
            self.scheduler.last_epoch = model_load['epoch']-1
            self.logger.info('Saved Model Loaded !!')
        # utility
        self.time_estimator = TimeEstimator()
    def run(self):
        """Main training loop: one LR-scheduler step plus one training epoch per
        iteration, with periodic image and checkpoint saving."""
        self.time_estimator.reset(self.start_epoch)
        for epoch in range(self.start_epoch, self.trainer_params['epochs']+1):
            self.logger.info('=================================================================')
            # LR Decay
            self.scheduler.step()
            # Train
            train_score, train_loss = self._train_one_epoch(epoch)
            self.result_log.append('train_score', epoch, train_score)
            self.result_log.append('train_loss', epoch, train_loss)
            ############################
            # Logs & Checkpoint
            ############################
            elapsed_time_str, remain_time_str = self.time_estimator.get_est_string(epoch, self.trainer_params['epochs'])
            self.logger.info("Epoch {:3d}/{:3d}: Time Est.: Elapsed[{}], Remain[{}]".format(
                epoch, self.trainer_params['epochs'], elapsed_time_str, remain_time_str))
            all_done = (epoch == self.trainer_params['epochs'])
            model_save_interval = self.trainer_params['logging']['model_save_interval']
            img_save_interval = self.trainer_params['logging']['img_save_interval']
            # Save latest images, every epoch
            if epoch > 1:
                self.logger.info("Saving log_image")
                image_prefix = '{}/latest'.format(self.result_folder)
                util_save_log_image_with_label(image_prefix, self.trainer_params['logging']['log_image_params_1'],
                                               self.result_log, labels=['train_score'])
                util_save_log_image_with_label(image_prefix, self.trainer_params['logging']['log_image_params_2'],
                                               self.result_log, labels=['train_loss'])
            # Save Model
            if all_done or (epoch % model_save_interval) == 0:
                self.logger.info("Saving trained_model")
                checkpoint_dict = {
                    'epoch': epoch,
                    'model_state_dict': self.model.state_dict(),
                    'optimizer_state_dict': self.optimizer.state_dict(),
                    'scheduler_state_dict': self.scheduler.state_dict(),
                    'result_log': self.result_log.get_raw_data()
                }
                torch.save(checkpoint_dict, '{}/checkpoint-{}.pt'.format(self.result_folder, epoch))
            # Save Image
            if all_done or (epoch % img_save_interval) == 0:
                image_prefix = '{}/img/checkpoint-{}'.format(self.result_folder, epoch)
                util_save_log_image_with_label(image_prefix, self.trainer_params['logging']['log_image_params_1'],
                                               self.result_log, labels=['train_score'])
                util_save_log_image_with_label(image_prefix, self.trainer_params['logging']['log_image_params_2'],
                                               self.result_log, labels=['train_loss'])
            # All-done announcement
            if all_done:
                self.logger.info(" *** Training Done *** ")
                self.logger.info("Now, printing log array...")
                util_print_log_array(self.logger, self.result_log)
    def _train_one_epoch(self, epoch):
        """Train over `train_episodes` instances in mini-batches; returns the
        episode-weighted average (score, loss) for the epoch."""
        score_AM = AverageMeter()
        loss_AM = AverageMeter()
        train_num_episode = self.trainer_params['train_episodes']
        episode = 0
        loop_cnt = 0
        while episode < train_num_episode:
            remaining = train_num_episode - episode
            batch_size = min(self.trainer_params['train_batch_size'], remaining)
            avg_score, avg_loss = self._train_one_batch(batch_size)
            score_AM.update(avg_score, batch_size)
            loss_AM.update(avg_loss, batch_size)
            episode += batch_size
            # Log First 10 Batch, only at the first epoch
            if epoch == self.start_epoch:
                loop_cnt += 1
                if loop_cnt <= 10:
                    self.logger.info('Epoch {:3d}: Train {:3d}/{:3d}({:1.1f}%) Score: {:.4f}, Loss: {:.4f}'
                                     .format(epoch, episode, train_num_episode, 100. * episode / train_num_episode,
                                             score_AM.avg, loss_AM.avg))
        # Log Once, for each epoch
        self.logger.info('Epoch {:3d}: Train ({:3.0f}%) Score: {:.4f}, Loss: {:.4f}'
                         .format(epoch, 100. * episode / train_num_episode,
                                 score_AM.avg, loss_AM.avg))
        return score_AM.avg, loss_AM.avg
    def _train_one_batch(self, batch_size):
        """One REINFORCE update on a freshly sampled batch; returns
        (mean best-POMO score, mean loss) as python floats."""
        # Prep
        ###############################################
        self.model.train()
        self.env.load_problems(batch_size)
        reset_state, _, _ = self.env.reset()
        self.model.pre_forward(reset_state)
        prob_list = torch.zeros(size=(batch_size, self.env.pomo_size, 0))
        # shape: (batch, pomo, 0~problem)
        # POMO Rollout
        ###############################################
        state, reward, done = self.env.pre_step()
        while not done:
            selected, prob = self.model(state)
            # shape: (batch, pomo)
            state, reward, done = self.env.step(selected)
            prob_list = torch.cat((prob_list, prob[:, :, None]), dim=2)
        # Loss
        ###############################################
        # shared baseline: mean reward over the pomo dimension
        advantage = reward - reward.float().mean(dim=1, keepdims=True)
        # shape: (batch, pomo)
        log_prob = prob_list.log().sum(dim=2)
        # size = (batch, pomo)
        loss = -advantage * log_prob  # Minus Sign: To Increase REWARD
        # shape: (batch, pomo)
        loss_mean = loss.mean()
        # Score
        ###############################################
        max_pomo_reward, _ = reward.max(dim=1)  # get best results from pomo
        score_mean = -max_pomo_reward.float().mean()  # negative sign to make positive value
        # Step & Return
        ###############################################
        self.model.zero_grad()
        loss_mean.backward()
        self.optimizer.step()
        return score_mean.item(), loss_mean.item()
| 8,460 | 41.094527 | 120 | py |
POMO | POMO-master/NEW_py_ver/CVRP/POMO/CVRPTester.py |
import torch
import os
from logging import getLogger
from CVRPEnv import CVRPEnv as Env
from CVRPModel import CVRPModel as Model
from utils.utils import *
class CVRPTester:
    """Evaluates a trained POMO CVRP model, optionally with x8 instance augmentation."""

    def __init__(self,
                 env_params,
                 model_params,
                 tester_params):
        # save arguments
        self.env_params = env_params
        self.model_params = model_params
        self.tester_params = tester_params

        # result folder, logger
        self.logger = getLogger(name='trainer')
        self.result_folder = get_result_folder()

        # device setup
        if self.tester_params['use_cuda']:
            gpu_num = self.tester_params['cuda_device_num']
            torch.cuda.set_device(gpu_num)
            device = torch.device('cuda', gpu_num)
            torch.set_default_tensor_type('torch.cuda.FloatTensor')
        else:
            device = torch.device('cpu')
            torch.set_default_tensor_type('torch.FloatTensor')
        self.device = device

        # environment and model
        self.env = Env(**self.env_params)
        self.model = Model(**self.model_params)

        # restore trained weights
        model_load = tester_params['model_load']
        checkpoint_fullname = '{path}/checkpoint-{epoch}.pt'.format(**model_load)
        checkpoint = torch.load(checkpoint_fullname, map_location=device)
        self.model.load_state_dict(checkpoint['model_state_dict'])

        # utility
        self.time_estimator = TimeEstimator()

    def run(self):
        """Evaluate all test episodes batch-by-batch and log running scores."""
        self.time_estimator.reset()

        score_AM = AverageMeter()
        aug_score_AM = AverageMeter()

        if self.tester_params['test_data_load']['enable']:
            self.env.use_saved_problems(self.tester_params['test_data_load']['filename'], self.device)

        test_num_episode = self.tester_params['test_episodes']
        episode = 0
        while episode < test_num_episode:
            batch_size = min(self.tester_params['test_batch_size'], test_num_episode - episode)

            score, aug_score = self._test_one_batch(batch_size)
            score_AM.update(score, batch_size)
            aug_score_AM.update(aug_score, batch_size)
            episode += batch_size

            # per-batch progress log
            elapsed_time_str, remain_time_str = self.time_estimator.get_est_string(episode, test_num_episode)
            self.logger.info("episode {:3d}/{:3d}, Elapsed[{}], Remain[{}], score:{:.3f}, aug_score:{:.3f}".format(
                episode, test_num_episode, elapsed_time_str, remain_time_str, score, aug_score))

            if episode == test_num_episode:
                self.logger.info(" *** Test Done *** ")
                self.logger.info(" NO-AUG SCORE: {:.4f} ".format(score_AM.avg))
                self.logger.info(" AUGMENTATION SCORE: {:.4f} ".format(aug_score_AM.avg))

    def _test_one_batch(self, batch_size):
        """Greedy rollout of one batch; returns (no-aug score, augmented score)."""
        if self.tester_params['augmentation_enable']:
            aug_factor = self.tester_params['aug_factor']
        else:
            aug_factor = 1

        # rollout without gradients
        self.model.eval()
        with torch.no_grad():
            self.env.load_problems(batch_size, aug_factor)
            reset_state, _, _ = self.env.reset()
            self.model.pre_forward(reset_state)

            state, reward, done = self.env.pre_step()
            while not done:
                selected, _ = self.model(state)
                # shape: (batch, pomo)
                state, reward, done = self.env.step(selected)

        # reward shape: (aug_factor*batch, pomo) -> (aug_factor, batch, pomo)
        aug_reward = reward.reshape(aug_factor, batch_size, self.env.pomo_size)
        max_pomo_reward, _ = aug_reward.max(dim=2)  # best POMO rollout, (aug, batch)
        no_aug_score = -max_pomo_reward[0, :].float().mean()  # negate: reward is -distance
        max_aug_pomo_reward, _ = max_pomo_reward.max(dim=0)   # best over augmentations, (batch,)
        aug_score = -max_aug_pomo_reward.float().mean()
        return no_aug_score.item(), aug_score.item()
| 4,557 | 33.793893 | 115 | py |
POMO | POMO-master/NEW_py_ver/TSP/TSProblemDef.py |
import torch
import numpy as np
def get_random_problems(batch_size, problem_size):
    """Sample uniform-random TSP instances with coordinates in [0, 1).

    Returns a tensor of shape (batch, problem, 2)."""
    return torch.rand(batch_size, problem_size, 2)
def augment_xy_data_by_8_fold(problems):
    # 8-fold symmetry augmentation: every (x, y) is mapped through the
    # reflections/rotations of the unit square, stacked along the batch dim.
    # problems.shape: (batch, problem, 2)
    x = problems[:, :, [0]]
    y = problems[:, :, [1]]
    # x,y shape: (batch, problem, 1)
    dat1 = torch.cat((x, y), dim=2)
    dat2 = torch.cat((1 - x, y), dim=2)
    dat3 = torch.cat((x, 1 - y), dim=2)
    dat4 = torch.cat((1 - x, 1 - y), dim=2)
    dat5 = torch.cat((y, x), dim=2)
    dat6 = torch.cat((1 - y, x), dim=2)
    dat7 = torch.cat((y, 1 - x), dim=2)
    dat8 = torch.cat((1 - y, 1 - x), dim=2)
    aug_problems = torch.cat((dat1, dat2, dat3, dat4, dat5, dat6, dat7, dat8), dim=0)
    # shape: (8*batch, problem, 2)
    # NOTE(review): the trailing " | 856 | ..." tokens look like dataset metadata
    # fused onto this line during extraction -- confirm against the original file.
    return aug_problems | 856 | 26.645161 | 31 | py
POMO | POMO-master/NEW_py_ver/TSP/POMO/TSPEnv.py |
from dataclasses import dataclass
import torch
from TSProblemDef import get_random_problems, augment_xy_data_by_8_fold
@dataclass
class Reset_State:
    """Initial state handed to the model after TSPEnv.reset()."""
    problems: torch.Tensor
    # shape: (batch, problem, 2)
@dataclass
class Step_State:
    """Per-step rollout state shared with the model during decoding."""
    BATCH_IDX: torch.Tensor
    POMO_IDX: torch.Tensor
    # shape: (batch, pomo)
    current_node: torch.Tensor = None
    # shape: (batch, pomo)
    ninf_mask: torch.Tensor = None
    # shape: (batch, pomo, node); -inf marks already-visited nodes
class TSPEnv:
    """POMO-style TSP environment: `pomo_size` parallel rollouts build tours over
    the same instances; the terminal reward is the negative tour length."""

    def __init__(self, **env_params):
        # Const @INIT
        ####################################
        self.env_params = env_params
        self.problem_size = env_params['problem_size']
        self.pomo_size = env_params['pomo_size']

        # Const @Load_Problem
        ####################################
        self.batch_size = None
        self.BATCH_IDX = None
        self.POMO_IDX = None
        # IDX.shape: (batch, pomo)
        self.problems = None
        # shape: (batch, problem, 2)

        # Dynamic
        ####################################
        self.selected_count = None
        self.current_node = None
        # shape: (batch, pomo)
        self.selected_node_list = None
        # shape: (batch, pomo, 0~problem)

    def load_problems(self, batch_size, aug_factor=1):
        self.batch_size = batch_size
        self.problems = get_random_problems(batch_size, self.problem_size)
        # problems.shape: (batch, problem, 2)
        if aug_factor > 1:
            if aug_factor != 8:
                raise NotImplementedError
            # 8-fold symmetry augmentation stacks along the batch dimension
            self.batch_size = self.batch_size * 8
            self.problems = augment_xy_data_by_8_fold(self.problems)
            # shape: (8*batch, problem, 2)

        self.BATCH_IDX = torch.arange(self.batch_size)[:, None].expand(self.batch_size, self.pomo_size)
        self.POMO_IDX = torch.arange(self.pomo_size)[None, :].expand(self.batch_size, self.pomo_size)

    def reset(self):
        self.selected_count = 0
        self.current_node = None
        # shape: (batch, pomo)
        self.selected_node_list = torch.zeros((self.batch_size, self.pomo_size, 0), dtype=torch.long)

        # fresh step state with an all-zero (nothing masked yet) ninf mask
        self.step_state = Step_State(BATCH_IDX=self.BATCH_IDX, POMO_IDX=self.POMO_IDX)
        self.step_state.ninf_mask = torch.zeros((self.batch_size, self.pomo_size, self.problem_size))
        # shape: (batch, pomo, problem)

        return Reset_State(self.problems), None, False

    def pre_step(self):
        # no reward yet, rollout not finished
        return self.step_state, None, False

    def step(self, selected):
        # selected.shape: (batch, pomo)
        self.selected_count += 1
        self.current_node = selected
        self.selected_node_list = torch.cat(
            (self.selected_node_list, selected[:, :, None]), dim=2)
        # shape: (batch, pomo, 0~problem)

        # update the shared step state: mask the node just visited
        self.step_state.current_node = selected
        self.step_state.ninf_mask[self.BATCH_IDX, self.POMO_IDX, selected] = float('-inf')

        done = (self.selected_count == self.problem_size)
        # minus sign: shorter tour => larger reward
        reward = -self._get_travel_distance() if done else None
        return self.step_state, reward, done

    def _get_travel_distance(self):
        idx = self.selected_node_list.unsqueeze(3).expand(self.batch_size, -1, self.problem_size, 2)
        # shape: (batch, pomo, problem, 2)
        coords = self.problems[:, None, :, :].expand(self.batch_size, self.pomo_size, self.problem_size, 2)
        tour = coords.gather(dim=2, index=idx)
        # shape: (batch, pomo, problem, 2) -- coordinates in visiting order
        nxt = tour.roll(dims=2, shifts=-1)
        leg_lengths = ((tour - nxt) ** 2).sum(3).sqrt()
        # shape: (batch, pomo, problem)
        return leg_lengths.sum(2)
        # shape: (batch, pomo)
| 4,228 | 32.039063 | 113 | py |
POMO | POMO-master/NEW_py_ver/TSP/POMO/TSPModel.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
class TSPModel(nn.Module):
    """POMO TSP policy: the encoder embeds the instance once per episode, the
    decoder then picks the next city for every POMO rollout at each step."""

    def __init__(self, **model_params):
        super().__init__()
        self.model_params = model_params

        self.encoder = TSP_Encoder(**model_params)
        self.decoder = TSP_Decoder(**model_params)
        self.encoded_nodes = None
        # shape: (batch, problem, EMBEDDING_DIM)

    def pre_forward(self, reset_state):
        """Encode the instance once and cache decoder keys/values."""
        self.encoded_nodes = self.encoder(reset_state.problems)
        # shape: (batch, problem, EMBEDDING_DIM)
        self.decoder.set_kv(self.encoded_nodes)

    def forward(self, state):
        batch_size = state.BATCH_IDX.size(0)
        pomo_size = state.BATCH_IDX.size(1)

        if state.current_node is None:
            # First move: rollout i deterministically starts at city i (POMO).
            selected = torch.arange(pomo_size)[None, :].expand(batch_size, pomo_size)
            prob = torch.ones(size=(batch_size, pomo_size))

            first_emb = _get_encoding(self.encoded_nodes, selected)
            # shape: (batch, pomo, embedding)
            self.decoder.set_q1(first_emb)
            return selected, prob

        last_emb = _get_encoding(self.encoded_nodes, state.current_node)
        # shape: (batch, pomo, embedding)
        probs = self.decoder(last_emb, ninf_mask=state.ninf_mask)
        # shape: (batch, pomo, problem)

        if self.training or self.model_params['eval_type'] == 'softmax':
            # Sample; re-draw on the rare numerical event that a zero-probability
            # action is returned, so the log-prob used for REINFORCE stays finite.
            while True:
                selected = probs.reshape(batch_size * pomo_size, -1).multinomial(1) \
                    .squeeze(dim=1).reshape(batch_size, pomo_size)
                # shape: (batch, pomo)
                prob = probs[state.BATCH_IDX, state.POMO_IDX, selected] \
                    .reshape(batch_size, pomo_size)
                # shape: (batch, pomo)
                if (prob != 0).all():
                    return selected, prob
        else:
            # greedy decoding for evaluation
            return probs.argmax(dim=2), None
def _get_encoding(encoded_nodes, node_index_to_pick):
# encoded_nodes.shape: (batch, problem, embedding)
# node_index_to_pick.shape: (batch, pomo)
batch_size = node_index_to_pick.size(0)
pomo_size = node_index_to_pick.size(1)
embedding_dim = encoded_nodes.size(2)
gathering_index = node_index_to_pick[:, :, None].expand(batch_size, pomo_size, embedding_dim)
# shape: (batch, pomo, embedding)
picked_nodes = encoded_nodes.gather(dim=1, index=gathering_index)
# shape: (batch, pomo, embedding)
return picked_nodes
########################################
# ENCODER
########################################
class TSP_Encoder(nn.Module):
    """Embeds 2-D city coordinates and refines them with stacked attention layers."""

    def __init__(self, **model_params):
        super().__init__()
        self.model_params = model_params
        emb_dim = model_params['embedding_dim']
        n_layers = model_params['encoder_layer_num']

        self.embedding = nn.Linear(2, emb_dim)
        self.layers = nn.ModuleList(EncoderLayer(**model_params) for _ in range(n_layers))

    def forward(self, data):
        # data.shape: (batch, problem, 2)
        out = self.embedding(data)  # (batch, problem, embedding)
        for layer in self.layers:
            out = layer(out)
        return out
class EncoderLayer(nn.Module):
    """Transformer-style encoder block: multi-head self-attention followed by a
    feed-forward sublayer, each wrapped in add & instance-normalization."""

    def __init__(self, **model_params):
        super().__init__()
        self.model_params = model_params
        emb = model_params['embedding_dim']
        heads = model_params['head_num']
        qkv = model_params['qkv_dim']

        self.Wq = nn.Linear(emb, heads * qkv, bias=False)
        self.Wk = nn.Linear(emb, heads * qkv, bias=False)
        self.Wv = nn.Linear(emb, heads * qkv, bias=False)
        self.multi_head_combine = nn.Linear(heads * qkv, emb)

        self.addAndNormalization1 = Add_And_Normalization_Module(**model_params)
        self.feedForward = Feed_Forward_Module(**model_params)
        self.addAndNormalization2 = Add_And_Normalization_Module(**model_params)

    def forward(self, input1):
        # input1.shape: (batch, problem, EMBEDDING_DIM)
        heads = self.model_params['head_num']

        q = reshape_by_heads(self.Wq(input1), head_num=heads)
        k = reshape_by_heads(self.Wk(input1), head_num=heads)
        v = reshape_by_heads(self.Wv(input1), head_num=heads)
        # each: (batch, HEAD_NUM, problem, KEY_DIM)

        attended = multi_head_attention(q, k, v)       # (batch, problem, HEAD_NUM*KEY_DIM)
        projected = self.multi_head_combine(attended)  # (batch, problem, EMBEDDING_DIM)

        hidden = self.addAndNormalization1(input1, projected)
        return self.addAndNormalization2(hidden, self.feedForward(hidden))
        # shape: (batch, problem, EMBEDDING_DIM)
########################################
# DECODER
########################################
class TSP_Decoder(nn.Module):
    """Autoregressive decoder: combines first- and last-visited city queries,
    attends over all encoded cities, and emits selection probabilities."""

    def __init__(self, **model_params):
        super().__init__()
        self.model_params = model_params
        emb = model_params['embedding_dim']
        heads = model_params['head_num']
        qkv = model_params['qkv_dim']

        self.Wq_first = nn.Linear(emb, heads * qkv, bias=False)
        self.Wq_last = nn.Linear(emb, heads * qkv, bias=False)
        self.Wk = nn.Linear(emb, heads * qkv, bias=False)
        self.Wv = nn.Linear(emb, heads * qkv, bias=False)
        self.multi_head_combine = nn.Linear(heads * qkv, emb)

        self.k = None                # cached keys, for multi-head attention
        self.v = None                # cached values, for multi-head attention
        self.single_head_key = None  # cached, for single-head probability scoring
        self.q_first = None          # cached first-city query

    def set_kv(self, encoded_nodes):
        # encoded_nodes.shape: (batch, problem, embedding)
        heads = self.model_params['head_num']
        self.k = reshape_by_heads(self.Wk(encoded_nodes), head_num=heads)
        self.v = reshape_by_heads(self.Wv(encoded_nodes), head_num=heads)
        # k/v shape: (batch, head_num, problem, qkv_dim)
        self.single_head_key = encoded_nodes.transpose(1, 2)
        # shape: (batch, embedding, problem)

    def set_q1(self, encoded_q1):
        # encoded_q1.shape: (batch, n, embedding) -- n can be 1 or pomo
        heads = self.model_params['head_num']
        self.q_first = reshape_by_heads(self.Wq_first(encoded_q1), head_num=heads)
        # shape: (batch, head_num, n, qkv_dim)

    def forward(self, encoded_last_node, ninf_mask):
        # encoded_last_node.shape: (batch, pomo, embedding)
        # ninf_mask.shape: (batch, pomo, problem)
        heads = self.model_params['head_num']

        # --- multi-head attention over all cities -------------------------
        q_last = reshape_by_heads(self.Wq_last(encoded_last_node), head_num=heads)
        # shape: (batch, head_num, pomo, qkv_dim)
        q = self.q_first + q_last
        # shape: (batch, head_num, pomo, qkv_dim)
        attn = multi_head_attention(q, self.k, self.v, rank3_ninf_mask=ninf_mask)
        # shape: (batch, pomo, head_num*qkv_dim)
        glimpse = self.multi_head_combine(attn)
        # shape: (batch, pomo, embedding)

        # --- single-head attention: logits -> probabilities ---------------
        score = torch.matmul(glimpse, self.single_head_key)
        # shape: (batch, pomo, problem)
        score = score / self.model_params['sqrt_embedding_dim']
        score = self.model_params['logit_clipping'] * torch.tanh(score)
        score = score + ninf_mask  # visited cities get probability 0
        return F.softmax(score, dim=2)
        # shape: (batch, pomo, problem)
########################################
# NN SUB CLASS / FUNCTIONS
########################################
def reshape_by_heads(qkv, head_num):
    """(batch, n, head_num*key_dim) -> (batch, head_num, n, key_dim)."""
    batch_s = qkv.size(0)
    n = qkv.size(1)
    # split the projection into heads, then bring the head axis forward
    return qkv.reshape(batch_s, n, head_num, -1).transpose(1, 2)
def multi_head_attention(q, k, v, rank2_ninf_mask=None, rank3_ninf_mask=None):
    """Scaled dot-product attention with optional additive -inf masks.

    q: (batch, head_num, n, key_dim); k, v: (batch, head_num, problem, key_dim).
    rank2_ninf_mask: (batch, problem); rank3_ninf_mask: (batch, n, problem).
    Returns (batch, n, head_num*key_dim) with heads concatenated."""
    batch_s, head_num, n, key_dim = q.size(0), q.size(1), q.size(2), q.size(3)
    input_s = k.size(2)

    scaled = torch.matmul(q, k.transpose(2, 3)) / torch.sqrt(torch.tensor(key_dim, dtype=torch.float))
    # shape: (batch, head_num, n, problem)
    if rank2_ninf_mask is not None:
        scaled = scaled + rank2_ninf_mask[:, None, None, :].expand(batch_s, head_num, n, input_s)
    if rank3_ninf_mask is not None:
        scaled = scaled + rank3_ninf_mask[:, None, :, :].expand(batch_s, head_num, n, input_s)

    attn = nn.Softmax(dim=3)(scaled)  # (batch, head_num, n, problem)
    out = torch.matmul(attn, v)       # (batch, head_num, n, key_dim)
    return out.transpose(1, 2).reshape(batch_s, n, head_num * key_dim)
class Add_And_Normalization_Module(nn.Module):
    """Residual connection followed by per-instance normalization of each
    embedding channel across the problem dimension."""

    def __init__(self, **model_params):
        super().__init__()
        self.norm = nn.InstanceNorm1d(model_params['embedding_dim'],
                                      affine=True, track_running_stats=False)

    def forward(self, input1, input2):
        # both inputs: (batch, problem, embedding)
        residual = input1 + input2
        # InstanceNorm1d wants (batch, channels, length) -> transpose in and out
        return self.norm(residual.transpose(1, 2)).transpose(1, 2)
class Feed_Forward_Module(nn.Module):
    """Position-wise two-layer MLP with a ReLU in between."""

    def __init__(self, **model_params):
        super().__init__()
        self.W1 = nn.Linear(model_params['embedding_dim'], model_params['ff_hidden_dim'])
        self.W2 = nn.Linear(model_params['ff_hidden_dim'], model_params['embedding_dim'])

    def forward(self, input1):
        # input1.shape: (batch, problem, embedding)
        hidden = F.relu(self.W1(input1))
        return self.W2(hidden)
| 11,293 | 34.074534 | 109 | py |
POMO | POMO-master/NEW_py_ver/TSP/POMO/TSPTester.py |
import torch
import os
from logging import getLogger
from TSPEnv import TSPEnv as Env
from TSPModel import TSPModel as Model
from utils.utils import *
class TSPTester:
    """Evaluates a trained POMO model on randomly generated TSP instances.

    Runs greedy POMO rollouts (optionally with instance augmentation) in
    batches and reports the average tour length with and without
    augmentation through the logger.
    """
    def __init__(self,
                 env_params,
                 model_params,
                 tester_params):
        """Build env/model, select the device, and restore a checkpoint.

        Args:
            env_params: kwargs forwarded to TSPEnv.
            model_params: kwargs forwarded to TSPModel.
            tester_params: run configuration — cuda flags, episode count,
                batch size, augmentation settings, checkpoint location.
        """
        # save arguments
        self.env_params = env_params
        self.model_params = model_params
        self.tester_params = tester_params
        # result folder, logger
        self.logger = getLogger(name='trainer')
        self.result_folder = get_result_folder()
        # cuda
        # NOTE: the default tensor type must be switched BEFORE Env/Model
        # construction so their internal tensors land on the chosen device.
        USE_CUDA = self.tester_params['use_cuda']
        if USE_CUDA:
            cuda_device_num = self.tester_params['cuda_device_num']
            torch.cuda.set_device(cuda_device_num)
            device = torch.device('cuda', cuda_device_num)
            torch.set_default_tensor_type('torch.cuda.FloatTensor')
        else:
            device = torch.device('cpu')
            torch.set_default_tensor_type('torch.FloatTensor')
        self.device = device
        # ENV and MODEL
        self.env = Env(**self.env_params)
        self.model = Model(**self.model_params)
        # Restore
        model_load = tester_params['model_load']
        checkpoint_fullname = '{path}/checkpoint-{epoch}.pt'.format(**model_load)
        checkpoint = torch.load(checkpoint_fullname, map_location=device)
        self.model.load_state_dict(checkpoint['model_state_dict'])
        # utility
        self.time_estimator = TimeEstimator()
    def run(self):
        """Evaluate over tester_params['test_episodes'] instances.

        Processes instances in batches, accumulating running averages of
        the no-augmentation and augmented scores; results are logged only.
        """
        self.time_estimator.reset()
        score_AM = AverageMeter()
        aug_score_AM = AverageMeter()
        test_num_episode = self.tester_params['test_episodes']
        episode = 0
        while episode < test_num_episode:
            remaining = test_num_episode - episode
            # the final batch may be smaller than test_batch_size
            batch_size = min(self.tester_params['test_batch_size'], remaining)
            score, aug_score = self._test_one_batch(batch_size)
            score_AM.update(score, batch_size)
            aug_score_AM.update(aug_score, batch_size)
            episode += batch_size
            ############################
            # Logs
            ############################
            elapsed_time_str, remain_time_str = self.time_estimator.get_est_string(episode, test_num_episode)
            self.logger.info("episode {:3d}/{:3d}, Elapsed[{}], Remain[{}], score:{:.3f}, aug_score:{:.3f}".format(
                episode, test_num_episode, elapsed_time_str, remain_time_str, score, aug_score))
            all_done = (episode == test_num_episode)
            if all_done:
                self.logger.info(" *** Test Done *** ")
                self.logger.info(" NO-AUG SCORE: {:.4f} ".format(score_AM.avg))
                self.logger.info(" AUGMENTATION SCORE: {:.4f} ".format(aug_score_AM.avg))
    def _test_one_batch(self, batch_size):
        """Roll out one batch greedily and return (no_aug_score, aug_score).

        With augmentation enabled, each instance appears aug_factor times and
        the best tour over all augmentations/POMO starts is taken.
        """
        # Augmentation
        ###############################################
        if self.tester_params['augmentation_enable']:
            aug_factor = self.tester_params['aug_factor']
        else:
            aug_factor = 1
        # Ready
        ###############################################
        self.model.eval()
        with torch.no_grad():
            self.env.load_problems(batch_size, aug_factor)
            reset_state, _, _ = self.env.reset()
            self.model.pre_forward(reset_state)
        # POMO Rollout
        ###############################################
        state, reward, done = self.env.pre_step()
        while not done:
            selected, _ = self.model(state)
            # shape: (batch, pomo)
            state, reward, done = self.env.step(selected)
        # Return
        ###############################################
        # rewards are negative tour lengths; scores below are negated back
        aug_reward = reward.reshape(aug_factor, batch_size, self.env.pomo_size)
        # shape: (augmentation, batch, pomo)
        max_pomo_reward, _ = aug_reward.max(dim=2) # get best results from pomo
        # shape: (augmentation, batch)
        no_aug_score = -max_pomo_reward[0, :].float().mean() # negative sign to make positive value
        max_aug_pomo_reward, _ = max_pomo_reward.max(dim=0) # get best results from augmentation
        # shape: (batch,)
        aug_score = -max_aug_pomo_reward.float().mean() # negative sign to make positive value
        return no_aug_score.item(), aug_score.item()
| 4,389 | 33.296875 | 115 | py |
POMO | POMO-master/NEW_py_ver/TSP/POMO/TSPTrainer.py |
import torch
from logging import getLogger
from TSPEnv import TSPEnv as Env
from TSPModel import TSPModel as Model
from torch.optim import Adam as Optimizer
from torch.optim.lr_scheduler import MultiStepLR as Scheduler
from utils.utils import *
class TSPTrainer:
    """REINFORCE-style trainer for the POMO TSP model.

    Each epoch samples random instances, rolls out all POMO trajectories,
    and updates the policy with a shared-baseline policy gradient
    (baseline = mean reward over the POMO starts of each instance).
    """
    def __init__(self,
                 env_params,
                 model_params,
                 optimizer_params,
                 trainer_params):
        """Build model/env/optimizer/scheduler, pick the device, optionally resume.

        Args:
            env_params: kwargs forwarded to TSPEnv.
            model_params: kwargs forwarded to TSPModel.
            optimizer_params: dict with 'optimizer' (Adam kwargs) and
                'scheduler' (MultiStepLR kwargs) sub-dicts.
            trainer_params: run configuration — cuda flags, epochs, batch
                sizes, logging intervals, checkpoint resume info.
        """
        # save arguments
        self.env_params = env_params
        self.model_params = model_params
        self.optimizer_params = optimizer_params
        self.trainer_params = trainer_params
        # result folder, logger
        self.logger = getLogger(name='trainer')
        self.result_folder = get_result_folder()
        self.result_log = LogData()
        # cuda
        # NOTE: the default tensor type must be switched BEFORE Model/Env
        # construction so their internal tensors land on the chosen device.
        USE_CUDA = self.trainer_params['use_cuda']
        if USE_CUDA:
            cuda_device_num = self.trainer_params['cuda_device_num']
            torch.cuda.set_device(cuda_device_num)
            device = torch.device('cuda', cuda_device_num)
            torch.set_default_tensor_type('torch.cuda.FloatTensor')
        else:
            device = torch.device('cpu')
            torch.set_default_tensor_type('torch.FloatTensor')
        # Main Components
        self.model = Model(**self.model_params)
        self.env = Env(**self.env_params)
        self.optimizer = Optimizer(self.model.parameters(), **self.optimizer_params['optimizer'])
        self.scheduler = Scheduler(self.optimizer, **self.optimizer_params['scheduler'])
        # Restore
        self.start_epoch = 1
        model_load = trainer_params['model_load']
        if model_load['enable']:
            checkpoint_fullname = '{path}/checkpoint-{epoch}.pt'.format(**model_load)
            checkpoint = torch.load(checkpoint_fullname, map_location=device)
            self.model.load_state_dict(checkpoint['model_state_dict'])
            self.start_epoch = 1 + model_load['epoch']
            self.result_log.set_raw_data(checkpoint['result_log'])
            self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
            # NOTE(review): run() saves 'scheduler_state_dict' in checkpoints,
            # but resume rewinds the scheduler via last_epoch instead of
            # loading that state — confirm this asymmetry is intentional.
            self.scheduler.last_epoch = model_load['epoch']-1
            self.logger.info('Saved Model Loaded !!')
        # utility
        self.time_estimator = TimeEstimator()
    def run(self):
        """Train for trainer_params['epochs'] epochs.

        Per epoch: step the LR scheduler, train, append scores to the
        result log, and save log images / model checkpoints at the
        configured intervals. Results are reported through the logger.
        """
        self.time_estimator.reset(self.start_epoch)
        for epoch in range(self.start_epoch, self.trainer_params['epochs']+1):
            self.logger.info('=================================================================')
            # LR Decay
            # NOTE(review): stepping at the START of the epoch means the
            # schedule advances before the first batch of the epoch — confirm
            # against the intended MultiStepLR milestones.
            self.scheduler.step()
            # Train
            train_score, train_loss = self._train_one_epoch(epoch)
            self.result_log.append('train_score', epoch, train_score)
            self.result_log.append('train_loss', epoch, train_loss)
            ############################
            # Logs & Checkpoint
            ############################
            elapsed_time_str, remain_time_str = self.time_estimator.get_est_string(epoch, self.trainer_params['epochs'])
            self.logger.info("Epoch {:3d}/{:3d}: Time Est.: Elapsed[{}], Remain[{}]".format(
                epoch, self.trainer_params['epochs'], elapsed_time_str, remain_time_str))
            all_done = (epoch == self.trainer_params['epochs'])
            model_save_interval = self.trainer_params['logging']['model_save_interval']
            img_save_interval = self.trainer_params['logging']['img_save_interval']
            if epoch > 1: # save latest images, every epoch
                self.logger.info("Saving log_image")
                image_prefix = '{}/latest'.format(self.result_folder)
                util_save_log_image_with_label(image_prefix, self.trainer_params['logging']['log_image_params_1'],
                                               self.result_log, labels=['train_score'])
                util_save_log_image_with_label(image_prefix, self.trainer_params['logging']['log_image_params_2'],
                                               self.result_log, labels=['train_loss'])
            if all_done or (epoch % model_save_interval) == 0:
                self.logger.info("Saving trained_model")
                checkpoint_dict = {
                    'epoch': epoch,
                    'model_state_dict': self.model.state_dict(),
                    'optimizer_state_dict': self.optimizer.state_dict(),
                    'scheduler_state_dict': self.scheduler.state_dict(),
                    'result_log': self.result_log.get_raw_data()
                }
                torch.save(checkpoint_dict, '{}/checkpoint-{}.pt'.format(self.result_folder, epoch))
            if all_done or (epoch % img_save_interval) == 0:
                image_prefix = '{}/img/checkpoint-{}'.format(self.result_folder, epoch)
                util_save_log_image_with_label(image_prefix, self.trainer_params['logging']['log_image_params_1'],
                                               self.result_log, labels=['train_score'])
                util_save_log_image_with_label(image_prefix, self.trainer_params['logging']['log_image_params_2'],
                                               self.result_log, labels=['train_loss'])
            if all_done:
                self.logger.info(" *** Training Done *** ")
                self.logger.info("Now, printing log array...")
                util_print_log_array(self.logger, self.result_log)
    def _train_one_epoch(self, epoch):
        """Train over trainer_params['train_episodes'] instances.

        Returns:
            (avg_score, avg_loss) — running averages across the epoch.
        """
        score_AM = AverageMeter()
        loss_AM = AverageMeter()
        train_num_episode = self.trainer_params['train_episodes']
        episode = 0
        loop_cnt = 0
        while episode < train_num_episode:
            remaining = train_num_episode - episode
            # the final batch may be smaller than train_batch_size
            batch_size = min(self.trainer_params['train_batch_size'], remaining)
            avg_score, avg_loss = self._train_one_batch(batch_size)
            score_AM.update(avg_score, batch_size)
            loss_AM.update(avg_loss, batch_size)
            episode += batch_size
            # Log First 10 Batch, only at the first epoch
            if epoch == self.start_epoch:
                loop_cnt += 1
                if loop_cnt <= 10:
                    self.logger.info('Epoch {:3d}: Train {:3d}/{:3d}({:1.1f}%) Score: {:.4f}, Loss: {:.4f}'
                                     .format(epoch, episode, train_num_episode, 100. * episode / train_num_episode,
                                             score_AM.avg, loss_AM.avg))
        # Log Once, for each epoch
        self.logger.info('Epoch {:3d}: Train ({:3.0f}%) Score: {:.4f}, Loss: {:.4f}'
                         .format(epoch, 100. * episode / train_num_episode,
                                 score_AM.avg, loss_AM.avg))
        return score_AM.avg, loss_AM.avg
    def _train_one_batch(self, batch_size):
        """One policy-gradient step on a freshly sampled batch.

        Returns:
            (score_mean, loss_mean) as Python floats; score is the mean best
            tour length over POMO starts (positive value).
        """
        # Prep
        ###############################################
        self.model.train()
        self.env.load_problems(batch_size)
        reset_state, _, _ = self.env.reset()
        self.model.pre_forward(reset_state)
        prob_list = torch.zeros(size=(batch_size, self.env.pomo_size, 0))
        # shape: (batch, pomo, 0~problem)
        # POMO Rollout
        ###############################################
        # per-step selection probabilities are accumulated so the full
        # trajectory log-likelihood can be formed after the rollout
        state, reward, done = self.env.pre_step()
        while not done:
            selected, prob = self.model(state)
            # shape: (batch, pomo)
            state, reward, done = self.env.step(selected)
            prob_list = torch.cat((prob_list, prob[:, :, None]), dim=2)
        # Loss
        ###############################################
        # shared baseline: mean reward over the POMO starts of each instance
        advantage = reward - reward.float().mean(dim=1, keepdims=True)
        # shape: (batch, pomo)
        log_prob = prob_list.log().sum(dim=2)
        # size = (batch, pomo)
        loss = -advantage * log_prob # Minus Sign: To Increase REWARD
        # shape: (batch, pomo)
        loss_mean = loss.mean()
        # Score
        ###############################################
        max_pomo_reward, _ = reward.max(dim=1) # get best results from pomo
        score_mean = -max_pomo_reward.float().mean() # negative sign to make positive value
        # Step & Return
        ###############################################
        self.model.zero_grad()
        loss_mean.backward()
        self.optimizer.step()
        return score_mean.item(), loss_mean.item()
| 8,357 | 41.642857 | 120 | py |
exoplanet-atlas | exoplanet-atlas-main/docs/conf.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# exoplanet-atlas documentation build configuration file, created by
# sphinx-quickstart on Sun Nov 17 17:31:38 2019.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    "sphinx.ext.autodoc",
    "sphinx.ext.viewcode",
    "sphinx.ext.githubpages",
    "sphinx.ext.mathjax",
    "nbsphinx",
]
# NOTE(review): 'sphinx.ext.intersphinx' is NOT enabled here, so the
# intersphinx_mapping defined at the bottom of this file has no effect
# unless that extension is added to this list.
html_sidebars = {
    "**": ["globaltoc.html", "relations.html", "sourcelink.html", "searchbox.html"]
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = ".rst"
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "exoplanet-atlas"
copyright = "2019, Zach Berta-Thompson"
author = "Zach Berta-Thompson"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# import exoatlas
# NOTE(review): version/release are hard-coded in two places; keep them in
# sync with the package's __version__ (the import above is commented out).
version = "0.2.6" # exoatlas.__version__
# The full version, including alpha/beta/rc tags.
release = "0.2.6" # exoatlas.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
# NOTE(review): newer Sphinx (>= 5) expects a string here (e.g. "en") and
# warns on None — confirm the Sphinx version this project pins.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "alabaster"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "exoplanet-atlasdoc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',
    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (
        master_doc,
        "exoplanet-atlas.tex",
        "exoplanet-atlas Documentation",
        "Zach Berta-Thompson",
        "manual",
    ),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, "exoplanet-atlas", "exoplanet-atlas Documentation", [author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (
        master_doc,
        "exoplanet-atlas",
        "exoplanet-atlas Documentation",
        author,
        "exoplanet-atlas",
        "One line description of project.",
        "Miscellaneous",
    ),
]
# Example configuration for intersphinx: refer to the Python standard library.
# NOTE(review): the `{url: None}` form is deprecated in newer Sphinx in favor
# of named mappings like {"python": ("https://docs.python.org/3", None)};
# also requires sphinx.ext.intersphinx (not enabled above) to do anything.
intersphinx_mapping = {"https://docs.python.org/": None}
| 5,305 | 29.147727 | 83 | py |
animeGAN | animeGAN-master/main.py | from __future__ import print_function
import os
import time
import random
import argparse
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torchvision.utils as vutils
from torch.autograd import Variable
### load project files
import models
from models import weights_init
# Command-line configuration for the DCGAN training script.
# BUGFIX: the parse result was previously bound to `args`, but every later
# line of this script (starting with `print(opt)` below) reads `opt`, which
# raised NameError at startup. Bind the parsed namespace to `opt`, matching
# the commented-out `arg_list` variant that the rest of the script was
# written against.
parser = argparse.ArgumentParser()
parser.add_argument('--dataRoot', required=True, help='path to dataset')
parser.add_argument('--workers', type=int, default=2, help='number of data loading workers')
parser.add_argument('--batchSize', type=int, default=64, help='input batch size')
parser.add_argument('--imageSize', type=int, default=64, help='the height / width of the input image to network')
parser.add_argument('--nz', type=int, default=100, help='size of the latent z vector')
parser.add_argument('--ngf', type=int, default=64)
parser.add_argument('--ndf', type=int, default=64)
parser.add_argument('--niter', type=int, default=25, help='number of epochs to train for')
parser.add_argument('--lr', type=float, default=0.0002, help='learning rate, default=0.0002')
parser.add_argument('--beta1', type=float, default=0.5, help='beta1 for adam. default=0.5')
parser.add_argument('--cuda' , action='store_true', help='enables cuda')
parser.add_argument('--ngpu' , type=int, default=1, help='number of GPUs to use')
parser.add_argument('--netG', default='', help="path to netG (to continue training)")
parser.add_argument('--netD', default='', help="path to netD (to continue training)")
parser.add_argument('--outDir', default='.', help='folder to output images and model checkpoints')
parser.add_argument('--model', type=int, default=1, help='1 for dcgan, 2 for illustrationGAN-like-GAN')
parser.add_argument('--d_labelSmooth', type=float, default=0, help='for D, use soft label "1-labelSmooth" for real samples')
parser.add_argument('--n_extra_layers_d', type=int, default=0, help='number of extra conv layers in D')
parser.add_argument('--n_extra_layers_g', type=int, default=1, help='number of extra conv layers in G')
parser.add_argument('--binary', action='store_true', help='z from bernoulli distribution, with prob=0.5')
# simply prefer this way
# arg_list = [
#     '--dataRoot', '/home/jielei/data/danbooru-faces',
#     '--workers', '12',
#     '--batchSize', '128',
#     '--imageSize', '64',
#     '--nz', '100',
#     '--ngf', '64',
#     '--ndf', '64',
#     '--niter', '80',
#     '--lr', '0.0002',
#     '--beta1', '0.5',
#     '--cuda',
#     '--ngpu', '1',
#     '--netG', '',
#     '--netD', '',
#     '--outDir', './results',
#     '--model', '1',
#     '--d_labelSmooth', '0.1', # 0.25 from imporved-GAN paper
#     '--n_extra_layers_d', '0',
#     '--n_extra_layers_g', '1', # in the sense that generator should be more powerful
# ]
opt = parser.parse_args()
# opt = parser.parse_args(arg_list)
print(opt)
# ---------------------------------------------------------------------------
# Setup: output dir, RNG seeding, data pipeline, models, losses, optimizers.
# NOTE(review): this section reads `opt`, but the parse above binds `args` —
# alias or rename before running, or the script dies with NameError.
# ---------------------------------------------------------------------------
try:
    os.makedirs(opt.outDir)
except OSError:
    pass
# seed is drawn randomly each run; reproducibility relies on re-using the value
opt.manualSeed = random.randint(1,10000) # fix seed, a scalar
random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)
cudnn.benchmark = True
if torch.cuda.is_available() and not opt.cuda:
    print("WARNING: You have a CUDA device, so you should probably run with --cuda")
nc = 3
ngpu = opt.ngpu
nz = opt.nz
ngf = opt.ngf
ndf = opt.ndf
n_extra_d = opt.n_extra_layers_d
n_extra_g = opt.n_extra_layers_g
# NOTE(review): transforms.Scale is the pre-0.2 torchvision name for
# transforms.Resize — confirm the pinned torchvision version.
dataset = dset.ImageFolder(
    root=opt.dataRoot,
    transform=transforms.Compose([
        transforms.Scale(opt.imageSize),
        # transforms.CenterCrop(opt.imageSize),
        transforms.ToTensor(),
        transforms.Normalize((0.5,0.5,0.5), (0.5,0.5,0.5)), # bring images to (-1,1)
    ])
)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=opt.batchSize,
                                         shuffle=True, num_workers=opt.workers)
# load models
if opt.model == 1:
    netG = models._netG_1(ngpu, nz, nc, ngf, n_extra_g)
    netD = models._netD_1(ngpu, nz, nc, ndf, n_extra_d)
elif opt.model == 2:
    netG = models._netG_2(ngpu, nz, nc, ngf)
    netD = models._netD_2(ngpu, nz, nc, ndf)
netG.apply(weights_init)
if opt.netG != '':
    netG.load_state_dict(torch.load(opt.netG))
print(netG)
netD.apply(weights_init)
if opt.netD != '':
    netD.load_state_dict(torch.load(opt.netD))
print(netD)
criterion = nn.BCELoss()
criterion_MSE = nn.MSELoss()
# pre-allocated buffers, resized per batch below
input = torch.FloatTensor(opt.batchSize, 3, opt.imageSize, opt.imageSize)
noise = torch.FloatTensor(opt.batchSize, nz, 1, 1)
if opt.binary:
    bernoulli_prob = torch.FloatTensor(opt.batchSize, nz, 1, 1).fill_(0.5)
    fixed_noise = torch.bernoulli(bernoulli_prob)
else:
    fixed_noise = torch.FloatTensor(opt.batchSize, nz, 1, 1).normal_(0, 1)
label = torch.FloatTensor(opt.batchSize)
real_label = 1
fake_label = 0
if opt.cuda:
    netD.cuda()
    netG.cuda()
    criterion.cuda()
    criterion_MSE.cuda()
    input, label = input.cuda(), label.cuda()
    noise, fixed_noise = noise.cuda(), fixed_noise.cuda()
# NOTE(review): Variable / .data[0] / retain_variables below are the
# pre-0.4 PyTorch API (today: plain tensors, .item(), retain_graph).
input = Variable(input)
label = Variable(label)
noise = Variable(noise)
fixed_noise = Variable(fixed_noise)
# setup optimizer
optimizerD = optim.Adam(netD.parameters(), lr = opt.lr, betas = (opt.beta1, 0.999))
optimizerG = optim.Adam(netG.parameters(), lr = opt.lr, betas = (opt.beta1, 0.999))
# ---------------------------------------------------------------------------
# Training loop: alternate discriminator and generator updates per batch.
# ---------------------------------------------------------------------------
for epoch in range(opt.niter):
    for i, data in enumerate(dataloader, 0):
        start_iter = time.time()
        ############################
        # (1) Update D network: maximize log(D(x)) + log(1 - D(G(z)))
        ###########################
        # train with real
        netD.zero_grad()
        real_cpu, _ = data
        batch_size = real_cpu.size(0)
        input.data.resize_(real_cpu.size()).copy_(real_cpu)
        label.data.resize_(batch_size).fill_(real_label - opt.d_labelSmooth) # use smooth label for discriminator
        output = netD(input)
        errD_real = criterion(output, label)
        errD_real.backward()
        D_x = output.data.mean()
        # train with fake
        noise.data.resize_(batch_size, nz, 1, 1)
        if opt.binary:
            bernoulli_prob.resize_(noise.data.size())
            noise.data.copy_(2*(torch.bernoulli(bernoulli_prob)-0.5))
        else:
            noise.data.normal_(0, 1)
        fake,z_prediction = netG(noise)
        label.data.fill_(fake_label)
        output = netD(fake.detach()) # add ".detach()" to avoid backprop through G
        errD_fake = criterion(output, label)
        errD_fake.backward() # gradients for fake/real will be accumulated
        D_G_z1 = output.data.mean()
        errD = errD_real + errD_fake
        optimizerD.step() # .step() can be called once the gradients are computed
        ############################
        # (2) Update G network: maximize log(D(G(z)))
        ###########################
        netG.zero_grad()
        label.data.fill_(real_label) # fake labels are real for generator cost
        output = netD(fake)
        errG = criterion(output, label)
        errG.backward(retain_variables=True) # True if backward through the graph for the second time
        if opt.model == 2: # with z predictor
            errG_z = criterion_MSE(z_prediction, noise)
            errG_z.backward()
        D_G_z2 = output.data.mean()
        optimizerG.step()
        end_iter = time.time()
        print('[%d/%d][%d/%d] Loss_D: %.4f Loss_G: %.4f D(x): %.4f D(G(z)): %.4f / %.4f Elapsed %.2f s'
              % (epoch, opt.niter, i, len(dataloader),
                 errD.data[0], errG.data[0], D_x, D_G_z1, D_G_z2, end_iter-start_iter))
        if i % 100 == 0:
            # the first 64 samples from the mini-batch are saved.
            vutils.save_image(real_cpu[0:64,:,:,:],
                    '%s/real_samples.png' % opt.outDir, nrow=8)
            fake,_ = netG(fixed_noise)
            vutils.save_image(fake.data[0:64,:,:,:],
                    '%s/fake_samples_epoch_%03d.png' % (opt.outDir, epoch), nrow=8)
    if epoch % 1 == 0:
        # do checkpointing
        torch.save(netG.state_dict(), '%s/netG_epoch_%d.pth' % (opt.outDir, epoch))
        torch.save(netD.state_dict(), '%s/netD_epoch_%d.pth' % (opt.outDir, epoch))
| 8,259 | 37.418605 | 124 | py |
animeGAN | animeGAN-master/models.py | import torch
import torch.nn as nn
import torch.nn.parallel
def weights_init(m):
    """DCGAN weight initializer, meant for ``module.apply(weights_init)``.

    Conv-like layers get N(0, 0.02) weights; batch-norm layers get
    N(1, 0.02) scale and zero bias. Any other module is left untouched.
    """
    name = type(m).__name__
    if 'Conv' in name:
        m.weight.data.normal_(0.0, 0.02)
    elif 'BatchNorm' in name:
        m.weight.data.normal_(1.0, 0.02)
        m.bias.data.fill_(0)
# DCGAN model, fully convolutional architecture
class _netG_1(nn.Module):
def __init__(self, ngpu, nz, nc , ngf, n_extra_layers_g):
super(_netG_1, self).__init__()
self.ngpu = ngpu
#self.nz = nz
#self.nc = nc
#self.ngf = ngf
main = nn.Sequential(
# input is Z, going into a convolution
# state size. nz x 1 x 1
nn.ConvTranspose2d( nz, ngf * 8, 4, 1, 0, bias=False),
nn.BatchNorm2d(ngf * 8),
nn.LeakyReLU(0.2, inplace=True),
# state size. (ngf*8) x 4 x 4
nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf * 4),
nn.LeakyReLU(0.2, inplace=True),
# state size. (ngf*4) x 8 x 8
nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf * 2),
nn.LeakyReLU(0.2, inplace=True),
# state size. (ngf*2) x 16 x 16
nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf),
nn.LeakyReLU(0.2, inplace=True),
# state size. (ngf) x 32 x 32
)
# Extra layers
for t in range(n_extra_layers_g):
main.add_module('extra-layers-{0}.{1}.conv'.format(t, ngf),
nn.Conv2d(ngf, ngf, 3, 1, 1, bias=False))
main.add_module('extra-layers-{0}.{1}.batchnorm'.format(t, ngf),
nn.BatchNorm2d(ngf))
main.add_module('extra-layers-{0}.{1}.relu'.format(t, ngf),
nn.LeakyReLU(0.2, inplace=True))
main.add_module('final_layer.deconv',
nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False)) # 5,3,1 for 96x96
main.add_module('final_layer.tanh',
nn.Tanh())
# state size. (nc) x 96 x 96
self.main = main
def forward(self, input):
gpu_ids = None
if isinstance(input.data, torch.cuda.FloatTensor) and self.ngpu > 1:
gpu_ids = range(self.ngpu)
return nn.parallel.data_parallel(self.main, input, gpu_ids), 0
class _netD_1(nn.Module):
def __init__(self, ngpu, nz, nc, ndf, n_extra_layers_d):
super(_netD_1, self).__init__()
self.ngpu = ngpu
main = nn.Sequential(
# input is (nc) x 96 x 96
nn.Conv2d(nc, ndf, 4, 2, 1, bias=False), # 5,3,1 for 96x96
nn.LeakyReLU(0.2, inplace=True),
# state size. (ndf) x 32 x 32
nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False),
nn.BatchNorm2d(ndf * 2),
nn.LeakyReLU(0.2, inplace=True),
# state size. (ndf*2) x 16 x 16
nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False),
nn.BatchNorm2d(ndf * 4),
nn.LeakyReLU(0.2, inplace=True),
# state size. (ndf*4) x 8 x 8
nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=False),
nn.BatchNorm2d(ndf * 8),
nn.LeakyReLU(0.2, inplace=True),
# state size. (ndf*8) x 4 x 4
)
# Extra layers
for t in range(n_extra_layers_d):
main.add_module('extra-layers-{0}.{1}.conv'.format(t, ndf * 8),
nn.Conv2d(ndf * 8, ndf * 8, 3, 1, 1, bias=False))
main.add_module('extra-layers-{0}.{1}.batchnorm'.format(t, ndf * 8),
nn.BatchNorm2d(ndf * 8))
main.add_module('extra-layers-{0}.{1}.relu'.format(t, ndf * 8),
nn.LeakyReLU(0.2, inplace=True))
main.add_module('final_layers.conv', nn.Conv2d(ndf * 8, 1, 4, 1, 0, bias=False))
main.add_module('final_layers.sigmoid', nn.Sigmoid())
# state size. 1 x 1 x 1
self.main = main
def forward(self, input):
gpu_ids = None
if isinstance(input.data, torch.cuda.FloatTensor) and self.ngpu > 1:
gpu_ids = range(self.ngpu)
output = nn.parallel.data_parallel(self.main, input, gpu_ids)
return output.view(-1, 1)
class _netD_2(nn.Module):
def __init__(self, ngpu, nz, nc , ndf):
super(_netD_2, self).__init__()
self.ngpu = ngpu
self.convs = nn.Sequential(
# input is (nc) x 96 x 96
nn.Conv2d(nc, ndf, 4, 2, 1, bias=False),
nn.LeakyReLU(0.2, inplace=True),
# state size. (ndf) x 32 x 32
nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False),
nn.BatchNorm2d(ndf * 2),
nn.LeakyReLU(0.2, inplace=True),
# state size. (ndf*2) x 16 x 16
nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False),
nn.BatchNorm2d(ndf * 4),
nn.LeakyReLU(0.2, inplace=True),
# state size. (ndf*4) x 8 x 8
nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=False),
nn.BatchNorm2d(ndf * 8),
nn.LeakyReLU(0.2, inplace=True),
# state size. (ndf*8) x 4 x 4
nn.Conv2d(ndf * 8, 1024, 4, 1, 0, bias=False),
nn.LeakyReLU(inplace=True),
nn.Dropout(0.5),
# state size. 1024 x 1 x 1
)
self.fcs = nn.Sequential(
nn.Linear(1024, 1024),
nn.LeakyReLU(inplace=True),
nn.Dropout(0.5),
nn.Linear(1024, 1),
nn.Sigmoid()
)
def forward(self, input):
gpu_ids = None
if isinstance(input.data, torch.cuda.FloatTensor) and self.ngpu > 1:
gpu_ids = range(self.ngpu)
output = nn.parallel.data_parallel(self.convs, input, gpu_ids)
output = self.fcs(output.view(-1,1024))
return output.view(-1, 1)
# with z decoder and fc layers
class _netG_2(nn.Module):
def __init__(self, ngpu, nz, nc , ngf):
super(_netG_2, self).__init__()
self.ngpu = ngpu
self.nz = nz
self.fcs = nn.Sequential(
# input is Z, going into a convolution
# state size. nz x 1 x 1
nn.Linear(nz, 1024),
nn.ReLU(inplace=True),
nn.Dropout(0.5),
nn.Linear(1024, 1024),
nn.ReLU(inplace=True),
nn.Dropout(0.5),
)
self.decode_fcs = nn.Sequential(
nn.Linear(1024, 1024),
nn.ReLU(inplace=True),
nn.Dropout(0.5),
nn.Linear(1024, nz),
)
self.convs = nn.Sequential(
# 1024x1x1
nn.ConvTranspose2d(1024, ngf * 8, 4, 1, 0, bias=False),
nn.BatchNorm2d(ngf * 8),
nn.ReLU(inplace=True),
# state size. (ngf*8) x 4 x 4
nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf * 4),
nn.ReLU(inplace=True),
# state size. (ngf*4) x 8 x 8
nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf * 2),
nn.ReLU(inplace=True),
# state size. (ngf*2) x 16 x 16
nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf),
nn.ReLU(inplace=True),
# state size. (ngf) x 32 x 32
nn.ConvTranspose2d( ngf, nc, 4, 2, 1, bias=False),
nn.Tanh()
# state size. (nc) x 96 x 96
)
def forward(self, input):
input = self.fcs(input.view(-1,self.nz))
gpu_ids = None
if isinstance(input.data, torch.cuda.FloatTensor) and self.ngpu > 1:
gpu_ids = range(self.ngpu)
z_prediction = self.decode_fcs(input)
input = input.view(-1,1024,1,1)
output = nn.parallel.data_parallel(self.convs, input, gpu_ids)
return output, z_prediction
# DCGAN model with fc layers
class _netG_3(nn.Module):
def __init__(self, ngpu, nz, nc , ngf):
super(_netG_3, self).__init__()
self.ngpu = ngpu
self.fcs = nn.Sequential(
# input is Z, going into a convolution
# state size. nz x 1 x 1
nn.Linear(nz, 1024),
nn.ReLU(inplace=True),
nn.Dropout(0.5),
nn.Linear(1024, 1024),
nn.ReLU(inplace=True),
nn.Dropout(0.5),
)
self.convs = nn.Sequential(
# 1024x1x1
nn.ConvTranspose2d(1024, ngf * 8, 4, 1, 0, bias=False),
nn.BatchNorm2d(ngf * 8),
nn.ReLU(inplace=True),
# state size. (ngf*8) x 4 x 4
nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf * 4),
nn.ReLU(inplace=True),
# state size. (ngf*4) x 8 x 8
nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf * 2),
nn.ReLU(inplace=True),
# state size. (ngf*2) x 16 x 16
nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf),
nn.ReLU(inplace=True),
# state size. (ngf) x 32 x 32
nn.ConvTranspose2d( ngf, nc, 4, 2, 1, bias=False),
nn.Tanh()
# state size. (nc) x 96 x 96
)
def forward(self, input):
input = self.fcs(input.view(-1,nz))
gpu_ids = None
if isinstance(input.data, torch.cuda.FloatTensor) and self.ngpu > 1:
gpu_ids = range(self.ngpu)
input = input.view(-1,1024,1,1)
return nn.parallel.data_parallel(self.convs, input, gpu_ids)
| 9,855 | 37.20155 | 89 | py |
bnp | bnp-master/bayesian_optimization/run_bo.py | import os
import argparse
from attrdict import AttrDict
import numpy as np
import os.path as osp
import yaml
import torch
from data.gp import *
import bayeso
import bayeso.gp as bayesogp
from bayeso import covariance
from bayeso import acquisition
from utils.paths import results_path
from utils.misc import load_module
def get_str_file(path_, str_kernel, str_model, noise, seed=None):
if noise is not None:
str_all = 'bo_{}_{}_{}'.format(str_kernel, 'noisy', str_model)
else:
str_all = 'bo_{}_{}'.format(str_kernel, str_model)
if seed is not None:
str_all += '_' + str(seed) + '.npy'
else:
str_all += '.npy'
return osp.join(path_, str_all)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--mode',
choices=['oracle', 'bo'],
default='bo')
parser.add_argument('--expid', type=str, default='run1')
parser.add_argument('--gpu', type=str, default='0')
parser.add_argument('--model', type=str, default='cnp')
parser.add_argument('--bo_num_samples', type=int, default=200)
parser.add_argument('--bo_num_init', type=int, default=1)
parser.add_argument('--bo_kernel', type=str, default='periodic')
parser.add_argument('--t_noise', type=float, default=None)
args = parser.parse_args()
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
model_cls = getattr(load_module(f'models/{args.model}.py'), args.model.upper())
with open(f'configs/gp/{args.model}.yaml', 'r') as f:
config = yaml.safe_load(f)
model = model_cls(**config).cuda()
args.root = osp.join(results_path, 'gp', args.model, args.expid)
if args.mode == 'oracle':
oracle(args, model)
elif args.mode == 'bo':
bo(args, model)
def oracle(args, model):
    """Run the GP-oracle BO baseline on 100 random GP-prior objectives.

    For each seed, a function is sampled from a GP prior on [-2, 2]; BO then
    runs `num_iter` steps using an exact GP surrogate (bayeso) with expected
    improvement, recording the running minimum and simple regret per step.
    Per-seed results are cached as ./results/*.npy and the aggregate list is
    saved under ./figures/results.  `model` is unused here (kept for a
    signature parallel to `bo`).
    """
    seed = 42
    num_all = 100    # number of independent objective functions
    num_iter = 50    # BO iterations per objective
    num_init = args.bo_num_init
    str_cov = 'se'   # bayeso squared-exponential surrogate kernel
    list_dict = []

    if args.bo_kernel == 'rbf':
        kernel = RBFKernel()
    elif args.bo_kernel == 'matern':
        kernel = Matern52Kernel()
    elif args.bo_kernel == 'periodic':
        kernel = PeriodicKernel()
    else:
        raise ValueError(f'Invalid kernel {args.bo_kernel}')

    for ind_seed in range(1, num_all + 1):
        seed_ = seed * ind_seed

        if seed_ is not None:
            torch.manual_seed(seed_)
            torch.cuda.manual_seed(seed_)

        # Reuse a cached result for this seed if it already exists on disk.
        if os.path.exists(get_str_file('./results', args.bo_kernel, 'oracle', args.t_noise, seed=ind_seed)):
            dict_exp = np.load(get_str_file('./results', args.bo_kernel, 'oracle', args.t_noise, seed=ind_seed), allow_pickle=True)
            dict_exp = dict_exp[()]
            list_dict.append(dict_exp)
            print(dict_exp)
            print(dict_exp['global'])
            print(np.array2string(dict_exp['minima'], separator=','))
            print(np.array2string(dict_exp['regrets'], separator=','))
            continue

        # Draw one objective: a GP sample evaluated on a dense 1000-point grid.
        sampler = GPPriorSampler(kernel, t_noise=args.t_noise)

        xp = torch.linspace(-2, 2, 1000).cuda()
        xp_ = xp.unsqueeze(0).unsqueeze(2)

        yp = sampler.sample(xp_)
        min_yp = yp.min()  # global minimum over the grid (regret reference)
        print(min_yp.cpu().numpy())

        model.eval()

        # Random initial design: num_init context points (xc/yc) plus a
        # same-sized held-out target split (xt/yt).
        batch = AttrDict()
        indices_permuted = torch.randperm(yp.shape[1])

        batch.x = xp_[:, indices_permuted[:2*num_init], :]
        batch.y = yp[:, indices_permuted[:2*num_init], :]

        batch.xc = xp_[:, indices_permuted[:num_init], :]
        batch.yc = yp[:, indices_permuted[:num_init], :]

        batch.xt = xp_[:, indices_permuted[num_init:2*num_init], :]
        batch.yt = yp[:, indices_permuted[num_init:2*num_init], :]

        X_train = batch.xc.squeeze(0).cpu().numpy()
        Y_train = batch.yc.squeeze(0).cpu().numpy()
        X_test = xp_.squeeze(0).cpu().numpy()

        list_min = []
        list_min.append(batch.yc.min().cpu().numpy())

        for ind_iter in range(0, num_iter):
            print('ind_seed {} seed {} iter {}'.format(ind_seed, seed_, ind_iter + 1))

            # Fit an exact GP surrogate (hyperparameters optimized by bayeso).
            cov_X_X, inv_cov_X_X, hyps = bayesogp.get_optimized_kernel(X_train, Y_train, None, str_cov, is_fixed_noise=False, debug=False)

            prior_mu_train = bayesogp.get_prior_mu(None, X_train)
            prior_mu_test = bayesogp.get_prior_mu(None, X_test)
            cov_X_Xs = covariance.cov_main(str_cov, X_train, X_test, hyps, False)
            cov_Xs_Xs = covariance.cov_main(str_cov, X_test, X_test, hyps, True)
            # Symmetrize to guard against numerical asymmetry.
            cov_Xs_Xs = (cov_Xs_Xs + cov_Xs_Xs.T) / 2.0

            # Standard GP posterior mean/variance on the grid.
            mu_ = np.dot(np.dot(cov_X_Xs.T, inv_cov_X_X), Y_train - prior_mu_train) + prior_mu_test
            Sigma_ = cov_Xs_Xs - np.dot(np.dot(cov_X_Xs.T, inv_cov_X_X), cov_X_Xs)
            sigma_ = np.expand_dims(np.sqrt(np.maximum(np.diag(Sigma_), 0.0)), axis=1)

            # Expected improvement; bayeso returns positive-is-better, so
            # negate and pick the argmin.
            acq_vals = -1.0 * acquisition.ei(np.ravel(mu_), np.ravel(sigma_), Y_train)

            ind_ = np.argmin(acq_vals)

            x_new = xp[ind_, None, None, None]
            y_new = yp[:, ind_, None, :]

            # Append the newly acquired point to both the full set and context.
            batch.x = torch.cat([batch.x, x_new], axis=1)
            batch.y = torch.cat([batch.y, y_new], axis=1)
            batch.xc = torch.cat([batch.xc, x_new], axis=1)
            batch.yc = torch.cat([batch.yc, y_new], axis=1)

            X_train = batch.xc.squeeze(0).cpu().numpy()
            Y_train = batch.yc.squeeze(0).cpu().numpy()

            min_cur = batch.yc.min()
            list_min.append(min_cur.cpu().numpy())

        print(min_yp.cpu().numpy())
        print(np.array2string(np.array(list_min), separator=','))
        print(np.array2string(np.array(list_min) - min_yp.cpu().numpy(), separator=','))

        dict_exp = {
            'seed': seed_,
            'str_cov': str_cov,
            'global': min_yp.cpu().numpy(),
            'minima': np.array(list_min),
            'regrets': np.array(list_min) - min_yp.cpu().numpy(),
            'xc': X_train,
            'yc': Y_train,
            'model': 'oracle',
        }

        # Cache per-seed results so interrupted runs can resume.
        np.save(get_str_file('./results', args.bo_kernel, 'oracle', args.t_noise, seed=ind_seed), dict_exp)
        list_dict.append(dict_exp)

    np.save(get_str_file('./figures/results', args.bo_kernel, 'oracle', args.t_noise), list_dict)
def bo(args, model):
    """Run BO on 100 random GP-prior objectives using a neural-process surrogate.

    Loads the trained checkpoint for `model`, then for each seed samples an
    objective from a GP prior on a dense grid and runs `num_iter` expected-
    improvement steps, with the NP's predictive mean/std standing in for a GP
    posterior.  Running minima and regrets are collected per seed and the
    aggregate list is saved under ./figures/results.
    """
    if args.mode == 'bo':
        ckpt = torch.load(os.path.join(args.root, 'ckpt.tar'))
        model.load_state_dict(ckpt.model)

    if args.bo_kernel == 'rbf':
        kernel = RBFKernel()
    elif args.bo_kernel == 'matern':
        kernel = Matern52Kernel()
    elif args.bo_kernel == 'periodic':
        kernel = PeriodicKernel()
    else:
        raise ValueError(f'Invalid kernel {args.bo_kernel}')

    seed = 42
    str_cov = 'se'   # recorded for bookkeeping only in this function
    num_all = 100    # number of independent objective functions
    num_iter = 50    # BO iterations per objective
    num_init = args.bo_num_init

    list_dict = []

    for ind_seed in range(1, num_all + 1):
        seed_ = seed * ind_seed

        if seed_ is not None:
            torch.manual_seed(seed_)
            torch.cuda.manual_seed(seed_)

        # Draw one objective: a GP sample evaluated on a dense 1000-point grid.
        obj_prior = GPPriorSampler(kernel, t_noise=args.t_noise)

        xp = torch.linspace(-2, 2, 1000).cuda()
        xp_ = xp.unsqueeze(0).unsqueeze(2)

        yp = obj_prior.sample(xp_)
        min_yp = yp.min()  # global minimum over the grid (regret reference)
        print(min_yp.cpu().numpy())

        model.eval()

        # Random initial design: num_init context points plus a held-out split.
        batch = AttrDict()
        indices_permuted = torch.randperm(yp.shape[1])

        batch.x = xp_[:, indices_permuted[:2*num_init], :]
        batch.y = yp[:, indices_permuted[:2*num_init], :]

        batch.xc = xp_[:, indices_permuted[:num_init], :]
        batch.yc = yp[:, indices_permuted[:num_init], :]

        batch.xt = xp_[:, indices_permuted[num_init:2*num_init], :]
        batch.yt = yp[:, indices_permuted[num_init:2*num_init], :]

        X_train = batch.xc.squeeze(0).cpu().numpy()
        Y_train = batch.yc.squeeze(0).cpu().numpy()
        X_test = xp_.squeeze(0).cpu().numpy()

        list_min = []
        list_min.append(batch.yc.min().cpu().numpy())

        for ind_iter in range(0, num_iter):
            print('ind_seed {} seed {} iter {}'.format(ind_seed, seed_, ind_iter + 1))

            with torch.no_grad():
                outs = model(batch, num_samples=args.bo_num_samples)
                print('ctx_ll {:.4f} tar ll {:.4f}'.format(
                    outs.ctx_ll.item(), outs.tar_ll.item()))
                # NP predictive distribution over the whole grid.
                py = model.predict(batch.xc, batch.yc,
                        xp[None,:,None].repeat(1, 1, 1),
                        num_samples=args.bo_num_samples)
                mu, sigma = py.mean.squeeze(0), py.scale.squeeze(0)

            if mu.dim() == 4:
                print(mu.shape, sigma.shape)
                # Multi-sample models: collapse the sample dimension into a
                # single Gaussian via the mixture's moments (law of total
                # variance), then score EI on the collapsed mean/std.
                var = sigma.pow(2).mean(0) + mu.pow(2).mean(0) - mu.mean(0).pow(2)
                sigma = var.sqrt().squeeze(0)
                mu = mu.mean(0).squeeze(0)

                mu_ = mu.cpu().numpy()
                sigma_ = sigma.cpu().numpy()

                acq_vals = -1.0 * acquisition.ei(np.ravel(mu_), np.ravel(sigma_), Y_train)
                # acq_vals = []
                # for ind_mu in range(0, mu.shape[0]):
                #     acq_vals_ = -1.0 * acquisition.ei(np.ravel(mu[ind_mu].cpu().numpy()), np.ravel(sigma[ind_mu].cpu().numpy()), Y_train)
                #     acq_vals.append(acq_vals_)
                # acq_vals = np.mean(acq_vals, axis=0)
            else:
                # Single-sample models already give one mean/std per grid point.
                mu_ = mu.cpu().numpy()
                sigma_ = sigma.cpu().numpy()
                acq_vals = -1.0 * acquisition.ei(np.ravel(mu_), np.ravel(sigma_), Y_train)
                # var = sigma.pow(2).mean(0) + mu.pow(2).mean(0) - mu.mean(0).pow(2)
                # sigma = var.sqrt().squeeze(0)
                # mu = mu.mean(0).squeeze(0)

            ind_ = np.argmin(acq_vals)

            x_new = xp[ind_, None, None, None]
            y_new = yp[:, ind_, None, :]

            # Append the acquired point to both the full set and the context.
            batch.x = torch.cat([batch.x, x_new], axis=1)
            batch.y = torch.cat([batch.y, y_new], axis=1)
            batch.xc = torch.cat([batch.xc, x_new], axis=1)
            batch.yc = torch.cat([batch.yc, y_new], axis=1)

            X_train = batch.xc.squeeze(0).cpu().numpy()
            Y_train = batch.yc.squeeze(0).cpu().numpy()

            min_cur = batch.yc.min()
            list_min.append(min_cur.cpu().numpy())

        print(min_yp.cpu().numpy())
        print(np.array2string(np.array(list_min), separator=','))
        print(np.array2string(np.array(list_min) - min_yp.cpu().numpy(), separator=','))

        dict_exp = {
            'seed': seed_,
            'global': min_yp.cpu().numpy(),
            'minima': np.array(list_min),
            'regrets': np.array(list_min) - min_yp.cpu().numpy(),
            'xc': X_train,
            'yc': Y_train,
            'model': args.model,
            'cov': str_cov,
        }
        list_dict.append(dict_exp)

    np.save(get_str_file('./figures/results', args.bo_kernel, args.model, args.t_noise), list_dict)
# Script entry point.
if __name__ == '__main__':
    main()
| 10,873 | 32.875389 | 138 | py |
bnp | bnp-master/bayesian_optimization/models/anp.py | import torch
import torch.nn as nn
from torch.distributions import kl_divergence
from attrdict import AttrDict
from utils.misc import stack, logmeanexp
from utils.sampling import sample_subset
from models.modules import CrossAttnEncoder, PoolingEncoder, Decoder
class ANP(nn.Module):
    """Attentive Neural Process: a deterministic cross-attention path plus a
    global latent path, combined in an MLP decoder."""

    def __init__(self,
            dim_x=1,
            dim_y=1,
            dim_hid=128,
            dim_lat=128,
            enc_v_depth=4,
            enc_qk_depth=2,
            enc_pre_depth=4,
            enc_post_depth=2,
            dec_depth=3):
        super().__init__()

        # Deterministic path: target-specific features via cross-attention.
        self.denc = CrossAttnEncoder(
                dim_x=dim_x,
                dim_y=dim_y,
                dim_hid=dim_hid,
                v_depth=enc_v_depth,
                qk_depth=enc_qk_depth)

        # Latent path: permutation-invariant pooling encoder producing a Normal.
        self.lenc = PoolingEncoder(
                dim_x=dim_x,
                dim_y=dim_y,
                dim_hid=dim_hid,
                dim_lat=dim_lat,
                self_attn=True,
                pre_depth=enc_pre_depth,
                post_depth=enc_post_depth)

        self.dec = Decoder(
                dim_x=dim_x,
                dim_y=dim_y,
                dim_enc=dim_hid+dim_lat,
                dim_hid=dim_hid,
                depth=dec_depth)

    def predict(self, xc, yc, xt, z=None, num_samples=None):
        """Predictive Normal over y at `xt` given context (xc, yc).

        If `z` is None, a latent is sampled from the context-conditioned
        prior; `num_samples` adds a leading sample dimension to everything.
        """
        theta = stack(self.denc(xc, yc, xt), num_samples)
        if z is None:
            pz = self.lenc(xc, yc)
            z = pz.rsample() if num_samples is None \
                    else pz.rsample([num_samples])
        # Broadcast the global latent over the target points.
        z = stack(z, xt.shape[-2], -2)
        encoded = torch.cat([theta, z], -1)
        return self.dec(encoded, stack(xt, num_samples))

    def forward(self, batch, num_samples=None, reduce_ll=True):
        """Training: ELBO (or a multi-sample IWAE-style bound when
        num_samples > 1).  Evaluation: context/target log-likelihoods.

        NOTE(review): the training branch compares `num_samples > 1`, which
        raises if num_samples is None — callers appear to always pass an int
        during training; confirm.
        """
        outs = AttrDict()
        if self.training:
            pz = self.lenc(batch.xc, batch.yc)
            # Posterior uses the full set (context + target).
            qz = self.lenc(batch.x, batch.y)
            z = qz.rsample() if num_samples is None else \
                    qz.rsample([num_samples])
            py = self.predict(batch.xc, batch.yc, batch.x,
                    z=z, num_samples=num_samples)

            if num_samples > 1:
                # K * B * N
                recon = py.log_prob(stack(batch.y, num_samples)).sum(-1)
                # K * B
                log_qz = qz.log_prob(z).sum(-1)
                log_pz = pz.log_prob(z).sum(-1)

                # K * B — importance weights for the multi-sample bound.
                log_w = recon.sum(-1) + log_pz - log_qz

                # Normalized per point via batch.x.shape[-2].
                outs.loss = -logmeanexp(log_w).mean() / batch.x.shape[-2]
            else:
                # Standard ELBO: reconstruction minus per-point-normalized KL.
                outs.recon = py.log_prob(batch.y).sum(-1).mean()
                outs.kld = kl_divergence(qz, pz).sum(-1).mean()
                outs.loss = -outs.recon + outs.kld / batch.x.shape[-2]
        else:
            py = self.predict(batch.xc, batch.yc, batch.x, num_samples=num_samples)
            if num_samples is None:
                ll = py.log_prob(batch.y).sum(-1)
            else:
                y = torch.stack([batch.y]*num_samples)
                if reduce_ll:
                    # Marginal likelihood estimate: log-mean over latent samples.
                    ll = logmeanexp(py.log_prob(y).sum(-1))
                else:
                    ll = py.log_prob(y).sum(-1)
            # The first num_ctx points of batch.x are the context points.
            num_ctx = batch.xc.shape[-2]
            if reduce_ll:
                outs.ctx_ll = ll[...,:num_ctx].mean()
                outs.tar_ll = ll[...,num_ctx:].mean()
            else:
                outs.ctx_ll = ll[...,:num_ctx]
                outs.tar_ll = ll[...,num_ctx:]
        return outs
| 3,447 | 32.153846 | 83 | py |
bnp | bnp-master/bayesian_optimization/models/cnp.py | import torch
import torch.nn as nn
from attrdict import AttrDict
from models.modules import PoolingEncoder, Decoder
class CNP(nn.Module):
    """Conditional Neural Process: two pooled deterministic context
    representations are concatenated and decoded into a Normal over y."""

    def __init__(self,
            dim_x=1,
            dim_y=1,
            dim_hid=128,
            enc_pre_depth=4,
            enc_post_depth=2,
            dec_depth=3):
        super().__init__()

        self.enc1 = PoolingEncoder(
                dim_x=dim_x,
                dim_y=dim_y,
                dim_hid=dim_hid,
                pre_depth=enc_pre_depth,
                post_depth=enc_post_depth)

        self.enc2 = PoolingEncoder(
                dim_x=dim_x,
                dim_y=dim_y,
                dim_hid=dim_hid,
                pre_depth=enc_pre_depth,
                post_depth=enc_post_depth)

        self.dec = Decoder(
                dim_x=dim_x,
                dim_y=dim_y,
                dim_enc=2*dim_hid,
                dim_hid=dim_hid,
                depth=dec_depth)

    def predict(self, xc, yc, xt, num_samples=None):
        """Decode p(y | xt) from the pooled context representation."""
        rep = torch.cat([self.enc1(xc, yc), self.enc2(xc, yc)], -1)
        # Broadcast the single set-level representation over every target point.
        rep = torch.stack([rep] * xt.shape[-2], -2)
        return self.dec(rep, xt)

    def forward(self, batch, num_samples=None, reduce_ll=True):
        """Training: mean NLL over all points; evaluation: context/target LLs."""
        outs = AttrDict()
        py = self.predict(batch.xc, batch.yc, batch.x)
        ll = py.log_prob(batch.y).sum(-1)

        if self.training:
            outs.loss = -ll.mean()
            return outs

        # The first num_ctx points of batch.x are the context points.
        num_ctx = batch.xc.shape[-2]
        ctx_ll, tar_ll = ll[..., :num_ctx], ll[..., num_ctx:]
        if reduce_ll:
            ctx_ll, tar_ll = ctx_ll.mean(), tar_ll.mean()
        outs.ctx_ll = ctx_ll
        outs.tar_ll = tar_ll
        return outs
| 1,748 | 27.672131 | 71 | py |
bnp | bnp-master/bayesian_optimization/models/modules.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Normal
from models.attention import MultiHeadAttn, SelfAttn
__all__ = ['PoolingEncoder', 'CrossAttnEncoder', 'Decoder']
def build_mlp(dim_in, dim_hid, dim_out, depth):
    """Return an MLP with `depth` Linear layers and in-place ReLUs between them."""
    layers = [nn.Linear(dim_in, dim_hid), nn.ReLU(True)]
    for _ in range(depth - 2):
        layers += [nn.Linear(dim_hid, dim_hid), nn.ReLU(True)]
    layers.append(nn.Linear(dim_hid, dim_out))
    return nn.Sequential(*layers)
class PoolingEncoder(nn.Module):
    """Permutation-invariant set encoder: per-point MLP (optionally followed
    by self-attention), mean-pooled over points; outputs a deterministic
    vector, or a Normal over a latent when `dim_lat` is given."""

    def __init__(self, dim_x=1, dim_y=1,
            dim_hid=128, dim_lat=None, self_attn=False,
            pre_depth=4, post_depth=2):
        super().__init__()

        self.use_lat = dim_lat is not None

        # With self-attention, two MLP layers are traded for a SelfAttn block.
        self.net_pre = build_mlp(dim_x+dim_y, dim_hid, dim_hid, pre_depth) \
                if not self_attn else \
                nn.Sequential(
                        build_mlp(dim_x+dim_y, dim_hid, dim_hid, pre_depth-2),
                        nn.ReLU(True),
                        SelfAttn(dim_hid, dim_hid))

        # Post-net doubles the output when latent: (mu, sigma) halves.
        self.net_post = build_mlp(dim_hid, dim_hid,
                2*dim_lat if self.use_lat else dim_hid,
                post_depth)

    def forward(self, xc, yc, mask=None):
        """Encode the context set; `mask` (0/1 per point) restricts pooling
        to the unmasked points."""
        out = self.net_pre(torch.cat([xc, yc], -1))
        if mask is None:
            out = out.mean(-2)
        else:
            mask = mask.to(xc.device)
            # Masked mean: zero excluded points and divide by their count
            # (epsilon guards against an all-zero mask).
            out = (out * mask.unsqueeze(-1)).sum(-2) / \
                    (mask.sum(-1, keepdim=True).detach() + 1e-5)
        if self.use_lat:
            mu, sigma = self.net_post(out).chunk(2, -1)
            # Bound sigma to (0.1, 1.0) for stable training.
            sigma = 0.1 + 0.9 * torch.sigmoid(sigma)
            return Normal(mu, sigma)
        else:
            return self.net_post(out)
class CrossAttnEncoder(nn.Module):
    """Cross-attention encoder: each target attends to the context points,
    yielding one representation per target; outputs a Normal over a latent
    when `dim_lat` is given."""

    def __init__(self, dim_x=1, dim_y=1, dim_hid=128,
            dim_lat=None, self_attn=True,
            v_depth=4, qk_depth=2):
        super().__init__()
        self.use_lat = dim_lat is not None

        # With self-attention on the values, two MLP layers are traded for it.
        if not self_attn:
            self.net_v = build_mlp(dim_x+dim_y, dim_hid, dim_hid, v_depth)
        else:
            self.net_v = build_mlp(dim_x+dim_y, dim_hid, dim_hid, v_depth-2)
            self.self_attn = SelfAttn(dim_hid, dim_hid)

        # Queries (from xt) and keys (from xc) share one network over x alone.
        self.net_qk = build_mlp(dim_x, dim_hid, dim_hid, qk_depth)

        # Output is doubled when latent: (mu, sigma) halves.
        self.attn = MultiHeadAttn(dim_hid, dim_hid, dim_hid,
                2*dim_lat if self.use_lat else dim_hid)

    def forward(self, xc, yc, xt, mask=None):
        q, k = self.net_qk(xt), self.net_qk(xc)

        v = self.net_v(torch.cat([xc, yc], -1))

        if hasattr(self, 'self_attn'):
            v = self.self_attn(v, mask=mask)

        out = self.attn(q, k, v, mask=mask)
        if self.use_lat:
            mu, sigma = out.chunk(2, -1)
            # Bound sigma to (0.1, 1.0) for stable training.
            sigma = 0.1 + 0.9 * torch.sigmoid(sigma)
            return Normal(mu, sigma)
        else:
            return out
class Decoder(nn.Module):
    """Maps (encoded representation, target x) to a heteroscedastic Normal
    over y, with an optional additive context pathway (see `add_ctx`)."""

    def __init__(self, dim_x=1, dim_y=1,
            dim_enc=128, dim_hid=128, depth=3):
        super().__init__()
        self.fc = nn.Linear(dim_x + dim_enc, dim_hid)
        self.dim_hid = dim_hid
        # ReLU followed by (depth - 2) hidden layers and a final head
        # emitting 2*dim_y values: mean and pre-activation sigma.
        layers = [nn.ReLU(True)]
        for _ in range(depth - 2):
            layers += [nn.Linear(dim_hid, dim_hid), nn.ReLU(True)]
        layers.append(nn.Linear(dim_hid, 2 * dim_y))
        self.mlp = nn.Sequential(*layers)

    def add_ctx(self, dim_ctx):
        """Enable an optional additive context input of width `dim_ctx`."""
        self.dim_ctx = dim_ctx
        self.fc_ctx = nn.Linear(dim_ctx, self.dim_hid, bias=False)

    def forward(self, encoded, x, ctx=None):
        hid = self.fc(torch.cat([encoded, x], -1))
        if ctx is not None:
            hid = hid + self.fc_ctx(ctx)
        mu, sigma = self.mlp(hid).chunk(2, -1)
        # Softplus keeps sigma positive; the 0.1 floor prevents collapse.
        sigma = 0.1 + 0.9 * F.softplus(sigma)
        return Normal(mu, sigma)
| 3,867 | 33.535714 | 78 | py |
bnp | bnp-master/bayesian_optimization/models/banp.py | import torch
import torch.nn as nn
from attrdict import AttrDict
from models.canp import CANP
from utils.misc import stack, logmeanexp
from utils.sampling import sample_with_replacement as SWR, sample_subset
class BANP(CANP):
    """Bootstrapping Attentive Neural Process: a CANP whose decoder receives
    an extra context path computed from bootstrap-resampled context sets."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Extra decoder input carrying the bootstrap-path encoding
        # (cross-attention features + pooled representation).
        self.dec.add_ctx(2*kwargs['dim_hid'])

    def encode(self, xc, yc, xt, mask=None):
        """Concatenate target-specific cross-attention features with the
        pooled global representation, broadcast over the targets."""
        theta1 = self.enc1(xc, yc, xt)
        theta2 = self.enc2(xc, yc)
        encoded = torch.cat([theta1,
            torch.stack([theta2]*xt.shape[-2], -2)], -1)
        return encoded

    def predict(self, xc, yc, xt, num_samples=None, return_base=False):
        """Predict at `xt`; returns (base, bootstrapped) predictions during
        training or when `return_base` is set, else just the bootstrapped one."""
        with torch.no_grad():
            # Paired bootstrap: resample the context with replacement...
            bxc, byc = SWR(xc, yc, num_samples=num_samples)
            sxc, syc = stack(xc, num_samples), stack(yc, num_samples)

            encoded = self.encode(bxc, byc, sxc)
            py_res = self.dec(encoded, sxc)

            mu, sigma = py_res.mean, py_res.scale

            # ...then resample the standardized residuals (residual bootstrap)
            # and center them before re-noising the context targets.
            res = SWR((syc - mu)/sigma).detach()
            res = (res - res.mean(-2, keepdim=True))

            bxc = sxc
            byc = mu + sigma * res

        encoded_base = self.encode(xc, yc, xt)

        sxt = stack(xt, num_samples)
        encoded_bs = self.encode(bxc, byc, sxt)

        # Decoder sees the base encoding plus the bootstrap path as context.
        py = self.dec(stack(encoded_base, num_samples),
                sxt, ctx=encoded_bs)

        if self.training or return_base:
            py_base = self.dec(encoded_base, xt)
            return py_base, py
        else:
            return py

    def forward(self, batch, num_samples=None, reduce_ll=True):
        """Training loss: sum of base-path NLL and bootstrap-path NLL;
        evaluation: context/target log-likelihoods."""
        outs = AttrDict()

        def compute_ll(py, y):
            ll = py.log_prob(y).sum(-1)
            if ll.dim() == 3 and reduce_ll:
                # Log-mean over the bootstrap-sample dimension.
                ll = logmeanexp(ll)
            return ll

        if self.training:
            py_base, py = self.predict(batch.xc, batch.yc, batch.x,
                    num_samples=num_samples)

            outs.ll_base = compute_ll(py_base, batch.y).mean()
            outs.ll = compute_ll(py, batch.y).mean()
            outs.loss = -outs.ll_base - outs.ll
        else:
            py = self.predict(batch.xc, batch.yc, batch.x,
                    num_samples=num_samples)
            ll = compute_ll(py, batch.y)

            # The first num_ctx points of batch.x are the context points.
            num_ctx = batch.xc.shape[-2]
            if reduce_ll:
                outs.ctx_ll = ll[...,:num_ctx].mean()
                outs.tar_ll = ll[...,num_ctx:].mean()
            else:
                outs.ctx_ll = ll[...,:num_ctx]
                outs.tar_ll = ll[...,num_ctx:]

        return outs
| 2,555 | 31.35443 | 72 | py |
bnp | bnp-master/bayesian_optimization/models/canp.py | import torch
import torch.nn as nn
from attrdict import AttrDict
from models.modules import CrossAttnEncoder, Decoder, PoolingEncoder
class CANP(nn.Module):
    """Conditional Attentive Neural Process: per-target cross-attention
    features plus a pooled global representation, decoded into a Normal."""

    def __init__(self,
            dim_x=1,
            dim_y=1,
            dim_hid=128,
            enc_v_depth=4,
            enc_qk_depth=2,
            enc_pre_depth=4,
            enc_post_depth=2,
            dec_depth=3):
        super().__init__()

        # Target-specific features via cross-attention over the context.
        self.enc1 = CrossAttnEncoder(
                dim_x=dim_x,
                dim_y=dim_y,
                dim_hid=dim_hid,
                v_depth=enc_v_depth,
                qk_depth=enc_qk_depth)

        # Global set representation via self-attentive pooling.
        self.enc2 = PoolingEncoder(
                dim_x=dim_x,
                dim_y=dim_y,
                dim_hid=dim_hid,
                self_attn=True,
                pre_depth=enc_pre_depth,
                post_depth=enc_post_depth)

        self.dec = Decoder(
                dim_x=dim_x,
                dim_y=dim_y,
                dim_enc=2*dim_hid,
                dim_hid=dim_hid,
                depth=dec_depth)

    def predict(self, xc, yc, xt, num_samples=None):
        """Return the predictive Normal over y at target inputs `xt`."""
        theta1 = self.enc1(xc, yc, xt)
        theta2 = self.enc2(xc, yc)
        # Broadcast the global representation over the target points.
        encoded = torch.cat([theta1,
            torch.stack([theta2]*xt.shape[-2], -2)], -1)
        return self.dec(encoded, xt)

    def forward(self, batch, num_samples=None, reduce_ll=True):
        """Training: mean NLL over all points; evaluation: context/target LLs."""
        outs = AttrDict()
        py = self.predict(batch.xc, batch.yc, batch.x)
        ll = py.log_prob(batch.y).sum(-1)
        if self.training:
            outs.loss = -ll.mean()
        else:
            # The first num_ctx points of batch.x are the context points.
            num_ctx = batch.xc.shape[-2]
            if reduce_ll:
                outs.ctx_ll = ll[...,:num_ctx].mean()
                outs.tar_ll = ll[...,num_ctx:].mean()
            else:
                outs.ctx_ll = ll[...,:num_ctx]
                outs.tar_ll = ll[...,num_ctx:]
        return outs
| 1,886 | 27.590909 | 68 | py |
bnp | bnp-master/bayesian_optimization/models/bnp.py | import torch
import torch.nn as nn
from attrdict import AttrDict
from models.cnp import CNP
from utils.misc import stack, logmeanexp
from utils.sampling import sample_with_replacement as SWR, sample_subset
class BNP(CNP):
    """Bootstrapping Neural Process: a CNP whose decoder receives an extra
    context path computed from bootstrap-resampled context sets."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Extra decoder input carrying the bootstrap-path encoding
        # (both pooled encoders concatenated).
        self.dec.add_ctx(2*kwargs['dim_hid'])

    def encode(self, xc, yc, xt, mask=None):
        """Pool the context with both encoders and broadcast over the targets."""
        encoded = torch.cat([
            self.enc1(xc, yc, mask=mask),
            self.enc2(xc, yc, mask=mask)], -1)
        return stack(encoded, xt.shape[-2], -2)

    def predict(self, xc, yc, xt, num_samples=None, return_base=False):
        """Predict at `xt`; returns (base, bootstrapped) predictions during
        training or when `return_base` is set, else just the bootstrapped one."""
        with torch.no_grad():
            # Paired bootstrap: resample the context with replacement...
            bxc, byc = SWR(xc, yc, num_samples=num_samples)
            sxc, syc = stack(xc, num_samples), stack(yc, num_samples)

            encoded = self.encode(bxc, byc, sxc)
            py_res = self.dec(encoded, sxc)

            mu, sigma = py_res.mean, py_res.scale

            # ...then resample the standardized residuals (residual bootstrap)
            # and center them before re-noising the context targets.
            res = SWR((syc - mu)/sigma).detach()
            res = (res - res.mean(-2, keepdim=True))

            bxc = sxc
            byc = mu + sigma * res

        encoded_base = self.encode(xc, yc, xt)

        sxt = stack(xt, num_samples)
        encoded_bs = self.encode(bxc, byc, sxt)

        # Decoder sees the base encoding plus the bootstrap path as context.
        py = self.dec(stack(encoded_base, num_samples),
                sxt, ctx=encoded_bs)

        if self.training or return_base:
            py_base = self.dec(encoded_base, xt)
            return py_base, py
        else:
            return py

    def forward(self, batch, num_samples=None, reduce_ll=True):
        """Training loss: sum of base-path NLL and bootstrap-path NLL;
        evaluation: context/target log-likelihoods."""
        outs = AttrDict()

        def compute_ll(py, y):
            ll = py.log_prob(y).sum(-1)
            if ll.dim() == 3 and reduce_ll:
                # Log-mean over the bootstrap-sample dimension.
                ll = logmeanexp(ll)
            return ll

        if self.training:
            py_base, py = self.predict(batch.xc, batch.yc, batch.x,
                    num_samples=num_samples)

            outs.ll_base = compute_ll(py_base, batch.y).mean()
            outs.ll = compute_ll(py, batch.y).mean()
            outs.loss = -outs.ll_base - outs.ll
        else:
            py = self.predict(batch.xc, batch.yc, batch.x,
                    num_samples=num_samples)
            ll = compute_ll(py, batch.y)

            # The first num_ctx points of batch.x are the context points.
            num_ctx = batch.xc.shape[-2]
            if reduce_ll:
                outs.ctx_ll = ll[...,:num_ctx].mean()
                outs.tar_ll = ll[...,num_ctx:].mean()
            else:
                outs.ctx_ll = ll[...,:num_ctx]
                outs.tar_ll = ll[...,num_ctx:]

        return outs
| 2,527 | 31.410256 | 72 | py |
bnp | bnp-master/bayesian_optimization/models/np.py | import torch
import torch.nn as nn
from torch.distributions import kl_divergence
from attrdict import AttrDict
from utils.misc import stack, logmeanexp
from utils.sampling import sample_subset
from models.modules import PoolingEncoder, Decoder
class NP(nn.Module):
    """Neural Process: a deterministic pooled path plus a global latent path,
    combined in an MLP decoder."""

    def __init__(self,
            dim_x=1,
            dim_y=1,
            dim_hid=128,
            dim_lat=128,
            enc_pre_depth=4,
            enc_post_depth=2,
            dec_depth=3):
        super().__init__()

        # Deterministic path: pooled context representation.
        self.denc = PoolingEncoder(
                dim_x=dim_x,
                dim_y=dim_y,
                dim_hid=dim_hid,
                pre_depth=enc_pre_depth,
                post_depth=enc_post_depth)

        # Latent path: pooling encoder producing a Normal over z.
        self.lenc = PoolingEncoder(
                dim_x=dim_x,
                dim_y=dim_y,
                dim_hid=dim_hid,
                dim_lat=dim_lat,
                pre_depth=enc_pre_depth,
                post_depth=enc_post_depth)

        self.dec = Decoder(
                dim_x=dim_x,
                dim_y=dim_y,
                dim_enc=dim_hid+dim_lat,
                dim_hid=dim_hid,
                depth=dec_depth)

    def predict(self, xc, yc, xt, z=None, num_samples=None):
        """Predictive Normal over y at `xt` given context (xc, yc).

        If `z` is None, a latent is sampled from the context-conditioned
        prior; `num_samples` adds a leading sample dimension to everything.
        """
        theta = stack(self.denc(xc, yc), num_samples)
        if z is None:
            pz = self.lenc(xc, yc)
            z = pz.rsample() if num_samples is None \
                    else pz.rsample([num_samples])
        encoded = torch.cat([theta, z], -1)
        # Broadcast the set-level encoding over the target points.
        encoded = stack(encoded, xt.shape[-2], -2)
        return self.dec(encoded, stack(xt, num_samples))

    def forward(self, batch, num_samples=None, reduce_ll=True):
        """Training: ELBO (or a multi-sample IWAE-style bound when
        num_samples > 1).  Evaluation: context/target log-likelihoods.

        NOTE(review): the training branch compares `num_samples > 1`, which
        raises if num_samples is None — callers appear to always pass an int
        during training; confirm.
        """
        outs = AttrDict()
        if self.training:
            pz = self.lenc(batch.xc, batch.yc)
            # Posterior uses the full set (context + target).
            qz = self.lenc(batch.x, batch.y)
            z = qz.rsample() if num_samples is None else \
                    qz.rsample([num_samples])
            py = self.predict(batch.xc, batch.yc, batch.x,
                    z=z, num_samples=num_samples)

            if num_samples > 1:
                # K * B * N
                recon = py.log_prob(stack(batch.y, num_samples)).sum(-1)
                # K * B
                log_qz = qz.log_prob(z).sum(-1)
                log_pz = pz.log_prob(z).sum(-1)

                # K * B — importance weights for the multi-sample bound.
                log_w = recon.sum(-1) + log_pz - log_qz

                # Normalized per point via batch.x.shape[-2].
                outs.loss = -logmeanexp(log_w).mean() / batch.x.shape[-2]
            else:
                # Standard ELBO: reconstruction minus per-point-normalized KL.
                outs.recon = py.log_prob(batch.y).sum(-1).mean()
                outs.kld = kl_divergence(qz, pz).sum(-1).mean()
                outs.loss = -outs.recon + outs.kld / batch.x.shape[-2]
        else:
            py = self.predict(batch.xc, batch.yc, batch.x, num_samples=num_samples)
            if num_samples is None:
                ll = py.log_prob(batch.y).sum(-1)
            else:
                y = torch.stack([batch.y]*num_samples)
                if reduce_ll:
                    # Marginal likelihood estimate: log-mean over latent samples.
                    ll = logmeanexp(py.log_prob(y).sum(-1))
                else:
                    ll = py.log_prob(y).sum(-1)
            # The first num_ctx points of batch.x are the context points.
            num_ctx = batch.xc.shape[-2]
            if reduce_ll:
                outs.ctx_ll = ll[...,:num_ctx].mean()
                outs.tar_ll = ll[...,num_ctx:].mean()
            else:
                outs.ctx_ll = ll[...,:num_ctx]
                outs.tar_ll = ll[...,num_ctx:]
        return outs
| 3,352 | 33.214286 | 83 | py |
bnp | bnp-master/bayesian_optimization/models/attention.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import math
class MultiHeadAttn(nn.Module):
def __init__(self, dim_q, dim_k, dim_v, dim_out, num_heads=8):
super().__init__()
self.num_heads = num_heads
self.dim_out = dim_out
self.fc_q = nn.Linear(dim_q, dim_out, bias=False)
self.fc_k = nn.Linear(dim_k, dim_out, bias=False)
self.fc_v = nn.Linear(dim_v, dim_out, bias=False)
self.fc_out = nn.Linear(dim_out, dim_out)
self.ln1 = nn.LayerNorm(dim_out)
self.ln2 = nn.LayerNorm(dim_out)
def scatter(self, x):
return torch.cat(x.chunk(self.num_heads, -1), -3)
def gather(self, x):
return torch.cat(x.chunk(self.num_heads, -3), -1)
def attend(self, q, k, v, mask=None):
q_, k_, v_ = [self.scatter(x) for x in [q, k, v]]
A_logits = q_ @ k_.transpose(-2, -1) / math.sqrt(self.dim_out)
if mask is not None:
mask = mask.bool().to(q.device)
mask = torch.stack([mask]*q.shape[-2], -2)
mask = torch.cat([mask]*self.num_heads, -3)
A = torch.softmax(A_logits.masked_fill(mask, -float('inf')), -1)
A = A.masked_fill(torch.isnan(A), 0.0)
else:
A = torch.softmax(A_logits, -1)
return self.gather(A @ v_)
def forward(self, q, k, v, mask=None):
q, k, v = self.fc_q(q), self.fc_k(k), self.fc_v(v)
out = self.ln1(q + self.attend(q, k, v, mask=mask))
out = self.ln2(out + F.relu(self.fc_out(out)))
return out
class SelfAttn(MultiHeadAttn):
    """Multi-head self-attention: queries, keys and values all come from `x`."""

    def __init__(self, dim_in, dim_out, num_heads=8):
        super().__init__(dim_in, dim_in, dim_in, dim_out, num_heads)

    def forward(self, x, mask=None):
        return super().forward(x, x, x, mask=mask)
| 1,805 | 35.857143 | 76 | py |
bnp | bnp-master/bayesian_optimization/utils/misc.py | import os
from importlib.machinery import SourceFileLoader
import math
import torch
def gen_load_func(parser, func):
    """Wrap `func` into a loader(args, cmdline) callable.

    The returned loader parses the options `parser` knows out of `cmdline`,
    copies them onto `args`, calls `func(**parsed)` and returns the result
    together with the still-unparsed command-line tokens.
    """
    def load(args, cmdline):
        sub_args, cmdline = parser.parse_known_args(cmdline)
        # Merge the sub-parser's options into the shared args namespace.
        for k, v in sub_args.__dict__.items():
            args.__dict__[k] = v
        return func(**sub_args.__dict__), cmdline
    return load
def load_module(filename):
    """Import a Python source file by path and return the module object.

    The module name is the file's basename without its extension.
    """
    name, _ = os.path.splitext(os.path.basename(filename))
    return SourceFileLoader(name, filename).load_module()
def logmeanexp(x, dim=0):
    """Numerically stable log(mean(exp(x))) along dimension `dim`."""
    count = x.shape[dim]
    return x.logsumexp(dim) - math.log(count)
def stack(x, num_samples=None, dim=0):
    """Tile `x` `num_samples` times along a new axis `dim`; identity if None."""
    if num_samples is None:
        return x
    return torch.stack([x] * num_samples, dim=dim)
| 726 | 29.291667 | 65 | py |
bnp | bnp-master/bayesian_optimization/utils/log.py | import torch
import time
import logging
from collections import OrderedDict
def get_logger(filename, mode='a'):
    """Return the root logger, additionally writing plain messages to `filename`.

    NOTE(review): each call appends another FileHandler to the root logger,
    so repeated calls would duplicate log lines — confirm single-call usage.
    """
    logging.basicConfig(level=logging.INFO, format='%(message)s')
    logger = logging.getLogger()
    logger.addHandler(logging.FileHandler(filename, mode=mode))
    return logger
class RunningAverage(object):
    """Accumulates per-key running means (e.g. losses) plus a wall-clock timer."""

    def __init__(self, *keys):
        self.sum = OrderedDict()
        self.cnt = OrderedDict()
        self.clock = time.time()
        for key in keys:
            self.sum[key] = 0
            self.cnt[key] = 0

    def update(self, key, val):
        """Add one observation of `val` under `key`; tensors are unwrapped
        via .item(). Unknown keys are created on first use."""
        if isinstance(val, torch.Tensor):
            val = val.item()
        if self.sum.get(key, None) is None:
            self.sum[key] = val
            self.cnt[key] = 1
        else:
            self.sum[key] = self.sum[key] + val
            self.cnt[key] += 1

    def reset(self):
        """Zero all tracked keys and restart the clock, keeping the key set."""
        for key in self.sum.keys():
            self.sum[key] = 0
            self.cnt[key] = 0
        self.clock = time.time()

    def clear(self):
        """Drop all keys entirely and restart the clock."""
        self.sum = OrderedDict()
        self.cnt = OrderedDict()
        self.clock = time.time()

    def keys(self):
        return self.sum.keys()

    def get(self, key):
        """Return the running mean for `key`; the key must have been updated."""
        assert(self.sum.get(key, None) is not None)
        return self.sum[key] / self.cnt[key]

    def info(self, show_et=True):
        """Format all running means as one line, optionally with elapsed time.

        Bug fix: the float branch previously chained `.format(key, val)` onto
        an already-formatted f-string — a no-op that only obscured the code.
        """
        line = ''
        for key in self.sum.keys():
            val = self.sum[key] / self.cnt[key]
            if isinstance(val, float):
                line += f'{key} {val:.4f} '
            else:
                # Non-float means (unusual: true division yields float) verbatim.
                line += f'{key} {val} '
        if show_et:
            line += f'({time.time()-self.clock:.3f} secs)'
        return line
| 1,679 | 27 | 65 | py |
bnp | bnp-master/bayesian_optimization/utils/sampling.py | import torch
def gather(items, idxs):
    """Index each tensor in `items` along its point axis (-2) with `idxs`.

    `idxs` has shape (K, ...batch..., Ns); each item is tiled K times along
    a new leading axis, gathered, and the leading axis is squeezed away when
    K == 1. A single item is returned bare, multiple items as a list.
    """
    K = idxs.shape[0]
    idxs = idxs.to(items[0].device)
    out = []
    for item in items:
        tiled = torch.stack([item] * K)
        # Expand the indices over the feature dimension before gathering.
        index = torch.stack([idxs] * item.shape[-1], -1)
        out.append(torch.gather(tiled, -2, index).squeeze(0))
    return out[0] if len(out) == 1 else out
def sample_subset(*items, r_N=None, num_samples=None):
    """Randomly split the points (axis -2) of each item into two disjoint parts.

    A random permutation of all N point indices is drawn (per sample/batch);
    the first Ns = max(1, int(r_N * N)) indices select the first part and the
    remaining N - Ns the second. Returns (part1, part2).

    Bug fix: the permutation was previously drawn over only Ns values
    (`torch.rand(... + (Ns,))`), so `idxs[..., Ns:]` was always empty and the
    second returned split contained no points. Drawing over all N indices
    restores the intended subset/complement split.
    """
    # r_N == 0 (falsy) also triggers a random ratio, matching `or` semantics.
    r_N = r_N or torch.rand(1).item()
    K = num_samples or 1
    N = items[0].shape[-2]
    Ns = max(1, int(r_N * N))
    batch_shape = items[0].shape[:-2]
    # argsort of uniform noise yields a uniform random permutation of range(N).
    idxs = torch.rand((K,)+batch_shape+(N,)).argsort(-1)
    return gather(items, idxs[...,:Ns]), gather(items, idxs[...,Ns:])
def sample_with_replacement(*items, num_samples=None, r_N=1.0, N_s=None):
    """Bootstrap-resample the points (axis -2) of each item with replacement.

    Draws N_s indices (default: max(1, r_N * N)) uniformly with replacement,
    independently for each of the `num_samples` resamples and batch elements.
    """
    K = num_samples or 1
    N = items[0].shape[-2]
    if not N_s:
        N_s = max(1, int(r_N * N))
    batch_shape = items[0].shape[:-2]
    # Uniform indices with replacement: one index set per sample/batch element.
    idxs = torch.randint(N, size=(K,) + batch_shape + (N_s,))
    return gather(items, idxs)
def sample_mask(B, N, num_samples=None, min_num=3, prob=0.5):
    """Random 0/1 masks of shape (K, B, N) (leading K squeezed when 1):
    the first min(min_num, N) points are always on, the rest Bernoulli(prob)."""
    min_num = min(min_num, N)
    K = num_samples or 1
    fixed = torch.ones(K, B, min_num)
    remainder = N - min_num
    if remainder <= 0:
        return fixed.squeeze(0)
    rand = torch.bernoulli(prob * torch.ones(K, B, remainder))
    return torch.cat([fixed, rand], -1).squeeze(0)
| 1,334 | 32.375 | 73 | py |
bnp | bnp-master/bayesian_optimization/data/gp.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import MultivariateNormal, StudentT
from attrdict import AttrDict
import math
__all__ = ['GPPriorSampler', 'GPSampler', 'RBFKernel', 'PeriodicKernel', 'Matern52Kernel']
class GPPriorSampler(object):
    """Draws a single function sample from a GP prior with the given kernel,
    optionally corrupted by heavy-tailed Student-t observation noise."""

    def __init__(self, kernel, t_noise=None):
        self.kernel = kernel
        # Scale of the optional Student-t (df=2.1) observation noise.
        self.t_noise = t_noise

    def sample(self,
            bx,
            device='cuda:0'):
        # bx: 1 * num_points * 1
        # 1 * num_points * num_points
        cov = self.kernel(bx)
        # Bug fix: the mean was previously forced onto CUDA with an
        # unconditional `mean.cuda()`, ignoring the `device` argument and
        # breaking CPU use. The default device is unchanged ('cuda:0').
        mean = torch.zeros(1, bx.shape[1], device=device)

        by = MultivariateNormal(mean, cov).rsample().unsqueeze(-1)

        if self.t_noise is not None:
            by += self.t_noise * StudentT(2.1).rsample(by.shape).to(device)
        return by
class GPSampler(object):
    """Generates random GP regression tasks (context/target splits) for
    meta-training, optionally with heavy-tailed observation noise."""

    def __init__(self, kernel, t_noise=None):
        self.kernel = kernel
        # Scale of the optional Student-t (df=2.1) observation noise.
        self.t_noise = t_noise

    def sample(self,
            batch_size=16,
            num_ctx=None,
            max_num_points=50,
            x_range=(-2, 2),
            device='cpu'):
        """Sample a batch of tasks as an AttrDict with x/xc/xt and y/yc/yt.

        Context and target sizes are drawn uniformly (each at least 3,
        together bounded by max_num_points) unless `num_ctx` is given.
        """
        batch = AttrDict()
        num_ctx = num_ctx or torch.randint(low=3, high=max_num_points-3, size=[1]).item()
        num_tar = torch.randint(low=3, high=max_num_points-num_ctx, size=[1]).item()

        num_points = num_ctx + num_tar
        # Uniform inputs in x_range; the first num_ctx points form the context.
        batch.x = x_range[0] + (x_range[1] - x_range[0]) \
                * torch.rand([batch_size, num_points, 1], device=device)
        batch.xc = batch.x[:,:num_ctx]
        batch.xt = batch.x[:,num_ctx:]

        # batch_size * num_points * num_points
        cov = self.kernel(batch.x)
        mean = torch.zeros(batch_size, num_points, device=device)
        batch.y = MultivariateNormal(mean, cov).rsample().unsqueeze(-1)
        batch.yc = batch.y[:,:num_ctx]
        batch.yt = batch.y[:,num_ctx:]

        if self.t_noise is not None:
            # In-place add: yc/yt are views of y, so they pick up the noise too.
            batch.y += self.t_noise * StudentT(2.1).rsample(batch.y.shape).to(device)
        return batch
class RBFKernel(object):
    """Squared-exponential (RBF) kernel whose length-scale and output scale
    are drawn at random per batch element on every call."""

    def __init__(self, sigma_eps=2e-2, max_length=0.6, max_scale=1.0):
        self.sigma_eps = sigma_eps      # noise/jitter std added on the diagonal
        self.max_length = max_length    # upper bound for the length-scale draw
        self.max_scale = max_scale      # upper bound for the output-scale draw

    # x: batch_size * num_points * dim
    def __call__(self, x):
        batch = x.shape[0]
        # One random length-scale and output scale per batch element, in [0.1, max).
        length = 0.1 + (self.max_length - 0.1) \
                * torch.rand([batch, 1, 1, 1], device=x.device)
        scale = 0.1 + (self.max_scale - 0.1) \
                * torch.rand([batch, 1, 1], device=x.device)

        # Pairwise scaled differences: batch * num_points * num_points * dim.
        diff = (x.unsqueeze(-2) - x.unsqueeze(-3)) / length
        sqdist = diff.pow(2).sum(-1)

        # k(x, x') = s^2 exp(-0.5 ||x - x'||^2 / l^2) + sigma_eps^2 I.
        jitter = self.sigma_eps**2 * torch.eye(x.shape[-2]).to(x.device)
        return scale.pow(2) * torch.exp(-0.5 * sqdist) + jitter
class Matern52Kernel(object):
    """Matérn-5/2 kernel whose length-scale and output scale are drawn at
    random per batch element on every call."""

    def __init__(self, sigma_eps=2e-2, max_length=0.6, max_scale=1.0):
        self.sigma_eps = sigma_eps      # noise/jitter std added on the diagonal
        self.max_length = max_length    # upper bound for the length-scale draw
        self.max_scale = max_scale      # upper bound for the output-scale draw

    # x: batch_size * num_points * dim
    def __call__(self, x):
        # One random length-scale and output scale per batch element, in [0.1, max).
        length = 0.1 + (self.max_length-0.1) \
                * torch.rand([x.shape[0], 1, 1, 1], device=x.device)
        scale = 0.1 + (self.max_scale-0.1) \
                * torch.rand([x.shape[0], 1, 1], device=x.device)

        # batch_size * num_points * num_points — scaled pairwise distances.
        dist = torch.norm((x.unsqueeze(-2) - x.unsqueeze(-3))/length, dim=-1)

        # Matérn-5/2: s^2 (1 + sqrt(5) d + 5 d^2 / 3) exp(-sqrt(5) d) + jitter.
        cov = scale.pow(2)*(1 + math.sqrt(5.0)*dist + 5.0*dist.pow(2)/3.0) \
                * torch.exp(-math.sqrt(5.0) * dist) \
                + self.sigma_eps**2 * torch.eye(x.shape[-2]).to(x.device)

        return cov
class PeriodicKernel(object):
    """Periodic (exp-sine-squared) kernel whose period, length-scale and
    output scale are drawn at random per batch element on every call."""

    def __init__(self, sigma_eps=2e-2, max_length=0.6, max_scale=1.0):
        #self.p = p
        self.sigma_eps = sigma_eps      # noise/jitter std added on the diagonal
        self.max_length = max_length    # upper bound for the length-scale draw
        self.max_scale = max_scale      # upper bound for the output-scale draw

    # x: batch_size * num_points * dim
    def __call__(self, x):
        # Random period in [0.1, 0.5), plus per-batch length-scale and scale.
        p = 0.1 + 0.4*torch.rand([x.shape[0], 1, 1], device=x.device)
        length = 0.1 + (self.max_length-0.1) \
                * torch.rand([x.shape[0], 1, 1], device=x.device)
        scale = 0.1 + (self.max_scale-0.1) \
                * torch.rand([x.shape[0], 1, 1], device=x.device)

        dist = x.unsqueeze(-2) - x.unsqueeze(-3)
        # k(x, x') = s^2 exp(-2 sin^2(pi |x - x'| / p) / l^2) + jitter.
        cov = scale.pow(2) * torch.exp(\
                - 2*(torch.sin(math.pi*dist.abs().sum(-1)/p)/length).pow(2)) \
                + self.sigma_eps**2 * torch.eye(x.shape[-2]).to(x.device)

        return cov
| 4,576 | 34.207692 | 90 | py |
bnp | bnp-master/regression/gp.py | import os
import os.path as osp
import argparse
import yaml
import torch
import torch.nn as nn
import math
import time
import matplotlib.pyplot as plt
from attrdict import AttrDict
from tqdm import tqdm
from copy import deepcopy
from data.gp import *
from utils.misc import load_module, logmeanexp
from utils.paths import results_path, evalsets_path
from utils.log import get_logger, RunningAverage
def main():
    """Entry point: parse CLI options, build the model from its YAML config,
    and dispatch to the requested mode (train / eval / plot / ensemble)."""
    parser = argparse.ArgumentParser()
    # (flag, kwargs) pairs, registered in a loop to keep the listing compact.
    options = [
        ('--mode', dict(choices=['train', 'eval', 'plot', 'ensemble'],
                        default='train')),
        ('--expid', dict(type=str, default='trial')),
        ('--resume', dict(action='store_true', default=False)),
        ('--gpu', dict(type=str, default='0')),
        ('--max_num_points', dict(type=int, default=50)),
        ('--model', dict(type=str, default='cnp')),
        ('--train_batch_size', dict(type=int, default=100)),
        ('--train_num_samples', dict(type=int, default=4)),
        ('--lr', dict(type=float, default=5e-4)),
        ('--num_steps', dict(type=int, default=100000)),
        ('--print_freq', dict(type=int, default=200)),
        ('--eval_freq', dict(type=int, default=5000)),
        ('--save_freq', dict(type=int, default=1000)),
        ('--eval_seed', dict(type=int, default=42)),
        ('--eval_num_batches', dict(type=int, default=3000)),
        ('--eval_batch_size', dict(type=int, default=16)),
        ('--eval_num_samples', dict(type=int, default=50)),
        ('--eval_logfile', dict(type=str, default=None)),
        ('--plot_seed', dict(type=int, default=None)),
        ('--plot_batch_size', dict(type=int, default=16)),
        ('--plot_num_samples', dict(type=int, default=30)),
        ('--plot_num_ctx', dict(type=int, default=None)),
        # OOD settings
        ('--eval_kernel', dict(type=str, default='rbf')),
        ('--t_noise', dict(type=float, default=None)),
    ]
    for flag, kwargs in options:
        parser.add_argument(flag, **kwargs)
    args = parser.parse_args()
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    # Load the model class (e.g. models/cnp.py -> CNP) and its YAML config.
    model_cls = getattr(load_module(f'models/{args.model}.py'), args.model.upper())
    with open(f'configs/gp/{args.model}.yaml', 'r') as f:
        config = yaml.safe_load(f)
    model = model_cls(**config).cuda()
    args.root = osp.join(results_path, 'gp', args.model, args.expid)
    # Dispatch on mode; argparse `choices` guarantees the key exists.
    {'train': train,
     'eval': eval,
     'plot': plot,
     'ensemble': ensemble}[args.mode](args, model)
def train(args, model):
    """Train `model` on tasks sampled online from an RBF-kernel GP.

    Writes args/checkpoints/logs under args.root and supports resuming
    from the last checkpoint via --resume. Runs a final evaluation pass
    when training finishes.
    """
    if not osp.isdir(args.root):
        os.makedirs(args.root)
    # Snapshot the run configuration next to the checkpoints.
    with open(osp.join(args.root, 'args.yaml'), 'w') as f:
        yaml.dump(args.__dict__, f)
    sampler = GPSampler(RBFKernel())
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
            optimizer, T_max=args.num_steps)
    if args.resume:
        # Restore the full training state: weights, optimizer, scheduler,
        # the log file to append to, and the step to resume from.
        ckpt = torch.load(os.path.join(args.root, 'ckpt.tar'))
        model.load_state_dict(ckpt.model)
        optimizer.load_state_dict(ckpt.optimizer)
        scheduler.load_state_dict(ckpt.scheduler)
        logfilename = ckpt.logfilename
        start_step = ckpt.step
    else:
        logfilename = os.path.join(args.root,
                f'train_{time.strftime("%Y%m%d-%H%M")}.log')
        start_step = 1
    logger = get_logger(logfilename)
    ravg = RunningAverage()
    if not args.resume:
        logger.info('Total number of parameters: {}\n'.format(
            sum(p.numel() for p in model.parameters())))
    for step in range(start_step, args.num_steps+1):
        model.train()
        optimizer.zero_grad()
        # A fresh synthetic batch each step — an effectively infinite
        # stream of GP regression tasks.
        batch = sampler.sample(
            batch_size=args.train_batch_size,
            max_num_points=args.max_num_points,
            device='cuda')
        outs = model(batch, num_samples=args.train_num_samples)
        outs.loss.backward()
        optimizer.step()
        scheduler.step()
        for key, val in outs.items():
            ravg.update(key, val)
        if step % args.print_freq == 0:
            line = f'{args.model}:{args.expid} step {step} '
            line += f'lr {optimizer.param_groups[0]["lr"]:.3e} '
            line += ravg.info()
            logger.info(line)
        if step % args.eval_freq == 0:
            # Periodic evaluation; running averages restart afterwards.
            line = eval(args, model)
            logger.info(line + '\n')
            ravg.reset()
        if step % args.save_freq == 0 or step == args.num_steps:
            ckpt = AttrDict()
            ckpt.model = model.state_dict()
            ckpt.optimizer = optimizer.state_dict()
            ckpt.scheduler = scheduler.state_dict()
            ckpt.logfilename = logfilename
            # Save step+1 so a resumed run starts at the *next* step.
            ckpt.step = step + 1
            torch.save(ckpt, os.path.join(args.root, 'ckpt.tar'))
    # Final evaluation; switching mode makes eval() reload the checkpoint
    # and write its own log file.
    args.mode = 'eval'
    eval(args, model)
def gen_evalset(args):
    """Generate and cache a fixed evaluation set of GP tasks.

    The kernel is selected by args.eval_kernel, sampling is seeded with
    args.eval_seed for reproducibility, and the resulting batches are
    saved under evalsets_path/gp.
    """
    if args.eval_kernel == 'rbf':
        kernel = RBFKernel()
    elif args.eval_kernel == 'matern':
        kernel = Matern52Kernel()
    elif args.eval_kernel == 'periodic':
        kernel = PeriodicKernel()
    else:
        raise ValueError(f'Invalid kernel {args.eval_kernel}')
    torch.manual_seed(args.eval_seed)
    torch.cuda.manual_seed(args.eval_seed)
    # t_noise, if given, is forwarded to the sampler as an OOD corruption
    # of the sampled tasks -- see GPSampler for its exact semantics.
    sampler = GPSampler(kernel, t_noise=args.t_noise)
    batches = []
    for i in tqdm(range(args.eval_num_batches)):
        batches.append(sampler.sample(
            batch_size=args.eval_batch_size,
            max_num_points=args.max_num_points))
    # Re-randomize the global RNGs after the seeded section.
    torch.manual_seed(time.time())
    torch.cuda.manual_seed(time.time())
    path = osp.join(evalsets_path, 'gp')
    if not osp.isdir(path):
        os.makedirs(path)
    filename = f'{args.eval_kernel}'
    if args.t_noise is not None:
        filename += f'_{args.t_noise}'
    filename += '.tar'
    torch.save(batches, osp.join(path, filename))
def eval(args, model):
    """Evaluate `model` on the cached evaluation set for args.eval_kernel.

    In 'eval' mode the latest checkpoint is loaded first and results are
    written to a log file under args.root; when called from train(), the
    in-memory model is used and the summary line is only returned.

    Returns:
        A one-line string summarizing the running-average metrics.
    """
    if args.mode == 'eval':
        ckpt = torch.load(os.path.join(args.root, 'ckpt.tar'))
        model.load_state_dict(ckpt.model)
        if args.eval_logfile is None:
            eval_logfile = f'eval_{args.eval_kernel}'
            if args.t_noise is not None:
                eval_logfile += f'_tn_{args.t_noise}'
            eval_logfile += '.log'
        else:
            eval_logfile = args.eval_logfile
        filename = os.path.join(args.root, eval_logfile)
        logger = get_logger(filename, mode='w')
    else:
        logger = None
    # Validate the kernel name early. (The kernel *object* is built inside
    # gen_evalset(); constructing one here as well, as the code previously
    # did, was dead work whose result was never used.)
    if args.eval_kernel not in ('rbf', 'matern', 'periodic'):
        raise ValueError(f'Invalid kernel {args.eval_kernel}')
    path = osp.join(evalsets_path, 'gp')
    filename = f'{args.eval_kernel}'
    if args.t_noise is not None:
        filename += f'_{args.t_noise}'
    filename += '.tar'
    if not osp.isfile(osp.join(path, filename)):
        print('generating evaluation sets...')
        gen_evalset(args)
    eval_batches = torch.load(osp.join(path, filename))
    # Seed so stochastic models evaluate deterministically.
    torch.manual_seed(args.eval_seed)
    torch.cuda.manual_seed(args.eval_seed)
    ravg = RunningAverage()
    model.eval()
    with torch.no_grad():
        for batch in tqdm(eval_batches):
            for key, val in batch.items():
                batch[key] = val.cuda()
            outs = model(batch, num_samples=args.eval_num_samples)
            for key, val in outs.items():
                ravg.update(key, val)
    # Re-randomize the global RNGs after the seeded section.
    torch.manual_seed(time.time())
    torch.cuda.manual_seed(time.time())
    line = f'{args.model}:{args.expid} {args.eval_kernel} '
    if args.t_noise is not None:
        line += f'tn {args.t_noise} '
    line += ravg.info()
    if logger is not None:
        logger.info(line)
    return line
def plot(args, model):
    """Plot posterior predictions of the checkpointed model on fresh GP tasks.

    Samples a batch of tasks, reports context/target log-likelihoods, and
    draws mean +/- std bands per task (one subplot per batch element).
    """
    ckpt = torch.load(os.path.join(args.root, 'ckpt.tar'))
    model.load_state_dict(ckpt.model)

    def tnp(x):
        # Tensor -> flat numpy array for matplotlib.
        return x.squeeze().cpu().data.numpy()

    if args.plot_seed is not None:
        torch.manual_seed(args.plot_seed)
        torch.cuda.manual_seed(args.plot_seed)

    # Bug fix: this previously read `args.pp`, an option that was never
    # registered with argparse, so plot mode always crashed with an
    # AttributeError (and PeriodicKernel takes no `p` argument anyway).
    # Select the kernel from args.eval_kernel, consistent with eval() and
    # gen_evalset().
    if args.eval_kernel == 'rbf':
        kernel = RBFKernel()
    elif args.eval_kernel == 'matern':
        kernel = Matern52Kernel()
    elif args.eval_kernel == 'periodic':
        kernel = PeriodicKernel()
    else:
        raise ValueError(f'Invalid kernel {args.eval_kernel}')
    sampler = GPSampler(kernel, t_noise=args.t_noise)

    # Dense grid of prediction inputs shared by all tasks.
    xp = torch.linspace(-2, 2, 200).cuda()
    batch = sampler.sample(
        batch_size=args.plot_batch_size,
        max_num_points=args.max_num_points,
        num_ctx=args.plot_num_ctx,
        device='cuda')

    model.eval()
    with torch.no_grad():
        outs = model(batch, num_samples=args.eval_num_samples)
        print(f'ctx_ll {outs.ctx_ll.item():.4f}, tar_ll {outs.tar_ll.item():.4f}')
        py = model.predict(batch.xc, batch.yc,
                xp[None,:,None].repeat(args.plot_batch_size, 1, 1),
                num_samples=args.plot_num_samples)

    mu, sigma = py.mean.squeeze(0), py.scale.squeeze(0)

    if args.plot_batch_size > 1:
        nrows = max(args.plot_batch_size//4, 1)
        ncols = min(4, args.plot_batch_size)
        fig, axes = plt.subplots(nrows, ncols,
                figsize=(5*ncols, 5*nrows))
        axes = axes.flatten()
    else:
        fig = plt.figure(figsize=(5, 5))
        axes = [plt.gca()]

    if mu.dim() == 4:
        # Multi-sample (latent) model: overlay each posterior sample faintly.
        for i, ax in enumerate(axes):
            for s in range(mu.shape[0]):
                ax.plot(tnp(xp), tnp(mu[s][i]), color='steelblue',
                        alpha=max(0.5/args.plot_num_samples, 0.1))
                ax.fill_between(tnp(xp), tnp(mu[s][i])-tnp(sigma[s][i]),
                        tnp(mu[s][i])+tnp(sigma[s][i]),
                        color='skyblue',
                        alpha=max(0.2/args.plot_num_samples, 0.02),
                        linewidth=0.0)
            # Draw data points above all the sampled curves.
            ax.scatter(tnp(batch.xc[i]), tnp(batch.yc[i]),
                    color='k', label='context', zorder=mu.shape[0]+1)
            ax.scatter(tnp(batch.xt[i]), tnp(batch.yt[i]),
                    color='orchid', label='target',
                    zorder=mu.shape[0]+1)
            ax.legend()
    else:
        # Deterministic model: a single mean +/- std band per task.
        for i, ax in enumerate(axes):
            ax.plot(tnp(xp), tnp(mu[i]), color='steelblue', alpha=0.5)
            ax.fill_between(tnp(xp), tnp(mu[i]-sigma[i]), tnp(mu[i]+sigma[i]),
                    color='skyblue', alpha=0.2, linewidth=0.0)
            ax.scatter(tnp(batch.xc[i]), tnp(batch.yc[i]),
                    color='k', label='context')
            ax.scatter(tnp(batch.xt[i]), tnp(batch.yt[i]),
                    color='orchid', label='target')
            ax.legend()

    plt.tight_layout()
    plt.show()
def ensemble(args, model):
    """Evaluate an ensemble of 5 independently trained runs (run1..run5).

    Per-datapoint log-likelihoods from each run are combined with
    logmeanexp (a mixture of the runs' predictive distributions) and the
    averaged metrics are written to a log file.
    """
    num_runs = 5
    models = []
    for i in range(num_runs):
        # Copy the architecture, then load each run's trained weights.
        model_ = deepcopy(model)
        ckpt = torch.load(osp.join(results_path, 'gp', args.model, f'run{i+1}', 'ckpt.tar'))
        model_.load_state_dict(ckpt['model'])
        model_.cuda()
        model_.eval()
        models.append(model_)
    path = osp.join(evalsets_path, 'gp')
    filename = f'{args.eval_kernel}'
    if args.t_noise is not None:
        filename += f'_{args.t_noise}'
    filename += '.tar'
    if not osp.isfile(osp.join(path, filename)):
        print('generating evaluation sets...')
        gen_evalset(args)
    eval_batches = torch.load(osp.join(path, filename))
    torch.manual_seed(args.eval_seed)
    torch.cuda.manual_seed(args.eval_seed)
    ravg = RunningAverage()
    with torch.no_grad():
        for batch in tqdm(eval_batches):
            for key, val in batch.items():
                batch[key] = val.cuda()
            ctx_ll = []
            tar_ll = []
            # NOTE(review): the loop variable shadows the `model` parameter;
            # harmless here because all copies were made above.
            for model in models:
                # reduce_ll=False keeps per-point log-likelihoods so the
                # ensemble can be mixed before averaging.
                outs = model(batch,
                        num_samples=args.eval_num_samples,
                        reduce_ll=False)
                ctx_ll.append(outs.ctx_ll)
                tar_ll.append(outs.tar_ll)
            # 2-D outputs: one ll per run -> stack on a new run axis;
            # otherwise runs already carry a sample axis -> concatenate.
            if ctx_ll[0].dim() == 2:
                ctx_ll = torch.stack(ctx_ll)
                tar_ll = torch.stack(tar_ll)
            else:
                ctx_ll = torch.cat(ctx_ll)
                tar_ll = torch.cat(tar_ll)
            ctx_ll = logmeanexp(ctx_ll).mean()
            tar_ll = logmeanexp(tar_ll).mean()
            ravg.update('ctx_ll', ctx_ll)
            ravg.update('tar_ll', tar_ll)
    filename = f'ensemble_{args.eval_kernel}'
    if args.t_noise is not None:
        filename += f'_{args.t_noise}'
    filename += '.log'
    logger = get_logger(osp.join(results_path, 'gp', args.model, filename), mode='w')
    logger.info(ravg.info())
# Allow running this module as a standalone script.
if __name__ == '__main__':
    main()
| 13,075 | 32.875648 | 92 | py |
bnp | bnp-master/regression/emnist.py | import os
import os.path as osp
import argparse
import yaml
import torch
import torch.nn as nn
import math
import time
import matplotlib.pyplot as plt
from attrdict import AttrDict
from tqdm import tqdm
from copy import deepcopy
from data.image import img_to_task, task_to_img
from data.emnist import EMNIST
from utils.misc import load_module, logmeanexp
from utils.paths import results_path, evalsets_path
from utils.log import get_logger, RunningAverage
def main():
    """Entry point: parse CLI options, build the model from its YAML config,
    and dispatch to the requested mode (train / eval / plot / ensemble)."""
    parser = argparse.ArgumentParser()
    # (flag, kwargs) pairs, registered in a loop to keep the listing compact.
    options = [
        ('--mode', dict(choices=['train', 'eval', 'plot', 'ensemble'],
                        default='train')),
        ('--expid', dict(type=str, default='trial')),
        ('--resume', dict(action='store_true', default=False)),
        ('--gpu', dict(type=str, default='0')),
        ('--max_num_points', dict(type=int, default=200)),
        ('--class_range', dict(type=int, nargs='*', default=[0,10])),
        ('--model', dict(type=str, default='cnp')),
        ('--train_batch_size', dict(type=int, default=100)),
        ('--train_num_samples', dict(type=int, default=4)),
        ('--lr', dict(type=float, default=5e-4)),
        ('--num_epochs', dict(type=int, default=200)),
        ('--eval_freq', dict(type=int, default=10)),
        ('--save_freq', dict(type=int, default=10)),
        ('--eval_seed', dict(type=int, default=42)),
        ('--eval_batch_size', dict(type=int, default=16)),
        ('--eval_num_samples', dict(type=int, default=50)),
        ('--eval_logfile', dict(type=str, default=None)),
        ('--plot_seed', dict(type=int, default=None)),
        ('--plot_batch_size', dict(type=int, default=16)),
        ('--plot_num_samples', dict(type=int, default=30)),
        ('--plot_num_ctx', dict(type=int, default=100)),
        # OOD settings
        ('--t_noise', dict(type=float, default=None)),
    ]
    for flag, kwargs in options:
        parser.add_argument(flag, **kwargs)
    args = parser.parse_args()
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    # Load the model class (e.g. models/cnp.py -> CNP) and its YAML config.
    model_cls = getattr(load_module(f'models/{args.model}.py'), args.model.upper())
    with open(f'configs/emnist/{args.model}.yaml', 'r') as f:
        config = yaml.safe_load(f)
    model = model_cls(**config).cuda()
    args.root = osp.join(results_path, 'emnist', args.model, args.expid)
    # Dispatch on mode; argparse `choices` guarantees the key exists.
    {'train': train,
     'eval': eval,
     'plot': plot,
     'ensemble': ensemble}[args.mode](args, model)
def train(args, model):
    """Train `model` on pixel-completion tasks built from EMNIST images.

    Each image batch is converted to a regression task by img_to_task.
    Writes args/checkpoints/logs under args.root; supports resuming via
    --resume and runs a final evaluation when training finishes.
    """
    if not osp.isdir(args.root):
        os.makedirs(args.root)
    with open(osp.join(args.root, 'args.yaml'), 'w') as f:
        yaml.dump(args.__dict__, f)
    train_ds = EMNIST(train=True, class_range=args.class_range)
    # NOTE: the test split was previously instantiated here (`eval_ds`) but
    # never used — eval() loads its own cached batches — so the dead load
    # is removed.
    train_loader = torch.utils.data.DataLoader(train_ds,
        batch_size=args.train_batch_size,
        shuffle=True, num_workers=4)
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
            optimizer, T_max=len(train_loader)*args.num_epochs)
    if args.resume:
        # Restore full training state (weights/optimizer/scheduler/log/epoch).
        ckpt = torch.load(osp.join(args.root, 'ckpt.tar'))
        model.load_state_dict(ckpt.model)
        optimizer.load_state_dict(ckpt.optimizer)
        scheduler.load_state_dict(ckpt.scheduler)
        logfilename = ckpt.logfilename
        start_epoch = ckpt.epoch
    else:
        logfilename = osp.join(args.root, 'train_{}.log'.format(
            time.strftime('%Y%m%d-%H%M')))
        start_epoch = 1
    logger = get_logger(logfilename)
    ravg = RunningAverage()
    if not args.resume:
        logger.info('Total number of parameters: {}\n'.format(
            sum(p.numel() for p in model.parameters())))
    for epoch in range(start_epoch, args.num_epochs+1):
        model.train()
        for (x, _) in tqdm(train_loader):
            # Turn each image batch into a random pixel-regression task.
            batch = img_to_task(x,
                    max_num_points=args.max_num_points,
                    device='cuda')
            optimizer.zero_grad()
            outs = model(batch, num_samples=args.train_num_samples)
            outs.loss.backward()
            optimizer.step()
            scheduler.step()
            for key, val in outs.items():
                ravg.update(key, val)
        line = f'{args.model}:{args.expid} epoch {epoch} '
        line += f'lr {optimizer.param_groups[0]["lr"]:.3e} '
        line += ravg.info()
        logger.info(line)
        if epoch % args.eval_freq == 0:
            logger.info(eval(args, model) + '\n')
        ravg.reset()
        if epoch % args.save_freq == 0 or epoch == args.num_epochs:
            ckpt = AttrDict()
            ckpt.model = model.state_dict()
            ckpt.optimizer = optimizer.state_dict()
            ckpt.scheduler = scheduler.state_dict()
            ckpt.logfilename = logfilename
            # epoch+1 so a resumed run continues from the next epoch.
            ckpt.epoch = epoch + 1
            torch.save(ckpt, osp.join(args.root, 'ckpt.tar'))
    # Final evaluation; switching mode makes eval() reload the checkpoint.
    args.mode = 'eval'
    eval(args, model)
def gen_evalset(args):
    """Generate and cache a fixed evaluation set of EMNIST pixel tasks.

    Task construction is seeded with args.eval_seed for reproducibility;
    batches are saved under evalsets_path/emnist, keyed by class range
    and (optionally) t_noise.
    """
    torch.manual_seed(args.eval_seed)
    torch.cuda.manual_seed(args.eval_seed)
    eval_ds = EMNIST(train=False, class_range=args.class_range)
    eval_loader = torch.utils.data.DataLoader(eval_ds,
            batch_size=args.eval_batch_size,
            shuffle=False, num_workers=4)
    batches = []
    for x, _ in tqdm(eval_loader):
        batches.append(img_to_task(x,
            t_noise=args.t_noise,
            max_num_points=args.max_num_points))
    # Re-randomize the global RNGs after the seeded section.
    torch.manual_seed(time.time())
    torch.cuda.manual_seed(time.time())
    path = osp.join(evalsets_path, 'emnist')
    if not osp.isdir(path):
        os.makedirs(path)
    c1, c2 = args.class_range
    filename = f'{c1}-{c2}'
    if args.t_noise is not None:
        filename += f'_{args.t_noise}'
    filename += '.tar'
    torch.save(batches, osp.join(path, filename))
def eval(args, model):
    """Evaluate `model` on the cached EMNIST evaluation set.

    In 'eval' mode the latest checkpoint is loaded first and results are
    logged under args.root; when called from train(), the in-memory model
    is used and the summary line is only returned.

    Returns:
        A one-line string summarizing the running-average metrics.
    """
    if args.mode == 'eval':
        ckpt = torch.load(osp.join(args.root, 'ckpt.tar'))
        model.load_state_dict(ckpt.model)
        if args.eval_logfile is None:
            c1, c2 = args.class_range
            eval_logfile = f'eval_{c1}-{c2}'
            if args.t_noise is not None:
                eval_logfile += f'_{args.t_noise}'
            eval_logfile += '.log'
        else:
            eval_logfile = args.eval_logfile
        filename = osp.join(args.root, eval_logfile)
        logger = get_logger(filename, mode='w')
    else:
        logger = None
    path = osp.join(evalsets_path, 'emnist')
    c1, c2 = args.class_range
    filename = f'{c1}-{c2}'
    if args.t_noise is not None:
        filename += f'_{args.t_noise}'
    filename += '.tar'
    # Build the evaluation set on first use, then reuse the cached file.
    if not osp.isfile(osp.join(path, filename)):
        print('generating evaluation sets...')
        gen_evalset(args)
    eval_batches = torch.load(osp.join(path, filename))
    # Seed so stochastic models evaluate deterministically.
    torch.manual_seed(args.eval_seed)
    torch.cuda.manual_seed(args.eval_seed)
    ravg = RunningAverage()
    model.eval()
    with torch.no_grad():
        for batch in tqdm(eval_batches):
            for key, val in batch.items():
                batch[key] = val.cuda()
            outs = model(batch, num_samples=args.eval_num_samples)
            for key, val in outs.items():
                ravg.update(key, val)
    # Re-randomize the global RNGs after the seeded section.
    torch.manual_seed(time.time())
    torch.cuda.manual_seed(time.time())
    c1, c2 = args.class_range
    line = f'{args.model}:{args.expid} {c1}-{c2} '
    if args.t_noise is not None:
        line += f'tn {args.t_noise} '
    line += ravg.info()
    if logger is not None:
        logger.info(line)
    return line
def ensemble(args, model):
    """Evaluate an ensemble of 5 independently trained runs (run1..run5).

    Per-datapoint log-likelihoods from each run are combined with
    logmeanexp (a mixture of the runs' predictive distributions) and the
    averaged metrics are written to a log file.
    """
    num_runs = 5
    models = []
    for i in range(num_runs):
        # Copy the architecture, then load each run's trained weights.
        model_ = deepcopy(model)
        ckpt = torch.load(osp.join(results_path, 'emnist', args.model, f'run{i+1}', 'ckpt.tar'))
        model_.load_state_dict(ckpt['model'])
        model_.cuda()
        model_.eval()
        models.append(model_)
    path = osp.join(evalsets_path, 'emnist')
    c1, c2 = args.class_range
    filename = f'{c1}-{c2}'
    if args.t_noise is not None:
        filename += f'_{args.t_noise}'
    filename += '.tar'
    if not osp.isfile(osp.join(path, filename)):
        print('generating evaluation sets...')
        gen_evalset(args)
    eval_batches = torch.load(osp.join(path, filename))
    ravg = RunningAverage()
    with torch.no_grad():
        for batch in tqdm(eval_batches):
            for key, val in batch.items():
                batch[key] = val.cuda()
            ctx_ll = []
            tar_ll = []
            # NOTE(review): the loop variable shadows the `model` parameter;
            # harmless here because all copies were made above.
            for model in models:
                # reduce_ll=False keeps per-point log-likelihoods so the
                # ensemble can be mixed before averaging.
                outs = model(batch,
                        num_samples=args.eval_num_samples,
                        reduce_ll=False)
                ctx_ll.append(outs.ctx_ll)
                tar_ll.append(outs.tar_ll)
            # 2-D outputs: one ll per run -> stack on a new run axis;
            # otherwise runs already carry a sample axis -> concatenate.
            if ctx_ll[0].dim() == 2:
                ctx_ll = torch.stack(ctx_ll)
                tar_ll = torch.stack(tar_ll)
            else:
                ctx_ll = torch.cat(ctx_ll)
                tar_ll = torch.cat(tar_ll)
            ctx_ll = logmeanexp(ctx_ll).mean()
            tar_ll = logmeanexp(tar_ll).mean()
            ravg.update('ctx_ll', ctx_ll)
            ravg.update('tar_ll', tar_ll)
    torch.manual_seed(time.time())
    torch.cuda.manual_seed(time.time())
    filename = f'ensemble_{c1}-{c2}'
    if args.t_noise is not None:
        filename += f'_{args.t_noise}'
    filename += '.log'
    logger = get_logger(osp.join(results_path, 'emnist', args.model, filename), mode='w')
    logger.info(ravg.info())
# Allow running this module as a standalone script.
if __name__ == '__main__':
    main()
| 9,732 | 31.335548 | 96 | py |
bnp | bnp-master/regression/lotka_volterra.py | import os
import os.path as osp
import argparse
import yaml
import torch
import torch.nn as nn
import math
import time
import matplotlib.pyplot as plt
from attrdict import AttrDict
from tqdm import tqdm
from copy import deepcopy
from utils.misc import load_module, logmeanexp
from utils.paths import results_path, datasets_path, evalsets_path
from utils.log import get_logger, RunningAverage
from data.lotka_volterra import load_hare_lynx
def standardize(batch):
    """Standardize a task in place using context-set statistics only.

    Inputs (x/xc/xt) and outputs (y/yc/yt) are shifted and scaled by the
    mean/std of the *context* points, so no target information leaks into
    the normalization. Dimensions whose context std is exactly zero are
    rescaled by 1 instead of ~1e-5.

    Args:
        batch: attribute-style container with tensors x, xc, xt, y, yc, yt,
            each shaped (batch, num_points, dim).

    Returns:
        The same `batch` object, standardized in place.
    """
    with torch.no_grad():
        mu, sigma = batch.xc.mean(-2, keepdim=True), batch.xc.std(-2, keepdim=True)
        sigma[sigma==0] = 1.0
        batch.x = (batch.x - mu) / (sigma + 1e-5)
        batch.xc = (batch.xc - mu) / (sigma + 1e-5)
        batch.xt = (batch.xt - mu) / (sigma + 1e-5)
        mu, sigma = batch.yc.mean(-2, keepdim=True), batch.yc.std(-2, keepdim=True)
        # Bug fix: the x-branch guarded against zero std but the y-branch
        # did not, which blew constant outputs up by a factor of ~1e5.
        sigma[sigma==0] = 1.0
        batch.y = (batch.y - mu) / (sigma + 1e-5)
        batch.yc = (batch.yc - mu) / (sigma + 1e-5)
        batch.yt = (batch.yt - mu) / (sigma + 1e-5)
        return batch
def main():
    """Entry point: parse CLI options, build the model from its YAML config,
    and dispatch to the requested mode (train / eval / plot / ensemble)."""
    parser = argparse.ArgumentParser()
    # (flag, kwargs) pairs, registered in a loop to keep the listing compact.
    options = [
        ('--mode', dict(choices=['train', 'eval', 'plot', 'ensemble'],
                        default='train')),
        ('--expid', dict(type=str, default='trial')),
        ('--resume', dict(action='store_true', default=False)),
        ('--gpu', dict(type=str, default='0')),
        ('--max_num_points', dict(type=int, default=50)),
        ('--model', dict(type=str, default='cnp')),
        ('--train_batch_size', dict(type=int, default=100)),
        ('--train_num_samples', dict(type=int, default=4)),
        ('--lr', dict(type=float, default=5e-4)),
        ('--print_freq', dict(type=int, default=200)),
        ('--eval_freq', dict(type=int, default=5000)),
        ('--save_freq', dict(type=int, default=1000)),
        ('--eval_seed', dict(type=int, default=42)),
        ('--hare_lynx', dict(action='store_true')),
        ('--eval_num_samples', dict(type=int, default=50)),
        ('--eval_logfile', dict(type=str, default=None)),
        ('--plot_seed', dict(type=int, default=None)),
        ('--plot_batch_size', dict(type=int, default=16)),
        ('--plot_num_samples', dict(type=int, default=30)),
    ]
    for flag, kwargs in options:
        parser.add_argument(flag, **kwargs)
    args = parser.parse_args()
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    # Load the model class (e.g. models/cnp.py -> CNP) and its YAML config.
    model_cls = getattr(load_module(f'models/{args.model}.py'), args.model.upper())
    with open(f'configs/lotka_volterra/{args.model}.yaml', 'r') as f:
        config = yaml.safe_load(f)
    model = model_cls(**config).cuda()
    args.root = osp.join(results_path, 'lotka_volterra', args.model, args.expid)
    # Dispatch on mode; argparse `choices` guarantees the key exists.
    {'train': train,
     'eval': eval,
     'plot': plot,
     'ensemble': ensemble}[args.mode](args, model)
def train(args, model):
    """Train `model` on pre-generated Lotka-Volterra simulation tasks.

    One optimization step per stored training batch (num_steps equals the
    dataset length). Writes args/checkpoints/logs under args.root and
    supports resuming via --resume.
    """
    if not osp.isdir(args.root):
        os.makedirs(args.root)
    with open(osp.join(args.root, 'args.yaml'), 'w') as f:
        yaml.dump(args.__dict__, f)
    train_data = torch.load(osp.join(datasets_path, 'lotka_volterra', 'train.tar'))
    eval_data = torch.load(osp.join(datasets_path, 'lotka_volterra', 'eval.tar'))
    # One step per pre-generated batch.
    num_steps = len(train_data)
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
            optimizer, T_max=num_steps)
    if args.resume:
        # Restore full training state (weights/optimizer/scheduler/log/step).
        ckpt = torch.load(osp.join(args.root, 'ckpt.tar'))
        model.load_state_dict(ckpt.model)
        optimizer.load_state_dict(ckpt.optimizer)
        scheduler.load_state_dict(ckpt.scheduler)
        logfilename = ckpt.logfilename
        start_step = ckpt.step
    else:
        logfilename = osp.join(args.root,
                f'train_{time.strftime("%Y%m%d-%H%M")}.log')
        start_step = 1
    logger = get_logger(logfilename)
    ravg = RunningAverage()
    if not args.resume:
        logger.info('Total number of parameters: {}\n'.format(
            sum(p.numel() for p in model.parameters())))
    for step in range(start_step, num_steps+1):
        model.train()
        optimizer.zero_grad()
        # Normalize each task by its context statistics before training.
        batch = standardize(train_data[step-1])
        for key, val in batch.items():
            batch[key] = val.cuda()
        outs = model(batch, num_samples=args.train_num_samples)
        outs.loss.backward()
        optimizer.step()
        scheduler.step()
        for key, val in outs.items():
            ravg.update(key, val)
        if step % args.print_freq == 0:
            line = f'{args.model}:{args.expid} step {step} '
            line += f'lr {optimizer.param_groups[0]["lr"]:.3e} '
            line += ravg.info()
            logger.info(line)
        if step % args.eval_freq == 0:
            # Reuse the preloaded eval data to avoid re-reading from disk.
            line = eval(args, model, eval_data=eval_data)
            logger.info(line + '\n')
            ravg.reset()
        if step % args.save_freq == 0 or step == num_steps:
            ckpt = AttrDict()
            ckpt.model = model.state_dict()
            ckpt.optimizer = optimizer.state_dict()
            ckpt.scheduler = scheduler.state_dict()
            ckpt.logfilename = logfilename
            # Save step+1 so a resumed run starts at the *next* step.
            ckpt.step = step + 1
            torch.save(ckpt, osp.join(args.root, 'ckpt.tar'))
    # Final evaluation; switching mode makes eval() reload the checkpoint.
    args.mode = 'eval'
    eval(args, model, eval_data=eval_data)
def eval(args, model, eval_data=None):
    """Evaluate `model` on simulated Lotka-Volterra tasks (or hare-lynx data).

    In 'eval' mode the latest checkpoint is loaded first and results are
    logged under args.root; when called from train(), the in-memory model
    and the preloaded `eval_data` are used and the summary line is only
    returned.

    Returns:
        A one-line string summarizing the running-average metrics.
    """
    if args.mode == 'eval':
        ckpt = torch.load(osp.join(args.root, 'ckpt.tar'))
        model.load_state_dict(ckpt.model)
        if args.eval_logfile is None:
            if args.hare_lynx:
                eval_logfile = 'hare_lynx.log'
            else:
                eval_logfile = 'eval.log'
        else:
            eval_logfile = args.eval_logfile
        filename = osp.join(args.root, eval_logfile)
        logger = get_logger(filename, mode='w')
    else:
        logger = None
    # Seed so stochastic models evaluate deterministically.
    torch.manual_seed(args.eval_seed)
    torch.cuda.manual_seed(args.eval_seed)
    if eval_data is None:
        if args.hare_lynx:
            # Real-world OOD evaluation on the hare-lynx time series.
            eval_data = load_hare_lynx(1000, 16)
        else:
            eval_data = torch.load(osp.join(datasets_path, 'lotka_volterra', 'eval.tar'))
    ravg = RunningAverage()
    model.eval()
    with torch.no_grad():
        for batch in tqdm(eval_data):
            batch = standardize(batch)
            for key, val in batch.items():
                batch[key] = val.cuda()
            outs = model(batch, num_samples=args.eval_num_samples)
            for key, val in outs.items():
                ravg.update(key, val)
    # Re-randomize the global RNGs after the seeded section.
    torch.manual_seed(time.time())
    torch.cuda.manual_seed(time.time())
    line = f'{args.model}:{args.expid} '
    line += ravg.info()
    if logger is not None:
        logger.info(line)
    return line
def ensemble(args, model):
    """Evaluate an ensemble of 5 independently trained runs (run1..run5).

    Per-datapoint log-likelihoods from each run are combined with
    logmeanexp (a mixture of the runs' predictive distributions) and the
    averaged metrics are written to a log file.
    """
    num_runs = 5
    models = []
    for i in range(num_runs):
        # Copy the architecture, then load each run's trained weights.
        model_ = deepcopy(model)
        ckpt = torch.load(osp.join(results_path, 'lotka_volterra', args.model, f'run{i+1}', 'ckpt.tar'))
        model_.load_state_dict(ckpt['model'])
        model_.cuda()
        model_.eval()
        models.append(model_)
    torch.manual_seed(args.eval_seed)
    torch.cuda.manual_seed(args.eval_seed)
    if args.hare_lynx:
        # Real-world OOD evaluation on the hare-lynx time series.
        eval_data = load_hare_lynx(1000, 16)
    else:
        eval_data = torch.load(osp.join(datasets_path, 'lotka_volterra', 'eval.tar'))
    ravg = RunningAverage()
    with torch.no_grad():
        for batch in tqdm(eval_data):
            batch = standardize(batch)
            for key, val in batch.items():
                batch[key] = val.cuda()
            ctx_ll = []
            tar_ll = []
            for model_ in models:
                # reduce_ll=False keeps per-point log-likelihoods so the
                # ensemble can be mixed before averaging.
                outs = model_(batch,
                        num_samples=args.eval_num_samples,
                        reduce_ll=False)
                ctx_ll.append(outs.ctx_ll)
                tar_ll.append(outs.tar_ll)
            # 2-D outputs: one ll per run -> stack on a new run axis;
            # otherwise runs already carry a sample axis -> concatenate.
            if ctx_ll[0].dim() == 2:
                ctx_ll = torch.stack(ctx_ll)
                tar_ll = torch.stack(tar_ll)
            else:
                ctx_ll = torch.cat(ctx_ll)
                tar_ll = torch.cat(tar_ll)
            ctx_ll = logmeanexp(ctx_ll).mean()
            tar_ll = logmeanexp(tar_ll).mean()
            ravg.update('ctx_ll', ctx_ll)
            ravg.update('tar_ll', tar_ll)
    # Re-randomize the global RNGs after the seeded section.
    torch.manual_seed(time.time())
    torch.cuda.manual_seed(time.time())
    filename = 'ensemble'
    if args.hare_lynx:
        filename += '_hare_lynx'
    filename += '.log'
    logger = get_logger(osp.join(results_path, 'lotka_volterra', args.model, filename), mode='w')
    logger.info(ravg.info())
def plot(args, model):
    """Plot posterior predictions on a random evaluation batch.

    Draws a 4x4 grid of tasks, each with mean +/- std bands for the two
    output dimensions (predator/prey), context points as stars and target
    points as crosses.
    """
    ckpt = torch.load(osp.join(args.root, 'ckpt.tar'))
    model.load_state_dict(ckpt.model)
    def tnp(x):
        # Tensor -> flat numpy array for matplotlib.
        return x.squeeze().cpu().data.numpy()
    if args.hare_lynx:
        eval_data = load_hare_lynx(1000, 16)
    else:
        eval_data = torch.load(osp.join(datasets_path, 'lotka_volterra', 'eval.tar'))
    # Pick one batch at random to visualize.
    bid = torch.randint(len(eval_data), [1]).item()
    batch = standardize(eval_data[bid])
    for k, v in batch.items():
        batch[k] = v.cuda()
    model.eval()
    outs = model(batch, num_samples=args.eval_num_samples)
    print(outs.tar_ll)
    fig, axes = plt.subplots(4, 4, figsize=(20, 20))
    # Per-task prediction grids spanning slightly beyond the observed range.
    xp = []
    for b in range(batch.x.shape[0]):
        bx = batch.x[b]
        xp.append(torch.linspace(bx.min()-0.1, bx.max()+0.1, 200))
    xp = torch.stack(xp).unsqueeze(-1).cuda()
    model.eval()
    with torch.no_grad():
        py = model.predict(batch.xc, batch.yc, xp, num_samples=args.plot_num_samples)
        mu, sigma = py.mean, py.scale
    if mu.dim() > 3:
        # Multi-sample (latent) model: collapse samples into a single
        # mixture mean and total (aleatoric + epistemic) std.
        bmu = mu.mean(0)
        bvar = sigma.pow(2).mean(0) + mu.pow(2).mean(0) - mu.mean(0).pow(2)
        bsigma = bvar.sqrt()
    else:
        bmu = mu
        bsigma = sigma
    for i, ax in enumerate(axes.flatten()):
        ax.plot(tnp(xp[i]), tnp(bmu[i]), alpha=0.5)
        # Output dim 0 ("predator") band.
        upper = tnp(bmu[i][:,0] + bsigma[i][:,0])
        lower = tnp(bmu[i][:,0] - bsigma[i][:,0])
        ax.fill_between(tnp(xp[i]), lower, upper,
                alpha=0.2, linewidth=0.0, label='predator')
        # Output dim 1 ("prey") band.
        upper = tnp(bmu[i][:,1] + bsigma[i][:,1])
        lower = tnp(bmu[i][:,1] - bsigma[i][:,1])
        ax.fill_between(tnp(xp[i]), lower, upper,
                alpha=0.2, linewidth=0.0, label='prey')
        ax.scatter(tnp(batch.xc[i]), tnp(batch.yc[i][:,0]), color='k', marker='*')
        ax.scatter(tnp(batch.xc[i]), tnp(batch.yc[i][:,1]), color='k', marker='*')
        ax.scatter(tnp(batch.xt[i]), tnp(batch.yt[i][:,0]), color='orchid', marker='x')
        ax.scatter(tnp(batch.xt[i]), tnp(batch.yt[i][:,1]), color='orchid', marker='x')
    plt.tight_layout()
    plt.show()
# Allow running this module as a standalone script.
if __name__ == '__main__':
    main()
| 10,852 | 32.291411 | 104 | py |
bnp | bnp-master/regression/celeba.py | import os
import os.path as osp
import argparse
import yaml
import torch
import torch.nn as nn
import math
import time
import matplotlib.pyplot as plt
from attrdict import AttrDict
from tqdm import tqdm
from copy import deepcopy
from data.image import img_to_task, task_to_img
from data.celeba import CelebA
from utils.misc import load_module, logmeanexp
from utils.paths import results_path, evalsets_path
from utils.log import get_logger, RunningAverage
def main():
    """Entry point: parse CLI options, build the model from its YAML config,
    and dispatch to the requested mode (train / eval / plot / ensemble)."""
    parser = argparse.ArgumentParser()
    # (flag, kwargs) pairs, registered in a loop to keep the listing compact.
    options = [
        ('--mode', dict(choices=['train', 'eval', 'plot', 'ensemble'],
                        default='train')),
        ('--expid', dict(type=str, default='trial')),
        ('--resume', dict(action='store_true', default=False)),
        ('--gpu', dict(type=str, default='0')),
        ('--max_num_points', dict(type=int, default=200)),
        ('--model', dict(type=str, default='cnp')),
        ('--train_batch_size', dict(type=int, default=100)),
        ('--train_num_samples', dict(type=int, default=4)),
        ('--lr', dict(type=float, default=5e-4)),
        ('--num_epochs', dict(type=int, default=200)),
        ('--eval_freq', dict(type=int, default=10)),
        ('--save_freq', dict(type=int, default=10)),
        ('--eval_seed', dict(type=int, default=42)),
        ('--eval_batch_size', dict(type=int, default=16)),
        ('--eval_num_samples', dict(type=int, default=50)),
        ('--eval_logfile', dict(type=str, default=None)),
        ('--plot_seed', dict(type=int, default=None)),
        ('--plot_batch_size', dict(type=int, default=16)),
        ('--plot_num_samples', dict(type=int, default=30)),
        ('--plot_num_ctx', dict(type=int, default=100)),
        # OOD settings
        ('--t_noise', dict(type=float, default=None)),
    ]
    for flag, kwargs in options:
        parser.add_argument(flag, **kwargs)
    args = parser.parse_args()
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    # Load the model class (e.g. models/cnp.py -> CNP) and its YAML config.
    model_cls = getattr(load_module(f'models/{args.model}.py'), args.model.upper())
    with open(f'configs/celeba/{args.model}.yaml', 'r') as f:
        config = yaml.safe_load(f)
    model = model_cls(**config).cuda()
    args.root = osp.join(results_path, 'celeba', args.model, args.expid)
    # Dispatch on mode; argparse `choices` guarantees the key exists.
    {'train': train,
     'eval': eval,
     'plot': plot,
     'ensemble': ensemble}[args.mode](args, model)
def train(args, model):
    """Train `model` on pixel-completion tasks built from CelebA images.

    Each image batch is converted to a regression task by img_to_task.
    Writes args/checkpoints/logs under args.root; supports resuming via
    --resume and runs a final evaluation when training finishes.
    """
    if not osp.isdir(args.root):
        os.makedirs(args.root)
    with open(osp.join(args.root, 'args.yaml'), 'w') as f:
        yaml.dump(args.__dict__, f)
    train_ds = CelebA(train=True)
    # NOTE: the test split was previously instantiated here (`eval_ds`,
    # plus a commented-out eval_loader) but never used — eval() loads its
    # own cached batches — so the dead code is removed.
    train_loader = torch.utils.data.DataLoader(train_ds,
        batch_size=args.train_batch_size,
        shuffle=True, num_workers=4)
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
            optimizer, T_max=len(train_loader)*args.num_epochs)
    if args.resume:
        # Restore full training state (weights/optimizer/scheduler/log/epoch).
        ckpt = torch.load(osp.join(args.root, 'ckpt.tar'))
        model.load_state_dict(ckpt.model)
        optimizer.load_state_dict(ckpt.optimizer)
        scheduler.load_state_dict(ckpt.scheduler)
        logfilename = ckpt.logfilename
        start_epoch = ckpt.epoch
    else:
        logfilename = osp.join(args.root, 'train_{}.log'.format(
            time.strftime('%Y%m%d-%H%M')))
        start_epoch = 1
    logger = get_logger(logfilename)
    ravg = RunningAverage()
    if not args.resume:
        logger.info('Total number of parameters: {}\n'.format(
            sum(p.numel() for p in model.parameters())))
    for epoch in range(start_epoch, args.num_epochs+1):
        model.train()
        for (x, _) in tqdm(train_loader):
            # Turn each image batch into a random pixel-regression task.
            batch = img_to_task(x,
                    max_num_points=args.max_num_points,
                    device='cuda')
            optimizer.zero_grad()
            outs = model(batch, num_samples=args.train_num_samples)
            outs.loss.backward()
            optimizer.step()
            scheduler.step()
            for key, val in outs.items():
                ravg.update(key, val)
        line = f'{args.model}:{args.expid} epoch {epoch} '
        line += f'lr {optimizer.param_groups[0]["lr"]:.3e} '
        line += ravg.info()
        logger.info(line)
        if epoch % args.eval_freq == 0:
            logger.info(eval(args, model) + '\n')
        ravg.reset()
        if epoch % args.save_freq == 0 or epoch == args.num_epochs:
            ckpt = AttrDict()
            ckpt.model = model.state_dict()
            ckpt.optimizer = optimizer.state_dict()
            ckpt.scheduler = scheduler.state_dict()
            ckpt.logfilename = logfilename
            # epoch+1 so a resumed run continues from the next epoch.
            ckpt.epoch = epoch + 1
            torch.save(ckpt, osp.join(args.root, 'ckpt.tar'))
    # Final evaluation; switching mode makes eval() reload the checkpoint.
    args.mode = 'eval'
    eval(args, model)
def gen_evalset(args):
    """Generate and cache a fixed CelebA evaluation set.

    Builds evaluation tasks deterministically under ``args.eval_seed`` and
    saves them to ``<evalsets_path>/celeba/{no_noise|<t_noise>}.tar`` so
    that ``eval()`` can reuse the same batches across runs.
    """
    # Fix RNG so the cached evaluation tasks are reproducible.
    torch.manual_seed(args.eval_seed)
    torch.cuda.manual_seed(args.eval_seed)
    eval_ds = CelebA(train=False)
    eval_loader = torch.utils.data.DataLoader(eval_ds,
            batch_size=args.eval_batch_size,
            shuffle=False, num_workers=4)
    # Pre-build every task batch (optionally with target noise) on CPU.
    batches = []
    for x, _ in tqdm(eval_loader):
        batches.append(img_to_task(x,
            t_noise=args.t_noise,
            max_num_points=args.max_num_points))
    # Re-randomize the global RNG after the deterministic section.
    # NOTE(review): torch.manual_seed int-casts its argument, so passing
    # time.time() (a float) works, but int(time.time()) would be clearer.
    torch.manual_seed(time.time())
    torch.cuda.manual_seed(time.time())
    path = osp.join(evalsets_path, 'celeba')
    if not osp.isdir(path):
        os.makedirs(path)
    filename = 'no_noise.tar' if args.t_noise is None else \
            f'{args.t_noise}.tar'
    torch.save(batches, osp.join(path, filename))
def eval(args, model):
    """Evaluate ``model`` on the cached CelebA evaluation set.

    In 'eval' mode, loads the checkpoint from ``args.root`` and logs to a
    file; otherwise (called from training) only returns the summary line.
    Generates the evaluation set on demand if it is missing.

    NOTE(review): this function shadows the builtin ``eval``; renaming it
    would require updating its callers (e.g. the training loop above).
    """
    if args.mode == 'eval':
        ckpt = torch.load(osp.join(args.root, 'ckpt.tar'))
        model.load_state_dict(ckpt.model)
        if args.eval_logfile is None:
            eval_logfile = f'eval'
            if args.t_noise is not None:
                eval_logfile += f'_{args.t_noise}'
            eval_logfile += '.log'
        else:
            eval_logfile = args.eval_logfile
        filename = osp.join(args.root, eval_logfile)
        logger = get_logger(filename, mode='w')
    else:
        logger = None
    # Locate (or lazily build) the cached evaluation batches.
    path = osp.join(evalsets_path, 'celeba')
    if not osp.isdir(path):
        os.makedirs(path)
    filename = 'no_noise.tar' if args.t_noise is None else \
            f'{args.t_noise}.tar'
    if not osp.isfile(osp.join(path, filename)):
        print('generating evaluation sets...')
        gen_evalset(args)
    eval_batches = torch.load(osp.join(path, filename))
    # Deterministic evaluation: fix the seed for latent-sample draws.
    torch.manual_seed(args.eval_seed)
    torch.cuda.manual_seed(args.eval_seed)
    ravg = RunningAverage()
    model.eval()
    with torch.no_grad():
        for batch in tqdm(eval_batches):
            for key, val in batch.items():
                batch[key] = val.cuda()
            outs = model(batch, num_samples=args.eval_num_samples)
            for key, val in outs.items():
                ravg.update(key, val)
    # Re-randomize the global RNG after the deterministic section.
    torch.manual_seed(time.time())
    torch.cuda.manual_seed(time.time())
    line = f'{args.model}:{args.expid} '
    if args.t_noise is not None:
        line += f'tn {args.t_noise} '
    line += ravg.info()
    if logger is not None:
        logger.info(line)
    return line
def ensemble(args, model):
    """Evaluate an ensemble of 5 independently trained runs of ``args.model``.

    Loads run1..run5 checkpoints, averages predictive likelihoods across
    runs with ``logmeanexp``, and logs the result to an ``ensemble*.log``
    file under the model's results directory.
    """
    num_runs = 5
    models = []
    for i in range(num_runs):
        model_ = deepcopy(model)
        ckpt = torch.load(osp.join(results_path, 'celeba', args.model, f'run{i+1}', 'ckpt.tar'))
        model_.load_state_dict(ckpt['model'])
        model_.cuda()
        model_.eval()
        models.append(model_)
    # Ensure the cached evaluation set exists (shared with eval()).
    path = osp.join(evalsets_path, 'celeba')
    if not osp.isdir(path):
        os.makedirs(path)
    filename = 'no_noise.tar' if args.t_noise is None else \
            f'{args.t_noise}.tar'
    if not osp.isfile(osp.join(path, filename)):
        print('generating evaluation sets...')
        gen_evalset(args)
    eval_batches = torch.load(osp.join(path, filename))
    ravg = RunningAverage()
    with torch.no_grad():
        for batch in tqdm(eval_batches):
            for key, val in batch.items():
                batch[key] = val.cuda()
            # Collect unreduced per-point log-likelihoods from each run.
            ctx_ll = []
            tar_ll = []
            for model in models:
                outs = model(batch,
                        num_samples=args.eval_num_samples,
                        reduce_ll=False)
                ctx_ll.append(outs.ctx_ll)
                tar_ll.append(outs.tar_ll)
            # dim()==2 (B x N, deterministic models): stack a new run axis;
            # otherwise (K x B x N, latent models): concatenate the runs
            # along the existing sample axis.
            if ctx_ll[0].dim() == 2:
                ctx_ll = torch.stack(ctx_ll)
                tar_ll = torch.stack(tar_ll)
            else:
                ctx_ll = torch.cat(ctx_ll)
                tar_ll = torch.cat(tar_ll)
            # Ensemble in probability space: logmeanexp averages the
            # predictive probabilities (not the log-probabilities).
            ctx_ll = logmeanexp(ctx_ll).mean()
            tar_ll = logmeanexp(tar_ll).mean()
            ravg.update('ctx_ll', ctx_ll)
            ravg.update('tar_ll', tar_ll)
    # Re-randomize the global RNG after evaluation.
    torch.manual_seed(time.time())
    torch.cuda.manual_seed(time.time())
    filename = f'ensemble'
    if args.t_noise is not None:
        filename += f'_{args.t_noise}'
    filename += '.log'
    logger = get_logger(osp.join(results_path, 'celeba', args.model, filename), mode='w')
    logger.info(ravg.info())
# Script entry point; `main` is defined earlier in this file (outside this view).
if __name__ == '__main__':
    main()
| 9,536 | 31.328814 | 96 | py |
bnp | bnp-master/regression/models/anp.py | import torch
import torch.nn as nn
from torch.distributions import kl_divergence
from attrdict import AttrDict
from utils.misc import stack, logmeanexp
from utils.sampling import sample_subset
from models.modules import CrossAttnEncoder, PoolingEncoder, Decoder
class ANP(nn.Module):
    """Attentive Neural Process.

    Combines a deterministic cross-attention path (``denc``) with a global
    latent path (``lenc``); the decoder consumes the concatenation of both
    representations and returns a Normal predictive distribution.
    """

    def __init__(self,
            dim_x=1,
            dim_y=1,
            dim_hid=128,
            dim_lat=128,
            enc_v_depth=4,
            enc_qk_depth=2,
            enc_pre_depth=4,
            enc_post_depth=2,
            dec_depth=3):
        super().__init__()

        # Deterministic path: targets cross-attend over context points.
        self.denc = CrossAttnEncoder(
                dim_x=dim_x,
                dim_y=dim_y,
                dim_hid=dim_hid,
                v_depth=enc_v_depth,
                qk_depth=enc_qk_depth)

        # Latent path: pooled encoder producing a Normal over a global z.
        self.lenc = PoolingEncoder(
                dim_x=dim_x,
                dim_y=dim_y,
                dim_hid=dim_hid,
                dim_lat=dim_lat,
                self_attn=True,
                pre_depth=enc_pre_depth,
                post_depth=enc_post_depth)

        self.dec = Decoder(
                dim_x=dim_x,
                dim_y=dim_y,
                dim_enc=dim_hid+dim_lat,
                dim_hid=dim_hid,
                depth=dec_depth)

    def predict(self, xc, yc, xt, z=None, num_samples=None):
        """Predictive distribution at targets ``xt`` given context ``(xc, yc)``.

        If ``z`` is None it is sampled from the context-conditioned prior;
        ``num_samples`` controls how many latent samples are drawn/stacked.
        """
        theta = stack(self.denc(xc, yc, xt), num_samples)
        if z is None:
            pz = self.lenc(xc, yc)
            z = pz.rsample() if num_samples is None \
                    else pz.rsample([num_samples])
        # Broadcast the global latent across all target points (dim -2).
        z = stack(z, xt.shape[-2], -2)
        encoded = torch.cat([theta, z], -1)
        return self.dec(encoded, stack(xt, num_samples))

    def forward(self, batch, num_samples=None, reduce_ll=True):
        """Training: ELBO (or multi-sample bound); eval: context/target lls."""
        outs = AttrDict()
        if self.training:
            pz = self.lenc(batch.xc, batch.yc)
            qz = self.lenc(batch.x, batch.y)
            z = qz.rsample() if num_samples is None else \
                    qz.rsample([num_samples])
            py = self.predict(batch.xc, batch.yc, batch.x,
                    z=z, num_samples=num_samples)

            # Fix: guard against num_samples=None before the `> 1` comparison
            # (`None > 1` raises TypeError in Python 3). With None or a
            # single sample we fall back to the standard ELBO below.
            if num_samples is not None and num_samples > 1:
                # K * B * N
                recon = py.log_prob(stack(batch.y, num_samples)).sum(-1)
                # K * B
                log_qz = qz.log_prob(z).sum(-1)
                log_pz = pz.log_prob(z).sum(-1)
                # K * B importance weights for the multi-sample bound
                log_w = recon.sum(-1) + log_pz - log_qz
                outs.loss = -logmeanexp(log_w).mean() / batch.x.shape[-2]
            else:
                outs.recon = py.log_prob(batch.y).sum(-1).mean()
                outs.kld = kl_divergence(qz, pz).sum(-1).mean()
                outs.loss = -outs.recon + outs.kld / batch.x.shape[-2]
        else:
            py = self.predict(batch.xc, batch.yc, batch.x, num_samples=num_samples)
            if num_samples is None:
                ll = py.log_prob(batch.y).sum(-1)
            else:
                y = torch.stack([batch.y]*num_samples)
                if reduce_ll:
                    # Average predictive probability over latent samples.
                    ll = logmeanexp(py.log_prob(y).sum(-1))
                else:
                    ll = py.log_prob(y).sum(-1)
            # batch.x is [context ; target] along the point axis.
            num_ctx = batch.xc.shape[-2]
            if reduce_ll:
                outs.ctx_ll = ll[...,:num_ctx].mean()
                outs.tar_ll = ll[...,num_ctx:].mean()
            else:
                outs.ctx_ll = ll[...,:num_ctx]
                outs.tar_ll = ll[...,num_ctx:]
        return outs
| 3,447 | 32.153846 | 83 | py |
bnp | bnp-master/regression/models/cnp.py | import torch
import torch.nn as nn
from attrdict import AttrDict
from models.modules import PoolingEncoder, Decoder
class CNP(nn.Module):
    """Conditional Neural Process: two pooled context encoders feeding a
    shared decoder that outputs a Normal predictive distribution."""

    def __init__(self,
            dim_x=1,
            dim_y=1,
            dim_hid=128,
            enc_pre_depth=4,
            enc_post_depth=2,
            dec_depth=3):
        super().__init__()
        enc_kwargs = dict(
                dim_x=dim_x,
                dim_y=dim_y,
                dim_hid=dim_hid,
                pre_depth=enc_pre_depth,
                post_depth=enc_post_depth)
        # Two identical pooled encoders over the context set.
        self.enc1 = PoolingEncoder(**enc_kwargs)
        self.enc2 = PoolingEncoder(**enc_kwargs)
        self.dec = Decoder(
                dim_x=dim_x,
                dim_y=dim_y,
                dim_enc=2*dim_hid,
                dim_hid=dim_hid,
                depth=dec_depth)

    def predict(self, xc, yc, xt, num_samples=None):
        """Return the predictive Normal at targets ``xt``."""
        rep = torch.cat([self.enc1(xc, yc), self.enc2(xc, yc)], -1)
        # Repeat the pooled representation once per target point.
        rep = torch.stack([rep]*xt.shape[-2], -2)
        return self.dec(rep, xt)

    def forward(self, batch, num_samples=None, reduce_ll=True):
        """Training: negative mean log-likelihood; eval: ctx/target lls."""
        outs = AttrDict()
        py = self.predict(batch.xc, batch.yc, batch.x)
        ll = py.log_prob(batch.y).sum(-1)
        if self.training:
            outs.loss = -ll.mean()
            return outs
        # batch.x is [context ; target] along the point axis.
        num_ctx = batch.xc.shape[-2]
        ctx_ll, tar_ll = ll[...,:num_ctx], ll[...,num_ctx:]
        if reduce_ll:
            ctx_ll, tar_ll = ctx_ll.mean(), tar_ll.mean()
        outs.ctx_ll = ctx_ll
        outs.tar_ll = tar_ll
        return outs
| 1,748 | 27.672131 | 71 | py |
bnp | bnp-master/regression/models/modules.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Normal
from models.attention import MultiHeadAttn, SelfAttn
__all__ = ['PoolingEncoder', 'CrossAttnEncoder', 'Decoder']
def build_mlp(dim_in, dim_hid, dim_out, depth):
    """Build a ReLU MLP with ``depth`` linear layers.

    Layout: Linear(dim_in, dim_hid), ReLU, then (depth-2) repetitions of
    Linear(dim_hid, dim_hid) + ReLU, then Linear(dim_hid, dim_out).
    """
    layers = [nn.Linear(dim_in, dim_hid), nn.ReLU(True)]
    layers += [m for _ in range(depth-2)
               for m in (nn.Linear(dim_hid, dim_hid), nn.ReLU(True))]
    layers.append(nn.Linear(dim_hid, dim_out))
    return nn.Sequential(*layers)
class PoolingEncoder(nn.Module):
    """Permutation-invariant set encoder: per-point MLP (optionally ending
    in self-attention), mean-pooled over points, then a post MLP.

    Deterministic when ``dim_lat`` is None; otherwise returns a Normal over
    a global latent of size ``dim_lat``.
    """

    def __init__(self, dim_x=1, dim_y=1,
            dim_hid=128, dim_lat=None, self_attn=False,
            pre_depth=4, post_depth=2):
        super().__init__()
        # Latent mode is selected purely by whether dim_lat was given.
        self.use_lat = dim_lat is not None

        # With self_attn, the last two MLP layers are replaced by a
        # ReLU + SelfAttn block (hence pre_depth-2).
        self.net_pre = build_mlp(dim_x+dim_y, dim_hid, dim_hid, pre_depth) \
                if not self_attn else \
                nn.Sequential(
                        build_mlp(dim_x+dim_y, dim_hid, dim_hid, pre_depth-2),
                        nn.ReLU(True),
                        SelfAttn(dim_hid, dim_hid))

        # Latent mode outputs 2*dim_lat so it can be chunked into (mu, sigma).
        self.net_post = build_mlp(dim_hid, dim_hid,
                2*dim_lat if self.use_lat else dim_hid,
                post_depth)

    def forward(self, xc, yc, mask=None):
        # Encode each (x, y) pair, then pool over the point axis (-2).
        out = self.net_pre(torch.cat([xc, yc], -1))
        if mask is None:
            out = out.mean(-2)
        else:
            # Masked mean: zero padded points and divide by the (detached)
            # count of valid points; 1e-5 guards against empty masks.
            mask = mask.to(xc.device)
            out = (out * mask.unsqueeze(-1)).sum(-2) / \
                    (mask.sum(-1, keepdim=True).detach() + 1e-5)
        if self.use_lat:
            # Latent path: sigmoid bounds the scale to (0.1, 1.0).
            mu, sigma = self.net_post(out).chunk(2, -1)
            sigma = 0.1 + 0.9 * torch.sigmoid(sigma)
            return Normal(mu, sigma)
        else:
            return self.net_post(out)
class CrossAttnEncoder(nn.Module):
    """Cross-attention encoder: target points attend over encoded context.

    Returns a per-target representation, or a Normal over a per-target
    latent when ``dim_lat`` is given.
    """

    def __init__(self, dim_x=1, dim_y=1, dim_hid=128,
            dim_lat=None, self_attn=True,
            v_depth=4, qk_depth=2):
        super().__init__()
        self.use_lat = dim_lat is not None

        if not self_attn:
            self.net_v = build_mlp(dim_x+dim_y, dim_hid, dim_hid, v_depth)
        else:
            # Two MLP layers are traded for a self-attention block over
            # the value vectors (hence v_depth-2).
            self.net_v = build_mlp(dim_x+dim_y, dim_hid, dim_hid, v_depth-2)
            self.self_attn = SelfAttn(dim_hid, dim_hid)

        # Shared MLP produces queries (from targets) and keys (from context).
        self.net_qk = build_mlp(dim_x, dim_hid, dim_hid, qk_depth)

        # Latent mode outputs 2*dim_lat so it can be chunked into (mu, sigma).
        self.attn = MultiHeadAttn(dim_hid, dim_hid, dim_hid,
                2*dim_lat if self.use_lat else dim_hid)

    def forward(self, xc, yc, xt, mask=None):
        q, k = self.net_qk(xt), self.net_qk(xc)

        # Values come from the encoded (x, y) context pairs.
        v = self.net_v(torch.cat([xc, yc], -1))
        if hasattr(self, 'self_attn'):
            v = self.self_attn(v, mask=mask)

        out = self.attn(q, k, v, mask=mask)
        if self.use_lat:
            # Latent path: sigmoid bounds the scale to (0.1, 1.0).
            mu, sigma = out.chunk(2, -1)
            sigma = 0.1 + 0.9 * torch.sigmoid(sigma)
            return Normal(mu, sigma)
        else:
            return out
class Decoder(nn.Module):
    """Decode (encoded representation, target input) pairs into a
    heteroscedastic Normal predictive distribution.

    ``add_ctx`` optionally installs a bias-free projection for an extra
    context vector that is added to the first hidden layer.
    """

    def __init__(self, dim_x=1, dim_y=1,
            dim_enc=128, dim_hid=128, depth=3):
        super().__init__()
        # First layer fuses the encoding with the raw target input.
        self.fc = nn.Linear(dim_x+dim_enc, dim_hid)
        self.dim_hid = dim_hid

        # Remaining layers: ReLU, (depth-2) hidden blocks, and a head
        # producing 2*dim_y values (mean and pre-scale).
        body = [nn.ReLU(True)]
        for _ in range(depth-2):
            body += [nn.Linear(dim_hid, dim_hid), nn.ReLU(True)]
        body += [nn.Linear(dim_hid, 2*dim_y)]
        self.mlp = nn.Sequential(*body)

    def add_ctx(self, dim_ctx):
        """Enable an additive (bias-free) context input of size ``dim_ctx``."""
        self.dim_ctx = dim_ctx
        self.fc_ctx = nn.Linear(dim_ctx, self.dim_hid, bias=False)

    def forward(self, encoded, x, ctx=None):
        hid = self.fc(torch.cat([encoded, x], -1))
        if ctx is not None:
            hid = hid + self.fc_ctx(ctx)
        mu, pre_sigma = self.mlp(hid).chunk(2, -1)
        # Softplus keeps the scale positive, offset by a 0.1 floor.
        sigma = 0.1 + 0.9 * F.softplus(pre_sigma)
        return Normal(mu, sigma)
| 3,867 | 33.535714 | 78 | py |
bnp | bnp-master/regression/models/banp.py | import torch
import torch.nn as nn
from attrdict import AttrDict
from models.canp import CANP
from utils.misc import stack, logmeanexp
from utils.sampling import sample_with_replacement as SWR, sample_subset
class BANP(CANP):
    """Bootstrapping Attentive Neural Process: a CANP whose decoder also
    receives a bootstrap context built from residual-resampled pseudo data.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Extra decoder input for the bootstrap encoding. NOTE(review):
        # assumes dim_hid is passed as a keyword argument (KeyError otherwise).
        self.dec.add_ctx(2*kwargs['dim_hid'])

    def encode(self, xc, yc, xt, mask=None):
        # Cross-attention encoding (per target) concatenated with the pooled
        # encoding repeated across target points.
        theta1 = self.enc1(xc, yc, xt)
        theta2 = self.enc2(xc, yc)
        encoded = torch.cat([theta1,
            torch.stack([theta2]*xt.shape[-2], -2)], -1)
        return encoded

    def predict(self, xc, yc, xt, num_samples=None, return_base=False):
        """Bootstrap prediction: resample context residuals (no grad), form
        pseudo-contexts, and decode targets conditioned on both the base
        encoding and the bootstrap encoding."""
        with torch.no_grad():
            # Paired bootstrap: resample (x, y) context pairs with replacement.
            bxc, byc = SWR(xc, yc, num_samples=num_samples)
            sxc, syc = stack(xc, num_samples), stack(yc, num_samples)

            encoded = self.encode(bxc, byc, sxc)
            py_res = self.dec(encoded, sxc)

            # Residual bootstrap: resample standardized residuals, recenter
            # them, and build pseudo-observations byc = mu + sigma * res.
            mu, sigma = py_res.mean, py_res.scale
            res = SWR((syc - mu)/sigma).detach()
            res = (res - res.mean(-2, keepdim=True))

            bxc = sxc
            byc = mu + sigma * res

        # Base path: encode the real context against the real targets.
        encoded_base = self.encode(xc, yc, xt)

        sxt = stack(xt, num_samples)
        encoded_bs = self.encode(bxc, byc, sxt)

        # Decoder sees the base encoding plus the bootstrap encoding as ctx.
        py = self.dec(stack(encoded_base, num_samples),
                sxt, ctx=encoded_bs)

        if self.training or return_base:
            py_base = self.dec(encoded_base, xt)
            return py_base, py
        else:
            return py

    def forward(self, batch, num_samples=None, reduce_ll=True):
        outs = AttrDict()

        def compute_ll(py, y):
            ll = py.log_prob(y).sum(-1)
            # 3-D ll carries a bootstrap-sample axis: average in prob space.
            if ll.dim() == 3 and reduce_ll:
                ll = logmeanexp(ll)
            return ll

        if self.training:
            py_base, py = self.predict(batch.xc, batch.yc, batch.x,
                    num_samples=num_samples)
            outs.ll_base = compute_ll(py_base, batch.y).mean()
            outs.ll = compute_ll(py, batch.y).mean()
            # Train both the base path and the bootstrap-conditioned path.
            outs.loss = -outs.ll_base - outs.ll
        else:
            py = self.predict(batch.xc, batch.yc, batch.x,
                    num_samples=num_samples)
            ll = compute_ll(py, batch.y)

            # batch.x is [context ; target] along the point axis.
            num_ctx = batch.xc.shape[-2]
            if reduce_ll:
                outs.ctx_ll = ll[...,:num_ctx].mean()
                outs.tar_ll = ll[...,num_ctx:].mean()
            else:
                outs.ctx_ll = ll[...,:num_ctx]
                outs.tar_ll = ll[...,num_ctx:]

        return outs
| 2,555 | 31.35443 | 72 | py |
bnp | bnp-master/regression/models/canp.py | import torch
import torch.nn as nn
from attrdict import AttrDict
from models.modules import CrossAttnEncoder, Decoder, PoolingEncoder
class CANP(nn.Module):
    """Conditional Attentive Neural Process: a deterministic model pairing
    a cross-attention encoder with a pooled encoder, decoded to a Normal."""

    def __init__(self,
            dim_x=1,
            dim_y=1,
            dim_hid=128,
            enc_v_depth=4,
            enc_qk_depth=2,
            enc_pre_depth=4,
            enc_post_depth=2,
            dec_depth=3):
        super().__init__()
        # Per-target path: targets cross-attend over context points.
        self.enc1 = CrossAttnEncoder(
                dim_x=dim_x,
                dim_y=dim_y,
                dim_hid=dim_hid,
                v_depth=enc_v_depth,
                qk_depth=enc_qk_depth)
        # Global path: self-attentive pooled encoding of the context.
        self.enc2 = PoolingEncoder(
                dim_x=dim_x,
                dim_y=dim_y,
                dim_hid=dim_hid,
                self_attn=True,
                pre_depth=enc_pre_depth,
                post_depth=enc_post_depth)
        self.dec = Decoder(
                dim_x=dim_x,
                dim_y=dim_y,
                dim_enc=2*dim_hid,
                dim_hid=dim_hid,
                depth=dec_depth)

    def predict(self, xc, yc, xt, num_samples=None):
        """Return the predictive Normal at targets ``xt``."""
        attn_rep = self.enc1(xc, yc, xt)
        # Repeat the pooled encoding once per target point.
        pool_rep = torch.stack([self.enc2(xc, yc)]*xt.shape[-2], -2)
        return self.dec(torch.cat([attn_rep, pool_rep], -1), xt)

    def forward(self, batch, num_samples=None, reduce_ll=True):
        """Training: negative mean log-likelihood; eval: ctx/target lls."""
        outs = AttrDict()
        py = self.predict(batch.xc, batch.yc, batch.x)
        ll = py.log_prob(batch.y).sum(-1)
        if self.training:
            outs.loss = -ll.mean()
            return outs
        # batch.x is [context ; target] along the point axis.
        num_ctx = batch.xc.shape[-2]
        ctx_ll, tar_ll = ll[...,:num_ctx], ll[...,num_ctx:]
        if reduce_ll:
            ctx_ll, tar_ll = ctx_ll.mean(), tar_ll.mean()
        outs.ctx_ll = ctx_ll
        outs.tar_ll = tar_ll
        return outs
| 1,886 | 27.590909 | 68 | py |
bnp | bnp-master/regression/models/bnp.py | import torch
import torch.nn as nn
from attrdict import AttrDict
from models.cnp import CNP
from utils.misc import stack, logmeanexp
from utils.sampling import sample_with_replacement as SWR, sample_subset
class BNP(CNP):
    """Bootstrapping Neural Process: a CNP whose decoder also receives a
    bootstrap context built from residual-resampled pseudo data.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Extra decoder input for the bootstrap encoding. NOTE(review):
        # assumes dim_hid is passed as a keyword argument (KeyError otherwise).
        self.dec.add_ctx(2*kwargs['dim_hid'])

    def encode(self, xc, yc, xt, mask=None):
        # Concatenate both pooled encodings and repeat across target points.
        encoded = torch.cat([
            self.enc1(xc, yc, mask=mask),
            self.enc2(xc, yc, mask=mask)], -1)
        return stack(encoded, xt.shape[-2], -2)

    def predict(self, xc, yc, xt, num_samples=None, return_base=False):
        """Bootstrap prediction: resample context residuals (no grad), form
        pseudo-contexts, and decode targets conditioned on both the base
        encoding and the bootstrap encoding."""
        with torch.no_grad():
            # Paired bootstrap: resample (x, y) context pairs with replacement.
            bxc, byc = SWR(xc, yc, num_samples=num_samples)
            sxc, syc = stack(xc, num_samples), stack(yc, num_samples)

            encoded = self.encode(bxc, byc, sxc)
            py_res = self.dec(encoded, sxc)

            # Residual bootstrap: resample standardized residuals, recenter
            # them, and build pseudo-observations byc = mu + sigma * res.
            mu, sigma = py_res.mean, py_res.scale
            res = SWR((syc - mu)/sigma).detach()
            res = (res - res.mean(-2, keepdim=True))

            bxc = sxc
            byc = mu + sigma * res

        # Base path: encode the real context.
        encoded_base = self.encode(xc, yc, xt)

        sxt = stack(xt, num_samples)
        encoded_bs = self.encode(bxc, byc, sxt)

        # Decoder sees the base encoding plus the bootstrap encoding as ctx.
        py = self.dec(stack(encoded_base, num_samples),
                sxt, ctx=encoded_bs)

        if self.training or return_base:
            py_base = self.dec(encoded_base, xt)
            return py_base, py
        else:
            return py

    def forward(self, batch, num_samples=None, reduce_ll=True):
        outs = AttrDict()

        def compute_ll(py, y):
            ll = py.log_prob(y).sum(-1)
            # 3-D ll carries a bootstrap-sample axis: average in prob space.
            if ll.dim() == 3 and reduce_ll:
                ll = logmeanexp(ll)
            return ll

        if self.training:
            py_base, py = self.predict(batch.xc, batch.yc, batch.x,
                    num_samples=num_samples)
            outs.ll_base = compute_ll(py_base, batch.y).mean()
            outs.ll = compute_ll(py, batch.y).mean()
            # Train both the base path and the bootstrap-conditioned path.
            outs.loss = -outs.ll_base - outs.ll
        else:
            py = self.predict(batch.xc, batch.yc, batch.x,
                    num_samples=num_samples)
            ll = compute_ll(py, batch.y)

            # batch.x is [context ; target] along the point axis.
            num_ctx = batch.xc.shape[-2]
            if reduce_ll:
                outs.ctx_ll = ll[...,:num_ctx].mean()
                outs.tar_ll = ll[...,num_ctx:].mean()
            else:
                outs.ctx_ll = ll[...,:num_ctx]
                outs.tar_ll = ll[...,num_ctx:]

        return outs
| 2,527 | 31.410256 | 72 | py |
bnp | bnp-master/regression/models/np.py | import torch
import torch.nn as nn
from torch.distributions import kl_divergence
from attrdict import AttrDict
from utils.misc import stack, logmeanexp
from utils.sampling import sample_subset
from models.modules import PoolingEncoder, Decoder
class NP(nn.Module):
    """Neural Process.

    A deterministic pooled path (``denc``) plus a global latent path
    (``lenc``); the decoder consumes both and returns a Normal predictive
    distribution.
    """

    def __init__(self,
            dim_x=1,
            dim_y=1,
            dim_hid=128,
            dim_lat=128,
            enc_pre_depth=4,
            enc_post_depth=2,
            dec_depth=3):
        super().__init__()

        # Deterministic path: pooled context encoding.
        self.denc = PoolingEncoder(
                dim_x=dim_x,
                dim_y=dim_y,
                dim_hid=dim_hid,
                pre_depth=enc_pre_depth,
                post_depth=enc_post_depth)

        # Latent path: pooled encoder producing a Normal over a global z.
        self.lenc = PoolingEncoder(
                dim_x=dim_x,
                dim_y=dim_y,
                dim_hid=dim_hid,
                dim_lat=dim_lat,
                pre_depth=enc_pre_depth,
                post_depth=enc_post_depth)

        self.dec = Decoder(
                dim_x=dim_x,
                dim_y=dim_y,
                dim_enc=dim_hid+dim_lat,
                dim_hid=dim_hid,
                depth=dec_depth)

    def predict(self, xc, yc, xt, z=None, num_samples=None):
        """Predictive distribution at targets ``xt`` given context ``(xc, yc)``.

        If ``z`` is None it is sampled from the context-conditioned prior;
        ``num_samples`` controls how many latent samples are drawn/stacked.
        """
        theta = stack(self.denc(xc, yc), num_samples)
        if z is None:
            pz = self.lenc(xc, yc)
            z = pz.rsample() if num_samples is None \
                    else pz.rsample([num_samples])
        encoded = torch.cat([theta, z], -1)
        # Broadcast the global encoding across all target points (dim -2).
        encoded = stack(encoded, xt.shape[-2], -2)
        return self.dec(encoded, stack(xt, num_samples))

    def forward(self, batch, num_samples=None, reduce_ll=True):
        """Training: ELBO (or multi-sample bound); eval: context/target lls."""
        outs = AttrDict()
        if self.training:
            pz = self.lenc(batch.xc, batch.yc)
            qz = self.lenc(batch.x, batch.y)
            z = qz.rsample() if num_samples is None else \
                    qz.rsample([num_samples])
            py = self.predict(batch.xc, batch.yc, batch.x,
                    z=z, num_samples=num_samples)

            # Fix: guard against num_samples=None before the `> 1` comparison
            # (`None > 1` raises TypeError in Python 3). With None or a
            # single sample we fall back to the standard ELBO below.
            if num_samples is not None and num_samples > 1:
                # K * B * N
                recon = py.log_prob(stack(batch.y, num_samples)).sum(-1)
                # K * B
                log_qz = qz.log_prob(z).sum(-1)
                log_pz = pz.log_prob(z).sum(-1)
                # K * B importance weights for the multi-sample bound
                log_w = recon.sum(-1) + log_pz - log_qz
                outs.loss = -logmeanexp(log_w).mean() / batch.x.shape[-2]
            else:
                outs.recon = py.log_prob(batch.y).sum(-1).mean()
                outs.kld = kl_divergence(qz, pz).sum(-1).mean()
                outs.loss = -outs.recon + outs.kld / batch.x.shape[-2]
        else:
            py = self.predict(batch.xc, batch.yc, batch.x, num_samples=num_samples)
            if num_samples is None:
                ll = py.log_prob(batch.y).sum(-1)
            else:
                y = torch.stack([batch.y]*num_samples)
                if reduce_ll:
                    # Average predictive probability over latent samples.
                    ll = logmeanexp(py.log_prob(y).sum(-1))
                else:
                    ll = py.log_prob(y).sum(-1)
            # batch.x is [context ; target] along the point axis.
            num_ctx = batch.xc.shape[-2]
            if reduce_ll:
                outs.ctx_ll = ll[...,:num_ctx].mean()
                outs.tar_ll = ll[...,num_ctx:].mean()
            else:
                outs.ctx_ll = ll[...,:num_ctx]
                outs.tar_ll = ll[...,num_ctx:]
        return outs
| 3,352 | 33.214286 | 83 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.