repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
cGAN-KD | cGAN-KD-main/CIFAR-100/RepDistiller/distiller_zoo/PKT.py | from __future__ import print_function
import torch
import torch.nn as nn
class PKT(nn.Module):
    """Probabilistic Knowledge Transfer for deep representation learning.

    Matches the student's and teacher's pairwise cosine-similarity
    distributions with a KL divergence.
    Code from author: https://github.com/passalis/probabilistic_kt
    """

    def __init__(self):
        super(PKT, self).__init__()

    def forward(self, f_s, f_t):
        return self.cosine_similarity_loss(f_s, f_t)

    @staticmethod
    def cosine_similarity_loss(output_net, target_net, eps=0.0000001):
        def _unit_rows(x):
            # L2-normalise every row; all-zero rows would produce NaNs,
            # which are zeroed out afterwards.
            row_norm = torch.sqrt(torch.sum(x ** 2, dim=1, keepdim=True))
            x = x / (row_norm + eps)
            x[x != x] = 0  # NaN -> 0
            return x

        output_net = _unit_rows(output_net)
        target_net = _unit_rows(target_net)

        # Pairwise cosine similarities within the batch.
        model_similarity = torch.mm(output_net, output_net.transpose(0, 1))
        target_similarity = torch.mm(target_net, target_net.transpose(0, 1))

        # Map similarities from [-1, 1] into [0, 1].
        model_similarity = (model_similarity + 1.0) / 2.0
        target_similarity = (target_similarity + 1.0) / 2.0

        # Row-normalise into conditional probability distributions.
        model_similarity = model_similarity / torch.sum(model_similarity, dim=1, keepdim=True)
        target_similarity = target_similarity / torch.sum(target_similarity, dim=1, keepdim=True)

        # KL divergence between teacher and student distributions.
        return torch.mean(target_similarity * torch.log((target_similarity + eps) / (model_similarity + eps)))
| 1,675 | 37.976744 | 110 | py |
cGAN-KD | cGAN-KD-main/CIFAR-100/RepDistiller/distiller_zoo/SP.py | from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
class Similarity(nn.Module):
    """Similarity-Preserving Knowledge Distillation, ICCV2019, verified by original author"""

    def __init__(self):
        super(Similarity, self).__init__()

    def forward(self, g_s, g_t):
        # One loss term per (student, teacher) feature pair.
        return [self.similarity_loss(f_s, f_t) for f_s, f_t in zip(g_s, g_t)]

    def similarity_loss(self, f_s, f_t):
        batch = f_s.shape[0]
        student = f_s.view(batch, -1)
        teacher = f_t.view(batch, -1)

        # Batch-wise Gram (similarity) matrices, row-normalised.
        sim_s = torch.nn.functional.normalize(torch.mm(student, torch.t(student)))
        sim_t = torch.nn.functional.normalize(torch.mm(teacher, torch.t(teacher)))

        # Mean squared difference over all batch pairs.
        diff = sim_t - sim_s
        return (diff * diff).view(-1, 1).sum(0) / (batch * batch)
| 908 | 28.322581 | 93 | py |
cGAN-KD | cGAN-KD-main/CIFAR-100/RepDistiller/distiller_zoo/AT.py | from __future__ import print_function
import torch.nn as nn
import torch.nn.functional as F
class Attention(nn.Module):
    """Paying More Attention to Attention: Improving the Performance of Convolutional Neural Networks
    via Attention Transfer
    code: https://github.com/szagoruyko/attention-transfer"""

    def __init__(self, p=2):
        super(Attention, self).__init__()
        self.p = p  # power used when collapsing channels into attention maps

    def forward(self, g_s, g_t):
        return [self.at_loss(f_s, f_t) for f_s, f_t in zip(g_s, g_t)]

    def at_loss(self, f_s, f_t):
        # Pool the higher-resolution map so both share the same spatial size.
        h_s, h_t = f_s.shape[2], f_t.shape[2]
        if h_s > h_t:
            f_s = F.adaptive_avg_pool2d(f_s, (h_t, h_t))
        elif h_t > h_s:
            f_t = F.adaptive_avg_pool2d(f_t, (h_s, h_s))
        return (self.at(f_s) - self.at(f_t)).pow(2).mean()

    def at(self, f):
        # Channel-aggregated, L2-normalised spatial attention map.
        return F.normalize(f.pow(self.p).mean(1).view(f.size(0), -1))
| 930 | 30.033333 | 101 | py |
cGAN-KD | cGAN-KD-main/CIFAR-100/RepDistiller/distiller_zoo/FitNet.py | from __future__ import print_function
import torch.nn as nn
class HintLoss(nn.Module):
    """Fitnets: hints for thin deep nets, ICLR 2015"""

    def __init__(self):
        super(HintLoss, self).__init__()
        # Plain mean-squared error between student and teacher features.
        self.crit = nn.MSELoss()

    def forward(self, f_s, f_t):
        return self.crit(f_s, f_t)
| 332 | 21.2 | 54 | py |
cGAN-KD | cGAN-KD-main/CIFAR-100/RepDistiller/distiller_zoo/KDSVD.py | from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
class KDSVD(nn.Module):
    """
    Self-supervised Knowledge Distillation using Singular Value Decomposition
    original Tensorflow code: https://github.com/sseung0703/SSKD_SVD
    """
    def __init__(self, k=1):
        super(KDSVD, self).__init__()
        self.k = k  # number of leading singular vectors kept from the teacher

    def forward(self, g_s, g_t):
        # v_sb / v_tb carry the previous layer's (student / teacher) right
        # singular vectors so consecutive layers are compared via an RBF map.
        v_sb = None
        v_tb = None
        losses = []
        for i, f_s, f_t in zip(range(len(g_s)), g_s, g_t):
            u_t, s_t, v_t = self.svd(f_t, self.k)
            # the student keeps k+3 components before sign alignment
            u_s, s_s, v_s = self.svd(f_s, self.k + 3)
            v_s, v_t = self.align_rsv(v_s, v_t)
            s_t = s_t.unsqueeze(1)
            # scale both sides by the teacher's singular values
            v_t = v_t * s_t
            v_s = v_s * s_t

            if i > 0:
                # RBF similarity between this layer's and the previous
                # layer's scaled singular vectors; teacher side is detached.
                s_rbf = torch.exp(-(v_s.unsqueeze(2) - v_sb.unsqueeze(1)).pow(2) / 8)
                t_rbf = torch.exp(-(v_t.unsqueeze(2) - v_tb.unsqueeze(1)).pow(2) / 8)

                l2loss = (s_rbf - t_rbf.detach()).pow(2)
                # zero out NaN/Inf entries produced by degenerate SVDs
                l2loss = torch.where(torch.isfinite(l2loss), l2loss, torch.zeros_like(l2loss))
                losses.append(l2loss.sum())

            v_tb = v_t
            v_sb = v_s

        bsz = g_s[0].shape[0]
        losses = [l / bsz for l in losses]
        return losses

    def svd(self, feat, n=1):
        size = feat.shape
        assert len(size) == 4
        # NOTE(review): flattens with size[2] * size[2], i.e. assumes square
        # feature maps (H == W) — confirm before using non-square inputs.
        x = feat.view(-1, size[1], size[2] * size[2]).transpose(-2, -1)
        u, s, v = torch.svd(x)
        u = self.removenan(u)
        s = self.removenan(s)
        v = self.removenan(v)
        if n > 0:
            # keep the n leading components, unit-normalised per sample
            u = F.normalize(u[:, :, :n], dim=1)
            s = F.normalize(s[:, :n], dim=1)
            v = F.normalize(v[:, :, :n], dim=1)
        return u, s, v

    @staticmethod
    def removenan(x):
        # Replace NaN/Inf entries (from degenerate SVD output) with zeros.
        x = torch.where(torch.isfinite(x), x, torch.zeros_like(x))
        return x

    @staticmethod
    def align_rsv(a, b):
        # SVD is only unique up to sign/ordering: build a mask that keeps,
        # for each of b's vectors, the best-matching vector of a with the
        # matching sign, and project a through it. b is returned unchanged.
        cosine = torch.matmul(a.transpose(-2, -1), b)
        max_abs_cosine, _ = torch.max(torch.abs(cosine), 1, keepdim=True)
        mask = torch.where(torch.eq(max_abs_cosine, torch.abs(cosine)),
                           torch.sign(cosine), torch.zeros_like(cosine))
        a = torch.matmul(a, mask)
        return a, b
| 2,275 | 28.947368 | 94 | py |
cGAN-KD | cGAN-KD-main/CIFAR-100/RepDistiller/distiller_zoo/RKD.py | from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
class RKDLoss(nn.Module):
    """Relational Knowledge Distillation, CVPR 2019.

    Sum of a pairwise-distance term (weight ``w_d``) and a triplet-angle
    term (weight ``w_a``) computed on flattened features.
    """

    def __init__(self, w_d=25, w_a=50):
        super(RKDLoss, self).__init__()
        self.w_d = w_d
        self.w_a = w_a

    def forward(self, f_s, f_t):
        student = f_s.view(f_s.shape[0], -1)
        teacher = f_t.view(f_t.shape[0], -1)

        # --- distance term: match mean-normalised pairwise distances ---
        with torch.no_grad():
            t_d = self.pdist(teacher, squared=False)
            t_d = t_d / t_d[t_d > 0].mean()

        d = self.pdist(student, squared=False)
        d = d / d[d > 0].mean()
        loss_d = F.smooth_l1_loss(d, t_d)

        # --- angle term: match cosines of all triplet angles ---
        with torch.no_grad():
            td = teacher.unsqueeze(0) - teacher.unsqueeze(1)
            norm_td = F.normalize(td, p=2, dim=2)
            t_angle = torch.bmm(norm_td, norm_td.transpose(1, 2)).view(-1)

        sd = student.unsqueeze(0) - student.unsqueeze(1)
        norm_sd = F.normalize(sd, p=2, dim=2)
        s_angle = torch.bmm(norm_sd, norm_sd.transpose(1, 2)).view(-1)
        loss_a = F.smooth_l1_loss(s_angle, t_angle)

        return self.w_d * loss_d + self.w_a * loss_a

    @staticmethod
    def pdist(e, squared=False, eps=1e-12):
        # Pairwise Euclidean distances via the Gram-matrix identity,
        # clamped for stability; the diagonal is forced to exactly zero.
        e_square = e.pow(2).sum(dim=1)
        prod = e @ e.t()
        res = (e_square.unsqueeze(1) + e_square.unsqueeze(0) - 2 * prod).clamp(min=eps)

        if not squared:
            res = res.sqrt()

        res = res.clone()
        res[range(len(e)), range(len(e))] = 0
        return res
| 1,681 | 27.508475 | 87 | py |
cGAN-KD | cGAN-KD-main/CIFAR-100/RepDistiller/distiller_zoo/VID.py | from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
class VIDLoss(nn.Module):
    """Variational Information Distillation for Knowledge Transfer (CVPR 2019),
    code from author: https://github.com/ssahn0215/variational-information-distillation"""

    def __init__(self,
                 num_input_channels,
                 num_mid_channel,
                 num_target_channels,
                 init_pred_var=5.0,
                 eps=1e-5):
        super(VIDLoss, self).__init__()

        def conv1x1(in_channels, out_channels, stride=1):
            # 1x1 convolution used as a channel projection.
            return nn.Conv2d(
                in_channels, out_channels,
                kernel_size=1, padding=0,
                bias=False, stride=stride)

        # Predicts the teacher feature's mean from the student feature.
        self.regressor = nn.Sequential(
            conv1x1(num_input_channels, num_mid_channel),
            nn.ReLU(),
            conv1x1(num_mid_channel, num_mid_channel),
            nn.ReLU(),
            conv1x1(num_mid_channel, num_target_channels),
        )
        # Per-channel log-scale; softplus(log_scale) + eps gives the
        # predicted variance (initialised so it equals init_pred_var).
        self.log_scale = torch.nn.Parameter(
            np.log(np.exp(init_pred_var - eps) - 1.0) * torch.ones(num_target_channels)
        )
        self.eps = eps

    def forward(self, input, target):
        # Pool the larger feature map so spatial dimensions agree.
        s_H, t_H = input.shape[2], target.shape[2]
        if s_H > t_H:
            input = F.adaptive_avg_pool2d(input, (t_H, t_H))
        elif s_H < t_H:
            target = F.adaptive_avg_pool2d(target, (s_H, s_H))

        pred_mean = self.regressor(input)
        # softplus keeps the predicted variance strictly positive
        pred_var = torch.log(1.0 + torch.exp(self.log_scale)) + self.eps
        pred_var = pred_var.view(1, -1, 1, 1)
        # Gaussian negative log-likelihood (up to additive constants).
        neg_log_prob = 0.5 * (
            (pred_mean - target) ** 2 / pred_var + torch.log(pred_var)
        )
        return torch.mean(neg_log_prob)
| 1,862 | 32.872727 | 90 | py |
cGAN-KD | cGAN-KD-main/CIFAR-100/RepDistiller/distiller_zoo/CC.py | from __future__ import print_function
import torch
import torch.nn as nn
class Correlation(nn.Module):
    """Correlation Congruence for Knowledge Distillation, ICCV 2019.
    The authors nicely shared the code with me. I restructured their code to be
    compatible with my running framework. Credits go to the original author"""

    def __init__(self):
        super(Correlation, self).__init__()

    def forward(self, f_s, f_t):
        # Product of absolute differences of consecutive sample pairs,
        # summed over features and averaged over the batch.
        abs_diff = (f_s - f_t).abs()
        return torch.mean((abs_diff[:-1] * abs_diff[1:]).sum(1))
# class Correlation(nn.Module):
# """Similarity-preserving loss. My origianl own reimplementation
# based on the paper before emailing the original authors."""
# def __init__(self):
# super(Correlation, self).__init__()
#
# def forward(self, f_s, f_t):
# return self.similarity_loss(f_s, f_t)
# # return [self.similarity_loss(f_s, f_t) for f_s, f_t in zip(g_s, g_t)]
#
# def similarity_loss(self, f_s, f_t):
# bsz = f_s.shape[0]
# f_s = f_s.view(bsz, -1)
# f_t = f_t.view(bsz, -1)
#
# G_s = torch.mm(f_s, torch.t(f_s))
# G_s = G_s / G_s.norm(2)
# G_t = torch.mm(f_t, torch.t(f_t))
# G_t = G_t / G_t.norm(2)
#
# G_diff = G_t - G_s
# loss = (G_diff * G_diff).view(-1, 1).sum(0) / (bsz * bsz)
# return loss
| 1,384 | 31.209302 | 81 | py |
cGAN-KD | cGAN-KD-main/CIFAR-100/RepDistiller/distiller_zoo/FT.py | from __future__ import print_function
import torch.nn as nn
import torch.nn.functional as F
class FactorTransfer(nn.Module):
    """Paraphrasing Complex Network: Network Compression via Factor Transfer, NeurIPS 2018"""

    def __init__(self, p1=2, p2=1):
        super(FactorTransfer, self).__init__()
        self.p1 = p1  # power applied inside the factor computation
        self.p2 = p2  # norm comparing factors: 1 -> L1, otherwise p2-power mean

    def forward(self, f_s, f_t):
        return self.factor_loss(f_s, f_t)

    def factor_loss(self, f_s, f_t):
        # Average-pool the larger map so spatial sizes match.
        s_H, t_H = f_s.shape[2], f_t.shape[2]
        if s_H > t_H:
            f_s = F.adaptive_avg_pool2d(f_s, (t_H, t_H))
        elif s_H < t_H:
            f_t = F.adaptive_avg_pool2d(f_t, (s_H, s_H))
        diff = self.factor(f_s) - self.factor(f_t)
        if self.p2 == 1:
            return diff.abs().mean()
        return diff.pow(self.p2).mean()

    def factor(self, f):
        # L2-normalised, channel-averaged power map.
        return F.normalize(f.pow(self.p1).mean(1).view(f.size(0), -1))
| 981 | 29.6875 | 93 | py |
cGAN-KD | cGAN-KD-main/CIFAR-100/RepDistiller/distiller_zoo/KD.py | from __future__ import print_function
import torch.nn as nn
import torch.nn.functional as F
class DistillKL(nn.Module):
    """Distilling the Knowledge in a Neural Network (Hinton et al.).

    KL divergence between temperature-softened student and teacher logits,
    scaled by T**2 (the standard KD gradient correction) and averaged over
    the batch.

    Args:
        T: softmax temperature.
    """

    def __init__(self, T):
        super(DistillKL, self).__init__()
        self.T = T

    def forward(self, y_s, y_t):
        p_s = F.log_softmax(y_s / self.T, dim=1)
        p_t = F.softmax(y_t / self.T, dim=1)
        # reduction='sum' replaces the long-deprecated size_average=False
        # (identical behaviour); dividing by batch size yields a per-sample
        # mean of the summed KL.
        loss = F.kl_div(p_s, p_t, reduction='sum') * (self.T ** 2) / y_s.shape[0]
        return loss
| 493 | 26.444444 | 82 | py |
cGAN-KD | cGAN-KD-main/CIFAR-100/RepDistiller/distiller_zoo/AB.py | from __future__ import print_function
import torch
import torch.nn as nn
class ABLoss(nn.Module):
    """Knowledge Transfer via Distillation of Activation Boundaries Formed by Hidden Neurons
    code: https://github.com/bhheo/AB_distillation
    """

    def __init__(self, feat_num, margin=1.0):
        super(ABLoss, self).__init__()
        # Geometric weights: deeper features get exponentially more weight.
        self.w = [2 ** (i - feat_num + 1) for i in range(feat_num)]
        self.margin = margin

    def forward(self, g_s, g_t):
        bsz = g_s[0].shape[0]
        weighted = [
            w * self.criterion_alternative_l2(s, t)
            for w, (s, t) in zip(self.w, zip(g_s, g_t))
        ]
        # Normalise by batch size and the author's fixed 3/1000 scale.
        return [l / bsz / 1000 * 3 for l in weighted]

    def criterion_alternative_l2(self, source, target):
        # Penalise student activations on the wrong side of the +/-margin
        # boundary defined by the sign of the teacher's activation.
        pos = (source + self.margin) ** 2 * ((source > -self.margin) & (target <= 0)).float()
        neg = (source - self.margin) ** 2 * ((source <= self.margin) & (target > 0)).float()
        return torch.abs(pos + neg).sum()
| 1,100 | 35.7 | 97 | py |
cGAN-KD | cGAN-KD-main/CIFAR-100/RepDistiller/distiller_zoo/FSP.py | from __future__ import print_function
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
class FSP(nn.Module):
    """A Gift from Knowledge Distillation:
    Fast Optimization, Network Minimization and Transfer Learning"""

    def __init__(self, s_shapes, t_shapes):
        super(FSP, self).__init__()
        assert len(s_shapes) == len(t_shapes), 'unequal length of feat list'
        # FSP matrices are only comparable when channel counts agree.
        s_channels = [shape[1] for shape in s_shapes]
        t_channels = [shape[1] for shape in t_shapes]
        if np.any(np.asarray(s_channels) != np.asarray(t_channels)):
            raise ValueError('num of channels not equal (error in FSP)')

    def forward(self, g_s, g_t):
        s_fsp = self.compute_fsp(g_s)
        t_fsp = self.compute_fsp(g_t)
        return [self.compute_loss(s, t) for s, t in zip(s_fsp, t_fsp)]

    @staticmethod
    def compute_loss(s, t):
        return (s - t).pow(2).mean()

    @staticmethod
    def compute_fsp(g):
        # FSP matrix for each pair of consecutive feature maps.
        fsp_list = []
        for bot, top in zip(g[:-1], g[1:]):
            # pool the higher-resolution map down to the smaller size
            b_H, t_H = bot.shape[2], top.shape[2]
            if b_H > t_H:
                bot = F.adaptive_avg_pool2d(bot, (t_H, t_H))
            elif b_H < t_H:
                top = F.adaptive_avg_pool2d(top, (b_H, b_H))

            bot = bot.unsqueeze(1)
            top = top.unsqueeze(2)
            bot = bot.view(bot.shape[0], bot.shape[1], bot.shape[2], -1)
            top = top.view(top.shape[0], top.shape[1], top.shape[2], -1)

            # inner product over spatial positions, averaged
            fsp_list.append((bot * top).mean(-1))
        return fsp_list
| 1,625 | 32.183673 | 76 | py |
cGAN-KD | cGAN-KD-main/CIFAR-100/RepDistiller/distiller_zoo/NST.py | from __future__ import print_function
import torch.nn as nn
import torch.nn.functional as F
class NSTLoss(nn.Module):
    """like what you like: knowledge distill via neuron selectivity transfer

    Squared maximum-mean-discrepancy (polynomial kernel) between the
    channel-normalised activation patterns of student and teacher.

    Args:
        full_loss: when True (default, matching the original hard-coded
            behaviour) include the teacher-teacher kernel term, which is
            constant w.r.t. the student and only shifts the loss value;
            set False to skip that computation.
    """

    def __init__(self, full_loss=True):
        super(NSTLoss, self).__init__()
        # previously a hard-coded local flag inside nst_loss
        self.full_loss = full_loss

    def forward(self, g_s, g_t):
        return [self.nst_loss(f_s, f_t) for f_s, f_t in zip(g_s, g_t)]

    def nst_loss(self, f_s, f_t):
        # Pool the larger map so spatial sizes match.
        s_H, t_H = f_s.shape[2], f_t.shape[2]
        if s_H > t_H:
            f_s = F.adaptive_avg_pool2d(f_s, (t_H, t_H))
        elif s_H < t_H:
            f_t = F.adaptive_avg_pool2d(f_t, (s_H, s_H))

        # Flatten spatial dims and L2-normalise each channel's activation map.
        f_s = f_s.view(f_s.shape[0], f_s.shape[1], -1)
        f_s = F.normalize(f_s, dim=2)
        f_t = f_t.view(f_t.shape[0], f_t.shape[1], -1)
        f_t = F.normalize(f_t, dim=2)

        if self.full_loss:
            # full squared MMD; the teacher-teacher term carries no student
            # gradient, hence the detach
            return (self.poly_kernel(f_t, f_t).mean().detach() + self.poly_kernel(f_s, f_s).mean()
                    - 2 * self.poly_kernel(f_s, f_t).mean())
        else:
            return self.poly_kernel(f_s, f_s).mean() - 2 * self.poly_kernel(f_s, f_t).mean()

    def poly_kernel(self, a, b):
        # Degree-2 polynomial kernel between all channel pairs.
        a = a.unsqueeze(1)
        b = b.unsqueeze(2)
        res = (a * b).sum(-1).pow(2)
        return res
cGAN-KD | cGAN-KD-main/CIFAR-100/RepDistiller/helper/pretrain.py | from __future__ import print_function, division
import time
import sys
import torch
import torch.optim as optim
import torch.backends.cudnn as cudnn
from .util import AverageMeter
def init(model_s, model_t, init_modules, criterion, train_loader, opt):
    """Pre-train the auxiliary `init_modules` (stage 1 of abound/factor/fsp).

    Student and teacher stay in eval mode; only `init_modules` parameters
    are optimised, with SGD for `opt.init_epochs` epochs on the
    distill-specific initialisation objective.

    Args:
        model_s / model_t: student / teacher networks.
        init_modules: ModuleList of auxiliary modules to optimise.
        criterion: initialisation loss matching `opt.distill`.
        train_loader: training data loader.
        opt: run configuration (distill, init_epochs, learning_rate, ...).
    """
    model_t.eval()
    model_s.eval()
    init_modules.train()

    if torch.cuda.is_available():
        model_s.cuda()
        model_t.cuda()
        init_modules.cuda()
        cudnn.benchmark = True

    # Small ResNet/WRN students use a reduced LR for 'factor' initialisation.
    if opt.model_s in ['resnet8', 'resnet14', 'resnet20', 'resnet32', 'resnet44', 'resnet56', 'resnet110',
                       'resnet8x4', 'resnet32x4', 'wrn_16_1', 'wrn_16_2', 'wrn_40_1', 'wrn_40_2'] and \
            opt.distill == 'factor':
        lr = 0.01
    else:
        lr = opt.learning_rate
    optimizer = optim.SGD(init_modules.parameters(),
                          lr=lr,
                          momentum=opt.momentum,
                          weight_decay=opt.weight_decay)

    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    for epoch in range(1, opt.init_epochs + 1):
        batch_time.reset()
        data_time.reset()
        losses.reset()
        end = time.time()
        for idx, data in enumerate(train_loader):
            # CRD loaders additionally yield contrastive negative indices.
            if opt.distill in ['crd']:
                input, target, index, contrast_idx = data
            else:
                input, target, index = data
            data_time.update(time.time() - end)

            input = input.float()
            if torch.cuda.is_available():
                input = input.cuda()
                target = target.cuda()
                index = index.cuda()
                if opt.distill in ['crd']:
                    contrast_idx = contrast_idx.cuda()

            # ============= forward ==============
            preact = (opt.distill == 'abound')  # abound needs pre-activations
            feat_s, _ = model_s(input, is_feat=True, preact=preact)
            with torch.no_grad():
                feat_t, _ = model_t(input, is_feat=True, preact=preact)
                feat_t = [f.detach() for f in feat_t]

            if opt.distill == 'abound':
                # connector maps student hidden features onto the teacher's
                g_s = init_modules[0](feat_s[1:-1])
                g_t = feat_t[1:-1]
                loss_group = criterion(g_s, g_t)
                loss = sum(loss_group)
            elif opt.distill == 'factor':
                # train the paraphraser to reconstruct the teacher feature
                f_t = feat_t[-2]
                _, f_t_rec = init_modules[0](f_t)
                loss = criterion(f_t_rec, f_t)
            elif opt.distill == 'fsp':
                loss_group = criterion(feat_s[:-1], feat_t[:-1])
                loss = sum(loss_group)
            else:
                # BUGFIX: was `raise NotImplemented(...)` — NotImplemented is
                # a value, not an exception class, and is not even callable.
                raise NotImplementedError('Not supported in init training: {}'.format(opt.distill))

            losses.update(loss.item(), input.size(0))

            # ===================backward=====================
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            batch_time.update(time.time() - end)
            end = time.time()

        # end of epoch
        print('Epoch: [{0}/{1}]\t'
              'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
              'losses: {losses.val:.3f} ({losses.avg:.3f})'.format(
               epoch, opt.init_epochs, batch_time=batch_time, losses=losses))
        sys.stdout.flush()
| 3,305 | 34.170213 | 106 | py |
cGAN-KD | cGAN-KD-main/CIFAR-100/RepDistiller/helper/loops.py | from __future__ import print_function, division
import sys
import time
import torch
from .util import AverageMeter, accuracy
def train_vanilla(epoch, train_loader, model, criterion, optimizer, opt):
    """vanilla training

    One epoch of standard supervised training (no distillation).

    Returns:
        (top1.avg, losses.avg) for the epoch.
    """
    model.train()

    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()

    end = time.time()
    for idx, (input, target) in enumerate(train_loader):
        data_time.update(time.time() - end)

        input = input.float()
        if torch.cuda.is_available():
            input = input.cuda()
            target = target.cuda()
        # loss and accuracy expect integer class labels
        target = target.type(torch.long)

        # ===================forward=====================
        output = model(input)
        loss = criterion(output, target)

        acc1, acc5 = accuracy(output, target, topk=(1, 5))
        losses.update(loss.item(), input.size(0))
        top1.update(acc1[0], input.size(0))
        top5.update(acc5[0], input.size(0))

        # ===================backward=====================
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # ===================meters=====================
        batch_time.update(time.time() - end)
        end = time.time()

        # tensorboard logger
        pass

        # print info
        if idx % opt.print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Acc@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                  'Acc@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                   epoch, idx, len(train_loader), batch_time=batch_time,
                   data_time=data_time, loss=losses, top1=top1, top5=top5))
            sys.stdout.flush()

    print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'
          .format(top1=top1, top5=top5))

    return top1.avg, losses.avg
def train_distill(epoch, train_loader, module_list, criterion_list, optimizer, opt):
    """One epoch distillation

    module_list layout: [model_s, (auxiliary modules...), model_t].
    criterion_list layout: [classification CE, KL divergence, kd-specific].
    Total loss = opt.gamma * cls + opt.alpha * div + opt.beta * kd.

    Returns:
        (top1.avg, losses.avg) for the epoch.
    """
    # set modules as train()
    for module in module_list:
        module.train()
    # set teacher as eval()
    module_list[-1].eval()

    # auxiliary modules pre-trained in the init stage stay frozen
    if opt.distill == 'abound':
        module_list[1].eval()
    elif opt.distill == 'factor':
        module_list[2].eval()

    criterion_cls = criterion_list[0]
    criterion_div = criterion_list[1]
    criterion_kd = criterion_list[2]

    model_s = module_list[0]
    model_t = module_list[-1]

    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()

    end = time.time()
    for idx, data in enumerate(train_loader):
        # CRD loaders additionally yield contrastive negative indices
        if opt.distill in ['crd']:
            input, target, index, contrast_idx = data
        else:
            input, target, index = data
        data_time.update(time.time() - end)

        input = input.float()
        target = target.type(torch.long)
        if torch.cuda.is_available():
            input = input.cuda()
            target = target.cuda()
            index = index.cuda()
            if opt.distill in ['crd']:
                contrast_idx = contrast_idx.cuda()

        # ===================forward=====================
        preact = False
        if opt.distill in ['abound']:
            preact = True  # abound works on pre-activation features
        feat_s, logit_s = model_s(input, is_feat=True, preact=preact)
        with torch.no_grad():
            if opt.model_t[0:8] == "densenet":
                assert opt.distill == 'kd'  ##other KD method does not support densenet
                logit_t = model_t(input)
            else:
                feat_t, logit_t = model_t(input, is_feat=True, preact=preact)
                feat_t = [f.detach() for f in feat_t]

        # cls + kl div
        loss_cls = criterion_cls(logit_s, target)
        loss_div = criterion_div(logit_s, logit_t)

        # other kd beyond KL divergence: each method picks the features it
        # needs from the student/teacher feature lists
        if opt.distill == 'kd':
            loss_kd = 0
        elif opt.distill == 'hint':
            # FitNet: regress one intermediate student layer onto teacher's
            f_s = module_list[1](feat_s[opt.hint_layer])
            f_t = feat_t[opt.hint_layer]
            loss_kd = criterion_kd(f_s, f_t)
        elif opt.distill == 'crd':
            # contrastive loss on penultimate embeddings
            f_s = feat_s[-1]
            f_t = feat_t[-1]
            loss_kd = criterion_kd(f_s, f_t, index, contrast_idx)
        elif opt.distill == 'attention':
            g_s = feat_s[1:-1]
            g_t = feat_t[1:-1]
            loss_group = criterion_kd(g_s, g_t)
            loss_kd = sum(loss_group)
        elif opt.distill == 'nst':
            g_s = feat_s[1:-1]
            g_t = feat_t[1:-1]
            loss_group = criterion_kd(g_s, g_t)
            loss_kd = sum(loss_group)
        elif opt.distill == 'similarity':
            g_s = [feat_s[-2]]
            g_t = [feat_t[-2]]
            loss_group = criterion_kd(g_s, g_t)
            loss_kd = sum(loss_group)
        elif opt.distill == 'rkd':
            f_s = feat_s[-1]
            f_t = feat_t[-1]
            loss_kd = criterion_kd(f_s, f_t)
        elif opt.distill == 'pkt':
            f_s = feat_s[-1]
            f_t = feat_t[-1]
            loss_kd = criterion_kd(f_s, f_t)
        elif opt.distill == 'kdsvd':
            g_s = feat_s[1:-1]
            g_t = feat_t[1:-1]
            loss_group = criterion_kd(g_s, g_t)
            loss_kd = sum(loss_group)
        elif opt.distill == 'correlation':
            # embed both sides into a shared space before comparing
            f_s = module_list[1](feat_s[-1])
            f_t = module_list[2](feat_t[-1])
            loss_kd = criterion_kd(f_s, f_t)
        elif opt.distill == 'vid':
            g_s = feat_s[1:-1]
            g_t = feat_t[1:-1]
            # one VID criterion per layer pair
            loss_group = [c(f_s, f_t) for f_s, f_t, c in zip(g_s, g_t, criterion_kd)]
            loss_kd = sum(loss_group)
        elif opt.distill == 'abound':
            # can also add loss to this stage
            loss_kd = 0
        elif opt.distill == 'fsp':
            # can also add loss to this stage
            loss_kd = 0
        elif opt.distill == 'factor':
            factor_s = module_list[1](feat_s[-2])
            factor_t = module_list[2](feat_t[-2], is_factor=True)
            loss_kd = criterion_kd(factor_s, factor_t)
        else:
            raise NotImplementedError(opt.distill)

        loss = opt.gamma * loss_cls + opt.alpha * loss_div + opt.beta * loss_kd

        acc1, acc5 = accuracy(logit_s, target, topk=(1, 5))
        losses.update(loss.item(), input.size(0))
        top1.update(acc1[0], input.size(0))
        top5.update(acc5[0], input.size(0))

        # ===================backward=====================
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # ===================meters=====================
        batch_time.update(time.time() - end)
        end = time.time()

        # print info
        if idx % opt.print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Acc@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                  'Acc@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                   epoch, idx, len(train_loader), batch_time=batch_time,
                   data_time=data_time, loss=losses, top1=top1, top5=top5))
            sys.stdout.flush()

    print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'
          .format(top1=top1, top5=top5))

    return top1.avg, losses.avg
def validate(val_loader, model, criterion, opt):
    """validation

    Evaluates `model` on `val_loader` without gradient tracking.

    Returns:
        (top1.avg, top5.avg, losses.avg)
    """
    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()

    # switch to evaluate mode
    model.eval()

    with torch.no_grad():
        end = time.time()
        for idx, (input, target) in enumerate(val_loader):
            input = input.float()
            if torch.cuda.is_available():
                input = input.cuda()
                target = target.cuda()

            # compute output
            output = model(input)
            loss = criterion(output, target)

            # measure accuracy and record loss
            acc1, acc5 = accuracy(output, target, topk=(1, 5))
            losses.update(loss.item(), input.size(0))
            top1.update(acc1[0], input.size(0))
            top5.update(acc5[0], input.size(0))

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            if idx % opt.print_freq == 0:
                print('Test: [{0}/{1}]\t'
                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                      'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                      'Acc@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                      'Acc@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                       idx, len(val_loader), batch_time=batch_time, loss=losses,
                       top1=top1, top5=top5))

    print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'
          .format(top1=top1, top5=top5))

    return top1.avg, top5.avg, losses.avg
| 9,306 | 33.343173 | 85 | py |
cGAN-KD | cGAN-KD-main/CIFAR-100/RepDistiller/helper/util.py | from __future__ import print_function
import torch
import numpy as np
def adjust_learning_rate_new(epoch, optimizer, LUT):
    """Set the LR from a lookup table of (max_epoch, lr) pairs (RotNet schedule).

    Picks the lr of the first entry whose max_epoch exceeds `epoch`, falling
    back to the last entry's lr, and writes it into every param group.
    """
    chosen = LUT[-1][1]
    for max_epoch, lr in LUT:
        if max_epoch > epoch:
            chosen = lr
            break
    for group in optimizer.param_groups:
        group['lr'] = chosen
def adjust_learning_rate(epoch, opt, optimizer):
    """Decay the initial LR by `opt.lr_decay_rate` once for every milestone
    in `opt.lr_decay_epochs` that `epoch` has passed; no-op before the first
    milestone."""
    milestones = np.asarray(opt.lr_decay_epochs)
    steps = np.sum(epoch > milestones)
    if steps > 0:
        decayed = opt.learning_rate * (opt.lr_decay_rate ** steps)
        for group in optimizer.param_groups:
            group['lr'] = decayed
class AverageMeter(object):
    """Tracks the latest value plus a running (count-weighted) average."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all statistics back to zero."""
        self.val = 0
        self.sum = 0
        self.count = 0
        self.avg = 0

    def update(self, val, n=1):
        """Record `val` observed `n` times and refresh the average."""
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count
def accuracy(output, target, topk=(1,)):
    """Return top-k accuracies (as percentages) for each k in `topk`.

    Args:
        output: (batch, classes) score tensor.
        target: (batch,) ground-truth class indices.
        topk: iterable of k values.

    Returns:
        List of 1-element tensors, one percentage per requested k.
    """
    with torch.no_grad():
        k_max = max(topk)
        batch = target.size(0)

        # (k_max, batch) after the transpose: row r holds rank-(r+1)
        # predictions for every sample.
        _, top_idx = output.topk(k_max, 1, True, True)
        top_idx = top_idx.t()
        hits = top_idx.eq(target.view(1, -1).expand_as(top_idx))

        percentages = []
        for k in topk:
            n_correct = hits[:k].reshape(-1).float().sum(0, keepdim=True)
            percentages.append(n_correct.mul_(100.0 / batch))
        return percentages
if __name__ == '__main__':
    # Utility module only: no standalone entry point.
    pass
| 1,786 | 26.921875 | 89 | py |
cGAN-KD | cGAN-KD-main/CIFAR-100/RepDistiller/crd/memory.py | import torch
from torch import nn
import math
class ContrastMemory(nn.Module):
    """
    memory buffer that supplies large amount of negative samples.
    """
    def __init__(self, inputSize, outputSize, K, T=0.07, momentum=0.5):
        super(ContrastMemory, self).__init__()
        self.nLem = outputSize          # number of memory entries (dataset size)
        self.unigrams = torch.ones(self.nLem)  # uniform negative-sampling distribution
        self.multinomial = AliasMethod(self.unigrams)
        # NOTE(review): unconditional .cuda() — this module appears to
        # require a CUDA-enabled build; confirm before running on CPU.
        self.multinomial.cuda()
        self.K = K                      # negatives sampled per positive

        # params layout: [K, T, Z_v1, Z_v2, momentum]; Z_* start at -1
        # meaning "normalisation constant not yet estimated".
        self.register_buffer('params', torch.tensor([K, T, -1, -1, momentum]))
        stdv = 1. / math.sqrt(inputSize / 3)
        # one memory bank per view, initialised uniformly in [-stdv, stdv]
        self.register_buffer('memory_v1', torch.rand(outputSize, inputSize).mul_(2 * stdv).add_(-stdv))
        self.register_buffer('memory_v2', torch.rand(outputSize, inputSize).mul_(2 * stdv).add_(-stdv))

    def forward(self, v1, v2, y, idx=None):
        # Scores each view's features against 1 positive (index y) and K
        # negatives drawn from the opposite view's memory bank, then updates
        # both banks with a momentum rule.
        K = int(self.params[0].item())
        T = self.params[1].item()
        Z_v1 = self.params[2].item()
        Z_v2 = self.params[3].item()

        momentum = self.params[4].item()
        batchSize = v1.size(0)
        outputSize = self.memory_v1.size(0)
        inputSize = self.memory_v1.size(1)

        # original score computation
        if idx is None:
            # sample K+1 indices per item; column 0 is overwritten with the
            # true (positive) index
            idx = self.multinomial.draw(batchSize * (self.K + 1)).view(batchSize, -1)
            idx.select(1, 0).copy_(y.data)

        # sample: v2 scored against memory bank of view 1
        weight_v1 = torch.index_select(self.memory_v1, 0, idx.view(-1)).detach()
        weight_v1 = weight_v1.view(batchSize, K + 1, inputSize)
        out_v2 = torch.bmm(weight_v1, v2.view(batchSize, inputSize, 1))
        out_v2 = torch.exp(torch.div(out_v2, T))
        # sample: v1 scored against memory bank of view 2
        weight_v2 = torch.index_select(self.memory_v2, 0, idx.view(-1)).detach()
        weight_v2 = weight_v2.view(batchSize, K + 1, inputSize)
        out_v1 = torch.bmm(weight_v2, v1.view(batchSize, inputSize, 1))
        out_v1 = torch.exp(torch.div(out_v1, T))

        # set Z if haven't been set yet: NCE normalisation constants are
        # estimated once, from the first batch, then frozen
        if Z_v1 < 0:
            self.params[2] = out_v1.mean() * outputSize
            Z_v1 = self.params[2].clone().detach().item()
            print("normalization constant Z_v1 is set to {:.1f}".format(Z_v1))
        if Z_v2 < 0:
            self.params[3] = out_v2.mean() * outputSize
            Z_v2 = self.params[3].clone().detach().item()
            print("normalization constant Z_v2 is set to {:.1f}".format(Z_v2))

        # compute out_v1, out_v2
        out_v1 = torch.div(out_v1, Z_v1).contiguous()
        out_v2 = torch.div(out_v2, Z_v2).contiguous()

        # update memory: momentum blend of old entry and new feature,
        # re-normalised to unit L2 norm
        with torch.no_grad():
            l_pos = torch.index_select(self.memory_v1, 0, y.view(-1))
            l_pos.mul_(momentum)
            l_pos.add_(torch.mul(v1, 1 - momentum))
            l_norm = l_pos.pow(2).sum(1, keepdim=True).pow(0.5)
            updated_v1 = l_pos.div(l_norm)
            self.memory_v1.index_copy_(0, y, updated_v1)

            ab_pos = torch.index_select(self.memory_v2, 0, y.view(-1))
            ab_pos.mul_(momentum)
            ab_pos.add_(torch.mul(v2, 1 - momentum))
            ab_norm = ab_pos.pow(2).sum(1, keepdim=True).pow(0.5)
            updated_v2 = ab_pos.div(ab_norm)
            self.memory_v2.index_copy_(0, y, updated_v2)

        return out_v1, out_v2
class AliasMethod(object):
    """
    From: https://hips.seas.harvard.edu/blog/2013/03/03/the-alias-method-efficient-sampling-with-many-discrete-outcomes/

    Walker's alias method: O(1) sampling from an arbitrary discrete
    distribution after an O(K) table-building step.
    """
    def __init__(self, probs):
        # normalise in place if the probabilities don't sum to <= 1
        if probs.sum() > 1:
            probs.div_(probs.sum())

        K = len(probs)
        self.prob = torch.zeros(K)            # acceptance probability per bucket
        self.alias = torch.LongTensor([0]*K)  # fallback outcome per bucket

        # Sort the data into the outcomes with probabilities
        # that are larger and smaller than 1/K.
        smaller = []
        larger = []
        for kk, prob in enumerate(probs):
            self.prob[kk] = K*prob
            if self.prob[kk] < 1.0:
                smaller.append(kk)
            else:
                larger.append(kk)

        # Loop though and create little binary mixtures that
        # appropriately allocate the larger outcomes over the
        # overall uniform mixture.
        while len(smaller) > 0 and len(larger) > 0:
            small = smaller.pop()
            large = larger.pop()

            self.alias[small] = large
            self.prob[large] = (self.prob[large] - 1.0) + self.prob[small]

            if self.prob[large] < 1.0:
                smaller.append(large)
            else:
                larger.append(large)

        # any leftovers get probability exactly 1 (absorbs numerical slack)
        for last_one in smaller+larger:
            self.prob[last_one] = 1

    def cuda(self):
        # Move the sampling tables to the GPU (requires a CUDA build).
        self.prob = self.prob.cuda()
        self.alias = self.alias.cuda()

    def draw(self, N):
        """ Draw N samples from multinomial """
        K = self.alias.size(0)

        # pick a bucket uniformly at random for each sample
        kk = torch.zeros(N, dtype=torch.long, device=self.prob.device).random_(0, K)
        prob = self.prob.index_select(0, kk)
        alias = self.alias.index_select(0, kk)
        # b is whether a random number is greater than q
        b = torch.bernoulli(prob)
        # keep the bucket itself on acceptance, else its alias
        oq = kk.mul(b.long())
        oj = alias.mul((1-b).long())
        return oq + oj
cGAN-KD | cGAN-KD-main/CIFAR-100/RepDistiller/crd/criterion.py | import torch
from torch import nn
from .memory import ContrastMemory
eps = 1e-7
class CRDLoss(nn.Module):
    """CRD Loss function
    includes two symmetric parts:
    (a) using teacher as anchor, choose positive and negatives over the student side
    (b) using student as anchor, choose positive and negatives over the teacher side
    Args:
        opt.s_dim: the dimension of student's feature
        opt.t_dim: the dimension of teacher's feature
        opt.feat_dim: the dimension of the projection space
        opt.nce_k: number of negatives paired with each positive
        opt.nce_t: the temperature
        opt.nce_m: the momentum for updating the memory buffer
        opt.n_data: the number of samples in the training set, therefor the memory buffer is: opt.n_data x opt.feat_dim
    """
    def __init__(self, opt):
        super(CRDLoss, self).__init__()
        # project both feature spaces into a shared feat_dim embedding
        self.embed_s = Embed(opt.s_dim, opt.feat_dim)
        self.embed_t = Embed(opt.t_dim, opt.feat_dim)
        # shared memory bank supplying negatives for both directions
        self.contrast = ContrastMemory(opt.feat_dim, opt.n_data, opt.nce_k, opt.nce_t, opt.nce_m)
        self.criterion_t = ContrastLoss(opt.n_data)
        self.criterion_s = ContrastLoss(opt.n_data)

    def forward(self, f_s, f_t, idx, contrast_idx=None):
        """
        Args:
            f_s: the feature of student network, size [batch_size, s_dim]
            f_t: the feature of teacher network, size [batch_size, t_dim]
            idx: the indices of these positive samples in the dataset, size [batch_size]
            contrast_idx: the indices of negative samples, size [batch_size, nce_k]
        Returns:
            The contrastive loss
        """
        f_s = self.embed_s(f_s)
        f_t = self.embed_t(f_t)
        out_s, out_t = self.contrast(f_s, f_t, idx, contrast_idx)
        # symmetric NCE terms: student-anchored and teacher-anchored
        s_loss = self.criterion_s(out_s)
        t_loss = self.criterion_t(out_t)
        loss = s_loss + t_loss
        return loss
class ContrastLoss(nn.Module):
    """
    contrastive loss, corresponding to Eq (18)

    Input x has shape [batch, m+1]: column 0 is the positive pair's score,
    columns 1..m are the m negatives. Uses the module-level ``eps`` constant
    for numerical stability.
    """
    def __init__(self, n_data):
        super(ContrastLoss, self).__init__()
        # n_data: dataset size, defines the uniform noise distribution Pn
        self.n_data = n_data
    def forward(self, x):
        bsz = x.shape[0]
        m = x.size(1) - 1
        # noise distribution
        Pn = 1 / float(self.n_data)
        # loss for positive pair
        P_pos = x.select(1, 0)
        # log P(D=1 | pos) = log( P_pos / (P_pos + m*Pn) )
        log_D1 = torch.div(P_pos, P_pos.add(m * Pn + eps)).log_()
        # loss for K negative pair
        P_neg = x.narrow(1, 1, m)
        # log P(D=0 | neg) = log( m*Pn / (P_neg + m*Pn) )
        log_D0 = torch.div(P_neg.clone().fill_(m * Pn), P_neg.add(m * Pn + eps)).log_()
        # negative log-likelihood averaged over the batch
        loss = - (log_D1.sum(0) + log_D0.view(-1, 1).sum(0)) / bsz
        return loss
class Embed(nn.Module):
    """Flatten -> linear projection -> L2-normalize onto the unit sphere."""

    def __init__(self, dim_in=1024, dim_out=128):
        super(Embed, self).__init__()
        self.linear = nn.Linear(dim_in, dim_out)
        self.l2norm = Normalize(2)

    def forward(self, x):
        # Collapse everything after the batch dimension before projecting.
        flat = x.view(x.shape[0], -1)
        return self.l2norm(self.linear(flat))
class Normalize(nn.Module):
    """Lp normalization layer: scales each row of the input to unit Lp norm."""

    def __init__(self, power=2):
        super(Normalize, self).__init__()
        self.power = power

    def forward(self, x):
        # ||x||_p per row, kept as a column so division broadcasts.
        denom = x.pow(self.power).sum(1, keepdim=True).pow(1.0 / self.power)
        return x / denom
| 3,309 | 31.135922 | 119 | py |
CodeMixedTreebank | CodeMixedTreebank-master/UDParser/driver/TrainTest.py | import sys
sys.path.extend(["../../","../","./"])
import time
import torch.optim.lr_scheduler
import torch.nn as nn
import random
import argparse
from driver.Config import *
from driver.Model import *
from driver.Parser import *
from data.Dataloader import *
import pickle
def train(data, parser, vocab, config, others):
    """Train the biaffine parser with gradient accumulation and periodic eval.

    data: list of sentences from read_corpus; parser: BiaffineParser;
    others: extra test files to evaluate alongside dev/test. Saves the model
    whenever dev UAS improves (after config.save_after iterations).
    """
    optimizer = Optimizer(filter(lambda p: p.requires_grad, parser.model.parameters()), config)
    global_step = 0
    best_UAS = 0
    batch_num = int(np.ceil(len(data) / float(config.train_batch_size)))
    for iter in range(config.train_iters):
        start_time = time.time()
        print('Iteration: ' + str(iter))
        batch_iter = 0
        overall_arc_correct, overall_label_correct, overall_total_arcs = 0, 0, 0
        for onebatch in data_iter(data, config.train_batch_size, True):
            clusters, extwords, tags, heads, rels, lengths, masks = \
                batch_data_variable(onebatch, vocab)
            parser.model.train()
            parser.forward(clusters, extwords, tags, masks)
            # loss is divided by update_every so accumulated grads average out
            loss = parser.compute_hierarch_loss(heads, rels, lengths)
            loss = loss / config.update_every
            loss_value = loss.detach().cpu().numpy()
            loss.backward()
            arc_correct, label_correct, total_arcs = parser.compute_accuracy(heads, rels)
            overall_arc_correct += arc_correct
            overall_label_correct += label_correct
            overall_total_arcs += total_arcs
            uas = overall_arc_correct * 100.0 / overall_total_arcs
            las = overall_label_correct * 100.0 / overall_total_arcs
            during_time = float(time.time() - start_time)
            print("Step:%d, ARC:%.2f, REL:%.2f, Iter:%d, batch:%d, length:%d,time:%.2f, loss:%.2f" \
                %(global_step, uas, las, iter, batch_iter, overall_total_arcs, during_time, loss_value))
            batch_iter += 1
            # Gradient accumulation: step only every update_every batches
            # (or at the end of the epoch).
            if batch_iter % config.update_every == 0 or batch_iter == batch_num:
                nn.utils.clip_grad_norm_(filter(lambda p: p.requires_grad, parser.model.parameters()), \
                    max_norm=config.clip)
                optimizer.step()
                parser.model.zero_grad()
                global_step += 1
            if batch_iter % config.validate_every == 0 or batch_iter == batch_num:
                arc_correct, rel_correct, arc_total, dev_uas, dev_las = \
                    evaluate(config.dev_file, parser, vocab, global_step)
                print("Dev %s: uas = %d/%d = %.2f, las = %d/%d =%.2f, (%.2f\t%.2f)" % \
                  (config.dev_file, arc_correct, arc_total, dev_uas, rel_correct, arc_total, dev_las, dev_uas, dev_las))
                arc_correct, rel_correct, arc_total, test_uas, test_las = \
                    evaluate(config.test_file, parser, vocab, global_step)
                print("Test %s: uas = %d/%d = %.2f, las = %d/%d =%.2f, (%.2f\t%.2f)" % \
                  (config.test_file, arc_correct, arc_total, test_uas, rel_correct, arc_total, test_las, test_uas, test_las))
                for otherFile in others:
                    arc_correct, rel_correct, arc_total, test_uas, test_las = \
                        evaluate(otherFile, parser, vocab, global_step)
                    print("Test %s: uas = %d/%d = %.2f, las = %d/%d =%.2f, (%.2f\t%.2f)" % \
                      (otherFile, arc_correct, arc_total, test_uas, rel_correct, arc_total, test_las, test_uas, test_las))
                # Model selection on dev UAS only.
                if dev_uas > best_UAS:
                    print("Exceed best uas: history = %.2f, current = %.2f" %(best_UAS, dev_uas))
                    best_UAS = dev_uas
                    if config.save_after > 0 and iter > config.save_after:
                        torch.save(parser.model.state_dict(), config.save_model_path)
def evaluate(inputFile, parser, vocab, step):
    """Parse inputFile, write predicted trees to ``inputFile.<step>``, and
    return (arc_correct, rel_correct, arc_total, UAS, LAS).

    NOTE(review): reads ``config.test_batch_size`` from the module-level
    ``config`` created in the __main__ block rather than a parameter —
    confirm before reusing this function from another entry point.
    """
    data = read_corpus(inputFile, vocab)
    start = time.time()
    parser.model.eval()
    output = open(inputFile + '.' + str(step), 'w', encoding='utf-8')
    arc_total_test, arc_correct_test, rel_total_test, rel_correct_test = 0, 0, 0, 0
    for onebatch in data_iter(data, config.test_batch_size, False):
        clusters, extwords, tags, heads, rels, lengths, masks = \
            batch_data_variable(onebatch, vocab)
        count = 0
        arcs_batch, rels_batch = parser.parse(clusters, extwords, tags, lengths, masks)
        for tree in batch_variable_depTree(onebatch, arcs_batch, rels_batch, lengths, vocab):
            printDepTree(output, tree)
            # Compare each predicted tree against the gold sentence.
            arc_total, arc_correct, rel_total, rel_correct = evalDepTree(tree, onebatch[count])
            arc_total_test += arc_total
            arc_correct_test += arc_correct
            rel_total_test += rel_total
            rel_correct_test += rel_correct
            count += 1
    output.close()
    uas = arc_correct_test * 100.0 / arc_total_test
    las = rel_correct_test * 100.0 / rel_total_test
    end = time.time()
    during_time = float(end - start)
    print("sentence num: %d, parser time = %.2f " % (len(data), during_time))
    return arc_correct_test, rel_correct_test, arc_total_test, uas, las
class Optimizer:
    """Adam wrapped with a step-decay learning-rate schedule.

    After every ``step()`` the learning rate becomes
    ``learning_rate * decay ** (num_updates // decay_steps)``.
    """
    def __init__(self, parameter, config):
        # config must provide: learning_rate, beta_1, beta_2, epsilon,
        # decay, decay_steps.
        self.optim = torch.optim.Adam(parameter, lr=config.learning_rate, betas=(config.beta_1, config.beta_2),
                                      eps=config.epsilon)
        decay, decay_step = config.decay, config.decay_steps
        l = lambda epoch: decay ** (epoch // decay_step)
        self.scheduler = torch.optim.lr_scheduler.LambdaLR(self.optim, lr_lambda=l)
    def step(self):
        """Apply one update, advance the LR schedule, then clear gradients."""
        self.optim.step()
        self.schedule()
        self.optim.zero_grad()
    def schedule(self):
        self.scheduler.step()
    def zero_grad(self):
        self.optim.zero_grad()
    @property
    def lr(self):
        # get_lr() is deprecated and warns when called outside scheduler.step();
        # get_last_lr() reports the learning rate currently in effect.
        return self.scheduler.get_last_lr()
if __name__ == '__main__':
    # Fix all RNG seeds for reproducibility.
    random.seed(666)
    np.random.seed(666)
    torch.cuda.manual_seed(666)
    torch.manual_seed(666)
    argparser = argparse.ArgumentParser()
    argparser.add_argument('--config_file', default='examples/default.cfg')
    argparser.add_argument('--thread', default=4, type=int, help='thread num')
    argparser.add_argument('--gpu', default=-1, type=int, help='gpu id')
    args, extra_args = argparser.parse_known_args()
    config = Configurable(args.config_file, extra_args)
    # Build the vocabulary (with pretrained embeddings and word clusters)
    # from the training file and persist it for inference.
    vocab = creatVocab(config.train_file, config.min_occur_count)
    vec = vocab.load_pretrained_embs(config.pretrained_embeddings_file)
    vocab.set_cluster(config.cluster_file)
    pickle.dump(vocab, open(config.save_vocab_path, 'wb'))
    # NOTE(review): args/config are parsed and constructed a second time here,
    # seemingly redundantly — confirm whether Configurable has side effects
    # that require this.
    args, extra_args = argparser.parse_known_args()
    config = Configurable(args.config_file, extra_args)
    torch.set_num_threads(args.thread)
    if args.gpu >= 0:
        torch.cuda.set_device(args.gpu)
    config.use_cuda = args.gpu >= 0 and torch.cuda.is_available()
    print("GPU ID: ", args.gpu)
    # print(config.use_cuda)
    model = ParserModel(vocab, config, vec)
    if config.use_cuda:
        model = model.cuda()
    parser = BiaffineParser(model, vocab.ROOT)
    data = read_corpus(config.train_file, vocab)
    # Additional evaluation files, space-separated in the config.
    others = config.others.strip().split(' ')
    train(data, parser, vocab, config, others)
| 7,271 | 40.318182 | 129 | py |
CodeMixedTreebank | CodeMixedTreebank-master/UDParser/driver/Parser.py | import torch.nn.functional as F
from driver.MST import *
import torch.optim.lr_scheduler
from driver.Layer import *
import numpy as np
def pad_sequence(xs, length=None, padding=-1, dtype=np.float64):
    """Right-pad each 1-D array in ``xs`` to a common length and stack them.

    Returns a torch tensor of shape (len(xs), length); ``length`` defaults
    to the longest input.
    """
    sizes = [len(x) for x in xs]
    target = max(sizes) if length is None else length
    rows = []
    for x, size in zip(xs, sizes):
        rows.append(np.pad(x.astype(dtype), (0, target - size),
                           mode="constant", constant_values=padding))
    return torch.from_numpy(np.array(rows))
def obtain_rel_answer(rels, heads, length, rel_size, padding=-1):
    """Fuse (head, rel) targets into flat indices ``head * rel_size + rel``.

    Returns an int64 tensor of shape (len(rels), length), padded with
    ``padding`` beyond each sentence's length.
    """
    out = np.full((len(rels), length), padding, dtype=np.int64)
    for row, (rs, hs) in enumerate(zip(rels, heads)):
        for k in range(len(rs)):
            out[row, k] = hs[k] * rel_size + rs[k]
    return torch.from_numpy(out)
class BiaffineParser(object):
    """Wrapper around ParserModel implementing loss, accuracy and decoding.

    ``forward`` caches arc/rel logits on self; the compute_* methods and
    ``parse`` consume those cached logits.
    """
    def __init__(self, model, root_id):
        self.model = model
        self.root = root_id
        # Infer device placement from the first trainable parameter.
        p = next(filter(lambda p: p.requires_grad, model.parameters()))
        self.use_cuda = p.is_cuda
        self.device = p.get_device() if self.use_cuda else None
    def forward(self, clusters, extwords, tags, masks):
        """Run the model and cache arc_logits/rel_logits on self."""
        if self.use_cuda:
            clusters, extwords = clusters.cuda(self.device), extwords.cuda(self.device),
            tags = tags.cuda(self.device)
            masks = masks.cuda(self.device)
        arc_logits, rel_logits = self.model.forward(clusters, extwords, tags, masks)
        # cache
        self.arc_logits = arc_logits
        self.rel_logits = rel_logits
    def compute_loss(self, true_arcs, true_rels, lengths):
        """Arc + label cross-entropy, with labels scored at the GOLD heads."""
        b, l1, l2 = self.arc_logits.size()
        index_true_arcs = pad_sequence(true_arcs, length=l1, padding=0, dtype=np.int64)
        true_arcs = pad_sequence(true_arcs, length=l1, padding=-1, dtype=np.int64)
        # Mask out candidate-head positions beyond each sentence length
        # with a large negative logit.
        masks = []
        for length in lengths:
            mask = torch.FloatTensor([0] * length + [-10000] * (l2 - length))
            mask = torch.unsqueeze(mask, dim=1).expand(-1, l1)
            masks.append(mask.transpose(0, 1))
        length_mask = torch.stack(masks, 0)
        if self.use_cuda:
            index_true_arcs = index_true_arcs.cuda(self.device)
            true_arcs = true_arcs.cuda(self.device)
            length_mask = length_mask.cuda(self.device)
        arc_logits = self.arc_logits + length_mask
        arc_loss = F.cross_entropy(
            arc_logits.view(b * l1, l2), true_arcs.view(b * l1),
            ignore_index=-1)
        size = self.rel_logits.size()
        true_rels = pad_sequence(true_rels, padding=-1, dtype=np.int64)
        label_logits = torch.zeros(size[0], size[1], size[3])
        if self.use_cuda:
            true_rels = true_rels.cuda(self.device)
            label_logits = label_logits.cuda(self.device)
        # Select, for every dependent, the label logits at its gold head.
        for batch_index, (logits, arcs) in enumerate(zip(self.rel_logits, index_true_arcs)):
            rel_probs = []
            for i in range(l1):
                rel_probs.append(logits[i][int(arcs[i])])
            rel_probs = torch.stack(rel_probs, dim=0)
            label_logits[batch_index] = torch.squeeze(rel_probs, dim=1)
        b, l1, d = label_logits.size()
        rel_loss = F.cross_entropy(
            label_logits.view(b * l1, d), true_rels.view(b * l1), ignore_index=-1)
        loss = arc_loss + rel_loss
        return loss
    def compute_hierarch_loss(self, true_arcs, true_rels, lengths):
        """Joint loss over the flattened (head, label) space of size l2*d."""
        b, l1, l2 = self.arc_logits.size()
        assert l1 == l2
        arc_answers = pad_sequence(true_arcs, length=l1, padding=-1, dtype=np.int64)
        masks = []
        for length in lengths:
            mask = torch.FloatTensor([0] * length + [-10000] * (l2 - length))
            mask = torch.unsqueeze(mask, dim=1).expand(-1, l1)
            masks.append(mask.transpose(0, 1))
        length_masks = torch.stack(masks, 0)
        if self.use_cuda:
            arc_answers = arc_answers.cuda(self.device)
            length_masks = length_masks.cuda(self.device)
        arc_logits = self.arc_logits + length_masks
        arc_loss = F.cross_entropy(
            arc_logits.view(b * l1, l2), arc_answers.view(b * l1),
            ignore_index=-1)
        b, l1, l2, d = self.rel_logits.size()
        # Broadcast the length mask over the label dimension as well.
        label_masks = torch.unsqueeze(length_masks, dim=3).expand(-1, -1, -1, d)
        label_logits = self.rel_logits + label_masks
        # Targets are flattened head*d + rel indices (see obtain_rel_answer).
        rel_answers = obtain_rel_answer(true_rels, true_arcs, l2, d, padding=-1)
        if self.use_cuda:
            rel_answers = rel_answers.cuda(self.device)
            label_logits = label_logits.cuda(self.device)
        rel_loss = F.cross_entropy(
            label_logits.view(b * l1, l2 * d), rel_answers.view(b * l1), ignore_index=-1)
        loss = arc_loss + rel_loss
        return loss
    def compute_accuracy(self, true_arcs, true_rels):
        """Count correct heads and correct labels (labels taken at gold heads)."""
        b, l1, l2 = self.arc_logits.size()
        pred_arcs = self.arc_logits.detach().max(2)[1].cpu()
        index_true_arcs = pad_sequence(true_arcs, padding=-1, dtype=np.int64)
        true_arcs = pad_sequence(true_arcs, padding=-1, dtype=np.int64)
        arc_correct = pred_arcs.eq(true_arcs).cpu().sum().item()
        size = self.rel_logits.size()
        rel_logits = self.rel_logits.cpu()
        label_logits = torch.zeros(size[0], size[1], size[3])
        for batch_index, (logits, arcs) in enumerate(zip(rel_logits, index_true_arcs)):
            rel_probs = []
            for i in range(l1):
                rel_probs.append(logits[i][arcs[i]])
            rel_probs = torch.stack(rel_probs, dim=0)
            label_logits[batch_index] = torch.squeeze(rel_probs, dim=1)
        pred_rels = label_logits.max(2)[1].cpu()
        true_rels = pad_sequence(true_rels, padding=-1, dtype=np.int64)
        label_correct = pred_rels.eq(true_rels).cpu().sum().item()
        # Exclude padded positions (target -1) from the token count.
        total_arcs = b * l1 - np.sum(true_arcs.cpu().numpy() == -1)
        return arc_correct, label_correct, total_arcs
    def parse(self, clusters, extwords, tags, lengths, masks):
        """Decode MST arcs and labels; returns (arcs_batch, rels_batch)."""
        if clusters is not None:
            self.forward(clusters, extwords, tags, masks)
        ROOT = self.root
        arcs_batch, rels_batch = [], []
        arc_logits = self.arc_logits.detach().cpu().numpy()
        rel_logits = self.rel_logits.detach().cpu().numpy()
        for arc_logit, rel_logit, length in zip(arc_logits, rel_logits, lengths):
            arc_probs = softmax2d(arc_logit, length, length)
            # arc_argmax enforces a well-formed tree (single root, no cycles).
            arc_pred = arc_argmax(arc_probs, length)
            rel_probs = rel_logit[np.arange(len(arc_pred)), arc_pred]
            rel_pred = rel_argmax(rel_probs, length, ROOT)
            arcs_batch.append(arc_pred)
            rels_batch.append(rel_pred)
        return arcs_batch, rels_batch
| 6,780 | 37.310734 | 92 | py |
CodeMixedTreebank | CodeMixedTreebank-master/UDParser/driver/Layer.py | import torch
import torch.nn as nn
import numpy as np
from torch.nn import functional, init
def get_tensor_np(t):
    """Return the contents of tensor ``t`` as a CPU numpy array."""
    return t.detach().cpu().numpy()
def orthonormal_initializer(output_size, input_size):
    """
    adopted from Timothy Dozat https://github.com/tdozat/Parser/blob/master/lib/linalg.py

    Returns a float32 (output_size, input_size) matrix with approximately
    orthonormal rows, found by gradient descent on ||Q^T Q - I||^2 / 2.
    Falls back to a plain Gaussian matrix if optimization keeps diverging.
    """
    print(output_size, input_size)
    I = np.eye(output_size)
    lr = .1
    eps = .05 / (output_size + input_size)
    success = False
    tries = 0
    while not success and tries < 10:
        Q = np.random.randn(input_size, output_size) / np.sqrt(output_size)
        # Assume this attempt converges; the divergence check below falsifies it.
        # (Bug fix: the original set success=True unconditionally after the
        # inner loop, so diverged attempts were never actually retried and the
        # fallback branch was unreachable.)
        success = True
        for i in range(100):
            QTQmI = Q.T.dot(Q) - I
            loss = np.sum(QTQmI ** 2 / 2)
            Q2 = Q ** 2
            Q -= lr * Q.dot(QTQmI) / (
                np.abs(Q2 + Q2.sum(axis=0, keepdims=True) + Q2.sum(axis=1, keepdims=True) - 1) + eps)
            if np.max(Q) > 1e6 or loss > 1e6 or not np.isfinite(loss):
                # Diverged: halve the step size and retry with a fresh draw.
                tries += 1
                lr /= 2
                success = False
                break
    if success:
        print('Orthogonal pretrainer loss: %.2e' % loss)
    else:
        print('Orthogonal pretrainer failed, using non-orthogonal random matrix')
        Q = np.random.randn(input_size, output_size) / np.sqrt(output_size)
    return np.transpose(Q.astype(np.float32))
class NonLinear(nn.Module):
    """Linear layer with orthonormal weight init, zero bias, and an optional
    activation (identity if ``activation`` is None)."""
    def __init__(self, input_size, hidden_size, activation=None):
        super(NonLinear, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.linear = nn.Linear(in_features=input_size, out_features=hidden_size)
        if activation is None:
            self._activate = lambda x: x
        else:
            if not callable(activation):
                raise ValueError("activation must be callable: type={}".format(type(activation)))
            self._activate = activation
        self.reset_parameters()
    def forward(self, x):
        y = self.linear(x)
        return self._activate(y)
    def reset_parameters(self):
        # Orthonormal weights (see orthonormal_initializer), zero bias.
        W = orthonormal_initializer(self.hidden_size, self.input_size)
        self.linear.weight.data.copy_(torch.from_numpy(W))
        b = np.zeros(self.hidden_size, dtype=np.float32)
        self.linear.bias.data.copy_(torch.from_numpy(b))
class Biaffine(nn.Module):
    """Biaffine scorer: maps (batch, len1, in1) x (batch, len2, in2) to
    (batch, len2, len1, out_features).

    ``bias`` toggles appending a constant-1 feature to input1/input2,
    which turns the bilinear form into an affine one per side.
    """
    def __init__(self, in1_features, in2_features, out_features,
                 bias=(True, True)):
        super(Biaffine, self).__init__()
        self.in1_features = in1_features
        self.in2_features = in2_features
        self.out_features = out_features
        self.bias = bias
        self.linear_input_size = in1_features + int(bias[0])
        self.linear_output_size = out_features * (in2_features + int(bias[1]))
        # The bilinear weight tensor is stored flattened inside one Linear.
        self.linear = nn.Linear(in_features=self.linear_input_size,
                                out_features=self.linear_output_size,
                                bias=False)
        self.reset_parameters()
    def reset_parameters(self):
        # Zero init (standard for the Dozat-style biaffine classifier).
        W = np.zeros((self.linear_output_size, self.linear_input_size), dtype=np.float32)
        self.linear.weight.data.copy_(torch.from_numpy(W))
    def forward(self, input1, input2):
        batch_size, len1, dim1 = input1.size()
        batch_size, len2, dim2 = input2.size()
        if self.bias[0]:
            ones = input1.new_ones((batch_size, len1, 1))
            input1 = torch.cat((input1, ones), dim=2)
            dim1 += 1
        if self.bias[1]:
            ones = input2.new_ones((batch_size, len2, 1))
            input2 = torch.cat((input2, ones), dim=2)
            dim2 += 1
        # (b, len1, out*dim2) -> (b, len1*out, dim2), then bmm against
        # input2^T and reshape to (b, len2, len1, out).
        affine = self.linear(input1)
        affine = affine.view(batch_size, len1*self.out_features, dim2)
        input2 = torch.transpose(input2, 1, 2)
        biaffine = torch.transpose(torch.bmm(affine, input2), 1, 2)
        biaffine = biaffine.contiguous().view(batch_size, len2, len1, self.out_features)
        return biaffine
    def __repr__(self):
        return self.__class__.__name__ + ' (' \
            + 'in1_features=' + str(self.in1_features) \
            + ', in2_features=' + str(self.in2_features) \
            + ', out_features=' + str(self.out_features) + ')'
class LSTM(nn.LSTM):
    """nn.LSTM whose weights are re-initialized with per-gate orthogonal
    matrices and whose biases are zeroed.

    ``reset_parameters`` is invoked by ``nn.LSTM.__init__``, replacing the
    default uniform initialization.
    """
    def reset_parameters(self):
        for name, param in self.named_parameters():
            if "weight" in name:
                # Each weight matrix stacks the 4 gate blocks along dim 0;
                # initialize each (hidden_size x in) block orthogonally.
                # (Fix: nn.init.orthogonal/constant without the trailing
                # underscore are deprecated/removed; use the in-place forms,
                # consistent with the rest of this file.)
                for i in range(4):
                    nn.init.orthogonal_(param[self.hidden_size * i:self.hidden_size * (i + 1), :])
            if "bias" in name:
                nn.init.constant_(param, 0)
class MyLSTM(nn.Module):
    """A module that runs multiple steps of LSTM.

    Hand-rolled (bi)LSTM built from nn.LSTMCell, with variational dropout on
    inputs (dropout_in) and recurrent hidden states (dropout_out), and a
    per-timestep mask that resets padded positions to the initial state.
    """
    def __init__(self, input_size, hidden_size, num_layers=1, batch_first=False, \
                 bidirectional=False, dropout_in=0, dropout_out=0):
        super(MyLSTM, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.batch_first = batch_first
        self.bidirectional = bidirectional
        self.dropout_in = dropout_in
        self.dropout_out = dropout_out
        self.num_directions = 2 if bidirectional else 1
        # NOTE(review): fcells/bcells are plain Python lists, so the cells are
        # not registered as submodules; their parameters are only exposed via
        # the setattr calls below — confirm .cuda()/state_dict cover them.
        self.fcells = []
        self.bcells = []
        for layer in range(num_layers):
            layer_input_size = input_size if layer == 0 else hidden_size * self.num_directions
            self.fcells.append(nn.LSTMCell(input_size=layer_input_size, hidden_size=hidden_size))
            if self.bidirectional:
                self.bcells.append(nn.LSTMCell(input_size=layer_input_size, hidden_size=hidden_size))
        # Expose cell parameters under nn.LSTM-style names (weight_ih_l0, ...).
        self._all_weights = []
        for layer in range(num_layers):
            layer_params = (self.fcells[layer].weight_ih, self.fcells[layer].weight_hh, \
                            self.fcells[layer].bias_ih, self.fcells[layer].bias_hh)
            suffix = ''
            param_names = ['weight_ih_l{}{}', 'weight_hh_l{}{}']
            param_names += ['bias_ih_l{}{}', 'bias_hh_l{}{}']
            param_names = [x.format(layer, suffix) for x in param_names]
            for name, param in zip(param_names, layer_params):
                setattr(self, name, param)
            self._all_weights.append(param_names)
            if self.bidirectional:
                layer_params = (self.bcells[layer].weight_ih, self.bcells[layer].weight_hh, \
                                self.bcells[layer].bias_ih, self.bcells[layer].bias_hh)
                suffix = '_reverse'
                param_names = ['weight_ih_l{}{}', 'weight_hh_l{}{}']
                param_names += ['bias_ih_l{}{}', 'bias_hh_l{}{}']
                param_names = [x.format(layer, suffix) for x in param_names]
                for name, param in zip(param_names, layer_params):
                    setattr(self, name, param)
                self._all_weights.append(param_names)
        self.reset_parameters()
    def reset_parameters(self):
        # Orthonormal init of the stacked [W_x | W_h] matrix per layer, with
        # the same block repeated for all 4 gates; all biases zeroed.
        for layer in range(self.num_layers):
            if self.bidirectional:
                param_ih_name = 'weight_ih_l{}{}'.format(layer, '_reverse')
                param_hh_name = 'weight_hh_l{}{}'.format(layer, '_reverse')
                param_ih = self.__getattr__(param_ih_name)
                param_hh = self.__getattr__(param_hh_name)
                if layer == 0:
                    W = orthonormal_initializer(self.hidden_size, self.hidden_size + self.input_size)
                else:
                    W = orthonormal_initializer(self.hidden_size, self.hidden_size + 2 * self.hidden_size)
                W_h, W_x = W[:, :self.hidden_size], W[:, self.hidden_size:]
                param_ih.data.copy_(torch.from_numpy(np.concatenate([W_x] * 4, 0)))
                param_hh.data.copy_(torch.from_numpy(np.concatenate([W_h] * 4, 0)))
            else:
                param_ih_name = 'weight_ih_l{}{}'.format(layer, '')
                param_hh_name = 'weight_hh_l{}{}'.format(layer, '')
                param_ih = self.__getattr__(param_ih_name)
                param_hh = self.__getattr__(param_hh_name)
                if layer == 0:
                    W = orthonormal_initializer(self.hidden_size, self.hidden_size + self.input_size)
                else:
                    W = orthonormal_initializer(self.hidden_size, self.hidden_size + self.hidden_size)
                W_h, W_x = W[:, :self.hidden_size], W[:, self.hidden_size:]
                param_ih.data.copy_(torch.from_numpy(np.concatenate([W_x] * 4, 0)))
                param_hh.data.copy_(torch.from_numpy(np.concatenate([W_h] * 4, 0)))
        # NOTE(review): in the bidirectional branch above only the reverse
        # weights are re-initialized; forward weights keep the LSTMCell
        # default init — confirm whether that is intentional.
        for name, param in self.named_parameters():
            if "bias" in name:
                nn.init.constant_(self.__getattr__(name), 0)
    @staticmethod
    def _forward_rnn(cell, input, masks, initial, drop_masks):
        # Left-to-right scan; padded steps (mask 0) are reset to the initial
        # state, and the recurrent input is dropped with drop_masks.
        max_time = input.size(0)
        output = []
        hx = initial
        for time in range(max_time):
            h_next, c_next = cell(input=input[time], hx=hx)
            h_next = h_next*masks[time] + initial[0]*(1-masks[time])
            c_next = c_next*masks[time] + initial[1]*(1-masks[time])
            output.append(h_next)
            if drop_masks is not None: h_next = h_next * drop_masks
            hx = (h_next, c_next)
        output = torch.stack(output, 0)
        return output, hx
    @staticmethod
    def _forward_brnn(cell, input, masks, initial, drop_masks):
        # Same as _forward_rnn but scanning right-to-left; outputs are
        # reversed back into chronological order before stacking.
        max_time = input.size(0)
        output = []
        hx = initial
        for time in reversed(range(max_time)):
            h_next, c_next = cell(input=input[time], hx=hx)
            h_next = h_next*masks[time] + initial[0]*(1-masks[time])
            c_next = c_next*masks[time] + initial[1]*(1-masks[time])
            output.append(h_next)
            if drop_masks is not None: h_next = h_next * drop_masks
            hx = (h_next, c_next)
        output.reverse()
        output = torch.stack(output, 0)
        return output, hx
    def forward(self, input, masks, initial=None):
        """Run the stacked (bi)LSTM.

        input: (batch, seq, feat) if batch_first else (seq, batch, feat);
        masks: (batch, seq) with 1 for real tokens. Returns the top-layer
        outputs in (seq, batch, hidden*dirs) layout plus final (h_n, c_n).
        """
        if self.batch_first:
            input = input.transpose(0, 1)
            masks = torch.unsqueeze(masks.transpose(0, 1), dim=2)
        max_time, batch_size, _ = input.size()
        masks = masks.expand(-1, -1, self.hidden_size)
        if initial is None:
            initial = input.new_zeros((batch_size, self.hidden_size))
            initial = (initial, initial)
        h_n = []
        c_n = []
        for layer in range(self.num_layers):
            max_time, batch_size, input_size = input.size()
            input_mask, hidden_mask = None, None
            if self.training:
                # Variational dropout: one mask per (batch, feature), shared
                # across all time steps of this layer.
                input_mask = input.new_full((batch_size, input_size), 1-self.dropout_in)
                input_mask = torch.bernoulli(input_mask)
                input_mask = input_mask / (1 - self.dropout_in)
                input_mask = torch.unsqueeze(input_mask, dim=2).expand(-1, -1, max_time).permute(2, 0, 1)
                input = input * input_mask
                hidden_mask = input.new_full((batch_size, self.hidden_size), 1-self.dropout_out)
                hidden_mask = torch.bernoulli(hidden_mask)
                hidden_mask = hidden_mask / (1 - self.dropout_out)
            layer_output, (layer_h_n, layer_c_n) = MyLSTM._forward_rnn(cell=self.fcells[layer], \
                input=input, masks=masks, initial=initial, drop_masks=hidden_mask)
            if self.bidirectional:
                blayer_output, (blayer_h_n, blayer_c_n) = MyLSTM._forward_brnn(cell=self.bcells[layer], \
                    input=input, masks=masks, initial=initial, drop_masks=hidden_mask)
            h_n.append(torch.cat([layer_h_n, blayer_h_n], 1) if self.bidirectional else layer_h_n)
            c_n.append(torch.cat([layer_c_n, blayer_c_n], 1) if self.bidirectional else layer_c_n)
            # Next layer consumes this layer's (possibly concatenated) output.
            input = torch.cat([layer_output, blayer_output], 2) if self.bidirectional else layer_output
        h_n = torch.stack(h_n, 0)
        c_n = torch.stack(c_n, 0)
        return input, (h_n, c_n)
| 11,929 | 41.913669 | 107 | py |
CodeMixedTreebank | CodeMixedTreebank-master/UDParser/driver/Model.py | from driver.Layer import *
from data.Vocab import *
def drop_tri_input_independent(word_embeddings, tag_embeddings, context_embeddings, dropout_emb):
    """Independently drop word/tag/context embeddings per (batch, position),
    jointly rescaling the surviving streams so their expected sum is kept.

    Returns the three masked embedding tensors in the same order.
    """
    b, s, _ = word_embeddings.size()
    keep = 1 - dropout_emb
    streams = (word_embeddings, tag_embeddings, context_embeddings)
    # One Bernoulli keep-mask per stream (same draw order as before).
    keep_masks = [torch.bernoulli(e.new_full((b, s), keep)) for e in streams]
    # Renormalize: positions where fewer streams survive get scaled up.
    scale = 3.0 / (keep_masks[0] + keep_masks[1] + keep_masks[2] + 1e-12)
    outputs = []
    for emb, mask in zip(streams, keep_masks):
        outputs.append(emb * (mask * scale).unsqueeze(dim=2))
    return tuple(outputs)
def drop_bi_input_independent(word_embeddings, tag_embeddings, dropout_emb):
    """Independently drop word/tag embeddings per (batch, position), jointly
    rescaling survivors so the expected sum of the two streams is kept."""
    b, s, _ = word_embeddings.size()
    keep = 1 - dropout_emb
    word_mask = torch.bernoulli(word_embeddings.new_full((b, s), keep))
    tag_mask = torch.bernoulli(tag_embeddings.new_full((b, s), keep))
    scale = 2.0 / (word_mask + tag_mask + 1e-12)
    word_out = word_embeddings * (word_mask * scale).unsqueeze(dim=2)
    tag_out = tag_embeddings * (tag_mask * scale).unsqueeze(dim=2)
    return word_out, tag_out
def drop_uni_input_independent(word_embeddings, dropout_emb):
    """Drop whole embedding vectors per (batch, position) with probability
    ``dropout_emb``; kept vectors pass through (scale is 1/mask, not 1/(1-p))."""
    b, s, _ = word_embeddings.size()
    mask = torch.bernoulli(word_embeddings.new_full((b, s), 1 - dropout_emb))
    scale = 1.0 / (1.0 * mask + 1e-12)
    return word_embeddings * (mask * scale).unsqueeze(dim=2)
def drop_sequence_sharedmask(inputs, dropout, batch_first=True):
    """Apply one dropout mask per (batch, hidden) shared across time steps.

    inputs: (batch, seq, hidden) when batch_first else (seq, batch, hidden).
    Survivors are rescaled by 1/(1-dropout) (inverted dropout); the result
    is returned in the same layout as the input.
    """
    if batch_first:
        inputs = inputs.transpose(0, 1)
    seq_length, batch_size, hidden_size = inputs.size()
    drop_masks = inputs.new_full((batch_size, hidden_size), 1 - dropout)
    drop_masks = torch.bernoulli(drop_masks)
    drop_masks = drop_masks / (1 - dropout)
    # Broadcast the (batch, hidden) mask over every time step.
    drop_masks = torch.unsqueeze(drop_masks, dim=2).expand(-1, -1, seq_length).permute(2, 0, 1)
    inputs = inputs * drop_masks
    # Bug fix: only transpose back when we transposed on the way in; the old
    # code returned a transposed tensor for batch_first=False input.
    return inputs.transpose(1, 0) if batch_first else inputs
class ParserModel(nn.Module):
    """Biaffine dependency parser: cluster/word/tag embeddings -> BiLSTM ->
    shared dep/head MLPs -> biaffine arc and relation scorers."""
    def __init__(self, vocab, config, pretrained_embedding):
        super(ParserModel, self).__init__()
        self.config = config
        # padding_idx=0 keeps the pad row at zero for all three tables.
        self.cluster_embed = nn.Embedding(vocab.cluster_size, config.cluster_dims, padding_idx=0)
        self.extword_embed = nn.Embedding(vocab.extvocab_size, config.word_dims, padding_idx=0)
        self.tag_embed = nn.Embedding(vocab.tag_size, config.tag_dims, padding_idx=0)
        cluster_init = np.random.randn(vocab.cluster_size, config.cluster_dims).astype(np.float32)
        self.cluster_embed.weight.data.copy_(torch.from_numpy(cluster_init))
        tag_init = np.random.randn(vocab.tag_size, config.tag_dims).astype(np.float32)
        self.tag_embed.weight.data.copy_(torch.from_numpy(tag_init))
        # Pretrained word embeddings are frozen.
        self.extword_embed.weight.data.copy_(torch.from_numpy(pretrained_embedding))
        self.extword_embed.weight.requires_grad = False
        self.lstm = MyLSTM(
            input_size=config.word_dims + config.tag_dims + config.cluster_dims,
            hidden_size=config.lstm_hiddens,
            num_layers=config.lstm_layers,
            batch_first=True,
            bidirectional=True,
            dropout_in = config.dropout_lstm_input,
            dropout_out=config.dropout_lstm_hidden,
        )
        # One MLP each for dependent/head roles; output is later split into
        # arc and rel sub-spaces in 100-dim chunks.
        self.mlp_arc_dep = NonLinear(
            input_size = 2*config.lstm_hiddens,
            hidden_size = config.mlp_arc_size+config.mlp_rel_size,
            activation = nn.LeakyReLU(0.1))
        self.mlp_arc_head = NonLinear(
            input_size = 2*config.lstm_hiddens,
            hidden_size = config.mlp_arc_size+config.mlp_rel_size,
            activation = nn.LeakyReLU(0.1))
        # Chunk counts assume mlp_arc_size/mlp_rel_size are multiples of 100.
        self.total_num = int((config.mlp_arc_size+config.mlp_rel_size) / 100)
        self.arc_num = int(config.mlp_arc_size / 100)
        self.rel_num = int(config.mlp_rel_size / 100)
        self.arc_biaffine = Biaffine(config.mlp_arc_size, config.mlp_arc_size, \
                                     1, bias=(True, False))
        self.rel_biaffine = Biaffine(config.mlp_rel_size, config.mlp_rel_size, \
                                     vocab.rel_size, bias=(True, True))
    def forward(self, clusters, extwords, tags, masks):
        # x = (batch size, sequence length, dimension of embedding)
        x_cluster_embed = self.cluster_embed(clusters)
        x_extword_embed = self.extword_embed(extwords)
        x_tag_embed = self.tag_embed(tags)
        if self.training:
            x_extword_embed, x_tag_embed, x_cluster_embed = \
                drop_tri_input_independent(x_extword_embed, x_tag_embed, x_cluster_embed, self.config.dropout_emb)
        x_lexical = torch.cat((x_extword_embed, x_tag_embed, x_cluster_embed), dim=2)
        outputs, _ = self.lstm(x_lexical, masks, None)
        # MyLSTM returns (seq, batch, hidden); restore batch-first layout.
        outputs = outputs.transpose(1, 0)
        if self.training:
            outputs = drop_sequence_sharedmask(outputs, self.config.dropout_mlp)
        x_all_dep = self.mlp_arc_dep(outputs)
        x_all_head = self.mlp_arc_head(outputs)
        if self.training:
            x_all_dep = drop_sequence_sharedmask(x_all_dep, self.config.dropout_mlp)
            x_all_head = drop_sequence_sharedmask(x_all_head, self.config.dropout_mlp)
        # Split the MLP output into 100-dim chunks: the first arc_num chunks
        # feed the arc scorer, the remainder feed the relation scorer.
        x_all_dep_splits = torch.split(x_all_dep, split_size_or_sections=100, dim=2)
        x_all_head_splits = torch.split(x_all_head, split_size_or_sections=100, dim=2)
        x_arc_dep = torch.cat(x_all_dep_splits[:self.arc_num], dim=2)
        x_arc_head = torch.cat(x_all_head_splits[:self.arc_num], dim=2)
        arc_logit = self.arc_biaffine(x_arc_dep, x_arc_head)
        arc_logit = torch.squeeze(arc_logit, dim=3)
        x_rel_dep = torch.cat(x_all_dep_splits[self.arc_num:], dim=2)
        x_rel_head = torch.cat(x_all_head_splits[self.arc_num:], dim=2)
        rel_logit_cond = self.rel_biaffine(x_rel_dep, x_rel_head)
        return arc_logit, rel_logit_cond | 6,687 | 43.586667 | 114 | py |
CodeMixedTreebank | CodeMixedTreebank-master/UDParser/data/Dataloader.py | from data.Vocab import *
import numpy as np
import torch
def read_corpus(file_path, vocab=None):
    """Parse a dependency-treebank file into a list of sentences."""
    with open(file_path, 'r', encoding='UTF-8') as infile:
        return list(readDepTree(infile, vocab))
def sentences_numberize(sentences, vocab):
    """Lazily convert each sentence into its id representation."""
    for sent in sentences:
        yield sentence2id(sent, vocab)
def sentence2id(sentence, vocab):
    """Convert a sentence of dependency tokens into per-token id lists.

    Each token becomes [cluster_id, extword_id, tag_id, head_index, rel_id].
    """
    return [[vocab.word2clusterid(dep.form),
             vocab.extword2id(dep.form),
             vocab.tag2id(dep.tag),
             dep.head,
             vocab.rel2id(dep.rel)]
            for dep in sentence]
def batch_slice(data, batch_size):
    """Yield consecutive chunks of ``data``; the last chunk may be shorter."""
    for start in range(0, len(data), batch_size):
        yield data[start:start + batch_size]
def data_iter(data, batch_size, shuffle=True):
    """Yield mini-batches of ``data``.

    When ``shuffle`` is True the data is permuted in place before batching
    and the batch order is permuted as well (two np.random.shuffle calls,
    same as the original helper-based implementation).
    """
    if shuffle:
        np.random.shuffle(data)
    batches = [data[start:start + batch_size]
               for start in range(0, len(data), batch_size)]
    if shuffle:
        np.random.shuffle(batches)
    yield from batches
def batch_data_variable(batch, vocab):
    """Pack a batch of sentences into padded tensors for the parser.

    Returns (clusters, extwords, tags, heads, rels, lengths, masks) where the
    first three are LongTensors of shape (batch, max_len) padded with 0,
    masks is a FloatTensor with 1 at real tokens, and heads/rels are lists
    of per-sentence int32 numpy arrays.
    """
    # Find the longest sentence to size the padded tensors.
    length = len(batch[0])
    batch_size = len(batch)
    for b in range(1, batch_size):
        if len(batch[b]) > length: length = len(batch[b])
    clusters = torch.LongTensor(batch_size, length).zero_()
    extwords = torch.LongTensor(batch_size, length).zero_()
    tags = torch.LongTensor(batch_size, length).zero_()
    masks = torch.Tensor(batch_size, length).zero_()
    heads = []
    rels = []
    lengths = []
    b = 0
    for sentence in sentences_numberize(batch, vocab):
        index = 0
        # NOTE: re-binds `length` to this sentence's length; safe only
        # because the padded tensors were allocated above.
        length = len(sentence)
        lengths.append(length)
        head = np.zeros((length), dtype=np.int32)
        rel = np.zeros((length), dtype=np.int32)
        for dep in sentence:
            # dep = [cluster_id, extword_id, tag_id, head_index, rel_id]
            clusters[b, index] = dep[0]
            extwords[b, index] = dep[1]
            tags[b, index] = dep[2]
            head[index] = dep[3]
            rel[index] = dep[4]
            masks[b, index] = 1
            index += 1
        b += 1
        heads.append(head)
        rels.append(rel)
    return clusters, extwords, tags, heads, rels, lengths, masks
def batch_variable_depTree(trees, heads, rels, lengths, vocab):
    """Rebuild Dependency sentences from predicted heads/relation ids.

    ``trees`` supplies the surface tokens (form, lang, tag); ``heads`` and
    ``rels`` are the parser's predictions, truncated to each ``length``.
    Yields one list of Dependency objects per input sentence.
    """
    for tree, head, rel, length in zip(trees, heads, rels, lengths):
        sentence = []
        for idx in range(length):
            sentence.append(Dependency(idx, tree[idx].org_form, tree[idx].lang, tree[idx].tag, head[idx], vocab.id2rel(rel[idx])))
        yield sentence
| 2,952 | 29.132653 | 130 | py |
LSMOL | LSMOL-main/evaluation/generate_3d_instance_gt.py | import torch
import pickle
import os
import numpy as np
from PIL import Image
from mmdet3d.core.bbox import CameraInstance3DBoxes,get_box_type
def main():
    """Generate per-point ground-truth instance labels for the Waymo val split.

    For every frame in the mmdet3d info pickle: load the point cloud, convert
    the camera-frame GT boxes to lidar coordinates, label each point with the
    index of the box containing it (-1 for background), and dump the
    per-frame label arrays to ``val_instance_gt.pkl``.
    """
    # change the path to your own path
    pickle_data_path = '/data/waymo_root/kitti_format/waymo_infos_val.pkl'
    root = '/data/waymo_root/kitti_format'
    # context manager closes the info pickle (the original left the handle open)
    with open(pickle_data_path, 'rb') as f:
        info = pickle.load(f)
    num = len(info)
    print('total point clouds:', num)
    gt_instance_list = []
    for idx in range(num):
        gt_instance = {}
        info_c = info[idx]
        pc_path = os.path.join(root, info_c['point_cloud']['velodyne_path'])
        # np.float was removed in NumPy 1.24; np.float64 is the exact alias it
        # stood for, so behavior is unchanged.
        # NOTE(review): KITTI-format .bin clouds are commonly float32 — confirm
        # how these velodyne files were written before relying on float64 here.
        pc_data = np.fromfile(pc_path, dtype=np.float64)
        pc_data = pc_data.reshape(-1, 3)
        # camera-frame annotations -> lidar-frame 3D boxes
        rect = info_c['calib']['R0_rect'].astype(np.float32)
        Trv2c = info_c['calib']['Tr_velo_to_cam'].astype(np.float32)
        annos = info_c['annos']
        loc = annos['location']
        dims = annos['dimensions']
        rots = annos['rotation_y']
        gt_bboxes_3d = np.concatenate([loc, dims, rots[..., np.newaxis]],
                                      axis=1).astype(np.float32)
        box_type_3d, box_mode_3d = get_box_type('LiDAR')
        gt_bboxes_3d = CameraInstance3DBoxes(gt_bboxes_3d).convert_to(
            box_mode_3d, np.linalg.inv(rect @ Trv2c))
        # per-point box index; -1 for points inside no box
        points = torch.tensor(pc_data, dtype=torch.float32).cuda()
        labels = gt_bboxes_3d.points_in_boxes(points).long()
        labels = np.array(labels.cpu())
        gt_instance['timestamp'] = info_c['timestamp']
        gt_instance['velodyne_path'] = info_c['point_cloud']['velodyne_path']
        gt_instance['labels'] = labels
        gt_instance_list.append(gt_instance)
        print('finish:', idx)
    print('saving for gt instance segmentation')
    pickle_out_path = os.path.join(root, 'val_instance_gt.pkl')
    # write-and-close via context manager (replaces manual close())
    with open(pickle_out_path, 'wb') as pickle_file:
        pickle.dump(gt_instance_list, pickle_file)
if __name__ == "__main__":
main() | 2,020 | 30.578125 | 111 | py |
LSMOL | LSMOL-main/evaluation/eval_3d_instance.py | import torch
import pickle
import os
import cv2
import numpy as np
from PIL import Image
from mmdet3d.core.bbox import CameraInstance3DBoxes,get_box_type
from matplotlib import pyplot as plt
import matplotlib.patches as patches
from scipy import stats
def calculate_PR(class_tp, class_fp, class_score, class_gt_num):
    """Build the precision/recall curve for one class.

    Args:
        class_tp: list of 0/1 flags, 1 when a detection is a true positive.
        class_fp: list of 0/1 flags, 1 when a detection is a false positive.
        class_score: confidence score of each detection (same order as flags).
        class_gt_num: total number of ground-truth instances of this class.

    Returns:
        (P, R): precision and recall values, ordered by descending score.
    """
    # rank detections by confidence, highest first
    order = np.argsort(class_score)[::-1].tolist()
    tp_sorted = [class_tp[i] for i in order]
    fp_sorted = [class_fp[i] for i in order]
    # running totals of true / false positives
    tp_cum = np.cumsum(tp_sorted).tolist()
    fp_cum = np.cumsum(fp_sorted).tolist()
    P = [t / (t + f) for t, f in zip(tp_cum, fp_cum)]
    R = [t / class_gt_num for t in tp_cum]
    return P, R
def calculate_map_single(P, R):
    """Area under a precision/recall curve (VOC-style interpolated AP).

    Args:
        P: precision values.
        R: recall values.

    Returns:
        float: the AP, i.e. the area under the interpolated PR curve.
    """
    # pad the curve so it spans recall 0..1
    prec = np.concatenate(([0.], P, [0.]))
    rec = np.concatenate(([0.], R, [1.]))
    # make precision monotonically non-increasing (right-to-left envelope);
    # equivalent to the classic backwards max loop
    prec = np.maximum.accumulate(prec[::-1])[::-1]
    # indices where recall actually changes
    change = np.where(rec[1:] != rec[:-1])[0]
    # sum the rectangle areas between consecutive recall levels
    return np.sum((rec[change + 1] - rec[change]) * prec[change + 1])
def compute_iou_one_frame(points_ins_id, pred_labels, gt_ins_id):
    """Point-count statistics used for the dataset-level point IoU.

    Args:
        points_ins_id: predicted instance id per foreground point.
        pred_labels: GT box index per foreground point (-1 = inside no box).
        gt_ins_id: GT instance id per point of the full cloud (-1 = background).

    Returns:
        (pred_num, gt_num, match_num): number of predicted foreground points,
        number of GT foreground points, and number of predicted points lying
        inside some GT box.
    """
    pred_num = points_ins_id.shape[0]
    gt_num = (gt_ins_id != -1).sum()
    match_num = (pred_labels != -1).sum()
    return pred_num, gt_num, match_num
def compute_ap_metric_one_frame(points_ins_id,pred_labels,pred_score,gt_ins_id,iou_thresh):
    """Match predicted instances to GT boxes for one frame, collect AP inputs.

    Args:
        points_ins_id: predicted instance id per foreground point.
        pred_labels: GT box index per foreground point (-1 = inside no box).
        pred_score: confidence score per foreground point.
        gt_ins_id: GT instance id per point of the full cloud (-1 = background).
        iou_thresh: point-level IoU a match must reach to count as TP.

    Returns:
        dict with per-prediction 'TP'/'FP' flag lists, 'TP_NUM'/'FP_NUM'/
        'FN_NUM' counts, the IoUs of accepted matches ('IoU_05') and their
        mean point scores ('scores').
    """
    ins_id = np.unique(points_ins_id)
    real_id = np.unique(gt_ins_id)
    # print('pred num:',len(ins_id))
    # print('gt_num:',len(real_id)-1)
    # 1.Match to GT: each predicted instance gets the GT box index that the
    # majority of its points fall into (-1 when most points hit no box).
    ins_match = {}
    for iid in ins_id:
        obj = np.where(points_ins_id[:]==iid,1,0)
        real_labels = pred_labels[obj==1]
        # NOTE(review): scipy >= 1.11 returns a scalar mode by default
        # (keepdims change), which breaks this [0][0] indexing — pin scipy
        # or pass keepdims=True when upgrading.
        most = stats.mode(real_labels)[0][0]
        ins_match[iid]=most
    count_num_list= []
    for y in ins_match.values():
        if y!=-1:
            count_num_list.append(y)
    # print('match num:',len(set(count_num_list)))
    # 2. GT: point count per GT instance (background id -1 excluded)
    gt_num = {}
    for rid in real_id:
        if rid!=-1:
            gt_p = np.where(gt_ins_id[:]==rid,1,0)
            gt_num[rid]=gt_p.sum()
    # 3. Pred: point count per predicted instance
    ins_num = {}
    for j in ins_id:
        ins_num[j]=np.where(points_ins_id[:]==j,1,0).sum()
    # 4. Match: points shared by a predicted instance and its matched box
    match_num={}
    for ins_key,ins_value in ins_match.items():
        if ins_value!=-1:
            m_obj = np.where((pred_labels[:]==ins_value) & (points_ins_id[:]==ins_key),1,0)
            match_num[ins_key]=m_obj.sum()
    # point-level IoU = |intersection| / (|gt| + |pred| - |intersection|)
    IoU = {}
    for I_key,I_value in match_num.items():
        IoU[I_key]=match_num[I_key]/(gt_num[ins_match[I_key]]+ins_num[I_key]-match_num[I_key])
    final_result = {}
    fp = []
    tp = []
    fp_num = 0
    tp_num = 0
    fn_num = 0
    IoU_05 =[]
    scores = []
    # classify each predicted instance as TP or FP
    for r_key,r_value in ins_match.items():
        if r_value==-1:
            # matched no GT box at all -> false positive
            fp_num=fp_num+1
            fp.append(1)
            tp.append(0)
        else:
            iou = IoU[r_key]
            if iou>=iou_thresh:
                tp_num = tp_num+1
                tp.append(1)
                fp.append(0)
                # NOTE(review): matched predictions below the IoU threshold
                # get no tp/fp entry and no score; `scores` therefore only
                # covers accepted TPs while tp/fp also contain the unmatched
                # FPs — downstream calculate_PR indexes these lists with the
                # same indices, so the lengths can diverge. Confirm intended.
                score_idx = np.where(points_ins_id[:]==r_key,1,0)
                score_mean = pred_score[score_idx==1].mean()
                IoU_05.append(iou)
                scores.append(score_mean)
    # '-1' removes the background id from the GT instance count; assumes
    # id -1 is always present in gt_ins_id — otherwise off by one (verify).
    fn_num = len(real_id)-1-tp_num
    final_result['FP']=fp
    final_result['TP']=tp
    final_result['TP_NUM']=tp_num
    final_result['FP_NUM']=fp_num
    final_result['FN_NUM']=fn_num
    final_result['IoU_05']=IoU_05
    final_result['scores']=scores
    return final_result
def get_data_one_frame(idx, info, gt_ins, pred_ins):
    """Fetch and align the prediction, annotation and GT-instance records of
    one frame, and derive a per-point GT box index for the predicted points.

    Returns:
        (points_ins_id, pred_labels, pred_score, gt_ins_id): predicted
        instance ids, GT box index per predicted foreground point (-1 when
        inside no box), per-point scores, and the GT instance labels.
    """
    pred_rec = pred_ins[idx]
    info_rec = info[idx]
    gt_rec = gt_ins[idx]
    # all three pickles must describe the same lidar frame
    pred_name = pred_rec['img_metas'][0]['pts_filename'].split('/')[-1]
    val_name = info_rec['point_cloud']['velodyne_path'].split('/')[-1]
    gt_name = gt_rec['velodyne_path'].split('/')[-1]
    assert pred_name == val_name
    assert val_name == gt_name
    fg_points = pred_rec['points_fg']
    # camera-frame annotations -> lidar-frame boxes via rect/extrinsics
    rect = info_rec['calib']['R0_rect'].astype(np.float32)
    Trv2c = info_rec['calib']['Tr_velo_to_cam'].astype(np.float32)
    annos = info_rec['annos']
    boxes_cam = np.concatenate(
        [annos['location'], annos['dimensions'],
         annos['rotation_y'][..., np.newaxis]], axis=1).astype(np.float32)
    _, box_mode_3d = get_box_type('LiDAR')
    gt_bboxes_3d = CameraInstance3DBoxes(boxes_cam).convert_to(
        box_mode_3d, np.linalg.inv(rect @ Trv2c))
    # pseudo label: containing box index (or -1) per foreground point
    pts = torch.tensor(fg_points, dtype=torch.float32).cuda()
    pred_labels = gt_bboxes_3d.points_in_boxes(pts).long()
    pred_labels = np.array(pred_labels.cpu())
    points_ins_id = np.array(pred_rec['points_ins_id'])
    gt_ins_id = gt_rec['labels']
    pred_score = np.array(pred_rec['points_score'])
    return points_ins_id, pred_labels, pred_score, gt_ins_id
def compute_ap(info, gt_ins, pred_ins, iou_threshold=0.7):
    """Aggregate per-frame matching results over the whole split and print the
    instance-segmentation AP, point-level IoU, precision and recall.

    Args:
        info: mmdet3d info records (one per frame).
        gt_ins: GT instance-label records (same order as ``info``).
        pred_ins: predicted instance records (same order as ``info``).
        iou_threshold: point IoU a match must reach to count as TP.
    """
    tp_total, fp_total, fn_total = 0, 0, 0
    pred_pts, gt_pts, match_pts = 0, 0, 0
    tp_flags, fp_flags, score_list = [], [], []
    matched_ious = []
    for idx in range(len(info)):
        points_ins_id, pred_labels, pred_score, gt_ins_id = get_data_one_frame(
            idx, info, gt_ins, pred_ins)
        # instance-level matching for AP
        frame_stats = compute_ap_metric_one_frame(
            points_ins_id, pred_labels, pred_score, gt_ins_id,
            iou_thresh=iou_threshold)
        # point-level counts for the global IoU
        n_pred, n_gt, n_match = compute_iou_one_frame(
            points_ins_id, pred_labels, gt_ins_id)
        pred_pts += n_pred
        gt_pts += n_gt
        match_pts += n_match
        fp_total += frame_stats['FP_NUM']
        tp_total += frame_stats['TP_NUM']
        fn_total += frame_stats['FN_NUM']
        tp_flags.extend(frame_stats['TP'])
        fp_flags.extend(frame_stats['FP'])
        matched_ious.extend(frame_stats['IoU_05'])
        score_list.extend(frame_stats['scores'])
        if idx % 5000 == 0:
            print('finish:', idx)
    print('Compute AP:')
    P, R = calculate_PR(tp_flags, fp_flags, score_list, tp_total + fn_total)
    print('AP:', calculate_map_single(P, R))
    print('Compute IoU:')
    print('IoU:', match_pts / (pred_pts + gt_pts - match_pts))
    print('precision:', tp_total / (tp_total + fp_total))
    print('Recall:', tp_total / (tp_total + fn_total))
def main():
    """Load the info / GT / prediction pickles and print evaluation metrics.

    The original left all three file handles open for the process lifetime;
    they are now closed deterministically via context managers.
    """
    # load basic information
    # generate from mmdetection3d (kitti_format/waymo_infos_val.pkl)
    pickle_data_path = '/waymo_root/kitti_format/waymo_infos_val.pkl'
    with open(pickle_data_path, 'rb') as f0:
        info = pickle.load(f0)
    # load gt instance segmentation
    # generate by generate_3d_instance_gt.py
    eval_gt_path = '/waymo_root/kitti_format/val_instance_gt.pkl'
    with open(eval_gt_path, 'rb') as f1:
        gt_ins = pickle.load(f1)
    # load pred instance segmentation
    # generate by the model
    pred_ins_path = '/log/results_eval_gt.pkl'
    with open(pred_ins_path, 'rb') as f2:
        pred_ins = pickle.load(f2)
    # change the threshold to get different AP results
    compute_ap(info, gt_ins, pred_ins, iou_threshold=0.7)
if __name__ == "__main__":
main() | 7,685 | 31.987124 | 123 | py |
LSMOL | LSMOL-main/third_party/nspf/optimization.py | """optimize over a network structure."""
import argparse
import logging
import os
import copy
import matplotlib.pyplot as plt
import numpy as np
import open3d as o3d
import pandas as pd
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from model import Neural_Prior
import config
from data import (ArgoverseSceneFlowDataset, KITTISceneFlowDataset,
NuScenesSceneFlowDataset, FlyingThings3D, WaymoSceneFlowDataset)
from utils import scene_flow_metrics, Timers, GeneratorWrap, EarlyStopping
from loss import my_chamfer_fn
from visualize import show_flows, flow_to_rgb, custom_draw_geometry_with_key_callback
device = torch.device("cuda:0")
def solver(
    pc1: torch.Tensor,
    pc2: torch.Tensor,
    flow: torch.Tensor,
    options: argparse.Namespace,
    net: torch.nn.Module,
    i: int,
):
    """Test-time optimization of `net` on a single point-cloud pair.

    The network predicts a per-point flow for `pc1`; it is optimized so that
    the deformed `pc1` matches `pc2` under a truncated Chamfer loss, with an
    optional backward-consistency term (`options.backward_flow`). This is a
    generator: with `options.animation` it yields the current flow each
    iteration, and the best-so-far metrics dict is the generator's *return*
    value (retrieved through `GeneratorWrap`). `flow` is the GT flow, used
    only for metric reporting.
    """
    # the same `net` instance is reused across samples — re-enable grads
    for param in net.parameters():
        param.requires_grad = True
    if options.backward_flow:
        # second network predicts the inverse flow for cycle consistency
        net_inv = copy.deepcopy(net)
        params = [{'params': net.parameters(), 'lr': options.lr, 'weight_decay': options.weight_decay},
                {'params': net_inv.parameters(), 'lr': options.lr, 'weight_decay': options.weight_decay}]
    else:
        params = net.parameters()
    if options.optimizer == "sgd":
        print('using SGD.')
        optimizer = torch.optim.SGD(params, lr=options.lr, momentum=options.momentum, weight_decay=options.weight_decay)
    elif options.optimizer == "adam":
        print("Using Adam optimizer.")
        optimizer = torch.optim.Adam(params, lr=options.lr, weight_decay=0)
    total_losses = []
    chamfer_losses = []
    early_stopping = EarlyStopping(patience=options.early_patience, min_delta=0.0001)
    if options.time:
        timers = Timers()
        timers.tic("solver_timer")
    pc1 = pc1.cuda().contiguous()
    pc2 = pc2.cuda().contiguous()
    flow = flow.cuda().contiguous()
    normal1 = None
    normal2 = None
    # ANCHOR: initialize best metrics
    best_loss_1 = 10.
    best_flow_1 = None
    best_epe3d_1 = 1.
    best_acc3d_strict_1 = 0.
    best_acc3d_relax_1 = 0.
    best_angle_error_1 = 1.
    best_outliers_1 = 1.
    best_epoch = 0
    for epoch in range(options.iters):
        optimizer.zero_grad()
        # forward flow: deform pc1 towards pc2
        flow_pred_1 = net(pc1)
        pc1_deformed = pc1 + flow_pred_1
        loss_chamfer_1, _ = my_chamfer_fn(pc2, pc1_deformed, normal2, normal1)
        if options.backward_flow:
            # cycle term: inverse net should map the deformed cloud back to pc1
            flow_pred_1_prime = net_inv(pc1_deformed)
            pc1_prime_deformed = pc1_deformed - flow_pred_1_prime
            loss_chamfer_1_prime, _ = my_chamfer_fn(pc1_prime_deformed, pc1, normal2, normal1)
        if options.backward_flow:
            loss_chamfer = loss_chamfer_1 + loss_chamfer_1_prime
        else:
            loss_chamfer = loss_chamfer_1
        loss = loss_chamfer
        flow_pred_1_final = pc1_deformed - pc1
        if options.compute_metrics:
            EPE3D_1, acc3d_strict_1, acc3d_relax_1, outlier_1, angle_error_1 = scene_flow_metrics(flow_pred_1_final, flow)
        else:
            EPE3D_1, acc3d_strict_1, acc3d_relax_1, outlier_1, angle_error_1 = 0, 0, 0, 0, 0
        # ANCHOR: get best metrics — best is tracked by lowest loss, not lowest EPE
        if loss <= best_loss_1:
            best_loss_1 = loss.item()
            best_epe3d_1 = EPE3D_1
            best_flow_1 = flow_pred_1_final
            # NOTE(review): duplicate assignment of best_epe3d_1 (harmless).
            best_epe3d_1 = EPE3D_1
            best_acc3d_strict_1 = acc3d_strict_1
            best_acc3d_relax_1 = acc3d_relax_1
            best_angle_error_1 = angle_error_1
            best_outliers_1 = outlier_1
            best_epoch = epoch
        if epoch % 50 == 0:
            logging.info(f"[Sample: {i}]"
                        f"[Ep: {epoch}] [Loss: {loss:.5f}] "
                        f" Metrics: flow 1 --> flow 2"
                        f" [EPE: {EPE3D_1:.3f}] [Acc strict: {acc3d_strict_1 * 100:.3f}%]"
                        f" [Acc relax: {acc3d_relax_1 * 100:.3f}%] [Angle error (rad): {angle_error_1:.3f}]"
                        f" [Outl.: {outlier_1 * 100:.3f}%]")
        total_losses.append(loss.item())
        chamfer_losses.append(loss_chamfer)
        if options.animation:
            yield flow_pred_1_final.detach().cpu().numpy()
        if early_stopping.step(loss):
            break
        loss.backward()
        optimizer.step()
    if options.time:
        timers.toc("solver_timer")
        time_avg = timers.get_avg("solver_timer")
        logging.info(timers.print())
    # ANCHOR: get the best metrics
    # NOTE(review): `time_avg` is only bound when options.time is set, yet the
    # dict below reads it unconditionally (NameError otherwise) — confirm the
    # script is always run with timing enabled.
    info_dict = {
        'loss': best_loss_1,
        'EPE3D_1': best_epe3d_1,
        'acc3d_strict_1': best_acc3d_strict_1,
        'acc3d_relax_1': best_acc3d_relax_1,
        'angle_error_1': best_angle_error_1,
        'outlier_1': best_outliers_1,
        'time': time_avg,
        'epoch': best_epoch,
        'best_flow_1':best_flow_1.detach().cpu().numpy()
    }
    # NOTE: visualization (loss curve + flow rendering)
    if options.visualize:
        fig = plt.figure(figsize=(13, 5))
        ax = fig.gca()
        ax.plot(total_losses, label="loss")
        ax.legend(fontsize="14")
        ax.set_xlabel("Iteration", fontsize="14")
        ax.set_ylabel("Loss", fontsize="14")
        ax.set_title("Loss vs iterations", fontsize="14")
        plt.show()
        idx = 0
        show_flows(pc1[idx], pc2[idx], best_flow_1[idx])
        # ANCHOR: new plot style
        pc1_o3d = o3d.geometry.PointCloud()
        colors_flow = flow_to_rgb(flow[0].cpu().numpy().copy())
        pc1_o3d.points = o3d.utility.Vector3dVector(pc1[0].cpu().numpy().copy())
        pc1_o3d.colors = o3d.utility.Vector3dVector(colors_flow / 255.0)
        custom_draw_geometry_with_key_callback([pc1_o3d]) # Press 'k' to see with dark background.
    return info_dict
def optimize_neural_prior(options, data_loader):
    """Run per-sample test-time optimization of the neural prior over a split.

    For each (pc1, pc2, flow) sample the network is optimized by `solver`,
    the best flow is written to an .npz file, and per-sample metrics are
    accumulated into a results CSV.
    """
    if options.time:
        timers = Timers()
        timers.tic("total_time")
    # NOTE(review): hard-coded, user-specific output root — parameterize
    # before reuse on another machine.
    save_dir_path = f"/data/yuqi_wang/waymo_v1.2/waymo_lsmol/{options.exp_name}"
    outputs = []
    if options.model == 'neural_prior':
        net = Neural_Prior(filter_size=options.hidden_units, act_fn=options.act_fn, layer_size=options.layer_size).cuda()
    else:
        raise Exception("Model not available.")
    output_dir = os.path.join(save_dir_path,'sceneflow_nsfp')
    os.makedirs(output_dir,exist_ok=True)
    for i, data in tqdm(enumerate(data_loader), total=len(data_loader), smoothing=0.9):
        logging.info(f"# Working on sample: {data_loader.dataset.datapath[i]}...")
        pc1, pc2, flow = data
        if options.visualize:
            idx = 0
            # NOTE: ground truth flow
            show_flows(pc1[idx], pc2[idx], flow[idx])
        # `solver` is a generator; GeneratorWrap exposes its return value
        # (the metrics dict) as `.value` once the generator is exhausted.
        solver_generator = GeneratorWrap(solver(pc1, pc2, flow, options, net, i))
        if options.animation:
            #TODO: save frames to make video.
            # NOTE(review): `.value` is only bound after exhaustion; reading
            # it here without iterating looks wrong — confirm the animation
            # path is actually exercised.
            info_dict = solver_generator.value
        else:
            for _ in solver_generator: pass
            info_dict = solver_generator.value
        # Save flow
        name = 'frame_'+ '%06d' % i + '.npz'
        np.savez(os.path.join(output_dir,name),p1=pc1,flow = info_dict['best_flow_1'])
        # Collect results.
        info_dict['filepath'] = data_loader.dataset.datapath[i]
        outputs.append(info_dict)
        print(info_dict)
    if options.time:
        timers.toc("total_time")
        time_avg = timers.get_avg("total_time")
        logging.info(timers.print())
    # NOTE(review): `time_avg` below is undefined when options.time is off
    # (NameError) — same caveat as in `solver`.
    df = pd.DataFrame(outputs)
    df.loc['mean'] = df.mean()
    logging.info(df.mean())
    df.loc['total time'] = time_avg
    df.to_csv('{:}.csv'.format(f"{save_dir_path}/results"))
    logging.info("Finish optimization!")
    return
if __name__ == "__main__":
    # CLI setup: all hyperparameters come from config.add_config
    parser = argparse.ArgumentParser(description="Neural Scene Flow Prior.")
    config.add_config(parser)
    options = parser.parse_args()
    # NOTE(review): hard-coded, user-specific checkpoint root — parameterize
    # before reuse on another machine.
    exp_dir_path = f"/data/yuqi_wang/waymo_v1.2/checkpoints/{options.exp_name}"
    if not os.path.exists(exp_dir_path):
        os.makedirs(exp_dir_path)
    # log to both a per-experiment file and stdout
    logging.basicConfig(
        level=logging.DEBUG,
        format='%(asctime)s [%(levelname)s] - %(message)s',
        handlers=[logging.FileHandler(filename=f"{exp_dir_path}/run.log"), logging.StreamHandler()])
    logging.info(options)
    logging.getLogger("matplotlib").setLevel(logging.WARNING)
    logging.info('---------------------------------------')
    # dump every parsed option for reproducibility
    print_options = vars(options)
    for key in print_options.keys():
        logging.info(key+': '+str(print_options[key]))
    logging.info('---------------------------------------')
    # seed everything for deterministic runs
    torch.backends.cudnn.deterministic = True
    torch.manual_seed(options.seed)
    torch.cuda.manual_seed_all(options.seed)
    np.random.seed(options.seed)
    # dataset selection; every loader feeds frame pairs in fixed order
    if options.dataset == "KITTISceneFlowDataset":
        data_loader = DataLoader(
            KITTISceneFlowDataset(options=options, train=False),
            batch_size=options.batch_size, shuffle=False, drop_last=False, num_workers=12
        )
    elif options.dataset == "FlyingThings3D":
        data_loader = DataLoader(
            FlyingThings3D(options=options, partition="test"),
            batch_size=options.batch_size, shuffle=False, drop_last=False, num_workers=12
        )
    elif options.dataset == "ArgoverseSceneFlowDataset":
        data_loader = DataLoader(
            ArgoverseSceneFlowDataset(options=options, partition=options.partition),
            batch_size=options.batch_size, shuffle=False, drop_last=False, num_workers=12
        )
    elif options.dataset == "NuScenesSceneFlowDataset":
        data_loader = DataLoader(
            NuScenesSceneFlowDataset(options=options, partition="val"),
            batch_size=options.batch_size, shuffle=False, drop_last=False, num_workers=12
        )
    elif options.dataset == "WaymoSceneFlowDataset":
        data_loader = DataLoader(
            WaymoSceneFlowDataset(options=options, train=True),
            batch_size=options.batch_size, shuffle=False, drop_last=False, num_workers=12
        )
    optimize_neural_prior(options, data_loader)
| 10,110 | 33.158784 | 122 | py |
LSMOL | LSMOL-main/third_party/nspf/loss.py | # code mostly copied from pytorch3d
from typing import Union
import torch
import torch.nn.functional as F
from pytorch3d.ops.knn import knn_gather, knn_points
from pytorch3d.structures.pointclouds import Pointclouds
def _validate_chamfer_reduction_inputs(
batch_reduction: Union[str, None], point_reduction: str
):
"""Check the requested reductions are valid.
Args:
batch_reduction: Reduction operation to apply for the loss across the
batch, can be one of ["mean", "sum"] or None.
point_reduction: Reduction operation to apply for the loss across the
points, can be one of ["mean", "sum"].
"""
if batch_reduction is not None and batch_reduction not in ["mean", "sum"]:
raise ValueError('batch_reduction must be one of ["mean", "sum"] or None')
if point_reduction not in ["mean", "sum"]:
raise ValueError('point_reduction must be one of ["mean", "sum"]')
def _handle_pointcloud_input(
    points: Union[torch.Tensor, Pointclouds],
    lengths: Union[torch.Tensor, None],
    normals: Union[torch.Tensor, None],
):
    """
    If points is an instance of Pointclouds, retrieve the padded points tensor
    along with the number of points per batch and the padded normals.
    Otherwise, return the input points (and normals) with the number of points per cloud
    set to the size of the second dimension of `points`.
    """
    if isinstance(points, Pointclouds):
        # Pointclouds already carries per-cloud lengths and optional normals.
        return (
            points.points_padded(),
            points.num_points_per_cloud(),
            points.normals_padded(),
        )
    if not torch.is_tensor(points):
        raise ValueError(
            "The input pointclouds should be either "
            + "Pointclouds objects or torch.Tensor of shape "
            + "(minibatch, num_points, 3)."
        )
    if points.ndim != 3:
        raise ValueError("Expected points to be of shape (N, P, D)")
    X = points
    if lengths is not None and (lengths.ndim != 1 or lengths.shape[0] != X.shape[0]):
        raise ValueError("Expected lengths to be of shape (N,)")
    if lengths is None:
        # Treat every cloud as fully padded: all P points are valid.
        lengths = torch.full(
            (X.shape[0],), X.shape[1], dtype=torch.int64, device=points.device
        )
    if normals is not None and normals.ndim != 3:
        raise ValueError("Expected normals to be of shape (N, P, 3")
    return X, lengths, normals
def my_chamfer_fn(
    x,
    y,
    x_lengths=None,
    y_lengths=None,
    x_normals=None,
    y_normals=None,
    weights=None,
    batch_reduction: Union[str, None] = "mean",
    point_reduction: str = "mean",
):
    """
    Chamfer distance between two pointclouds x and y.

    This is a modified copy of pytorch3d's chamfer_distance: nearest-neighbor
    pairs farther than a fixed 2-unit squared-distance threshold are masked
    out ("truncated" Chamfer), which makes the loss robust to partial overlap.

    Args:
        x: FloatTensor of shape (N, P1, D) or a Pointclouds object representing
            a batch of point clouds with at most P1 points in each batch element,
            batch size N and feature dimension D.
        y: FloatTensor of shape (N, P2, D) or a Pointclouds object representing
            a batch of point clouds with at most P2 points in each batch element,
            batch size N and feature dimension D.
        x_lengths: Optional LongTensor of shape (N,) giving the number of points in each
            cloud in x.
        y_lengths: Optional LongTensor of shape (N,) giving the number of points in each
            cloud in x.
        x_normals: Optional FloatTensor of shape (N, P1, D).
        y_normals: Optional FloatTensor of shape (N, P2, D).
        weights: Optional FloatTensor of shape (N,) giving weights for
            batch elements for reduction operation.
        batch_reduction: Reduction operation to apply for the loss across the
            batch, can be one of ["mean", "sum"] or None.
        point_reduction: Reduction operation to apply for the loss across the
            points, can be one of ["mean", "sum"].
    Returns:
        2-element tuple containing
        - **loss**: Tensor giving the reduced distance between the pointclouds
          in x and the pointclouds in y.
        - **loss_normals**: Tensor giving the reduced cosine distance of normals
          between pointclouds in x and pointclouds in y. Returns None if
          x_normals and y_normals are None.
    """
    _validate_chamfer_reduction_inputs(batch_reduction, point_reduction)
    x, x_lengths, x_normals = _handle_pointcloud_input(x, x_lengths, x_normals)
    y, y_lengths, y_normals = _handle_pointcloud_input(y, y_lengths, y_normals)
    return_normals = x_normals is not None and y_normals is not None
    N, P1, D = x.shape
    P2 = y.shape[1]
    # Check if inputs are heterogeneous and create a lengths mask.
    is_x_heterogeneous = (x_lengths != P1).any()
    is_y_heterogeneous = (y_lengths != P2).any()
    # True where a position is padding (index >= cloud length)
    x_mask = (
        torch.arange(P1, device=x.device)[None] >= x_lengths[:, None]
    ) # shape [N, P1]
    y_mask = (
        torch.arange(P2, device=y.device)[None] >= y_lengths[:, None]
    ) # shape [N, P2]
    if y.shape[0] != N or y.shape[2] != D:
        raise ValueError("y does not have the correct shape.")
    if weights is not None:
        if weights.size(0) != N:
            raise ValueError("weights must be of shape (N,).")
        if not (weights >= 0).all():
            raise ValueError("weights cannot be negative.")
        if weights.sum() == 0.0:
            # All-zero weights: short-circuit with a zero loss that still
            # carries the gradient graph of x.
            weights = weights.view(N, 1)
            if batch_reduction in ["mean", "sum"]:
                return (
                    (x.sum((1, 2)) * weights).sum() * 0.0,
                    (x.sum((1, 2)) * weights).sum() * 0.0,
                )
            return ((x.sum((1, 2)) * weights) * 0.0, (x.sum((1, 2)) * weights) * 0.0)
    cham_norm_x = x.new_zeros(())
    cham_norm_y = x.new_zeros(())
    # K=1 nearest neighbor in each direction
    x_nn = knn_points(x, y, lengths1=x_lengths, lengths2=y_lengths, K=1)
    y_nn = knn_points(y, x, lengths1=y_lengths, lengths2=x_lengths, K=1)
    cham_x = x_nn.dists[..., 0] # (N, P1)
    cham_y = y_nn.dists[..., 0] # (N, P2)
    # NOTE: truncated Chamfer distance. Pairs whose squared NN distance
    # reaches the threshold are folded into the padding mask and zeroed.
    dist_thd = 2
    x_mask[cham_x >= dist_thd] = True
    y_mask[cham_y >= dist_thd] = True
    cham_x[x_mask] = 0.0
    cham_y[y_mask] = 0.0
    # NOTE(review): the two conditional re-maskings below repeat the
    # unconditional zeroing above — redundant but harmless.
    if is_x_heterogeneous:
        cham_x[x_mask] = 0.0
    if is_y_heterogeneous:
        cham_y[y_mask] = 0.0
    if weights is not None:
        cham_x *= weights.view(N, 1)
        cham_y *= weights.view(N, 1)
    if return_normals:
        # Gather the normals using the indices and keep only value for k=0
        x_normals_near = knn_gather(y_normals, x_nn.idx, y_lengths)[..., 0, :]
        y_normals_near = knn_gather(x_normals, y_nn.idx, x_lengths)[..., 0, :]
        # abs() makes the normal loss orientation-agnostic
        cham_norm_x = 1 - torch.abs(
            F.cosine_similarity(x_normals, x_normals_near, dim=2, eps=1e-6)
        )
        cham_norm_y = 1 - torch.abs(
            F.cosine_similarity(y_normals, y_normals_near, dim=2, eps=1e-6)
        )
        if is_x_heterogeneous:
            # pyre-fixme[16]: `int` has no attribute `__setitem__`.
            cham_norm_x[x_mask] = 0.0
        if is_y_heterogeneous:
            cham_norm_y[y_mask] = 0.0
        if weights is not None:
            cham_norm_x *= weights.view(N, 1)
            cham_norm_y *= weights.view(N, 1)
    # Apply point reduction
    cham_x = cham_x.sum(1) # (N,)
    cham_y = cham_y.sum(1) # (N,)
    if return_normals:
        cham_norm_x = cham_norm_x.sum(1) # (N,)
        cham_norm_y = cham_norm_y.sum(1) # (N,)
    if point_reduction == "mean":
        # mean over the *declared* lengths, not the truncated point counts
        cham_x /= x_lengths
        cham_y /= y_lengths
        if return_normals:
            cham_norm_x /= x_lengths
            cham_norm_y /= y_lengths
    if batch_reduction is not None:
        # batch_reduction == "sum"
        cham_x = cham_x.sum()
        cham_y = cham_y.sum()
        if return_normals:
            cham_norm_x = cham_norm_x.sum()
            cham_norm_y = cham_norm_y.sum()
        if batch_reduction == "mean":
            div = weights.sum() if weights is not None else N
            cham_x /= div
            cham_y /= div
            if return_normals:
                cham_norm_x /= div
                cham_norm_y /= div
    cham_dist = cham_x + cham_y
    cham_normals = cham_norm_x + cham_norm_y if return_normals else None
    return cham_dist, cham_normals
| 8,316 | 36.295964 | 88 | py |
LSMOL | LSMOL-main/third_party/nspf/utils.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import torch
import time
from collections import defaultdict
# ANCHOR: metrics computation, follow FlowNet3D metrics....
def scene_flow_metrics(pred, labels):
    """Standard scene-flow evaluation metrics (FlowNet3D convention).

    Args:
        pred: predicted flow, float tensor of shape [B, N, 3].
        labels: ground-truth flow, same shape as `pred`.

    Returns:
        Tuple of Python floats:
        EPE3D: mean end-point (L2) error.
        acc3d_strict: fraction of points with error < 5cm or relative error < 5%.
        acc3d_relax: fraction of points with error < 10cm or relative error < 10%.
        outlier: fraction of points with error > 30cm or relative error > 10%.
        angle_error: mean angle (radians) between predicted and GT flow vectors.
    """
    # Per-point end-point error and error relative to the GT flow magnitude.
    l2_norm = torch.sqrt(torch.sum((pred - labels) ** 2, 2)).cpu()
    labels_norm = torch.sqrt(torch.sum(labels * labels, 2)).cpu()
    relative_err = l2_norm / (labels_norm + 1e-20)  # eps guards static points

    EPE3D = torch.mean(l2_norm).item()  # Mean absolute distance error

    # The comparisons below already produce bool tensors; the legacy
    # torch.BoolTensor(...) wrappers the original used were redundant
    # (deprecated constructor + extra copy) and have been removed.
    # NOTE: Acc_5
    error_lt_5 = l2_norm < 0.05
    relative_err_lt_5 = relative_err < 0.05
    acc3d_strict = torch.mean((error_lt_5 | relative_err_lt_5).float()).item()

    # NOTE: Acc_10
    error_lt_10 = l2_norm < 0.1
    relative_err_lt_10 = relative_err < 0.1
    acc3d_relax = torch.mean((error_lt_10 | relative_err_lt_10).float()).item()

    # NOTE: outliers
    l2_norm_gt_3 = l2_norm > 0.3
    relative_err_gt_10 = relative_err > 0.1
    outlier = torch.mean((l2_norm_gt_3 | relative_err_gt_10).float()).item()

    # NOTE: angle error between unit flow vectors; the eps clamp keeps acos
    # finite at +-1, and NaNs from zero-norm flows are zeroed out.
    unit_label = labels / labels.norm(dim=2, keepdim=True)
    unit_pred = pred / pred.norm(dim=2, keepdim=True)
    eps = 1e-7
    dot_product = (unit_label * unit_pred).sum(2).clamp(min=-1 + eps, max=1 - eps)
    dot_product[dot_product != dot_product] = 0  # Remove NaNs
    angle_error = torch.acos(dot_product).mean().item()

    return EPE3D, acc3d_strict, acc3d_relax, outlier, angle_error
# ANCHOR: timer!
class Timers(object):
    """Named collection of `Timer` objects, created lazily on first use."""
    def __init__(self):
        self.timers = defaultdict(Timer)
    def tic(self, key):
        """Start (or restart) the timer registered under `key`."""
        self.timers[key].tic()
    def toc(self, key):
        """Stop the timer under `key` and accumulate the elapsed time."""
        self.timers[key].toc()
    def print(self, key=None):
        """Print the average of one timer, or of all timers when `key` is None."""
        if key is not None:
            print("Average time for {:}: {:}".format(key, self.timers[key].avg()))
            return
        for name, timer in self.timers.items():
            print("Average time for {:}: {:}".format(name, timer.avg()))
    def get_avg(self, key):
        """Average elapsed time recorded under `key`."""
        return self.timers[key].avg()
class Timer(object):
    """Accumulating stopwatch: total wall time over repeated tic/toc calls."""
    def __init__(self):
        self.reset()
    def tic(self):
        """Mark the start of a timed section."""
        self.start_time = time.time()
    def toc(self, average=True):
        """Close the section opened by `tic` and add its duration to the total."""
        self.diff = time.time() - self.start_time
        self.total_time += self.diff
        self.calls += 1
    def total(self):
        """Total accumulated time in seconds."""
        return self.total_time
    def avg(self):
        """Mean duration per tic/toc pair."""
        return self.total_time / float(self.calls)
    def reset(self):
        """Clear all accumulated state."""
        self.total_time = 0.
        self.start_time = 0.
        self.diff = 0.
        self.calls = 0
# ANCHOR: generator
class GeneratorWrap:
    """Wrap a generator so its `return` value can be captured.

    After the wrapper has been fully iterated, the wrapped generator's
    return value is available as ``self.value`` (PEP 380 ``yield from``
    semantics). Reading ``value`` before exhaustion raises AttributeError.
    """
    def __init__(self, gen):
        self.gen = gen
    def __iter__(self):
        # `yield from` forwards every yielded item and binds the generator's
        # return value once it is exhausted.
        self.value = yield from self.gen
# ANCHOR: early stopping strategy
class EarlyStopping(object):
    """Stop optimization once the monitored metric stops improving.

    `step` returns True when training should stop: after `patience`
    consecutive non-improving observations, or immediately on a NaN metric.
    With `patience` == 0, early stopping is disabled entirely.
    """
    def __init__(self, mode='min', min_delta=0, patience=10, percentage=False):
        self.mode = mode
        self.min_delta = min_delta
        self.patience = patience
        self.best = None           # best metric seen so far
        self.num_bad_epochs = 0    # consecutive observations without improvement
        self.is_better = None
        self._init_is_better(mode, min_delta, percentage)
        if patience == 0:
            # patience 0 disables stopping: never report "stop"
            self.is_better = lambda a, b: True
            self.step = lambda a: False

    def step(self, metrics):
        """Record one observation; return True when training should stop.

        NOTE(review): `torch.isnan` expects a tensor-like metric — plain
        Python floats raise here; confirm at call sites.
        """
        if self.best is None:
            # first observation becomes the baseline
            self.best = metrics
            return False
        if torch.isnan(metrics):
            # diverged — stop immediately
            return True
        if self.is_better(metrics, self.best):
            self.best = metrics
            self.num_bad_epochs = 0
        else:
            self.num_bad_epochs += 1
        return self.num_bad_epochs >= self.patience

    def _init_is_better(self, mode, min_delta, percentage):
        """Build the comparison closure for the configured mode/threshold."""
        if mode not in {'min', 'max'}:
            raise ValueError('mode ' + mode + ' is unknown!')
        if percentage:
            # improvement threshold expressed as a % of the current best
            if mode == 'min':
                self.is_better = lambda a, best: a < best - (best * min_delta / 100)
            else:
                self.is_better = lambda a, best: a > best + (best * min_delta / 100)
        else:
            # absolute improvement threshold
            if mode == 'min':
                self.is_better = lambda a, best: a < best - min_delta
            else:
                self.is_better = lambda a, best: a > best + min_delta
| 4,540 | 29.273333 | 94 | py |
LSMOL | LSMOL-main/third_party/nspf/model.py | import torch
class Neural_Prior(torch.nn.Module):
    """Coordinate MLP used as a runtime scene-flow prior.

    Maps each input point to an output of the same dimensionality through
    ``layer_size`` hidden layers of width ``filter_size``; with
    ``layer_size`` < 1 the model degenerates to a single linear layer.
    """
    def __init__(self, dim_x=3, filter_size=128, act_fn='relu', layer_size=8):
        super().__init__()
        self.layer_size = layer_size
        self.nn_layers = torch.nn.ModuleList([])
        # pick the activation class once; unknown names add no activation,
        # matching the original construction exactly
        if act_fn == 'relu':
            make_act = torch.nn.ReLU
        elif act_fn == 'sigmoid':
            make_act = torch.nn.Sigmoid
        else:
            make_act = None
        if layer_size >= 1:
            # input projection: dim_x -> hidden width
            self.nn_layers.append(torch.nn.Sequential(torch.nn.Linear(dim_x, filter_size)))
            if make_act is not None:
                self.nn_layers.append(make_act())
            # hidden stack: (layer_size - 1) additional hidden layers
            for _ in range(layer_size - 1):
                self.nn_layers.append(torch.nn.Sequential(torch.nn.Linear(filter_size, filter_size)))
                if make_act is not None:
                    self.nn_layers.append(make_act())
            # output head back to the input dimensionality
            self.nn_layers.append(torch.nn.Linear(filter_size, dim_x))
        else:
            # degenerate configuration: a single linear map
            self.nn_layers.append(torch.nn.Sequential(torch.nn.Linear(dim_x, dim_x)))

    def forward(self, x):
        """Apply the MLP pointwise: [B, N, dim_x] -> [B, N, dim_x]."""
        for layer in self.nn_layers:
            x = layer(x)
        return x
| 1,322 | 36.8 | 101 | py |
LSMOL | LSMOL-main/third_party/nspf/data.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import glob
import numpy as np
from torch.utils.data import Dataset
class FlyingThings3D(Dataset):
    """FlyingThings3D scene-flow dataset stored as per-pair .npz files.

    Each sample is (pc1, pc2, flow): two point clouds sampled down to
    ``options.num_points`` (or all of pc1 when ``options.use_all_points``),
    and the GT flow for the sampled pc1 points; everything is re-centered
    on pc1's mean. Known-bad sample files are filtered out at construction.
    """
    def __init__(self, options, partition='test'):
        self.options = options
        self.partition = partition
        if self.partition == 'train':
            self.datapath = glob.glob(f"{self.options.dataset_path}/TRAIN*.npz")
        else:
            self.datapath = glob.glob(f"{self.options.dataset_path}/TEST*.npz")
        self.cache = {}
        # raw (unsampled) samples are memoized up to this many entries
        self.cache_size = 30000
        # Bad data.. (samples with too few / zero valid points after masking)
        bad_data = ["TRAIN_C_0140_left_0006-0",
                    "TRAIN_A_0658_left_0013-0", #pc1 has only 204 valid points (mask).
                    "TRAIN_B_0424_left_0011-0", #pc1 has only 0 valid points.
                    "TRAIN_A_0364_left_0008-0", #pc1 has only 0 valid points.
                    "TRAIN_A_0588_right_0006-0", #pc1 has only 1342 valid points.
                    "TRAIN_A_0074_left_0010-0", #pc1 has only 549 valid points.
                    "TRAIN_B_0727_left_0012-0", #pc1 has only 1456 valid points.
                    "TRAIN_A_0521_right_0012-0",
                    "TRAIN_B_0424_left_0010-0",
                    "TRAIN_A_0074_left_0009-0",
                    "TRAIN_B_0723_right_0006-0",
                    "TRAIN_A_0364_left_0007-0",
                    "TRAIN_B_0424_right_0009-0",
                    "TRAIN_A_0658_left_0012-0",
                    "TRAIN_A_0398_left_0007-0",
                    "TRAIN_A_0433_right_0006-0",
                    "TRAIN_B_0053_left_0009-0",
                    "TRAIN_A_0577_left_0009-0",
                    "TRAIN_A_0074_left_0011-0",
                    "TRAIN_A_0074_left_0011-0",
                    "TRAIN_B_0021_left_0009-0",
                    "TRAIN_B_0727_right_0011-0",
                    "TRAIN_B_0609_right_0009-0",
                    "TRAIN_B_0189_right_0012-0",
                    "TRAIN_B_0189_left_0012-0",
                    "TRAIN_B_0053_left_0011-0",
                    "TRAIN_B_0609_right_0010-0",
                    "TRAIN_B_0609_right_0011-0",
                    "TRAIN_A_0369_right_0009-0",
                    "TRAIN_A_0557_right_0010-0",
                    "TRAIN_A_0047_right_0009-0",
                    "TRAIN_A_0362_right_0008-0",
                    "TRAIN_A_0518_left_0006-0",
                    "TRAIN_A_0074_left_0012-0",
                    "TRAIN_A_0531_right_0006-0",
                    "TRAIN_B_0021_left_0010-0",
                    "TRAIN_B_0189_left_0011-0",
                    "TRAIN_A_0658_left_0014-0",
                    "TRAIN_B_0424_right_0010-0",
                    "TRAIN_A_0369_right_0008-0",
                    "TRAIN_A_0364_left_0009-0",
                    "TEST_A_0149_right_0012-0",
                    "TEST_A_0123_right_0008-0",
                    "TEST_A_0123_right_0009-0",
                    "TEST_A_0149_right_0013-0",
                    "TEST_A_0149_left_0012-0",
                    "TEST_A_0149_left_0011-0",
                    "TEST_A_0023_right_0009-0"]
        self.datapath = [d for d in self.datapath if not any(bad in d for bad in bad_data)]
        print(f"# {self.partition} samples: {len(self.datapath)}")
    def __len__(self):
        return len(self.datapath)
    def __getitem__(self, index):
        # Raw loading is cached; the random sampling below is re-done on
        # every access, so repeated reads of one index differ.
        if index in self.cache:
            pc1, pc2, color1, color2, flow = self.cache[index]
        else:
            filename = self.datapath[index]
            with open(filename, 'rb') as fp:
                data = np.load(fp)
                # valid_mask1 filters pc1 (and its color/flow) to valid points
                mask1 = data['valid_mask1']
                pc1 = data['points1'].astype('float32')[mask1]
                pc2 = data['points2'].astype('float32')
                color1 = data['color1'].astype('float32')[mask1]
                color2 = data['color2'].astype('float32')
                flow = data['flow'].astype('float32')[mask1]
            if len(self.cache) < self.cache_size:
                self.cache[index] = (pc1, pc2, color1, color2, flow)
        n1 = pc1.shape[0]
        n2 = pc2.shape[0]
        num_points = self.options.num_points
        if self.options.use_all_points:
            num_points = n1
        # sample without replacement when possible; otherwise pad by
        # resampling existing indices with replacement
        if n1 >= num_points:
            sample_idx1 = np.random.choice(n1, num_points, replace=False)
        else:
            sample_idx1 = np.concatenate((np.arange(n1), np.random.choice(n1, num_points - n1, replace=True)), axis=-1)
        if n2 >= num_points:
            sample_idx2 = np.random.choice(n2, num_points, replace=False)
        else:
            sample_idx2 = np.concatenate((np.arange(n2), np.random.choice(n2, num_points - n2, replace=True)), axis=-1)
        pc1 = pc1[sample_idx1, :]
        pc2 = pc2[sample_idx2, :]
        flow = flow[sample_idx1, :]
        # re-center both clouds on pc1's centroid (fancy indexing above
        # produced copies, so the cached arrays are not mutated)
        pc1_center = np.mean(pc1, 0)
        pc1 -= pc1_center
        pc2 -= pc1_center
        return pc1, pc2, flow
class KITTISceneFlowDataset(Dataset):
    """KITTI scene-flow dataset stored as .npz files.

    The first 100 files (sorted) are the train split; the remainder are the
    evaluation split. Each sample is (pc1, pc2, flow); when
    ``options.use_all_points`` is off, both clouds (and pc1's flow) are
    randomly sampled to ``options.num_points``.
    """
    def __init__(self, options, train=False):
        self.options = options
        self.train = train
        all_files = sorted(glob.glob(f"{self.options.dataset_path}/*.npz"))
        # fixed split: first 100 clips train, the rest evaluate
        self.datapath = all_files[:100] if self.train else all_files[100:]
        self.cache = {}
        self.cache_size = 30000

    def __len__(self):
        return len(self.datapath)

    @staticmethod
    def _choose(n, num_points):
        """Random index set of size num_points over n points (pad by resampling)."""
        if n >= num_points:
            return np.random.choice(n, num_points, replace=False)
        extra = np.random.choice(n, num_points - n, replace=True)
        return np.concatenate((np.arange(n), extra), axis=-1)

    def __getitem__(self, index):
        fn = self.datapath[index]
        with open(fn, 'rb') as fp:
            data = np.load(fp)
            pc1 = data['pos1'].astype('float32')
            pc2 = data['pos2'].astype('float32')
            flow = data['gt'].astype('float32')
        if not self.options.use_all_points:
            num_points = self.options.num_points
            # sample pc1 (and its flow) and pc2 independently
            sample_idx1 = self._choose(pc1.shape[0], num_points)
            sample_idx2 = self._choose(pc2.shape[0], num_points)
            pc1 = pc1[sample_idx1, :].astype('float32')
            pc2 = pc2[sample_idx2, :].astype('float32')
            flow = flow[sample_idx1, :].astype('float32')
        return pc1, pc2, flow
class ArgoverseSceneFlowDataset(Dataset):
    """Argoverse scene-flow samples preprocessed into .npz files with keys
    ``pc1``, ``pc2``, ``flow``, ``mask1_tracks_flow`` and ``mask2_tracks_flow``
    (indices of points that belong to tracked — potentially non-rigid — objects).

    ``__getitem__`` draws a biased sample: roughly 80% of the returned points
    come from tracked objects and 20% from the static background.
    """

    def __init__(self, options, partition='val', width=1):
        self.options = options
        self.partition = partition
        self.width = width
        if self.partition == 'train':
            self.datapath = sorted(glob.glob(f"{self.options.dataset_path}/training/*/*/*"))
        elif self.partition == 'test':
            self.datapath = sorted(glob.glob(f"{self.options.dataset_path}/testing/*/*/*"))
        elif self.partition == 'val':
            self.datapath = sorted(glob.glob(f"{self.options.dataset_path}/val/*/*"))

    def __len__(self):
        return len(self.datapath)

    def __getitem__(self, index):
        filename = self.datapath[index]
        # NOTE(review): log_id / dataset_dir are computed but unused — kept
        # for parity with the original code.
        log_id = filename.split('/')[-3]
        dataset_dir = filename.split(log_id)[0]
        with open(filename, 'rb') as fp:
            data = np.load(fp)
            pc1 = data['pc1']
            pc2 = data['pc2']
            flow = data['flow']
            mask1_flow = data['mask1_tracks_flow']
            mask2_flow = data['mask2_tracks_flow']
        n1 = len(pc1)
        n2 = len(pc2)
        full_mask1 = np.arange(n1)
        full_mask2 = np.arange(n2)
        mask1_noflow = np.setdiff1d(full_mask1, mask1_flow, assume_unique=True)
        mask2_noflow = np.setdiff1d(full_mask2, mask2_flow, assume_unique=True)
        if self.options.use_all_points:
            num_points = n1
        else:
            num_points = self.options.num_points
        nonrigid_rate = 0.8
        rigid_rate = 0.2
        if n1 >= num_points:
            if int(num_points * nonrigid_rate) > len(mask1_flow):
                num_points1_flow = len(mask1_flow)
                num_points1_noflow = num_points - num_points1_flow
            else:
                num_points1_flow = int(num_points * nonrigid_rate)
                num_points1_noflow = int(num_points * rigid_rate) + 1
            # Some argoverse samples have too few static points for a
            # no-replacement draw; fall back to drawing with replacement.
            try:
                sample_idx1_noflow = np.random.choice(mask1_noflow, num_points1_noflow, replace=False)
            except ValueError:
                sample_idx1_noflow = np.random.choice(mask1_noflow, num_points1_noflow, replace=True)
            sample_idx1_flow = np.random.choice(mask1_flow, num_points1_flow, replace=False)
            sample_idx1 = np.hstack((sample_idx1_flow, sample_idx1_noflow))
        else:
            # BUG FIX: previously there was no branch for n1 < num_points, so
            # sample_idx1 was undefined and this method raised NameError. Pad
            # by resampling with replacement, mirroring NuScenesSceneFlowDataset.
            sample_idx1 = np.concatenate((np.arange(n1), np.random.choice(n1, num_points - n1, replace=True)), axis=-1)
        pc1_ = pc1[sample_idx1, :]
        flow_ = flow[sample_idx1, :]
        pc1 = pc1_.astype('float32')
        flow = flow_.astype('float32')
        if n2 >= num_points:
            if int(num_points * nonrigid_rate) > len(mask2_flow):
                num_points2_flow = len(mask2_flow)
                num_points2_noflow = num_points - num_points2_flow
            else:
                num_points2_flow = int(num_points * nonrigid_rate)
                num_points2_noflow = int(num_points * rigid_rate) + 1
            try:
                sample_idx2_noflow = np.random.choice(mask2_noflow, num_points2_noflow, replace=False)
            except ValueError:
                sample_idx2_noflow = np.random.choice(mask2_noflow, num_points2_noflow, replace=True)
            sample_idx2_flow = np.random.choice(mask2_flow, num_points2_flow, replace=False)
            sample_idx2 = np.hstack((sample_idx2_flow, sample_idx2_noflow))
        else:
            # BUG FIX: same missing branch as for pc1 above.
            sample_idx2 = np.concatenate((np.arange(n2), np.random.choice(n2, num_points - n2, replace=True)), axis=-1)
        pc2_ = pc2[sample_idx2, :]
        pc2 = pc2_.astype('float32')
        return pc1, pc2, flow
# Add Waymo Datasets
class WaymoSceneFlowDataset(Dataset):
    """Waymo scene-flow pairs stored as .npz files (keys ``p1``, ``p2``, ``gt``).

    NOTE(review): ``datapath``/``cache`` are only initialized when
    ``train=True``; constructing this dataset with ``train=False`` makes
    ``__len__``/``__getitem__`` fail with AttributeError. TODO confirm the
    intended evaluation split.
    """
    def __init__(self, options, train=False):
        self.options = options
        self.train = train
        if self.train:
            self.datapath = sorted(glob.glob(f"{self.options.dataset_path}/point/*.npz"))
            self.cache = {}
            self.cache_size = 30000
    def __len__(self):
        return len(self.datapath)
    def __getitem__(self, index):
        fn = self.datapath[index]
        with open(fn, 'rb') as fp:
            data = np.load(fp)
            pc1 = data['p1'].astype('float32')
            pc2 = data['p2'].astype('float32')
            # Only the first three columns of ``gt`` are used as the 3D flow.
            flow = data['gt'].astype('float32')[:,:3]
        n1 = pc1.shape[0]
        n2 = pc2.shape[0]
        if not self.options.use_all_points:
            num_points = self.options.num_points
            # Sample without replacement when possible; otherwise keep every
            # point and pad by resampling with replacement.
            if n1 >= num_points:
                sample_idx1 = np.random.choice(n1, num_points, replace=False)
            else:
                sample_idx1 = np.concatenate((np.arange(n1), np.random.choice(n1, num_points - n1, replace=True)), axis=-1)
            if n2 >= num_points:
                sample_idx2 = np.random.choice(n2, num_points, replace=False)
            else:
                sample_idx2 = np.concatenate((np.arange(n2), np.random.choice(n2, num_points - n2, replace=True)), axis=-1)
            pc1 = pc1[sample_idx1, :].astype('float32')
            pc2 = pc2[sample_idx2, :].astype('float32')
            flow = flow[sample_idx1, :].astype('float32')
            # NOTE(review): this branch returns a 4-tuple (incl. sample_idx1)
            # while the use_all_points path below returns a 3-tuple — callers
            # must unpack differently depending on the flag. Verify intent.
            return pc1, pc2, flow, sample_idx1
        return pc1, pc2, flow
class NuScenesSceneFlowDataset(Dataset):
    """Preprocessed nuScenes scene-flow pairs (.npz with keys ``pc1``, ``pc2``,
    ``flow``, ``mask1_tracks_flow``, ``mask2_tracks_flow``).

    Sampling is biased: roughly 80% of the drawn points come from tracked
    objects and 20% from the static background.
    """
    def __init__(self, options, partition="val", width=1):
        self.options = options
        self.partition = partition
        self.width = width
        if self.partition == "train":
            self.datapath = sorted(glob.glob(f"{self.options.dataset_path}/train/*"))
        elif self.partition == "val":
            self.datapath = sorted(glob.glob(f"{self.options.dataset_path}/val/*"))
        # Bad data. Pretty noisy samples.
        bad_data = ["d02c6908713147e9a4ac5d50784815d3",
                    "ae989fac82d248b98ce769e753f60f87",
                    "365e72358ddb405e953cdad865815966",
                    "4a16cf07faf54dbf93e0c4c083b38c63",
                    "44fd8959bd574d7fb6773a9fe341282e",
                    "c6879ea1c3d845eebd7825e6e454bee1",
                    "359023a812c24fbcae41334842672dd2",
                    "aa5d89b9f988450eaa442070576913b7",
                    "c4344682d52f4578b5aa983612764e9b",
                    "6fd5607c93fa4b569eb2bd0d7f30f9a0",
                    "5ab1e1f0829541269856edca0f7517da",
                    "1c01cb36784e44fc8a5ef7d9689ef2fd",
                    "15a106fd45604b6bb85d67c1e5033022",
                    "6803c2feca4b40e78434cf209ee8c2da",
                    "6737346ecd5144d28cef656f17953959",
                    ]
        self.datapath = [d for d in self.datapath if not any(bad in d for bad in bad_data)]
    def __getitem__(self, index):
        """Return a biased random subsample ``(pc1, pc2, flow)`` of one pair."""
        filename = self.datapath[index]
        with open(filename, 'rb') as fp:
            data = np.load(fp)
            pc1 = data['pc1'].astype('float32')
            pc2 = data['pc2'].astype('float32')
            flow = data['flow'].astype('float32')
            mask1_flow = data['mask1_tracks_flow']
            mask2_flow = data['mask2_tracks_flow']
        n1 = len(pc1)
        n2 = len(pc2)
        full_mask1 = np.arange(n1)
        full_mask2 = np.arange(n2)
        # Static background = everything not on a tracked object.
        mask1_noflow = np.setdiff1d(full_mask1, mask1_flow, assume_unique=True)
        mask2_noflow = np.setdiff1d(full_mask2, mask2_flow, assume_unique=True)
        if self.options.use_all_points:
            num_points = n1
        else:
            num_points = self.options.num_points
        nonrigid_rate = 0.8
        rigid_rate = 0.2
        if n1 >= num_points:
            # When there are fewer tracked points than the 80% budget, take
            # them all and fill the remainder from the background.
            if int(num_points * nonrigid_rate) > len(mask1_flow):
                num_points1_flow = len(mask1_flow)
                num_points1_noflow = num_points - num_points1_flow
            else:
                num_points1_flow = int(num_points * nonrigid_rate)
                num_points1_noflow = int(num_points * rigid_rate) + 1
            sample_idx1_flow = np.random.choice(mask1_flow, num_points1_flow, replace=False)
            try:  # ANCHOR: nuscenes has some cases without nonrigid flows.
                sample_idx1_noflow = np.random.choice(mask1_noflow, num_points1_noflow, replace=False)
            except:
                sample_idx1_noflow = np.random.choice(mask1_noflow, num_points1_noflow, replace=True)
            sample_idx1 = np.hstack((sample_idx1_flow, sample_idx1_noflow))
        else:
            # Too few points overall: keep all, pad with replacement.
            sample_idx1 = np.concatenate((np.arange(n1), np.random.choice(n1, num_points - n1, replace=True)), axis=-1)
        pc1_ = pc1[sample_idx1, :]
        flow_ = flow[sample_idx1, :]
        pc1 = pc1_.astype('float32')
        flow = flow_.astype('float32')
        if n2 >= num_points:
            if int(num_points * nonrigid_rate) > len(mask2_flow):
                num_points2_flow = len(mask2_flow)
                num_points2_noflow = num_points - num_points2_flow
            else:
                num_points2_flow = int(num_points * nonrigid_rate)
                num_points2_noflow = int(num_points * rigid_rate) + 1
            sample_idx2_flow = np.random.choice(mask2_flow, num_points2_flow, replace=False)
            # NOTE(review): unlike the pc1 side there is no replacement
            # fallback here — a frame-2 background smaller than the budget
            # raises ValueError. Verify whether a try/except was intended.
            sample_idx2_noflow = np.random.choice(mask2_noflow, num_points2_noflow, replace=False)
            sample_idx2 = np.hstack((sample_idx2_flow, sample_idx2_noflow))
        else:
            sample_idx2 = np.concatenate((np.arange(n2), np.random.choice(n2, num_points - n2, replace=True)), axis=-1)
        pc2_ = pc2[sample_idx2, :]
        pc2 = pc2_.astype('float32')
        return pc1, pc2, flow
    def __len__(self):
        return len(self.datapath)
| 16,165 | 39.720403 | 123 | py |
LSMOL | LSMOL-main/third_party/nspf/config.py | import argparse
import os
import numpy as np
import torch
def str2bool(v):
    """Parse a human-friendly boolean string for argparse.

    Accepts yes/no, true/false, t/f, y/n and 1/0 (case-insensitive).

    Raises:
        argparse.ArgumentTypeError: for any unrecognized value.
    """
    token = v.lower()
    truthy = {'yes', 'true', 't', 'y', '1'}
    falsy = {'no', 'false', 'f', 'n', '0'}
    if token in truthy:
        return True
    if token in falsy:
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')
def init_dirs(options):
    """Create ``checkpoints/<exp_name>/models`` and back up the main sources.

    ``os.makedirs(..., exist_ok=True)`` creates all intermediate directories
    in one call, replacing the original three explicit existence checks
    (same resulting directory tree).

    Args:
        options: namespace with an ``exp_name`` attribute.
    """
    os.makedirs(f"checkpoints/{options.exp_name}/models", exist_ok=True)
    # Snapshot the code that produced this experiment. Best effort only:
    # os.system ignores failures (e.g. when a file does not exist), matching
    # the original behavior.
    for fname in ("main.py", "model.py", "data.py"):
        os.system(f"cp {fname} checkpoints/{options.exp_name}/{fname}.backup")
def set_deterministic_seeds(options):
    """Seed torch (CPU and all GPUs) and numpy, and force deterministic cuDNN.

    Args:
        options: namespace with an integer ``seed`` attribute.
    """
    seed = options.seed
    torch.backends.cudnn.deterministic = True
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    np.random.seed(seed)
def add_config(parser):
    """Register all command-line options of the neural-prior pipeline on ``parser``."""
    # --- experiment / optimization ---
    parser.add_argument('--exp_name', type=str, default='neural_scene_flow_prior', metavar='N', help='Name of the experiment.')
    parser.add_argument('--num_points', type=int, default=2048, help='Point number [default: 2048].')
    parser.add_argument('--batch_size', type=int, default=1, metavar='batch_size', help='Batch size.')
    parser.add_argument('--iters', type=int, default=5000, metavar='N', help='Number of iterations to optimize the model.')
    parser.add_argument('--optimizer', type=str, default='adam', choices=('sgd', 'adam'), help='Optimizer.')
    parser.add_argument('--lr', type=float, default=0.008, metavar='LR', help='Learning rate.')
    parser.add_argument('--momentum', type=float, default=0, metavar='M', help='SGD momentum (default: 0.9).')
    # NOTE(review): despite its help text, setting --no_cuda disables CUDA.
    parser.add_argument('--no_cuda', action='store_true', default=False, help='Enables CUDA training.')
    parser.add_argument('--seed', type=int, default=1234, metavar='S', help='Random seed (default: 1234).')
    # --- dataset selection ---
    parser.add_argument('--dataset', type=str, default='KITTISceneFlowDataset',
                        choices=['FlyingThings3D', 'KITTISceneFlowDataset', 'ArgoverseSceneFlowDataset',
                                 'NuScenesSceneFlowDataset','WaymoSceneFlowDataset'], metavar='N',
                        help='Dataset to use.')
    parser.add_argument('--dataset_path', type=str, default='./dataset/kitti', metavar='N',
                        help='Dataset path.')
    # --- evaluation / visualization ---
    parser.add_argument('--compute_metrics', action='store_true', default=True, help='whether compute metrics or not.')
    parser.add_argument('--visualize', action='store_true', default=False, help='Show visuals.')
    parser.add_argument('--animation', action='store_true', default=False, help='create animations.')
    parser.add_argument('--time', dest='time', action='store_true', default=True, help='Count the execution time of each step.')
    parser.add_argument('--partition', type=str, default='val', metavar='p', help='Model to use.')
    # For neural prior
    parser.add_argument('--model', type=str, default='neural_prior', choices=['neural_prior'], metavar='N', help='Model to use.')
    parser.add_argument('--weight_decay', type=float, default=1e-4, metavar='N', help='Weight decay.')
    parser.add_argument('--hidden_units', type=int, default=128, metavar='N', help='Number of hidden units in neural prior')
    parser.add_argument('--layer_size', type=int, default=8, metavar='N', help='Number of layers in neural prior')
    parser.add_argument('--use_all_points', action='store_true', default=False, help='use all the points or not.')
    parser.add_argument('--act_fn', type=str, default='relu', metavar='AF', help='activation function for neural prior.')
    parser.add_argument('--backward_flow', action='store_true', default=True, help='use backward flow or not.')
    parser.add_argument('--early_patience', type=int, default=100, help='patience in early stopping.')
| 4,058 | 60.5 | 129 | py |
LSMOL | LSMOL-main/third_party/nspf/process/preprocess_sf_nuscenes.py | import os
from dataclasses import dataclass
from pathlib import Path
from typing import List, Optional, Tuple

import numpy as np
import open3d as o3d
from argoverse.utils.cuboid_interior import (filter_point_cloud_to_bbox_3D_vectorized)
from argoverse.utils.se3 import SE3
from argoverse.utils.transform import quat2rotmat
from nuscenes.nuscenes import NuScenes
from nuscenes.utils.data_classes import LidarPointCloud
from nuscenes.utils.splits import create_splits_scenes
from pyquaternion import Quaternion
from torch.utils.data import Dataset
from tqdm import tqdm
@dataclass
class LidarData:
    """One lidar sweep in the global frame plus its annotations."""
    ply: np.ndarray              # (N, 3) points in the global frame
    pose: SE3                    # ego pose (global <- ego) for this sweep
    timestamp: int               # actually the sweep index within the scene, not a raw timestamp
    scene_token: str             # nuScenes scene token this sweep belongs to
    boxes: np.ndarray            # NOTE(review): annotated np.ndarray but holds the list of Box objects from get_sample_data
    objects: np.ndarray          # NOTE(review): list of (8, 3) cuboid-corner arrays, one per box
    annotation_token: list       # sample_annotation tokens for this sweep's sample
class PreProcessNuScenesDataset(Dataset):
    """Converts raw nuScenes lidar into pseudo-scene-flow training pairs.

    For each scene, the lidar sweeps are loaded into the global frame and a
    per-point flow between the first two sweeps is derived: object motion
    from tracked cuboids, background motion from ego-motion refined by ICP.
    The pair is written to ``<save_fi_name>/<scene_token>/<t0>_<t1>.npz``.

    NOTE(review): ``__getitem__`` ignores ``index`` for scene selection and
    consumes ``self.iter_tokens`` instead, so scenes must be visited strictly
    in order (as done by ``enumerate(dataset)`` in ``__main__``).
    """
    def __init__(
        self,
        save_fi_name,
        nusc, scene_tokens,
        channel = 'LIDAR_TOP',
        sample_every = 2,
        min_dist = 3.0,
        max_dist = 50.0,
        max_height = 4.0,
        margin = 0.6,
        max_correspondence_distance=1.0
    ):
        """
        Args:
            save_fi_name: output root directory for the generated .npz files.
            nusc: an initialized ``NuScenes`` instance.
            scene_tokens: iterable of scene tokens to process.
            channel: lidar channel to read.
            sample_every: keep every n-th sweep (20 Hz -> 10 Hz by default).
            min_dist/max_dist: radial crop around the sensor (removes ego
                returns and far-away noise).
            max_height: drop points above this height (meters).
            margin: enlargement (meters) applied to each cuboid so that tight
                boxes do not lose boundary points.
            max_correspondence_distance: ICP correspondence threshold.
        """
        self.save_fi_name = save_fi_name
        self.scene_tokens = sorted(scene_tokens)
        self.nusc = nusc
        self.min_dist = min_dist
        self.max_dist = max_dist
        self.max_height = max_height
        self.margin = margin
        self.max_correspondence_distance = max_correspondence_distance
        # For every scene, collect its lidar sample_data tokens (subsampled).
        self.sd_token_list = []
        for scene_token in tqdm(self.scene_tokens):
            # Get records from DB.
            scene_rec = nusc.get('scene', scene_token)
            start_sample_rec = nusc.get('sample', scene_rec['first_sample_token'])
            sd_rec = nusc.get('sample_data', start_sample_rec['data'][channel])
            # Walk the linked list of sweeps for this scene.
            cur_sd_rec = sd_rec
            sd_tokens = []
            sd_tokens.append(cur_sd_rec['token'])
            while cur_sd_rec['next'] != '':
                cur_sd_rec = nusc.get('sample_data', cur_sd_rec['next'])
                sd_tokens.append(cur_sd_rec['token'])
            sd_tokens = sd_tokens[0:-1:sample_every]
            self.sd_token_list.append(sd_tokens)
        self.iter_tokens = iter(self.scene_tokens)
    def __getitem__(self, index):
        scene_token = next(self.iter_tokens)
        sd_tokens = self.sd_token_list[index]
        # SECTION 1: load all point clouds of the scene into the global frame.
        # NOTE(review): every sweep is loaded although only the first two are
        # used below — confirm whether more pairs per scene were intended.
        lidar_sweeps = []
        for i in tqdm(range(len(sd_tokens))):
            sc_rec = self.nusc.get('sample_data', sd_tokens[i])
            sample_rec = self.nusc.get('sample', sc_rec['sample_token'])
            lidar_token = sc_rec['token']
            lidar_rec = self.nusc.get('sample_data', lidar_token)
            pc1_nusc = LidarPointCloud.from_file(f"{self.nusc.dataroot}/{lidar_rec['filename']}")
            # First step: sensor frame -> ego vehicle frame for this sweep.
            cs_record1 = self.nusc.get('calibrated_sensor', lidar_rec['calibrated_sensor_token'])
            pc1_nusc.rotate(Quaternion(cs_record1['rotation']).rotation_matrix)
            pc1_nusc.translate(np.array(cs_record1['translation']))
            # Radial crop: removes ego-vehicle returns and far-away noise.
            dists_origin = np.sqrt(np.sum(pc1_nusc.points[:3, :] ** 2, axis=0))
            keep = np.logical_and(self.min_dist <= dists_origin, dists_origin <= self.max_dist)
            pc1_nusc.points = pc1_nusc.points[:, keep]
            # Second step: record the ego pose (used later for ego-motion compensation).
            poserecord1 = self.nusc.get('ego_pose', lidar_rec['ego_pose_token'])
            pose = SE3(rotation=quat2rotmat(poserecord1["rotation"]), translation=np.array(poserecord1["translation"]))
            pc1 = pc1_nusc.points.T[:, :3].copy()
            # Remove points above a certain height.
            pc1 = pc1[pc1[:, 2] <= self.max_height]
            # Remove points below ground (noisy returns).
            pc1 = pc1[pc1[:, 2] > -1]
            # Get tracked cuboids for this sweep.
            _, boxes1, _ = self.nusc.get_sample_data(lidar_token, use_flat_vehicle_coordinates=True)
            objects1 = [box.corners().T for box in boxes1]
            sample_annotation_tokens1 = sample_rec['anns']
            bundle = LidarData(pc1, pose, i, scene_token, boxes1, objects1, sample_annotation_tokens1)
            lidar_sweeps.append(bundle)
        # SECTION 2: derive per-point pseudo flow between sweeps 0 and 1.
        n1 = lidar_sweeps[0].ply.shape[0]
        mask1_tracks_flow = []
        mask2_tracks_flow = []
        flow = np.zeros((n1, 3), dtype='float32')
        # Objects annotated in the reference sweep (sweep 0).
        objects1 = []
        objects1_metadata = []
        for token in lidar_sweeps[0].annotation_token:
            object1_metadata = self.nusc.get('sample_annotation', token)
            objects1_metadata.append(object1_metadata)
            object1 = get_object(object1_metadata)
            objects1.append(object1)
        # Objects in sweep 1, indexed by instance token so that tracks from
        # sweep 0 can be matched.
        objects2 = []
        objects2_metadata = []
        for token in lidar_sweeps[1].annotation_token:
            object2_metadata = self.nusc.get('sample_annotation', token)
            objects2_metadata.append(object2_metadata)
            object2 = get_object(object2_metadata)
            objects2.append(object2)
        objects2_track_id_dict = {object2.track_id: object2 for object2 in objects2}
        # BUG FIX: iterate over the objects themselves. The original looped
        # ``for _ in range(len(objects1))`` while the body kept using the
        # leftover variable ``object1`` from the collection loop above, so
        # every iteration processed only the *last* annotation.
        for object1 in objects1:
            box1 = [box for box in lidar_sweeps[0].boxes if box.token == object1.token][0]
            # Add a margin: some cuboids are very tight and would lose boundary points.
            box1.wlh = box1.wlh + self.margin
            bbox1_3d = box1.corners().T
            inbox_pc1, is_valid = filter_point_cloud_to_bbox_3D_vectorized(bbox1_3d, lidar_sweeps[0].ply)
            indices = np.where(is_valid == True)[0]
            mask1_tracks_flow.append(indices)
            if object1.track_id in objects2_track_id_dict:
                # Track continues in sweep 1: flow = rigid box motion applied
                # to the in-box points.
                object2 = objects2_track_id_dict[object1.track_id]
                box2 = [box for box in lidar_sweeps[1].boxes if box.token == object2.token][0]
                box_pose1 = SE3(rotation=box1.rotation_matrix, translation=np.array(box1.center))
                box_pose2 = SE3(rotation=box2.rotation_matrix, translation=np.array(box2.center))
                relative_pose_1_2 = box_pose2.right_multiply_with_se3(box_pose1.inverse())
                inbox_pc1_t = relative_pose_1_2.transform_point_cloud(inbox_pc1)
                translation = inbox_pc1_t - inbox_pc1
                flow[indices, :] = translation
                bbox2_3d = box2.corners().T
                inbox_pc2, is_valid2 = filter_point_cloud_to_bbox_3D_vectorized(bbox2_3d, lidar_sweeps[1].ply)
                mask2_tracks_flow.append(np.where(is_valid2 == True)[0])
        # Compensate egomotion to get rigid (background) flow.
        map_relative_to_base = lidar_sweeps[1].pose
        map_relative_to_other = lidar_sweeps[0].pose
        other_to_base = map_relative_to_base.inverse().right_multiply_with_se3(map_relative_to_other)
        points = lidar_sweeps[0].ply
        points_t = other_to_base.transform_point_cloud(points)
        # Bounding boxes can overlap, so deduplicate the track indices.
        if not mask1_tracks_flow:
            print('mask1 tracks flow is empty!')
            mask1_tracks_flow = np.array(mask1_tracks_flow)
        else:
            mask1_tracks_flow = np.unique(np.hstack(mask1_tracks_flow))
        full_mask1 = np.arange(len(lidar_sweeps[0].ply))
        mask1_no_tracks = np.setdiff1d(full_mask1, mask1_tracks_flow, assume_unique=True)
        if not mask2_tracks_flow:
            print('mask2 tracks flow is empty!')
            mask2_tracks_flow = np.array(mask2_tracks_flow)
        else:
            mask2_tracks_flow = np.unique(np.hstack(mask2_tracks_flow))
        full_mask2 = np.arange(len(lidar_sweeps[1].ply))
        mask2_no_tracks = np.setdiff1d(full_mask2, mask2_tracks_flow, assume_unique=True)
        # Refine the ego-motion alignment of the static background with
        # point-to-point ICP.
        pc1_o3d = o3d.geometry.PointCloud()
        pc1_o3d.points = o3d.utility.Vector3dVector(points_t[mask1_no_tracks])
        pc2_o3d = o3d.geometry.PointCloud()
        pc2_o3d.points = o3d.utility.Vector3dVector(lidar_sweeps[1].ply[mask2_no_tracks])
        trans_init = np.identity(4)
        reg_p2p = o3d.pipelines.registration.registration_icp(
            pc1_o3d, pc2_o3d,
            self.max_correspondence_distance,
            trans_init,
            o3d.pipelines.registration.TransformationEstimationPointToPoint(),
            o3d.pipelines.registration.ICPConvergenceCriteria(max_iteration=100)
        )
        pc1_t_o3d = pc1_o3d.transform(reg_p2p.transformation)
        points_t_refined = np.asarray(pc1_t_o3d.points)
        rigid_flow = points_t_refined - points[mask1_no_tracks]
        flow[mask1_no_tracks] = rigid_flow
        pc1 = lidar_sweeps[0].ply
        pc2 = lidar_sweeps[1].ply
        # SECTION 3: save processed data.
        dataset_path = os.path.join(self.save_fi_name, lidar_sweeps[0].scene_token)
        outfile = str(lidar_sweeps[0].timestamp) + '_' + str(lidar_sweeps[1].timestamp) + '.npz'
        full_path = os.path.join(dataset_path, outfile)
        Path(dataset_path).mkdir(parents=True, exist_ok=True)
        np.savez_compressed(full_path, pc1=pc1, pc2=pc2, flow=flow, mask1_tracks_flow=mask1_tracks_flow, mask2_tracks_flow=mask2_tracks_flow)
        return
    def __len__(self):
        # BUG FIX: the original returned ``len(self.datapath)``, but this
        # class never defines ``datapath``; one item is produced per scene.
        return len(self.scene_tokens)
def get_object(label):
    """Convert a nuScenes ``sample_annotation`` record into an ObjectLabelRecord.

    Missing optional fields default to: occlusion=0, label_class=None, score=1.0.
    """
    # nuScenes stores size as (width, length, height).
    width, length, height = label['size']
    occlusion = label.get("occlusion", 0)
    score = label.get("score", 1.0)
    if "label_class" in label:
        label_class = label["label_class"]
        if "name" in label_class:
            label_class = label_class["name"]
    else:
        label_class = None
    return ObjectLabelRecord(
        np.array(label['rotation']),
        np.array(label['translation']),
        length,
        width,
        height,
        occlusion,
        label_class,
        label['instance_token'],
        label['token'],
        score,
    )
class ObjectLabelRecord:
    """An oriented 3D cuboid (pose + extents) with tracking metadata, ported
    from the argoverse label format.

    NOTE(review): ``CameraConfig`` and the drawing helpers/constants used by
    ``render_clip_frustum_cv2`` (``BLUE_RGB``, ``RED_RGB``, ``GREEN_RGB``,
    ``draw_clipped_line_segment``, ``proj_cam_to_uv``, ``TOP_VERT_INDICES``,
    ``add_text_cv2``, ``WHITE_BGR``, the ``BKGRND_*``/``TEXT_OFFS_*``
    constants) are neither defined nor imported in this module — they come
    from the argoverse visualization utilities. The rendering path cannot run
    until those imports are restored; verify before use.
    """
    def __init__(
        self,
        quaternion: np.ndarray,
        translation: np.ndarray,
        length: float,
        width: float,
        height: float,
        occlusion: int,
        label_class: Optional[str] = None,
        track_id: Optional[str] = None,
        token: Optional[str] = None,
        score: float = 1.0,
    ) -> None:
        """Create an ObjectLabelRecord.
        Args:
            quaternion: Numpy vector representing quaternion, box/cuboid orientation
            translation: Numpy vector representing translation, center of box given as x, y, z.
            length: object length.
            width: object width.
            height: object height.
            occlusion: occlusion value.
            label_class: class label, see object_classes.py for all possible class in argoverse
            track_id: object track id, this is unique for each track
            token: source annotation token this record was built from.
            score: detection confidence (1.0 for ground-truth labels).
        """
        self.quaternion = quaternion
        self.translation = translation
        self.length = length
        self.width = width
        self.height = height
        self.occlusion = occlusion
        self.label_class = label_class
        self.track_id = track_id
        self.token = token
        self.score = score
    def as_2d_bbox(self) -> np.ndarray:
        """Construct a 2D bounding box from this label.
        Length is x, width is y, and z is height.

        Returns the four *top-face* corners (z = +height/2) transformed into
        the egovehicle frame, shape (4, 3).
        """
        bbox_object_frame = np.array(
            [
                [self.length / 2.0, self.width / 2.0, self.height / 2.0],
                [self.length / 2.0, -self.width / 2.0, self.height / 2.0],
                [-self.length / 2.0, self.width / 2.0, self.height / 2.0],
                [-self.length / 2.0, -self.width / 2.0, self.height / 2.0],
            ]
        )
        egovehicle_SE3_object = SE3(rotation=quat2rotmat(self.quaternion), translation=self.translation)
        bbox_in_egovehicle_frame = egovehicle_SE3_object.transform_point_cloud(bbox_object_frame)
        return bbox_in_egovehicle_frame
    def as_3d_bbox(self) -> np.ndarray:
        r"""Calculate the 8 bounding box corners.
        Returns:
            Numpy array of shape (8,3)
        Corner numbering::
             5------4
             |\\    |\\
             | \\   | \\
             6--\\--7  \\
             \\  \\  \\ \\
          l   \\ 1-------0    h
           e   \\ ||   \\ ||   e
            n   \\||    \\||    i
             g   \\2------3    g
              t      width.     h
               h.               t.
        First four corners are the ones facing forward.
        The last four are the ones facing backwards.

        NOTE(review): unlike ``as_3d_bbox_ego`` below, no egovehicle transform
        is applied here — corners are returned in the *object* frame.
        """
        # 3D bounding box corners. (Convention: x points forward, y to the left, z up.)
        x_corners = self.length / 2 * np.array([1, 1, 1, 1, -1, -1, -1, -1])
        y_corners = self.width / 2 * np.array([1, -1, -1, 1, 1, -1, -1, 1])
        z_corners = self.height / 2 * np.array([1, 1, -1, -1, 1, 1, -1, -1])
        corners_object_frame = np.vstack((x_corners, y_corners, z_corners)).T
        return corners_object_frame
    def as_3d_bbox_ego(self) -> np.ndarray:
        r"""Calculate the 8 bounding box corners in the egovehicle frame.
        Returns:
            Numpy array of shape (8,3)
        Corner numbering is identical to :meth:`as_3d_bbox`; this variant
        additionally applies the cuboid's pose (quaternion + translation) to
        express the corners in the egovehicle frame.
        """
        # 3D bounding box corners. (Convention: x points forward, y to the left, z up.)
        x_corners = self.length / 2 * np.array([1, 1, 1, 1, -1, -1, -1, -1])
        y_corners = self.width / 2 * np.array([1, -1, -1, 1, 1, -1, -1, 1])
        z_corners = self.height / 2 * np.array([1, 1, -1, -1, 1, 1, -1, -1])
        corners_object_frame = np.vstack((x_corners, y_corners, z_corners)).T
        egovehicle_SE3_object = SE3(rotation=quat2rotmat(self.quaternion), translation=self.translation)
        corners_egovehicle_frame = egovehicle_SE3_object.transform_point_cloud(corners_object_frame)
        return corners_egovehicle_frame
    # Dead code kept for reference (incomplete: `translate` is undefined).
    # def as_3d_bbox_flat(self) -> np.ndarray:
    #     x_corners = self.length / 2 * np.array([1, 1, 1, 1, -1, -1, -1, -1])
    #     y_corners = self.width / 2 * np.array([1, -1, -1, 1, 1, -1, -1, 1])
    #     z_corners = self.height / 2 * np.array([1, 1, -1, -1, 1, 1, -1, -1])
    #     corners_object_frame = np.vstack((x_corners, y_corners, z_corners)).T
    #     egovehicle_SE3_object = SE3(rotation=quat2rotmat(self.quaternion), translation=self.translation)
    #     corners_egovehicle_frame = egovehicle_SE3_object.transform_point_cloud(corners_object_frame)
    #     yaw = Quaternion(self.quaternion).yaw_pitch_roll[0]
    #     box =translate(-np.array(self.translation))
    #     box.rotate(Quaternion(scalar=np.cos(yaw / 2), vector=[0, 0, np.sin(yaw / 2)]).inverse)
    #     return box
    def render_clip_frustum_cv2(
        self,
        img: np.ndarray,
        corners: np.ndarray,
        planes: List[Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray]],
        camera_config: CameraConfig,
        colors: Tuple[Tuple[int, int, int], Tuple[int, int, int], Tuple[int, int, int]] = (
            BLUE_RGB,
            RED_RGB,
            GREEN_RGB,
        ),
        linewidth: int = 2,
    ) -> np.ndarray:
        r"""We bring the 3D points into each camera, and do the clipping there.
        Renders box using OpenCV2. Roughly based on
        https://github.com/nutonomy/nuscenes-devkit/blob/master/python-sdk/nuscenes_utils/data_classes.py
        Corner numbering is the same as in :meth:`as_3d_bbox`.
        Args:
            img: Numpy array of shape (M,N,3)
            corners: Numpy array of shape (8,3) in camera coordinate frame.
            planes: Iterable of 5 clipping planes. Each plane is defined by 4 points.
            camera_config: CameraConfig object
            colors: tuple of RGB 3-tuples, Colors for front, side & rear.
                defaults are  0. blue (0,0,255) in RGB and (255,0,0) in OpenCV's BGR
                              1. red (255,0,0) in RGB and (0,0,255) in OpenCV's BGR
                              2. green (0,255,0) in RGB and BGR alike.
            linewidth: integer, linewidth for plot
        Returns:
            img: Numpy array of shape (M,N,3), representing updated image
        """
        def draw_rect(selected_corners: np.ndarray, color: Tuple[int, int, int]) -> None:
            # Draw a closed quadrilateral through the 4 selected corners.
            prev = selected_corners[-1]
            for corner in selected_corners:
                draw_clipped_line_segment(
                    img,
                    prev.copy(),
                    corner.copy(),
                    camera_config,
                    linewidth,
                    planes,
                    color,
                )
                prev = corner
        # Draw the sides in green
        for i in range(4):
            # between front and back corners
            draw_clipped_line_segment(
                img,
                corners[i],
                corners[i + 4],
                camera_config,
                linewidth,
                planes,
                colors[2][::-1],
            )
        # Draw front (first 4 corners) in blue
        draw_rect(corners[:4], colors[0][::-1])
        # Draw rear (last 4 corners) in red
        draw_rect(corners[4:], colors[1][::-1])
        # grab the top vertices
        center_top = np.mean(corners[TOP_VERT_INDICES], axis=0)
        uv_ct, _, _, _ = proj_cam_to_uv(center_top.reshape(1, 3), camera_config)
        uv_ct = uv_ct.squeeze().astype(np.int32)  # cast to integer
        # Only draw the category label when the box is close and projects
        # inside the image, to avoid overcrowding the plot.
        if label_is_closeby(center_top) and uv_coord_is_valid(uv_ct, img):
            top_left = (uv_ct[0] - BKGRND_RECT_OFFS_LEFT, uv_ct[1] - BKGRND_RECT_OFFS_UP)
            bottom_right = (uv_ct[0] + BKGRND_RECT_OFFS_LEFT, uv_ct[1] + BKGRND_RECT_OFFS_DOWN)
            img = draw_alpha_rectangle(img, top_left, bottom_right, EMERALD_RGB, alpha=BKGRND_RECT_ALPHA)
            add_text_cv2(img, text=str(self.label_class), x=uv_ct[0] - TEXT_OFFS_LEFT, y=uv_ct[1], color=WHITE_BGR)
        # Draw blue line indicating the front half
        center_bottom_forward = np.mean(corners[2:4], axis=0)
        center_bottom = np.mean(corners[[2, 3, 7, 6]], axis=0)
        draw_clipped_line_segment(
            img,
            center_bottom,
            center_bottom_forward,
            camera_config,
            linewidth,
            planes,
            colors[0][::-1],
        )
        return img
def uv_coord_is_valid(uv: np.ndarray, img: np.ndarray) -> bool:
    """Return True when pixel coordinate ``uv`` = (x, y) lies inside a
    3-channel color image ``img``."""
    rows, cols, _ = img.shape
    x, y = uv[0], uv[1]
    return bool(0 <= x < cols and 0 <= y < rows)
def label_is_closeby(box_point: np.ndarray) -> bool:
    """Return True when a 3d cuboid point (egovehicle frame) is within range
    of the egovehicle, to prevent plot overcrowding."""
    distance = np.linalg.norm(box_point)
    return bool(distance < MAX_RANGE_THRESH_PLOT_CATEGORY)
def draw_alpha_rectangle(
    img: np.ndarray,
    top_left: Tuple[int, int],
    bottom_right: Tuple[int, int],
    color_rgb: Tuple[int, int, int],
    alpha: float,
) -> np.ndarray:
    """Alpha blend colored rectangle into image. Corner coords given as (x,y) tuples."""
    height, width, _ = img.shape
    # Binary mask marking the rectangle's pixels (note x/y swap: rows are y).
    rect_mask = np.zeros((height, width), dtype=np.uint8)
    y0, y1 = top_left[1], bottom_right[1]
    x0, x1 = top_left[0], bottom_right[0]
    rect_mask[y0:y1, x0:x1] = 1
    # vis_mask expects the color channel order reversed (BGR).
    bgr = np.array(list(color_rgb[::-1]))
    return vis_mask(img, rect_mask, bgr, alpha)
if __name__ == "__main__":
    dataset_path = '/dataset/nuscenes' # NOTE: path to the original dataset
    # ANCHOR: hyperparameters
    channel = 'LIDAR_TOP'
    sample_every = 2 # nuscenes default: 20 Hz, we use 10 Hz here
    min_dist = 3.0
    max_dist = 50.0
    max_height = 4.0
    margin = 0.6
    max_correspondence_distance = 1.0
    # Resolve the scene names belonging to the chosen split.
    scenes = create_splits_scenes()
    split = 'val'
    scenes = scenes[split]
    # NOTE(review): empty output root — processed files are written relative
    # to the current working directory; set this before running.
    save_fi_name = ''
    nusc = NuScenes(version='v1.0-trainval', dataroot=dataset_path, verbose=True)
    scene_tokens = set()
    for scene in nusc.scene:
        if scene['name'] in scenes:
            scene_tokens.add(scene['token'])
    dataset = PreProcessNuScenesDataset(
        save_fi_name,
        nusc, scene_tokens,
        channel,
        sample_every,
        min_dist, max_dist,
        max_height,
        margin,
        max_correspondence_distance
    )
    # NOTE: for each scene, generate the pseudo-scene-flow samples
    # (iterating the dataset triggers __getitem__, which does all the work).
    for i, _ in enumerate(dataset):
        print('id is {}, working on scene {}'.format(i, dataset.scene_tokens[i]))
        # pass
| 22,943 | 38.020408 | 141 | py |
LSMOL | LSMOL-main/third_party/nspf/process/preprocess_sf_argoverse.py | import copy
import glob
from dataclasses import dataclass
from pathlib import Path
from typing import List, Optional
import argoverse.data_loading.object_label_record as object_label
import numpy as np
import open3d as o3d
from argoverse.map_representation.map_api import ArgoverseMap
from argoverse.utils.cuboid_interior import filter_point_cloud_to_bbox_3D_vectorized
from argoverse.utils.json_utils import read_json_file
from argoverse.utils.ply_loader import load_ply
from argoverse.utils.se3 import SE3
from argoverse.utils.transform import quat2rotmat
from torch.utils.data import Dataset
class PreProcessArgoverseDataset(Dataset):
    """Convert raw Argoverse tracking logs into pseudo scene-flow samples.

    Each item pairs two (or more) consecutive lidar sweeps, derives a
    per-point flow field from the tracked-object cuboids (dynamic points)
    and from the ICP-refined ego-motion (static background), and writes the
    result to disk as a compressed .npz file.
    """
    def __init__(
        self,
        dataset_path='',
        partition='val',
        remove_ground=True,
        get_gt_tracks=True,
        max_correspondence_distance=1.0,
        log_id=None,
        width=1,
    ):
        """
        Args:
            dataset_path: root of the raw `argoverse-tracking` dataset.
            partition: split to preprocess; 'train', 'test' or 'val'.
            remove_ground: drop ground-surface points using the HD map.
            get_gt_tracks: derive pseudo flow labels from tracked cuboids.
            max_correspondence_distance: ICP correspondence threshold (meters).
            log_id: optionally restrict preprocessing to a single log.
            width: number of follow-up sweeps to load after each sweep
                (1 -> a consecutive pair).
        """
        self.partition = partition
        self.log_id = log_id
        self.remove_ground = remove_ground
        self.get_gt_tracks = get_gt_tracks
        self.max_correspondence_distance = max_correspondence_distance
        # FIX: `self.width` is read in __getitem__ (get_lidar_sweeps call) but
        # was never assigned, raising AttributeError on first use.
        self.width = width
        if self.partition == 'train':
            self.datapath = sorted(glob.glob(f"{dataset_path}/training/*/*/lidar/*"))
        elif self.partition == 'test':
            self.datapath = sorted(glob.glob(f"{dataset_path}/testing/*/*/lidar/*"))
        elif self.partition == 'val':
            self.datapath = sorted(glob.glob(f"{dataset_path}/val/*/lidar/*"))
        if self.log_id:
            self.datapath = [file for file in self.datapath if log_id in file]
        # Subsample the sweep list so consecutive samples are not near-duplicates.
        if self.partition == "train":
            sample_every = 2
        else:
            sample_every = 10  # To don't get too repetitive for validation.
        self.datapath = self.datapath[0:-1:sample_every]
        self.avm = ArgoverseMap()

    def __getitem__(self, index):
        """Build and save one pseudo scene-flow sample for sweep `index`."""
        filename = self.datapath[index]
        print(filename)
        log_id = filename.split('/')[-3]
        dataset_dir = filename.split(log_id)[0]
        city_info_fpath = f"{dataset_dir}/{log_id}/city_info.json"
        city_info = read_json_file(city_info_fpath)
        city_name = city_info['city_name']
        '''Get consecutive point clouds.'''
        lidar_sweep_fnames = sorted(glob.glob(f"{dataset_dir}{log_id}/lidar/PC_*"))
        lidar_sweep_idx = lidar_sweep_fnames.index(filename)
        # The last sweep has no successor; step back one so a pair exists.
        if lidar_sweep_idx == len(lidar_sweep_fnames) - 1:
            lidar_sweep_idx = lidar_sweep_idx - 1
        lidar_sweeps = get_lidar_sweeps(lidar_sweep_fnames, lidar_sweep_idx, self.width)
        if self.remove_ground:
            # Ground removal operates in the city frame, so transform,
            # filter against the map, then transform back.
            for i, lidar_sweep in enumerate(lidar_sweeps):
                pc_t = lidar_sweep.pose.transform_point_cloud(lidar_sweep.ply)
                _, not_ground_logicals = self.avm.remove_ground_surface(copy.deepcopy(pc_t), city_name, return_logicals=True)
                pc_t = pc_t[not_ground_logicals]
                lidar_sweeps[i].ply = lidar_sweep.pose.inverse_transform_point_cloud(pc_t).astype('float32')
        # ANCHOR: remove non drivable area (disabled by default)
        drivable_area = False
        if drivable_area:
            for i, lidar_sweep in enumerate(lidar_sweeps):
                pc_t = lidar_sweep.pose.transform_point_cloud(lidar_sweep.ply)
                pc_t = self.avm.remove_non_driveable_area_points(pc_t, city_name)
                lidar_sweeps[i].ply = lidar_sweep.pose.inverse_transform_point_cloud(pc_t).astype('float32')
        # ANCHOR: remove points above certain height.
        max_height = 4.0
        for i, lidar_sweep in enumerate(lidar_sweeps):
            indices = np.where(lidar_sweeps[i].ply[:, 2] <= max_height)[0]
            lidar_sweeps[i].ply = lidar_sweeps[i].ply[indices]
        # ANCHOR: remove points beyond certain distance.
        max_dist = 80
        for i, lidar_sweep in enumerate(lidar_sweeps):
            pc_o3d = o3d.geometry.PointCloud()
            pc_o3d.points = o3d.utility.Vector3dVector(lidar_sweeps[i].ply)
            dists_to_center = np.sqrt(np.sum(lidar_sweeps[i].ply ** 2, 1))
            ind = np.where(dists_to_center <= max_dist)[0]
            lidar_sweeps[i].ply = lidar_sweeps[i].ply[ind]
        # ANCHOR: get pseudo flow annotations
        if self.get_gt_tracks:
            n1 = lidar_sweeps[0].ply.shape[0]
            tracks1_fpath = f"{dataset_dir}{log_id}/per_sweep_annotations_amodal/tracked_object_labels_{lidar_sweeps[0].timestamp}.json"
            tracks2_fpath = f"{dataset_dir}{log_id}/per_sweep_annotations_amodal/tracked_object_labels_{lidar_sweeps[1].timestamp}.json"
            objects1 = object_label.read_label(tracks1_fpath)
            objects2 = object_label.read_label(tracks2_fpath)
            objects2_track_id_dict = {object2.track_id:object2 for object2 in objects2}
            mask1_tracks_flow = []
            mask2_tracks_flow = []
            flow = np.zeros((n1, 3), dtype='float32')
            # Dynamic points: flow of a tracked object's points is given by
            # the relative motion of its cuboid between the two sweeps.
            for object1 in objects1:
                if object1.occlusion == 100:
                    continue
                if object1.track_id in objects2_track_id_dict:
                    object2 = objects2_track_id_dict[object1.track_id]
                    obj_pose1 = SE3(rotation=quat2rotmat(object1.quaternion), translation=np.array(object1.translation))
                    obj_pose2 = SE3(rotation=quat2rotmat(object2.quaternion), translation=np.array(object2.translation))
                    relative_pose_1_2 = obj_pose2.right_multiply_with_se3(obj_pose1.inverse())
                    # Add a margin to the cuboids. Some cuboids are very tight and might lose some points.
                    margin = 0.6
                    object1.length = object1.length + margin
                    object1.height = object1.height + margin
                    object1.width = object1.width + margin
                    bbox1_3d = object1.as_3d_bbox()
                    inbox_pc1, is_valid = filter_point_cloud_to_bbox_3D_vectorized(bbox1_3d, lidar_sweeps[0].ply)
                    indices = np.where(is_valid == True)[0]
                    mask1_tracks_flow.append(indices)
                    inbox_pc1_t = relative_pose_1_2.transform_point_cloud(inbox_pc1)
                    translation = inbox_pc1_t - inbox_pc1
                    flow[indices, :] = translation
                    bbox2_3d = object2.as_3d_bbox()
                    inbox_pc2, is_valid2 = filter_point_cloud_to_bbox_3D_vectorized(bbox2_3d, lidar_sweeps[1].ply)
                    mask2_tracks_flow.append(np.where(is_valid2 == True)[0])
            # Compensate egomotion to get rigid flow.
            map_relative_to_base = lidar_sweeps[1].pose
            map_relative_to_other = lidar_sweeps[0].pose
            other_to_base = map_relative_to_base.inverse().right_multiply_with_se3(map_relative_to_other)
            points = lidar_sweeps[0].ply
            points_t = other_to_base.transform_point_cloud(points)
            mask1_tracks_flow = np.unique(np.hstack(mask1_tracks_flow))
            mask2_tracks_flow = np.unique(np.hstack(mask2_tracks_flow))
            # NOTE: refine the rigid registration with ICP (without the tracks), this might be unnecessary since the given ego pose is pretty good already
            full_mask1 = np.arange(len(lidar_sweeps[0].ply))
            full_mask2 = np.arange(len(lidar_sweeps[1].ply))
            mask1_no_tracks = np.setdiff1d(full_mask1, mask1_tracks_flow, assume_unique=True)
            mask2_no_tracks = np.setdiff1d(full_mask2, mask2_tracks_flow, assume_unique=True)
            pc1_o3d = o3d.geometry.PointCloud()
            pc1_o3d.points = o3d.utility.Vector3dVector(points_t[mask1_no_tracks])
            pc2_o3d = o3d.geometry.PointCloud()
            pc2_o3d.points = o3d.utility.Vector3dVector(lidar_sweeps[1].ply[mask2_no_tracks])
            # Apply point-to-point ICP
            trans_init = np.identity(4)
            reg_p2p = o3d.registration.registration_icp(
                pc1_o3d, pc2_o3d,
                self.max_correspondence_distance,
                trans_init,
                o3d.registration.TransformationEstimationPointToPoint(),
                o3d.registration.ICPConvergenceCriteria(max_iteration=100)
            )
            pc1_t_o3d = pc1_o3d.transform(reg_p2p.transformation)
            points_t_refined = np.asarray(pc1_t_o3d.points)
            # Static points: flow is the ICP-refined ego-motion displacement.
            rigid_flow = points_t_refined - points[mask1_no_tracks]
            flow[mask1_no_tracks] = rigid_flow
        '''Get point clouds.'''
        pc1 = lidar_sweeps[0].ply
        pc2 = lidar_sweeps[1].ply
        # ANCHOR: save processed flow
        dataset_path = filename.split('argoverse-tracking')[0] + 'Argoverse_SceneFlow_remove_ground' + filename.split('argoverse-tracking')[1].split('lidar')[0]
        outfile = str(lidar_sweeps[0].timestamp) + '_' + str(lidar_sweeps[1].timestamp) + '.npz'
        full_path = dataset_path + outfile
        Path(dataset_path).mkdir(parents=True, exist_ok=True)
        np.savez_compressed(full_path, pc1=pc1, pc2=pc2, flow=flow, mask1_tracks_flow=mask1_tracks_flow, mask2_tracks_flow=mask2_tracks_flow)

    def __len__(self):
        # Number of (subsampled) lidar sweeps available as sample anchors.
        return len(self.datapath)
@dataclass
class PlyWithPose:
    """Struct to hold ply and pose data."""
    # Lidar point cloud; the preprocessing code indexes columns as x, y, z.
    ply: np.ndarray
    # Ego-vehicle pose (city frame) at this sweep's timestamp.
    pose: SE3
    # Sweep timestamp parsed from the 'PC_<timestamp>.ply' filename.
    timestamp: int
def get_lidar_sweeps(base_directory: List[str], sweep_index: int, width: int) -> Optional[List[PlyWithPose]]:
    """Load a run of consecutive lidar sweeps together with their ego poses.

    Args:
        base_directory: sorted list of 'PC_<timestamp>.ply' file paths for one
            log (despite the name, this is a list of paths, not a directory).
        sweep_index: index of the first sweep to load.
        width: number of additional forward sweeps to grab after sweep_index.
    Returns:
        List of plys with their associated pose, or None if any ply fails
        to load.
    """
    sweeps = []
    n_forward = width
    start = sweep_index
    end = start + n_forward
    if start < 0:  # defensive clamp; callers pass indices from .index()
        start = 0
    if end >= len(base_directory):  # clamp the window to the last sweep
        end = len(base_directory) - 1
    for step_index in range(start, end + 1):
        sweep_path = base_directory[step_index]
        ply = load_ply(sweep_path)
        if ply is None:
            return None
        # The timestamp embedded in the filename links the sweep to its
        # per-timestamp ego-pose JSON file.
        ply_timestamp = sweep_path.split('PC_')[1].split('.')[0]
        log_path = base_directory[0].split('lidar')[0]
        pose_fname = glob.glob(f"{log_path}/poses/city_SE3_egovehicle_{ply_timestamp}.json")[0]
        pose_ = read_json_file(pose_fname)
        pose = SE3(rotation=quat2rotmat(pose_['rotation']), translation=np.array(pose_['translation']))
        timestamp = int(sweep_path.split('/')[-1].split('_')[1][:-4])
        if pose is None:  # NOTE(review): dead check — SE3(...) never returns None
            return None
        bundle = PlyWithPose(ply, pose, timestamp)
        sweeps.append(bundle)
    return sweeps
if __name__ == "__main__":
    # Preprocessing configuration — edit these values in place.
    config = dict(
        dataset_path='/dataset/argo/argoverse-tracking',  # root of the raw dataset
        partition="val",                 # 'train' or 'val' only
        remove_ground=True,
        get_gt_tracks=True,
        max_correspondence_distance=1.0,
    )
    dataset = PreProcessArgoverseDataset(**config)
    # Iterating triggers __getitem__, which writes each sample to disk.
    for data in dataset:
        print('start preprocessing scene flow dataset')
LSMOL | LSMOL-main/model_training/train_det_2d_class.py | import logging
import os
from collections import OrderedDict
import torch
import detectron2.utils.comm as comm
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import get_cfg
from detectron2.data import MetadataCatalog
from detectron2.data import DatasetCatalog
from detectron2.data.datasets import load_coco_json
from detectron2.data import detection_utils as utils
from detectron2.data import transforms as T
from detectron2.data import build_detection_test_loader,build_detection_train_loader
from detectron2.data import DatasetMapper
import copy
from detectron2.engine import DefaultTrainer, default_argument_parser, default_setup, hooks, launch
from detectron2.evaluation import (
CityscapesInstanceEvaluator,
CityscapesSemSegEvaluator,
COCOEvaluator,
COCOPanopticEvaluator,
DatasetEvaluators,
LVISEvaluator,
PascalVOCDetectionEvaluator,
SemSegEvaluator,
verify_results,
)
from detectron2.modeling import GeneralizedRCNNWithTTA
def build_evaluator(cfg, dataset_name, output_folder=None):
    """
    Create evaluator(s) for a given dataset.
    This uses the special metadata "evaluator_type" associated with each builtin dataset.
    For your own dataset, you can simply create an evaluator manually in your
    script and do not have to worry about the hacky if-else logic here.
    """
    if output_folder is None:
        output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
    evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type

    # Evaluator types handled by a single dedicated evaluator.
    if evaluator_type == "cityscapes_instance":
        assert (
            torch.cuda.device_count() > comm.get_rank()
        ), "CityscapesEvaluator currently do not work with multiple machines."
        return CityscapesInstanceEvaluator(dataset_name)
    if evaluator_type == "cityscapes_sem_seg":
        assert (
            torch.cuda.device_count() > comm.get_rank()
        ), "CityscapesEvaluator currently do not work with multiple machines."
        return CityscapesSemSegEvaluator(dataset_name)
    if evaluator_type == "pascal_voc":
        return PascalVOCDetectionEvaluator(dataset_name)
    if evaluator_type == "lvis":
        return LVISEvaluator(dataset_name, output_dir=output_folder)

    # COCO-family types may require several evaluators combined.
    evaluators = []
    if evaluator_type in ("sem_seg", "coco_panoptic_seg"):
        evaluators.append(
            SemSegEvaluator(
                dataset_name,
                distributed=True,
                output_dir=output_folder,
            )
        )
    if evaluator_type in ("coco", "coco_panoptic_seg"):
        evaluators.append(COCOEvaluator(dataset_name, output_dir=output_folder))
    if evaluator_type == "coco_panoptic_seg":
        evaluators.append(COCOPanopticEvaluator(dataset_name, output_folder))

    if not evaluators:
        raise NotImplementedError(
            "no Evaluator for the dataset {} with the type {}".format(dataset_name, evaluator_type)
        )
    if len(evaluators) == 1:
        return evaluators[0]
    return DatasetEvaluators(evaluators)
class Trainer(DefaultTrainer):
    """DefaultTrainer specialized to evaluate with COCO metrics."""

    @classmethod
    def build_evaluator(cls, cfg, dataset_name):
        # Write COCO-style evaluation artifacts into the run directory.
        out_dir = os.path.join(cfg.OUTPUT_DIR)
        coco_eval = COCOEvaluator(dataset_name, cfg, True, out_dir)
        return DatasetEvaluators([coco_eval])

    @classmethod
    def build_test_loader(cls, cfg, dataset_name):
        # Inference-time mapper (is_train=False): no training augmentations.
        mapper = DatasetMapper(cfg, False)
        return build_detection_test_loader(cfg, dataset_name, mapper=mapper)
def setup(args):
    """
    Create configs and perform basic setups.

    Registers the Waymo pseudo-label train/val datasets with detectron2's
    catalogs, then builds a frozen config from the config file, script-level
    overrides, and CLI options (in that precedence order, lowest to highest).
    """
    BaseFolder='/data/yuqi_wang/waymo'
    TrainFolder = '/data/yuqi_wang/waymo_range'
    ValFolder = '/data/yuqi_wang/waymo/new_val'
    FULL_LABEL_CLASSES = ['unknown', 'vehicle', 'pedestrian', 'cyclist']
    # Register lazily-loaded COCO-json datasets; `d=d` binds the loop variable
    # at definition time (avoids the late-binding closure pitfall).
    for d in ['train']:
        DatasetCatalog.register('waymo_'+d,lambda d=d: load_coco_json(os.path.join(BaseFolder,'pseudo_anno',d+'_annotations_sf_3class.json'),TrainFolder))
        MetadataCatalog.get('waymo_'+d).set(thing_classes=FULL_LABEL_CLASSES)
    for d in ['val']:
        DatasetCatalog.register('waymo_'+d,lambda d=d: load_coco_json(os.path.join(BaseFolder,'pseudo_anno',d+'_annotations_class.json'),ValFolder))
        MetadataCatalog.get('waymo_'+d).set(thing_classes=FULL_LABEL_CLASSES)
    cfg = get_cfg()
    cfg.merge_from_file(args.config_file)
    # FIX: apply script-level overrides AFTER merge_from_file so values from
    # the yaml cannot silently clobber them, but BEFORE merge_from_list so
    # command-line opts still have the final say.
    cfg.OUTPUT_DIR='../log_sf_class'
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = len(FULL_LABEL_CLASSES)
    cfg.merge_from_list(args.opts)
    cfg.freeze()
    default_setup(cfg, args)
    return cfg
def main(args):
    """Per-process entry point: build the config, then evaluate or train."""
    cfg = setup(args)

    if args.eval_only:
        # Evaluation-only path: restore weights and run the test set.
        model = Trainer.build_model(cfg)
        checkpointer = DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR)
        checkpointer.resume_or_load(cfg.MODEL.WEIGHTS, resume=args.resume)
        results = Trainer.test(cfg, model)
        if cfg.TEST.AUG.ENABLED:
            results.update(Trainer.test_with_TTA(cfg, model))
        if comm.is_main_process():
            verify_results(cfg, results)
        return results

    # Training path. For anything fancier than the standard training logic,
    # write a custom loop (see detectron2's plain_train_net.py) or subclass
    # the trainer.
    trainer = Trainer(cfg)
    trainer.resume_or_load(resume=args.resume)
    if cfg.TEST.AUG.ENABLED:
        tta_hook = hooks.EvalHook(0, lambda: trainer.test_with_TTA(cfg, trainer.model))
        trainer.register_hooks([tta_hook])
    return trainer.train()
if __name__ == "__main__":
    # Parse detectron2's standard launch arguments (config file, #GPUs,
    # machine rank, distributed URL, extra cfg opts).
    args = default_argument_parser().parse_args()
    print("Command Line Args:", args)
    # Spawn one worker process per GPU (per machine) running `main`.
    launch(
        main,
        args.num_gpus,
        num_machines=args.num_machines,
        machine_rank=args.machine_rank,
        dist_url=args.dist_url,
        args=(args,),
    )
| 5,795 | 35.917197 | 154 | py |
LSMOL | LSMOL-main/model_training/train_det_2d.py | import logging
import os
from collections import OrderedDict
import torch
import detectron2.utils.comm as comm
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import get_cfg
from detectron2.data import MetadataCatalog
from detectron2.data import DatasetCatalog
from detectron2.data.datasets import load_coco_json
from detectron2.data import detection_utils as utils
from detectron2.data import transforms as T
from detectron2.data import build_detection_test_loader,build_detection_train_loader
from detectron2.data import DatasetMapper
import copy
from detectron2.engine import DefaultTrainer, default_argument_parser, default_setup, hooks, launch
from detectron2.evaluation import (
CityscapesInstanceEvaluator,
CityscapesSemSegEvaluator,
COCOEvaluator,
COCOPanopticEvaluator,
DatasetEvaluators,
LVISEvaluator,
PascalVOCDetectionEvaluator,
SemSegEvaluator,
verify_results,
)
from detectron2.modeling import GeneralizedRCNNWithTTA
def build_evaluator(cfg, dataset_name, output_folder=None):
    """
    Create evaluator(s) for a given dataset.
    This uses the special metadata "evaluator_type" associated with each builtin dataset.
    For your own dataset, you can simply create an evaluator manually in your
    script and do not have to worry about the hacky if-else logic here.
    """
    if output_folder is None:
        output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
    evaluator_list = []
    evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type
    # COCO-family types may combine several evaluators.
    if evaluator_type in ["sem_seg", "coco_panoptic_seg"]:
        evaluator_list.append(
            SemSegEvaluator(
                dataset_name,
                distributed=True,
                output_dir=output_folder,
            )
        )
    if evaluator_type in ["coco", "coco_panoptic_seg"]:
        evaluator_list.append(COCOEvaluator(dataset_name, output_dir=output_folder))
    if evaluator_type == "coco_panoptic_seg":
        evaluator_list.append(COCOPanopticEvaluator(dataset_name, output_folder))
    # Types handled by a single dedicated evaluator return directly.
    if evaluator_type == "cityscapes_instance":
        assert (
            torch.cuda.device_count() > comm.get_rank()
        ), "CityscapesEvaluator currently do not work with multiple machines."
        return CityscapesInstanceEvaluator(dataset_name)
    if evaluator_type == "cityscapes_sem_seg":
        assert (
            torch.cuda.device_count() > comm.get_rank()
        ), "CityscapesEvaluator currently do not work with multiple machines."
        return CityscapesSemSegEvaluator(dataset_name)
    elif evaluator_type == "pascal_voc":
        return PascalVOCDetectionEvaluator(dataset_name)
    elif evaluator_type == "lvis":
        return LVISEvaluator(dataset_name, output_dir=output_folder)
    if len(evaluator_list) == 0:
        raise NotImplementedError(
            "no Evaluator for the dataset {} with the type {}".format(dataset_name, evaluator_type)
        )
    elif len(evaluator_list) == 1:
        return evaluator_list[0]
    return DatasetEvaluators(evaluator_list)
class Trainer(DefaultTrainer):
    # DefaultTrainer specialized to evaluate with COCO metrics and to use an
    # inference-time (no-augmentation) data mapper for the test loader.
    @classmethod
    def build_evaluator(cls, cfg, dataset_name):
        # Evaluation artifacts are written directly into the run directory.
        output_folder = os.path.join(cfg.OUTPUT_DIR)
        evaluators = [COCOEvaluator(dataset_name, cfg, True, output_folder)]
        return DatasetEvaluators(evaluators)
    @classmethod
    def build_test_loader(cls, cfg, dataset_name):
        # DatasetMapper(cfg, False) -> is_train=False: no training augmentations.
        return build_detection_test_loader(cfg, dataset_name, mapper=DatasetMapper(cfg, False))
def setup(args):
    """
    Create configs and perform basic setups.

    Registers the class-agnostic Waymo pseudo-label train/val datasets with
    detectron2's catalogs, then builds a frozen config from the config file,
    script-level overrides, and CLI options (lowest to highest precedence).
    """
    BaseFolder='/data/yuqi_wang/waymo'
    # Alternative training data roots used in earlier experiments:
    # TrainFolder = '/data/yuqi_wang/waymo_range'
    # TrainFolder = '/data/yuqi_wang/waymo/train_visible'
    TrainFolder = '/data/yuqi_wang/waymo/train_visible_few'
    ValFolder = '/data/yuqi_wang/waymo/new_val'
    FULL_LABEL_CLASSES = ['unknown', 'object']
    # Register lazily-loaded COCO-json datasets; `d=d` binds the loop variable
    # at definition time (avoids the late-binding closure pitfall).
    for d in ['train']:
        DatasetCatalog.register('waymo_'+d,lambda d=d: load_coco_json(os.path.join(BaseFolder,'pseudo_anno',d+'_vis_few_annotations.json'),TrainFolder))
        MetadataCatalog.get('waymo_'+d).set(thing_classes=FULL_LABEL_CLASSES)
    for d in ['val']:
        DatasetCatalog.register('waymo_'+d,lambda d=d: load_coco_json(os.path.join(BaseFolder,'pseudo_anno',d+'_annotations.json'),ValFolder))
        MetadataCatalog.get('waymo_'+d).set(thing_classes=FULL_LABEL_CLASSES)
    cfg = get_cfg()
    cfg.merge_from_file(args.config_file)
    # FIX: apply script-level overrides AFTER merge_from_file so values from
    # the yaml cannot silently clobber them, but BEFORE merge_from_list so
    # command-line opts still have the final say.
    cfg.OUTPUT_DIR='../log_gt_mini'
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = len(FULL_LABEL_CLASSES)
    cfg.merge_from_list(args.opts)
    cfg.freeze()
    default_setup(cfg, args)
    return cfg
def main(args):
    # Per-process entry point: build the config, then evaluate or train.
    cfg = setup(args)
    if args.eval_only:
        # Evaluation-only path: restore weights and run the test set.
        model = Trainer.build_model(cfg)
        DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(
            cfg.MODEL.WEIGHTS, resume=args.resume
        )
        res = Trainer.test(cfg, model)
        if cfg.TEST.AUG.ENABLED:
            # Optionally add test-time-augmentation results.
            res.update(Trainer.test_with_TTA(cfg, model))
        if comm.is_main_process():
            verify_results(cfg, res)
        return res
    """
    If you'd like to do anything fancier than the standard training logic,
    consider writing your own training loop (see plain_train_net.py) or
    subclassing the trainer.
    """
    trainer = Trainer(cfg)
    trainer.resume_or_load(resume=args.resume)
    if cfg.TEST.AUG.ENABLED:
        # Evaluate with TTA at the end of training (EvalHook period 0).
        trainer.register_hooks(
            [hooks.EvalHook(0, lambda: trainer.test_with_TTA(cfg, trainer.model))]
        )
    return trainer.train()
if __name__ == "__main__":
    # Parse detectron2's standard launch arguments (config file, #GPUs,
    # machine rank, distributed URL, extra cfg opts).
    args = default_argument_parser().parse_args()
    print("Command Line Args:", args)
    # Spawn one worker process per GPU (per machine) running `main`.
    launch(
        main,
        args.num_gpus,
        num_machines=args.num_machines,
        machine_rank=args.machine_rank,
        dist_url=args.dist_url,
        args=(args,),
    )
| 5,880 | 35.987421 | 152 | py |
SSKD | SSKD-master/teacher.py | import os
import os.path as osp
import argparse
import time
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim.lr_scheduler import MultiStepLR
from torch.utils.data import DataLoader
import torchvision.transforms as transforms
from torchvision.datasets import CIFAR100
from tensorboardX import SummaryWriter
from utils import AverageMeter, accuracy
from models import model_dict
# Train a CIFAR-100 teacher network with plain cross-entropy (SSKD stage 1).
torch.backends.cudnn.benchmark = True

parser = argparse.ArgumentParser(description='train teacher network.')
parser.add_argument('--epoch', type=int, default=240)
parser.add_argument('--batch-size', type=int, default=64)
parser.add_argument('--lr', type=float, default=0.05)
parser.add_argument('--momentum', type=float, default=0.9)
parser.add_argument('--weight-decay', type=float, default=5e-4)
parser.add_argument('--gamma', type=float, default=0.1)
parser.add_argument('--milestones', type=int, nargs='+', default=[150,180,210])
parser.add_argument('--save-interval', type=int, default=40)
parser.add_argument('--arch', type=str)
parser.add_argument('--seed', type=int, default=0)
parser.add_argument('--gpu-id', type=int, default=0)

args = parser.parse_args()

# Seed everything for reproducibility and pin the visible GPU.
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
np.random.seed(args.seed)
os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu_id)

exp_name = 'teacher_{}_seed{}'.format(args.arch, args.seed)
exp_path = './experiments/{}'.format(exp_name)
os.makedirs(exp_path, exist_ok=True)

# Standard CIFAR-100 augmentation and normalization.
transform_train = transforms.Compose([
    transforms.RandomCrop(32, padding=4),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.5071, 0.4866, 0.4409], std=[0.2675, 0.2565, 0.2761]),
])
transform_test = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.5071, 0.4866, 0.4409], std=[0.2675, 0.2565, 0.2761]),
])

trainset = CIFAR100('./data', train=True, transform=transform_train, download=True)
valset = CIFAR100('./data', train=False, transform=transform_test, download=True)
train_loader = DataLoader(trainset, batch_size=args.batch_size, shuffle=True, num_workers=4, pin_memory=False)
val_loader = DataLoader(valset, batch_size=args.batch_size, shuffle=False, num_workers=4, pin_memory=False)

model = model_dict[args.arch](num_classes=100).cuda()
optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
scheduler = MultiStepLR(optimizer, milestones=args.milestones, gamma=args.gamma)

logger = SummaryWriter(osp.join(exp_path, 'events'))

best_acc = -1
for epoch in range(args.epoch):

    # ---- training phase ----
    model.train()
    loss_record = AverageMeter()
    acc_record = AverageMeter()

    start = time.time()
    for x, target in train_loader:

        optimizer.zero_grad()
        x = x.cuda()
        target = target.cuda()

        output = model(x)
        loss = F.cross_entropy(output, target)

        loss.backward()
        optimizer.step()

        batch_acc = accuracy(output, target, topk=(1,))[0]
        loss_record.update(loss.item(), x.size(0))
        acc_record.update(batch_acc.item(), x.size(0))

    logger.add_scalar('train/cls_loss', loss_record.avg, epoch+1)
    logger.add_scalar('train/cls_acc', acc_record.avg, epoch+1)

    run_time = time.time() - start
    info = 'train_Epoch:{:03d}/{:03d}\t run_time:{:.3f}\t cls_loss:{:.3f}\t cls_acc:{:.2f}\t'.format(
        epoch+1, args.epoch, run_time, loss_record.avg, acc_record.avg)
    print(info)

    # ---- validation phase ----
    model.eval()
    acc_record = AverageMeter()
    loss_record = AverageMeter()
    start = time.time()
    for x, target in val_loader:

        x = x.cuda()
        target = target.cuda()
        with torch.no_grad():
            output = model(x)
            loss = F.cross_entropy(output, target)

        batch_acc = accuracy(output, target, topk=(1,))[0]
        loss_record.update(loss.item(), x.size(0))
        acc_record.update(batch_acc.item(), x.size(0))

    run_time = time.time() - start
    logger.add_scalar('val/cls_loss', loss_record.avg, epoch+1)
    logger.add_scalar('val/cls_acc', acc_record.avg, epoch+1)

    info = 'test_Epoch:{:03d}/{:03d}\t run_time:{:.2f}\t cls_loss:{:.3f}\t cls_acc:{:.2f}\n'.format(
        epoch+1, args.epoch, run_time, loss_record.avg, acc_record.avg)
    print(info)

    scheduler.step()

    # save checkpoint (at LR milestones, at the end, and every save-interval)
    if (epoch+1) in args.milestones or epoch+1==args.epoch or (epoch+1)%args.save_interval==0:
        state_dict = dict(epoch=epoch+1, state_dict=model.state_dict(), acc=acc_record.avg)
        name = osp.join(exp_path, 'ckpt/{:03d}.pth'.format(epoch+1))
        os.makedirs(osp.dirname(name), exist_ok=True)
        torch.save(state_dict, name)

    # save best (by validation accuracy)
    if acc_record.avg > best_acc:
        state_dict = dict(epoch=epoch+1, state_dict=model.state_dict(), acc=acc_record.avg)
        name = osp.join(exp_path, 'ckpt/best.pth')
        os.makedirs(osp.dirname(name), exist_ok=True)
        torch.save(state_dict, name)
        best_acc = acc_record.avg

print('best_acc: {:.2f}'.format(best_acc))
| 5,112 | 34.020548 | 110 | py |
SSKD | SSKD-master/student.py | import os
import os.path as osp
import argparse
import time
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim.lr_scheduler import MultiStepLR
from torch.utils.data import DataLoader
import torchvision.transforms as transforms
from tensorboardX import SummaryWriter
from utils import AverageMeter, accuracy
from wrapper import wrapper
from cifar import CIFAR100
from models import model_dict
# SSKD stage 2 setup: hyperparameters, data, and the pretrained teacher.
torch.backends.cudnn.benchmark = True

parser = argparse.ArgumentParser(description='train SSKD student network.')
parser.add_argument('--epoch', type=int, default=240)
parser.add_argument('--t-epoch', type=int, default=60)
parser.add_argument('--batch-size', type=int, default=64)
parser.add_argument('--lr', type=float, default=0.05)
parser.add_argument('--t-lr', type=float, default=0.05)
parser.add_argument('--momentum', type=float, default=0.9)
parser.add_argument('--weight-decay', type=float, default=5e-4)
parser.add_argument('--gamma', type=float, default=0.1)
parser.add_argument('--milestones', type=int, nargs='+', default=[150,180,210])
parser.add_argument('--t-milestones', type=int, nargs='+', default=[30,45])
parser.add_argument('--save-interval', type=int, default=40)
parser.add_argument('--ce-weight', type=float, default=0.1) # cross-entropy
parser.add_argument('--kd-weight', type=float, default=0.9) # knowledge distillation
parser.add_argument('--tf-weight', type=float, default=2.7) # transformation
parser.add_argument('--ss-weight', type=float, default=10.0) # self-supervision

parser.add_argument('--kd-T', type=float, default=4.0) # temperature in KD
parser.add_argument('--tf-T', type=float, default=4.0) # temperature in LT
parser.add_argument('--ss-T', type=float, default=0.5) # temperature in SS

parser.add_argument('--ratio-tf', type=float, default=1.0) # keep how many wrong predictions of LT
parser.add_argument('--ratio-ss', type=float, default=0.75) # keep how many wrong predictions of SS

parser.add_argument('--s-arch', type=str) # student architecture
parser.add_argument('--t-path', type=str) # teacher checkpoint path

parser.add_argument('--seed', type=int, default=0)
parser.add_argument('--gpu-id', type=int, default=0)

args = parser.parse_args()

# Seed everything for reproducibility and pin the visible GPU.
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
np.random.seed(args.seed)
os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu_id)

# The teacher architecture is recovered from its experiment folder name,
# which has the form 'teacher_<arch>_seed<k>'.
t_name = osp.abspath(args.t_path).split('/')[-1]
t_arch = '_'.join(t_name.split('_')[1:-1])
exp_name = 'sskd_student_{}_weight{}+{}+{}+{}_T{}+{}+{}_ratio{}+{}_seed{}_{}'.format(\
            args.s_arch, \
            args.ce_weight, args.kd_weight, args.tf_weight, args.ss_weight, \
            args.kd_T, args.tf_T, args.ss_T, \
            args.ratio_tf, args.ratio_ss, \
            args.seed, t_name)
exp_path = './experiments/{}'.format(exp_name)
os.makedirs(exp_path, exist_ok=True)

# No RandomHorizontalFlip here: the custom CIFAR100 wrapper supplies
# rotation-based views used as the self-supervised signal.
transform_train = transforms.Compose([
    transforms.RandomCrop(32, padding=4),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.5071, 0.4866, 0.4409], std=[0.2675, 0.2565, 0.2761]),
])
transform_test = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.5071, 0.4866, 0.4409], std=[0.2675, 0.2565, 0.2761]),
])

trainset = CIFAR100('./data', train=True, transform=transform_train)
valset = CIFAR100('./data', train=False, transform=transform_test)
train_loader = DataLoader(trainset, batch_size=args.batch_size, shuffle=True, num_workers=4, pin_memory=False)
val_loader = DataLoader(valset, batch_size=args.batch_size, shuffle=False, num_workers=4, pin_memory=False)

# Load the pretrained teacher and wrap it with a projection head.
ckpt_path = osp.join(args.t_path, 'ckpt/best.pth')
t_model = model_dict[t_arch](num_classes=100).cuda()
state_dict = torch.load(ckpt_path)['state_dict']
t_model.load_state_dict(state_dict)
t_model = wrapper(module=t_model).cuda()

# The backbone is frozen (lr 0.0); only the projection head is trained.
t_optimizer = optim.SGD([{'params':t_model.backbone.parameters(), 'lr':0.0},
                         {'params':t_model.proj_head.parameters(), 'lr':args.t_lr}],
                        momentum=args.momentum, weight_decay=args.weight_decay)
t_model.eval()
t_scheduler = MultiStepLR(t_optimizer, milestones=args.t_milestones, gamma=args.gamma)

logger = SummaryWriter(osp.join(exp_path, 'events'))
# Sanity-check the loaded teacher's classification accuracy on the val set.
acc_record = AverageMeter()
loss_record = AverageMeter()
start = time.time()
for x, target in val_loader:

    # x carries multiple views per image; [:,0] selects the first view
    # (presumably the un-rotated one — confirm against cifar.py).
    x = x[:,0,:,:,:].cuda()
    target = target.cuda()
    with torch.no_grad():
        output, _, feat = t_model(x)
        loss = F.cross_entropy(output, target)

    batch_acc = accuracy(output, target, topk=(1,))[0]
    acc_record.update(batch_acc.item(), x.size(0))
    loss_record.update(loss.item(), x.size(0))

run_time = time.time() - start
info = 'teacher cls_acc:{:.2f}\n'.format(acc_record.avg)
print(info)
# train ssp_head
# Train the teacher's projection head on the self-supervised task: match
# each augmented view to its source image by cosine similarity. The
# backbone stays frozen (bb_grad=False and lr 0.0 in the optimizer).
for epoch in range(args.t_epoch):

    t_model.eval()
    loss_record = AverageMeter()
    acc_record = AverageMeter()

    start = time.time()
    for x, _ in train_loader:

        t_optimizer.zero_grad()

        x = x.cuda()
        # Flatten the 4 views per image into the batch dimension; views are
        # interleaved so index % 4 == 0 marks the normal (un-augmented) view.
        c,h,w = x.size()[-3:]
        x = x.view(-1, c, h, w)

        _, rep, feat = t_model(x, bb_grad=False)
        batch = int(x.size(0) / 4)
        nor_index = (torch.arange(4*batch) % 4 == 0).cuda()
        aug_index = (torch.arange(4*batch) % 4 != 0).cuda()

        # Pairwise cosine similarity between each augmented rep (rows) and
        # every normal rep (columns); the matching column is the target.
        nor_rep = rep[nor_index]
        aug_rep = rep[aug_index]
        nor_rep = nor_rep.unsqueeze(2).expand(-1,-1,3*batch).transpose(0,2)
        aug_rep = aug_rep.unsqueeze(2).expand(-1,-1,1*batch)
        simi = F.cosine_similarity(aug_rep, nor_rep, dim=1)
        target = torch.arange(batch).unsqueeze(1).expand(-1,3).contiguous().view(-1).long().cuda()
        loss = F.cross_entropy(simi, target)

        loss.backward()
        t_optimizer.step()

        batch_acc = accuracy(simi, target, topk=(1,))[0]
        loss_record.update(loss.item(), 3*batch)
        acc_record.update(batch_acc.item(), 3*batch)

    logger.add_scalar('train/teacher_ssp_loss', loss_record.avg, epoch+1)
    logger.add_scalar('train/teacher_ssp_acc', acc_record.avg, epoch+1)

    run_time = time.time() - start
    info = 'teacher_train_Epoch:{:03d}/{:03d}\t run_time:{:.3f}\t ssp_loss:{:.3f}\t ssp_acc:{:.2f}\t'.format(
        epoch+1, args.t_epoch, run_time, loss_record.avg, acc_record.avg)
    print(info)

    # ---- validation of the self-supervised matching task ----
    t_model.eval()
    acc_record = AverageMeter()
    loss_record = AverageMeter()
    start = time.time()
    for x, _ in val_loader:

        x = x.cuda()
        c,h,w = x.size()[-3:]
        x = x.view(-1, c, h, w)

        with torch.no_grad():
            _, rep, feat = t_model(x)

        batch = int(x.size(0) / 4)
        nor_index = (torch.arange(4*batch) % 4 == 0).cuda()
        aug_index = (torch.arange(4*batch) % 4 != 0).cuda()

        nor_rep = rep[nor_index]
        aug_rep = rep[aug_index]
        nor_rep = nor_rep.unsqueeze(2).expand(-1,-1,3*batch).transpose(0,2)
        aug_rep = aug_rep.unsqueeze(2).expand(-1,-1,1*batch)
        simi = F.cosine_similarity(aug_rep, nor_rep, dim=1)
        target = torch.arange(batch).unsqueeze(1).expand(-1,3).contiguous().view(-1).long().cuda()
        loss = F.cross_entropy(simi, target)

        batch_acc = accuracy(simi, target, topk=(1,))[0]
        acc_record.update(batch_acc.item(),3*batch)
        loss_record.update(loss.item(), 3*batch)

    run_time = time.time() - start

    logger.add_scalar('val/teacher_ssp_loss', loss_record.avg, epoch+1)
    logger.add_scalar('val/teacher_ssp_acc', acc_record.avg, epoch+1)

    info = 'ssp_test_Epoch:{:03d}/{:03d}\t run_time:{:.2f}\t ssp_loss:{:.3f}\t ssp_acc:{:.2f}\n'.format(
        epoch+1, args.t_epoch, run_time, loss_record.avg, acc_record.avg)
    print(info)

    t_scheduler.step()

# Persist the teacher (backbone + trained projection head).
name = osp.join(exp_path, 'ckpt/teacher.pth')
os.makedirs(osp.dirname(name), exist_ok=True)
torch.save(t_model.state_dict(), name)
# SSKD student training: cross-entropy + three distillation terms
# (vanilla KD on normal views, "transformation" KD on augmented views,
# and self-supervision KD on the similarity matrices).
s_model = model_dict[args.s_arch](num_classes=100)
s_model = wrapper(module=s_model).cuda()
optimizer = optim.SGD(s_model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
scheduler = MultiStepLR(optimizer, milestones=args.milestones, gamma=args.gamma)

best_acc = 0
for epoch in range(args.epoch):

    # train
    s_model.train()
    loss1_record = AverageMeter()
    loss2_record = AverageMeter()
    loss3_record = AverageMeter()
    loss4_record = AverageMeter()
    cls_acc_record = AverageMeter()
    ssp_acc_record = AverageMeter()

    start = time.time()
    for x, target in train_loader:

        optimizer.zero_grad()

        # Flatten the 4 views per image into the batch; index % 4 == 0
        # marks the normal (un-augmented) view.
        c,h,w = x.size()[-3:]
        x = x.view(-1,c,h,w).cuda()
        target = target.cuda()

        batch = int(x.size(0) / 4)
        nor_index = (torch.arange(4*batch) % 4 == 0).cuda()
        aug_index = (torch.arange(4*batch) % 4 != 0).cuda()

        output, s_feat, _ = s_model(x, bb_grad=True)
        log_nor_output = F.log_softmax(output[nor_index] / args.kd_T, dim=1)
        log_aug_output = F.log_softmax(output[aug_index] / args.tf_T, dim=1)
        with torch.no_grad():
            knowledge, t_feat, _ = t_model(x)
            nor_knowledge = F.softmax(knowledge[nor_index] / args.kd_T, dim=1)
            aug_knowledge = F.softmax(knowledge[aug_index] / args.tf_T, dim=1)

        # error level ranking
        # Keep all correctly-ranked teacher predictions plus the
        # `ratio_tf` fraction of the least-wrong ones.
        aug_target = target.unsqueeze(1).expand(-1,3).contiguous().view(-1).long().cuda()
        rank = torch.argsort(aug_knowledge, dim=1, descending=True)
        rank = torch.argmax(torch.eq(rank, aug_target.unsqueeze(1)).long(), dim=1) # groundtruth label's rank
        index = torch.argsort(rank)
        tmp = torch.nonzero(rank, as_tuple=True)[0]
        wrong_num = tmp.numel()
        correct_num = 3*batch - wrong_num
        wrong_keep = int(wrong_num * args.ratio_tf)
        index = index[:correct_num+wrong_keep]
        distill_index_tf = torch.sort(index)[0]

        # Student and teacher similarity matrices between augmented and
        # normal features (same construction as in ssp-head training).
        s_nor_feat = s_feat[nor_index]
        s_aug_feat = s_feat[aug_index]
        s_nor_feat = s_nor_feat.unsqueeze(2).expand(-1,-1,3*batch).transpose(0,2)
        s_aug_feat = s_aug_feat.unsqueeze(2).expand(-1,-1,1*batch)
        s_simi = F.cosine_similarity(s_aug_feat, s_nor_feat, dim=1)

        t_nor_feat = t_feat[nor_index]
        t_aug_feat = t_feat[aug_index]
        t_nor_feat = t_nor_feat.unsqueeze(2).expand(-1,-1,3*batch).transpose(0,2)
        t_aug_feat = t_aug_feat.unsqueeze(2).expand(-1,-1,1*batch)
        t_simi = F.cosine_similarity(t_aug_feat, t_nor_feat, dim=1)
        t_simi = t_simi.detach()

        # Same error-level ranking applied to the similarity predictions,
        # keeping the `ratio_ss` fraction of wrong ones.
        aug_target = torch.arange(batch).unsqueeze(1).expand(-1,3).contiguous().view(-1).long().cuda()
        rank = torch.argsort(t_simi, dim=1, descending=True)
        rank = torch.argmax(torch.eq(rank, aug_target.unsqueeze(1)).long(), dim=1) # groundtruth label's rank
        index = torch.argsort(rank)
        tmp = torch.nonzero(rank, as_tuple=True)[0]
        wrong_num = tmp.numel()
        correct_num = 3*batch - wrong_num
        wrong_keep = int(wrong_num * args.ratio_ss)
        index = index[:correct_num+wrong_keep]
        distill_index_ss = torch.sort(index)[0]

        log_simi = F.log_softmax(s_simi / args.ss_T, dim=1)
        simi_knowledge = F.softmax(t_simi / args.ss_T, dim=1)

        # Four loss terms; the T*T factors restore gradient magnitude after
        # temperature scaling (standard KD practice).
        loss1 = F.cross_entropy(output[nor_index], target)
        loss2 = F.kl_div(log_nor_output, nor_knowledge, reduction='batchmean') * args.kd_T * args.kd_T
        loss3 = F.kl_div(log_aug_output[distill_index_tf], aug_knowledge[distill_index_tf], \
                        reduction='batchmean') * args.tf_T * args.tf_T
        loss4 = F.kl_div(log_simi[distill_index_ss], simi_knowledge[distill_index_ss], \
                        reduction='batchmean') * args.ss_T * args.ss_T

        loss = args.ce_weight * loss1 + args.kd_weight * loss2 + args.tf_weight * loss3 + args.ss_weight * loss4

        loss.backward()
        optimizer.step()

        cls_batch_acc = accuracy(output[nor_index], target, topk=(1,))[0]
        ssp_batch_acc = accuracy(s_simi, aug_target, topk=(1,))[0]
        loss1_record.update(loss1.item(), batch)
        loss2_record.update(loss2.item(), batch)
        loss3_record.update(loss3.item(), len(distill_index_tf))
        loss4_record.update(loss4.item(), len(distill_index_ss))
        cls_acc_record.update(cls_batch_acc.item(), batch)
        ssp_acc_record.update(ssp_batch_acc.item(), 3*batch)

    logger.add_scalar('train/ce_loss', loss1_record.avg, epoch+1)
    logger.add_scalar('train/kd_loss', loss2_record.avg, epoch+1)
    logger.add_scalar('train/tf_loss', loss3_record.avg, epoch+1)
    logger.add_scalar('train/ss_loss', loss4_record.avg, epoch+1)
    logger.add_scalar('train/cls_acc', cls_acc_record.avg, epoch+1)
    logger.add_scalar('train/ss_acc', ssp_acc_record.avg, epoch+1)

    run_time = time.time() - start
    info = 'student_train_Epoch:{:03d}/{:03d}\t run_time:{:.3f}\t ce_loss:{:.3f}\t kd_loss:{:.3f}\t cls_acc:{:.2f}'.format(
        epoch+1, args.epoch, run_time, loss1_record.avg, loss2_record.avg, cls_acc_record.avg)
    print(info)

    # cls val
    s_model.eval()
    acc_record = AverageMeter()
    loss_record = AverageMeter()
    start = time.time()
    for x, target in val_loader:

        # Only the first (normal) view is used for validation accuracy.
        x = x[:,0,:,:,:].cuda()
        target = target.cuda()
        with torch.no_grad():
            output, _, feat = s_model(x)
            loss = F.cross_entropy(output, target)

        batch_acc = accuracy(output, target, topk=(1,))[0]
        acc_record.update(batch_acc.item(), x.size(0))
        loss_record.update(loss.item(), x.size(0))

    run_time = time.time() - start
    logger.add_scalar('val/ce_loss', loss_record.avg, epoch+1)
    logger.add_scalar('val/cls_acc', acc_record.avg, epoch+1)

    info = 'student_test_Epoch:{:03d}/{:03d}\t run_time:{:.2f}\t cls_acc:{:.2f}\n'.format(
        epoch+1, args.epoch, run_time, acc_record.avg)
    print(info)

    # Checkpoint whenever validation accuracy improves.
    if acc_record.avg > best_acc:
        best_acc = acc_record.avg
        state_dict = dict(epoch=epoch+1, state_dict=s_model.state_dict(), best_acc=best_acc)
        name = osp.join(exp_path, 'ckpt/student_best.pth')
        os.makedirs(osp.dirname(name), exist_ok=True)
        torch.save(state_dict, name)

    scheduler.step()
| 14,139 | 38.830986 | 123 | py |
SSKD | SSKD-master/wrapper.py | import torch
import torch.nn as nn
import torch.nn.functional as F
class wrapper(nn.Module):
    """Wrap a backbone with an MLP projection head for SSKD.

    The projection head maps the backbone's penultimate (flattened)
    feature to a same-dimensional embedding used by the self-supervised
    task, while the backbone's own classifier output is passed through.
    """

    def __init__(self, module):
        super(wrapper, self).__init__()
        self.backbone = module
        # Infer the feature width from the backbone's final (linear) layer.
        dim = list(module.children())[-1].in_features
        head_layers = [
            nn.Linear(dim, dim),
            nn.ReLU(inplace=True),
            nn.Linear(dim, dim),
        ]
        self.proj_head = nn.Sequential(*head_layers)

    def forward(self, x, bb_grad=True):
        """Return (logits, projected feature, raw flattened feature).

        When ``bb_grad`` is False the feature is detached so gradients
        from the projection head do not flow into the backbone.
        """
        feature_list, logits = self.backbone(x, is_feat=True)
        last = feature_list[-1]
        flat = last.view(last.size(0), -1)
        if not bb_grad:
            flat = flat.detach()
        return logits, self.proj_head(flat), flat
| 702 | 24.107143 | 58 | py |
SSKD | SSKD-master/utils.py | import os
import logging
import numpy as np
import torch
from torch.nn import init
class AverageMeter(object):
    """Tracks the most recent value and the running average of a series."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all accumulated statistics."""
        self.count = 0
        self.sum = 0.0
        self.val = 0.0
        self.avg = 0.0

    def update(self, val, n=1):
        """Record ``val`` observed ``n`` times and refresh the average."""
        self.val = val
        self.sum = self.sum + val * n
        self.count = self.count + n
        self.avg = self.sum / self.count
def accuracy(output, target, topk=(1,)):
    """Computes the precision@k for the specified values of k.

    Args:
        output: (batch, num_classes) logits.
        target: (batch,) integer class labels.
        topk: tuple of k values to evaluate.

    Returns:
        List of 1-element tensors, each the top-k accuracy in percent.
    """
    maxk = max(topk)
    batch_size = target.size(0)
    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))
    res = []
    for k in topk:
        # reshape, not view: the slice of the transposed prediction matrix
        # is non-contiguous, so view(-1) raises in modern PyTorch.
        correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res
def norm(x):
    """Return ``x`` scaled to unit Euclidean length."""
    return x / np.linalg.norm(x)
| 1,010 | 21.977273 | 69 | py |
SSKD | SSKD-master/cifar.py | from __future__ import print_function
from PIL import Image
import os
import os.path
import numpy as np
import sys
import pickle
import torch
import torch.utils.data as data
from itertools import permutations
class VisionDataset(data.Dataset):
    """Base class for the dataset wrappers below.

    Stores the (user-expanded) root path plus an optional transform pair
    and provides a common ``__repr__``. Subclasses must implement
    ``__getitem__`` and ``__len__``.
    """

    _repr_indent = 4

    def __init__(self, root, transforms=None, target_transform=None, transform=None):
        # ``torch._six.string_classes`` was removed from modern PyTorch;
        # a plain str check is equivalent on Python 3.
        if isinstance(root, str):
            root = os.path.expanduser(root)
        self.root = root
        has_transforms = transforms is not None
        has_separate_transform = transform is not None or target_transform is not None
        if has_transforms and has_separate_transform:
            raise ValueError("Only transforms or transform/target_transform can "
                             "be passed as argument")
        # for backwards-compatibility
        self.transform = transform
        self.target_transform = target_transform
        if has_separate_transform:
            # NOTE(review): StandardTransform is not defined in this file,
            # so this branch raises NameError if reached — confirm upstream.
            transforms = StandardTransform(transform, target_transform)
        self.transforms = transforms

    def __getitem__(self, index):
        raise NotImplementedError

    def __len__(self):
        raise NotImplementedError

    def __repr__(self):
        """Multi-line summary: class name, size, root, extras, transforms."""
        head = "Dataset " + self.__class__.__name__
        body = ["Number of datapoints: {}".format(self.__len__())]
        if self.root is not None:
            body.append("Root location: {}".format(self.root))
        body += self.extra_repr().splitlines()
        if self.transforms is not None:
            body += [repr(self.transforms)]
        lines = [head] + [" " * self._repr_indent + line for line in body]
        return '\n'.join(lines)

    def _format_transform_repr(self, transform, head):
        # Indent continuation lines of a transform repr under its heading.
        lines = transform.__repr__().splitlines()
        return (["{}{}".format(head, lines[0])] +
                ["{}{}".format(" " * len(head), line) for line in lines[1:]])

    def extra_repr(self):
        """Extra lines for ``__repr__``; subclasses may override."""
        return ""
class CIFAR10(VisionDataset):
    """`CIFAR10 <https://www.cs.toronto.edu/~kriz/cifar.html>`_ Dataset.

    Loads the python-pickle batches from ``root``/``base_folder`` into
    memory as an HWC uint8 array. ``__getitem__`` returns the four
    90-degree rotations of an image stacked along a new leading dim
    (shape (4, C, H, W) assuming ``transform`` yields a tensor), so
    downstream code indexes the rotation axis explicitly.
    Downloading is intentionally disabled in this fork.
    """
    # Archive layout and per-file md5 checksums of the official release.
    base_folder = 'cifar-10-batches-py'
    url = "https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz"
    filename = "cifar-10-python.tar.gz"
    tgz_md5 = 'c58f30108f718f92721af3b95e74349a'
    train_list = [
        ['data_batch_1', 'c99cafc152244af753f735de768cd75f'],
        ['data_batch_2', 'd4bba439e000b95fd0a9bffe97cbabec'],
        ['data_batch_3', '54ebc095f3ab1f0389bbae665268c751'],
        ['data_batch_4', '634d18415352ddfa80567beed471001a'],
        ['data_batch_5', '482c414d41f54cd18b22e5b47cb7c3cb'],
    ]
    test_list = [
        ['test_batch', '40351d587109b95175f43aff81a1287e'],
    ]
    meta = {
        'filename': 'batches.meta',
        'key': 'label_names',
        'md5': '5ff9c542aee3614f3951f8cda6e48888',
    }
    def __init__(self, root, train=True,
                 transform=None, download=False):
        """Load all batches of the chosen split eagerly into memory."""
        super(CIFAR10, self).__init__(root)
        self.transform = transform
        self.train = train  # training set or test set
        if download:
            # Download support has been stripped; the exit() below is
            # unreachable after the raise.
            raise ValueError('cannot download.')
            exit()
            #self.download()
        #if not self._check_integrity():
        #    raise RuntimeError('Dataset not found or corrupted.' +
        #                       ' You can use download=True to download it')
        if self.train:
            downloaded_list = self.train_list
        else:
            downloaded_list = self.test_list
        self.data = []
        self.targets = []
        # now load the picked numpy arrays
        for file_name, checksum in downloaded_list:
            file_path = os.path.join(self.root, self.base_folder, file_name)
            with open(file_path, 'rb') as f:
                if sys.version_info[0] == 2:
                    entry = pickle.load(f)
                else:
                    # latin1 decodes the Python-2 pickles of the release.
                    entry = pickle.load(f, encoding='latin1')
                self.data.append(entry['data'])
                # CIFAR-10 batches store 'labels'; CIFAR-100 stores
                # 'fine_labels' (this loader is shared by the subclass).
                if 'labels' in entry:
                    self.targets.extend(entry['labels'])
                else:
                    self.targets.extend(entry['fine_labels'])
        self.data = np.vstack(self.data).reshape(-1, 3, 32, 32)
        self.data = self.data.transpose((0, 2, 3, 1))  # convert to HWC
        self._load_meta()
    def _load_meta(self):
        """Read class names from the meta pickle and build class_to_idx."""
        path = os.path.join(self.root, self.base_folder, self.meta['filename'])
        #if not check_integrity(path, self.meta['md5']):
        #    raise RuntimeError('Dataset metadata file not found or corrupted.' +
        #                       ' You can use download=True to download it')
        with open(path, 'rb') as infile:
            if sys.version_info[0] == 2:
                data = pickle.load(infile)
            else:
                data = pickle.load(infile, encoding='latin1')
            self.classes = data[self.meta['key']]
        self.class_to_idx = {_class: i for i, _class in enumerate(self.classes)}
    def __getitem__(self, index):
        """Return (rotations, target): the 0/90/180/270-degree rotations
        stacked along dim 0; training samples get a random horizontal flip
        first so all four rotations share the same flip."""
        img, target = self.data[index], self.targets[index]
        if self.train:
            if np.random.rand() < 0.5:
                img = img[:,::-1,:]  # random horizontal flip (train only)
        img0 = np.rot90(img, 0).copy()
        img0 = Image.fromarray(img0)
        img0 = self.transform(img0)
        img1 = np.rot90(img, 1).copy()
        img1 = Image.fromarray(img1)
        img1 = self.transform(img1)
        img2 = np.rot90(img, 2).copy()
        img2 = Image.fromarray(img2)
        img2 = self.transform(img2)
        img3 = np.rot90(img, 3).copy()
        img3 = Image.fromarray(img3)
        img3 = self.transform(img3)
        img = torch.stack([img0,img1,img2,img3])
        return img, target
    def __len__(self):
        return len(self.data)
    def _check_integrity(self):
        # NOTE(review): check_integrity is not imported in this file, so
        # calling this would raise NameError — confirm upstream intent.
        root = self.root
        for fentry in (self.train_list + self.test_list):
            filename, md5 = fentry[0], fentry[1]
            fpath = os.path.join(root, self.base_folder, filename)
            if not check_integrity(fpath, md5):
                return False
        return True
    def download(self):
        # NOTE(review): download_url is not imported in this file and
        # __init__ forbids download=True, so this path is effectively dead.
        import tarfile
        if self._check_integrity():
            print('Files already downloaded and verified')
            return
        download_url(self.url, self.root, self.filename, self.tgz_md5)
        # extract file
        with tarfile.open(os.path.join(self.root, self.filename), "r:gz") as tar:
            tar.extractall(path=self.root)
    def extra_repr(self):
        return "Split: {}".format("Train" if self.train is True else "Test")
class CIFAR100(CIFAR10):
    """`CIFAR100 <https://www.cs.toronto.edu/~kriz/cifar.html>`_ Dataset.
    This is a subclass of the `CIFAR10` Dataset.
    """
    # Archive layout and md5 checksums of the official CIFAR-100 release;
    # all loading behavior (including rotation stacking) comes from CIFAR10.
    base_folder = 'cifar-100-python'
    url = "https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz"
    filename = "cifar-100-python.tar.gz"
    tgz_md5 = 'eb9058c3a382ffc7106e4002c42a8d85'
    train_list = [
        ['train', '16019d7e3df5f24257cddd939b257f8d'],
    ]
    test_list = [
        ['test', 'f0ef6b0ae62326f3e7ffdfab6717acfc'],
    ]
    meta = {
        'filename': 'meta',
        # Use the 100 fine-grained labels, not the 20 coarse superclasses.
        'key': 'fine_label_names',
        'md5': '7973b15100ade9c7d40fb424638fde48',
    }
| 7,139 | 31.752294 | 86 | py |
SSKD | SSKD-master/models/resnet.py | from __future__ import absolute_import
'''Resnet for cifar dataset.
Ported form
https://github.com/facebook/fb.resnet.torch
and
https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
(c) YANG, Wei
'''
import torch.nn as nn
import torch.nn.functional as F
import math
__all__ = ['resnet']
def conv3x3(in_planes, out_planes, stride=1):
    """3x3 convolution with padding"""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
class BasicBlock(nn.Module):
    """Two-conv residual block; can also expose its pre-activation output."""

    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None, is_last=False):
        super(BasicBlock, self).__init__()
        self.is_last = is_last
        # 3x3 "same" convolutions (no bias; BatchNorm follows each).
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Projection shortcut when a downsample module was supplied.
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out += shortcut
        preact = out
        out = F.relu(out)
        # is_last blocks additionally return the pre-ReLU activation.
        return (out, preact) if self.is_last else out
class Bottleneck(nn.Module):
    """1x1 -> 3x3 -> 1x1 residual bottleneck with 4x channel expansion."""

    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None, is_last=False):
        super(Bottleneck, self).__init__()
        self.is_last = is_last
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Projection shortcut when a downsample module was supplied.
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out += shortcut
        preact = out
        out = F.relu(out)
        # is_last blocks additionally return the pre-ReLU activation.
        return (out, preact) if self.is_last else out
class ResNet(nn.Module):
    """CIFAR-style ResNet built from BasicBlock or Bottleneck units.

    Args:
        depth: total layer count; must be 6n+2 (basicblock) or 9n+2
            (bottleneck).
        num_filters: channel widths [stem, stage1, stage2, stage3].
        block_name: 'BasicBlock' or 'Bottleneck' (case-insensitive).
        num_classes: output size of the final linear layer.
    """
    def __init__(self, depth, num_filters, block_name='BasicBlock', num_classes=10):
        super(ResNet, self).__init__()
        # Model type specifies number of layers for CIFAR-10 model
        if block_name.lower() == 'basicblock':
            assert (depth - 2) % 6 == 0, 'When use basicblock, depth should be 6n+2, e.g. 20, 32, 44, 56, 110, 1202'
            n = (depth - 2) // 6
            block = BasicBlock
        elif block_name.lower() == 'bottleneck':
            assert (depth - 2) % 9 == 0, 'When use bottleneck, depth should be 9n+2, e.g. 20, 29, 47, 56, 110, 1199'
            n = (depth - 2) // 9
            block = Bottleneck
        else:
            raise ValueError('block_name shoule be Basicblock or Bottleneck')
        self.inplanes = num_filters[0]
        self.conv1 = nn.Conv2d(3, num_filters[0], kernel_size=3, padding=1,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(num_filters[0])
        self.relu = nn.ReLU(inplace=True)
        # Three stages; stages 2 and 3 halve the spatial resolution.
        self.layer1 = self._make_layer(block, num_filters[1], n)
        self.layer2 = self._make_layer(block, num_filters[2], n, stride=2)
        self.layer3 = self._make_layer(block, num_filters[3], n, stride=2)
        self.avgpool = nn.AvgPool2d(8)  # 8x8 final feature map -> 1x1
        self.fc = nn.Linear(num_filters[3] * block.expansion, num_classes)
        # He init for convs; BN scaled to identity.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
    def _make_layer(self, block, planes, blocks, stride=1):
        """Stack ``blocks`` residual units; the last unit is flagged
        is_last so it also returns its pre-activation output."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            # 1x1 projection shortcut when shape/channels change.
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = list([])
        layers.append(block(self.inplanes, planes, stride, downsample, is_last=(blocks == 1)))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes, is_last=(i == blocks-1)))
        return nn.Sequential(*layers)
    def get_feat_modules(self):
        """Return the feature-extracting modules in forward order."""
        feat_m = nn.ModuleList([])
        feat_m.append(self.conv1)
        feat_m.append(self.bn1)
        feat_m.append(self.relu)
        feat_m.append(self.layer1)
        feat_m.append(self.layer2)
        feat_m.append(self.layer3)
        return feat_m
    def get_bn_before_relu(self):
        """Return the last pre-activation BN layer of each stage."""
        if isinstance(self.layer1[0], Bottleneck):
            bn1 = self.layer1[-1].bn3
            bn2 = self.layer2[-1].bn3
            bn3 = self.layer3[-1].bn3
        elif isinstance(self.layer1[0], BasicBlock):
            bn1 = self.layer1[-1].bn2
            bn2 = self.layer2[-1].bn2
            bn3 = self.layer3[-1].bn2
        else:
            raise NotImplementedError('ResNet unknown block error !!!')
        return [bn1, bn2, bn3]
    def forward(self, x, is_feat=False, preact=False):
        """Run the network; with ``is_feat=True`` also return intermediate
        features ([f0..f4], logits). ``preact`` selects the pre-activation
        stage outputs instead of the post-ReLU ones."""
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)  # 32x32
        f0 = x
        x, f1_pre = self.layer1(x)  # 32x32
        f1 = x
        x, f2_pre = self.layer2(x)  # 16x16
        f2 = x
        x, f3_pre = self.layer3(x)  # 8x8
        f3 = x
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        f4 = x
        x = self.fc(x)
        if is_feat:
            if preact:
                return [f0, f1_pre, f2_pre, f3_pre, f4], x
            else:
                return [f0, f1, f2, f3, f4], x
        else:
            return x
def resnet8(**kwargs):
    """ResNet-8 (BasicBlock, widths 16/16/32/64)."""
    return ResNet(8, [16, 16, 32, 64], 'basicblock', **kwargs)
def resnet14(**kwargs):
    """ResNet-14 (BasicBlock, widths 16/16/32/64)."""
    return ResNet(14, [16, 16, 32, 64], 'basicblock', **kwargs)
def resnet20(**kwargs):
    """ResNet-20 (BasicBlock, widths 16/16/32/64)."""
    return ResNet(20, [16, 16, 32, 64], 'basicblock', **kwargs)
def resnet14x05(**kwargs):
    """ResNet-14 at half width (8/8/16/32)."""
    return ResNet(14, [8, 8, 16, 32], 'basicblock', **kwargs)
def resnet20x05(**kwargs):
    """ResNet-20 at half width (8/8/16/32)."""
    return ResNet(20, [8, 8, 16, 32], 'basicblock', **kwargs)
def resnet20x0375(**kwargs):
    """ResNet-20 at 0.375x width (6/6/12/24)."""
    return ResNet(20, [6, 6, 12, 24], 'basicblock', **kwargs)
def resnet32(**kwargs):
    """ResNet-32 (BasicBlock, widths 16/16/32/64)."""
    return ResNet(32, [16, 16, 32, 64], 'basicblock', **kwargs)
def resnet44(**kwargs):
    """ResNet-44 (BasicBlock, widths 16/16/32/64)."""
    return ResNet(44, [16, 16, 32, 64], 'basicblock', **kwargs)
def resnet56(**kwargs):
    """ResNet-56 (BasicBlock, widths 16/16/32/64)."""
    return ResNet(56, [16, 16, 32, 64], 'basicblock', **kwargs)
def resnet110(**kwargs):
    """ResNet-110 (BasicBlock, widths 16/16/32/64)."""
    return ResNet(110, [16, 16, 32, 64], 'basicblock', **kwargs)
def resnet8x4(**kwargs):
    """ResNet-8 at 4x width (32/64/128/256)."""
    return ResNet(8, [32, 64, 128, 256], 'basicblock', **kwargs)
def resnet32x4(**kwargs):
    """ResNet-32 at 4x width (32/64/128/256)."""
    return ResNet(32, [32, 64, 128, 256], 'basicblock', **kwargs)
if __name__ == '__main__':
    # Smoke test: run a tiny batch through resnet8x4, print intermediate
    # feature shapes, and verify the hint layers are BatchNorm modules.
    import torch
    x = torch.randn(2, 3, 32, 32)
    net = resnet8x4(num_classes=20)
    feats, logit = net(x, is_feat=True, preact=True)
    for f in feats:
        print(f.shape, f.min().item())
    print(logit.shape)
    for m in net.get_bn_before_relu():
        if isinstance(m, nn.BatchNorm2d):
            print('pass')
        else:
            print('warning')
| 8,021 | 29.044944 | 116 | py |
SSKD | SSKD-master/models/mobilenetv2.py | """
MobileNetV2 implementation used in
<Knowledge Distillation via Route Constrained Optimization>
"""
import torch
import torch.nn as nn
import math
__all__ = ['mobilenetv2_T_w', 'mobile_half']
BN = None
def conv_bn(inp, oup, stride):
    """3x3 convolution (given stride) + BatchNorm + ReLU block."""
    modules = [
        nn.Conv2d(inp, oup, 3, stride, 1, bias=False),
        nn.BatchNorm2d(oup),
        nn.ReLU(inplace=True),
    ]
    return nn.Sequential(*modules)
def conv_1x1_bn(inp, oup):
    """Pointwise (1x1) convolution + BatchNorm + ReLU block."""
    modules = [
        nn.Conv2d(inp, oup, 1, 1, 0, bias=False),
        nn.BatchNorm2d(oup),
        nn.ReLU(inplace=True),
    ]
    return nn.Sequential(*modules)
class InvertedResidual(nn.Module):
    """MobileNetV2 inverted residual: expand -> depthwise -> project."""

    def __init__(self, inp, oup, stride, expand_ratio):
        super(InvertedResidual, self).__init__()
        self.blockname = None

        self.stride = stride
        assert stride in [1, 2]

        # A skip connection is only valid when both the spatial size and
        # the channel count are unchanged.
        self.use_res_connect = stride == 1 and inp == oup

        hidden = inp * expand_ratio
        self.conv = nn.Sequential(
            # pointwise expansion
            nn.Conv2d(inp, hidden, 1, 1, 0, bias=False),
            nn.BatchNorm2d(hidden),
            nn.ReLU(inplace=True),
            # depthwise 3x3
            nn.Conv2d(hidden, hidden, 3, stride, 1, groups=hidden, bias=False),
            nn.BatchNorm2d(hidden),
            nn.ReLU(inplace=True),
            # pointwise linear projection (no activation)
            nn.Conv2d(hidden, oup, 1, 1, 0, bias=False),
            nn.BatchNorm2d(oup),
        )
        self.names = ['0', '1', '2', '3', '4', '5', '6', '7']

    def forward(self, x):
        branch = self.conv(x)
        return x + branch if self.use_res_connect else branch
class MobileNetV2(nn.Module):
    """MobileNetV2 backbone for small inputs.

    Args:
        T: expansion ratio of the inverted residual blocks.
        feature_dim: output size of the final classifier layer.
        input_size: square input resolution; must be divisible by 32.
        width_mult: channel-width multiplier applied to every stage.
        remove_avg: if True, skip the final average pooling.
    """
    def __init__(self, T,
                 feature_dim,
                 input_size=32,
                 width_mult=1.,
                 remove_avg=False):
        super(MobileNetV2, self).__init__()
        self.remove_avg = remove_avg
        # setting of inverted residual blocks
        self.interverted_residual_setting = [
            # t, c, n, s  (expansion, channels, repeats, first stride)
            [1, 16, 1, 1],
            [T, 24, 2, 1],
            [T, 32, 3, 2],
            [T, 64, 4, 2],
            [T, 96, 3, 1],
            [T, 160, 3, 2],
            [T, 320, 1, 1],
        ]
        # building first layer
        assert input_size % 32 == 0
        input_channel = int(32 * width_mult)
        self.conv1 = conv_bn(3, input_channel, 2)
        # building inverted residual blocks; only the first block of each
        # group uses stride s, the rest use stride 1.
        self.blocks = nn.ModuleList([])
        for t, c, n, s in self.interverted_residual_setting:
            output_channel = int(c * width_mult)
            layers = []
            strides = [s] + [1] * (n - 1)
            for stride in strides:
                layers.append(
                    InvertedResidual(input_channel, output_channel, stride, t)
                )
                input_channel = output_channel
            self.blocks.append(nn.Sequential(*layers))
        # Last channel is not shrunk below 1280 for width_mult <= 1.
        self.last_channel = int(1280 * width_mult) if width_mult > 1.0 else 1280
        self.conv2 = conv_1x1_bn(input_channel, self.last_channel)
        H = input_size // (32//2)
        self.avgpool = nn.AvgPool2d(H, ceil_mode=True)
        # building classifier
        #self.classifier = nn.Sequential(
        #    # nn.Dropout(0.5),
        #    nn.Linear(self.last_channel, feature_dim),
        #)
        self.classifier = nn.Linear(self.last_channel, feature_dim)
        self._initialize_weights()
        print(T, width_mult)
    def get_bn_before_relu(self):
        """Return the final (pre-activation) BN of four selected groups."""
        bn1 = self.blocks[1][-1].conv[-1]
        bn2 = self.blocks[2][-1].conv[-1]
        bn3 = self.blocks[4][-1].conv[-1]
        bn4 = self.blocks[6][-1].conv[-1]
        return [bn1, bn2, bn3, bn4]
    def get_feat_modules(self):
        """Return the feature-extracting modules in forward order."""
        feat_m = nn.ModuleList([])
        feat_m.append(self.conv1)
        feat_m.append(self.blocks)
        return feat_m
    def forward(self, x, is_feat=False, preact=False):
        """Run the network; with ``is_feat=True`` also return the staged
        features ([f0..f5], logits). ``preact`` is accepted for interface
        parity with the other backbones but is not used here."""
        out = self.conv1(x)
        f0 = out
        out = self.blocks[0](out)
        out = self.blocks[1](out)
        f1 = out
        out = self.blocks[2](out)
        f2 = out
        out = self.blocks[3](out)
        out = self.blocks[4](out)
        f3 = out
        out = self.blocks[5](out)
        out = self.blocks[6](out)
        f4 = out
        out = self.conv2(out)
        if not self.remove_avg:
            out = self.avgpool(out)
        out = out.view(out.size(0), -1)
        f5 = out
        out = self.classifier(out)
        if is_feat:
            return [f0, f1, f2, f3, f4, f5], out
        else:
            return out
    def _initialize_weights(self):
        # He-style init for convs, identity BN, small-normal linear.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                n = m.weight.size(1)
                m.weight.data.normal_(0, 0.01)
                m.bias.data.zero_()
def mobilenetv2_T_w(T, W, feature_dim=100):
    """MobileNetV2 with expansion factor ``T`` and width multiplier ``W``."""
    model = MobileNetV2(T=T, feature_dim=feature_dim, width_mult=W)
    return model
def mobile_half(num_classes):
    """MobileNetV2 (T=6) at half width."""
    return mobilenetv2_T_w(6, 0.5, num_classes)
if __name__ == '__main__':
    # Smoke test: print feature shapes and verify hint layers are BN.
    x = torch.randn(2, 3, 32, 32)
    net = mobile_half(100)
    feats, logit = net(x, is_feat=True, preact=True)
    for f in feats:
        print(f.shape, f.min().item())
    print(logit.shape)
    for m in net.get_bn_before_relu():
        if isinstance(m, nn.BatchNorm2d):
            print('pass')
        else:
            print('warning')
| 5,777 | 27.323529 | 115 | py |
SSKD | SSKD-master/models/vgg.py | '''VGG for CIFAR10. FC layers are removed.
(c) YANG, Wei
'''
import torch.nn as nn
import torch.nn.functional as F
import math
__all__ = [
'VGG', 'vgg11', 'vgg11_bn', 'vgg13', 'vgg13_bn', 'vgg16', 'vgg16_bn',
'vgg19_bn', 'vgg19',
]
model_urls = {
'vgg11': 'https://download.pytorch.org/models/vgg11-bbd30ac9.pth',
'vgg13': 'https://download.pytorch.org/models/vgg13-c768596a.pth',
'vgg16': 'https://download.pytorch.org/models/vgg16-397923af.pth',
'vgg19': 'https://download.pytorch.org/models/vgg19-dcbb9e9d.pth',
}
class VGG(nn.Module):
    """VGG backbone for small images; the FC head is a single linear layer.

    ``cfg`` is a list of five per-block channel lists (see the module-level
    ``cfg`` dict). Pooling layers are applied between blocks, and each conv
    block ends *before* its last ReLU so pre-activation features can be
    extracted (see ``_make_layers``).
    """
    def __init__(self, cfg, batch_norm=False, num_classes=1000):
        super(VGG, self).__init__()
        self.block0 = self._make_layers(cfg[0], batch_norm, 3)
        self.block1 = self._make_layers(cfg[1], batch_norm, cfg[0][-1])
        self.block2 = self._make_layers(cfg[2], batch_norm, cfg[1][-1])
        self.block3 = self._make_layers(cfg[3], batch_norm, cfg[2][-1])
        self.block4 = self._make_layers(cfg[4], batch_norm, cfg[3][-1])
        self.pool0 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.pool3 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.pool4 = nn.AdaptiveAvgPool2d((1, 1))
        # self.pool4 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.classifier = nn.Linear(512, num_classes)
        self._initialize_weights()
    def get_feat_modules(self):
        """Return the feature-extracting modules in forward order."""
        feat_m = nn.ModuleList([])
        feat_m.append(self.block0)
        feat_m.append(self.pool0)
        feat_m.append(self.block1)
        feat_m.append(self.pool1)
        feat_m.append(self.block2)
        feat_m.append(self.pool2)
        feat_m.append(self.block3)
        feat_m.append(self.pool3)
        feat_m.append(self.block4)
        feat_m.append(self.pool4)
        return feat_m
    def get_bn_before_relu(self):
        """Return the last layer of blocks 1-4 (pre-activation position)."""
        bn1 = self.block1[-1]
        bn2 = self.block2[-1]
        bn3 = self.block3[-1]
        bn4 = self.block4[-1]
        return [bn1, bn2, bn3, bn4]
    def forward(self, x, is_feat=False, preact=False):
        """Run the network; with ``is_feat=True`` also return the staged
        features ([f0..f5], logits). ``preact`` selects the pre-ReLU block
        outputs. The extra pool3 is applied only for 64x64 inputs."""
        h = x.shape[2]
        x = F.relu(self.block0(x))
        f0 = x
        x = self.pool0(x)
        x = self.block1(x)
        f1_pre = x
        x = F.relu(x)
        f1 = x
        x = self.pool1(x)
        x = self.block2(x)
        f2_pre = x
        x = F.relu(x)
        f2 = x
        x = self.pool2(x)
        x = self.block3(x)
        f3_pre = x
        x = F.relu(x)
        f3 = x
        if h == 64:
            x = self.pool3(x)
        x = self.block4(x)
        f4_pre = x
        x = F.relu(x)
        f4 = x
        x = self.pool4(x)
        x = x.view(x.size(0), -1)
        f5 = x
        x = self.classifier(x)
        if is_feat:
            if preact:
                return [f0, f1_pre, f2_pre, f3_pre, f4_pre, f5], x
            else:
                return [f0, f1, f2, f3, f4, f5], x
        else:
            return x
    @staticmethod
    def _make_layers(cfg, batch_norm=False, in_channels=3):
        layers = []
        for v in cfg:
            if v == 'M':
                layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
            else:
                conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
                if batch_norm:
                    layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
                else:
                    layers += [conv2d, nn.ReLU(inplace=True)]
                in_channels = v
        # Drop the trailing ReLU so each block ends pre-activation;
        # forward() re-applies F.relu explicitly.
        layers = layers[:-1]
        return nn.Sequential(*layers)
    def _initialize_weights(self):
        # He-style init for convs, identity BN, small-normal linear.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                n = m.weight.size(1)
                m.weight.data.normal_(0, 0.01)
                m.bias.data.zero_()
# VGG layer configurations: per-block conv channel widths; pooling is
# inserted between blocks by the VGG model itself.
cfg = {
    'A': [[64], [128], [256, 256], [512, 512], [512, 512]],
    'B': [[64, 64], [128, 128], [256, 256], [512, 512], [512, 512]],
    'D': [[64, 64], [128, 128], [256, 256, 256], [512, 512, 512], [512, 512, 512]],
    'E': [[64, 64], [128, 128], [256, 256, 256, 256], [512, 512, 512, 512], [512, 512, 512, 512]],
    'S': [[64], [128], [256], [512], [512]],
}
def vgg8(**kwargs):
    """VGG 8-layer model (configuration "S")."""
    model = VGG(cfg['S'], **kwargs)
    return model
def vgg8_bn(**kwargs):
    """VGG 8-layer model (configuration "S") with batch normalization."""
    model = VGG(cfg['S'], batch_norm=True, **kwargs)
    return model
def vgg11(**kwargs):
    """VGG 11-layer model (configuration "A")."""
    model = VGG(cfg['A'], **kwargs)
    return model
def vgg11_bn(**kwargs):
    """VGG 11-layer model (configuration "A") with batch normalization"""
    model = VGG(cfg['A'], batch_norm=True, **kwargs)
    return model
def vgg13(**kwargs):
    """VGG 13-layer model (configuration "B")."""
    model = VGG(cfg['B'], **kwargs)
    return model
def vgg13_bn(**kwargs):
    """VGG 13-layer model (configuration "B") with batch normalization"""
    model = VGG(cfg['B'], batch_norm=True, **kwargs)
    return model
def vgg16(**kwargs):
    """VGG 16-layer model (configuration "D")."""
    model = VGG(cfg['D'], **kwargs)
    return model
def vgg16_bn(**kwargs):
    """VGG 16-layer model (configuration "D") with batch normalization"""
    model = VGG(cfg['D'], batch_norm=True, **kwargs)
    return model
def vgg19(**kwargs):
    """VGG 19-layer model (configuration "E")."""
    model = VGG(cfg['E'], **kwargs)
    return model
def vgg19_bn(**kwargs):
    """VGG 19-layer model (configuration 'E') with batch normalization"""
    model = VGG(cfg['E'], batch_norm=True, **kwargs)
    return model
if __name__ == '__main__':
    # Smoke test: print feature shapes and verify hint layers are BN.
    import torch
    x = torch.randn(2, 3, 32, 32)
    net = vgg19_bn(num_classes=100)
    feats, logit = net(x, is_feat=True, preact=True)
    for f in feats:
        print(f.shape, f.min().item())
    print(logit.shape)
    for m in net.get_bn_before_relu():
        if isinstance(m, nn.BatchNorm2d):
            print('pass')
        else:
            print('warning')
| 6,971 | 28.417722 | 98 | py |
SSKD | SSKD-master/models/classifier.py | from __future__ import print_function
import torch.nn as nn
#########################################
# ===== Classifiers ===== #
#########################################
class LinearClassifier(nn.Module):
    """Single linear layer mapping features to class logits."""

    def __init__(self, dim_in, n_label=10):
        super(LinearClassifier, self).__init__()
        self.net = nn.Linear(dim_in, n_label)

    def forward(self, x):
        """Return logits of shape (batch, n_label)."""
        return self.net(x)
class NonLinearClassifier(nn.Module):
    """Two-layer MLP head: Linear -> Dropout -> BatchNorm -> ReLU -> Linear."""

    def __init__(self, dim_in, n_label=10, p=0.1):
        super(NonLinearClassifier, self).__init__()
        hidden = 200  # fixed hidden width of the original design
        self.net = nn.Sequential(
            nn.Linear(dim_in, hidden),
            nn.Dropout(p=p),
            nn.BatchNorm1d(hidden),
            nn.ReLU(inplace=True),
            nn.Linear(hidden, n_label),
        )

    def forward(self, x):
        """Return logits of shape (batch, n_label)."""
        return self.net(x)
| 819 | 21.777778 | 51 | py |
SSKD | SSKD-master/models/resnetv2.py | '''ResNet in PyTorch.
For Pre-activation ResNet, see 'preact_resnet.py'.
Reference:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class BasicBlock(nn.Module):
    """Two-conv residual block; optionally exposes its pre-activation."""

    expansion = 1

    def __init__(self, in_planes, planes, stride=1, is_last=False):
        super(BasicBlock, self).__init__()
        self.is_last = is_last
        width = self.expansion * planes
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3,
                               stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,
                               stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        # Identity shortcut unless the shape changes; then 1x1 projection.
        if stride != 1 or in_planes != width:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, width, kernel_size=1,
                          stride=stride, bias=False),
                nn.BatchNorm2d(width),
            )
        else:
            self.shortcut = nn.Sequential()

    def forward(self, x):
        branch = self.bn2(self.conv2(F.relu(self.bn1(self.conv1(x)))))
        branch += self.shortcut(x)
        preact = branch
        activated = F.relu(branch)
        # is_last blocks additionally return the pre-ReLU activation.
        return (activated, preact) if self.is_last else activated
class Bottleneck(nn.Module):
    """1x1 -> 3x3 -> 1x1 residual bottleneck (4x channel expansion)."""

    expansion = 4

    def __init__(self, in_planes, planes, stride=1, is_last=False):
        super(Bottleneck, self).__init__()
        self.is_last = is_last
        width = self.expansion * planes
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,
                               stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, width, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(width)
        # Identity shortcut unless the shape changes; then 1x1 projection.
        if stride != 1 or in_planes != width:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, width, kernel_size=1,
                          stride=stride, bias=False),
                nn.BatchNorm2d(width),
            )
        else:
            self.shortcut = nn.Sequential()

    def forward(self, x):
        h = F.relu(self.bn1(self.conv1(x)))
        h = F.relu(self.bn2(self.conv2(h)))
        h = self.bn3(self.conv3(h))
        h += self.shortcut(x)
        preact = h
        activated = F.relu(h)
        # is_last blocks additionally return the pre-ReLU activation.
        return (activated, preact) if self.is_last else activated
class ResNet(nn.Module):
    """ImageNet-style ResNet adapted to 32x32 inputs (3x3 stem, no maxpool).

    Args:
        block: residual unit class exposing ``expansion``
            (BasicBlock/Bottleneck).
        num_blocks: units per stage, e.g. [2, 2, 2, 2] for ResNet-18.
        num_classes: output size of the final linear layer.
        zero_init_residual: zero the last BN of each unit so blocks start
            as identities (https://arxiv.org/abs/1706.02677).
    """

    def __init__(self, block, num_blocks, num_classes=10, zero_init_residual=False):
        super(ResNet, self).__init__()
        self.in_planes = 64
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.linear = nn.Linear(512 * block.expansion, num_classes)
        # He init for convs; BN scaled to identity.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        # Zero-initialize the last BN in each residual branch,
        # so that the residual branch starts with zeros, and each residual block behaves like an identity.
        # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)

    def get_feat_modules(self):
        """Return the feature-extracting modules in forward order."""
        feat_m = nn.ModuleList([])
        feat_m.append(self.conv1)
        feat_m.append(self.bn1)
        feat_m.append(self.layer1)
        feat_m.append(self.layer2)
        feat_m.append(self.layer3)
        feat_m.append(self.layer4)
        return feat_m

    def get_bn_before_relu(self):
        """Return the last pre-activation BN layer of each stage."""
        if isinstance(self.layer1[0], Bottleneck):
            bn1 = self.layer1[-1].bn3
            bn2 = self.layer2[-1].bn3
            bn3 = self.layer3[-1].bn3
            bn4 = self.layer4[-1].bn3
        elif isinstance(self.layer1[0], BasicBlock):
            bn1 = self.layer1[-1].bn2
            bn2 = self.layer2[-1].bn2
            bn3 = self.layer3[-1].bn2
            bn4 = self.layer4[-1].bn2
        else:
            raise NotImplementedError('ResNet unknown block error !!!')
        return [bn1, bn2, bn3, bn4]

    def _make_layer(self, block, planes, num_blocks, stride):
        """Stack ``num_blocks`` units; only the first may downsample, and
        the last is flagged is_last so it also returns its pre-activation."""
        strides = [stride] + [1] * (num_blocks - 1)
        layers = []
        for i in range(num_blocks):
            stride = strides[i]
            layers.append(block(self.in_planes, planes, stride, i == num_blocks - 1))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*layers)

    def forward(self, x, is_feat=False, preact=False):
        """Run the network.

        Returns logits, or with ``is_feat=True`` the pair
        ``([f0, f1, f2, f3, f4, f5], logits)``; ``preact`` selects the
        pre-activation stage outputs instead of the post-ReLU ones.
        """
        out = F.relu(self.bn1(self.conv1(x)))
        f0 = out
        out, f1_pre = self.layer1(out)
        f1 = out
        out, f2_pre = self.layer2(out)
        f2 = out
        out, f3_pre = self.layer3(out)
        f3 = out
        out, f4_pre = self.layer4(out)
        f4 = out
        out = self.avgpool(out)
        out = out.view(out.size(0), -1)
        f5 = out
        out = self.linear(out)
        if is_feat:
            if preact:
                # Fixed for consistency: return (features, logits) like the
                # other branches instead of wrapping both in a single list.
                # Two-way unpacking and indexing by callers is unaffected.
                return [f0, f1_pre, f2_pre, f3_pre, f4_pre, f5], out
            else:
                return [f0, f1, f2, f3, f4, f5], out
        else:
            return out
def ResNet18(**kwargs):
    """ResNet-18 (BasicBlock, [2, 2, 2, 2])."""
    return ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
def ResNet34(**kwargs):
    """ResNet-34 (BasicBlock, [3, 4, 6, 3])."""
    return ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
def ResNet50(**kwargs):
    """ResNet-50 (Bottleneck, [3, 4, 6, 3])."""
    return ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
def ResNet101(**kwargs):
    """ResNet-101 (Bottleneck, [3, 4, 23, 3])."""
    return ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
def ResNet152(**kwargs):
    """ResNet-152 (Bottleneck, [3, 8, 36, 3])."""
    return ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
if __name__ == '__main__':
    # Smoke test: print feature shapes and verify hint layers are BN.
    net = ResNet18(num_classes=100)
    x = torch.randn(2, 3, 32, 32)
    feats, logit = net(x, is_feat=True, preact=True)
    for f in feats:
        print(f.shape, f.min().item())
    print(logit.shape)
    for m in net.get_bn_before_relu():
        if isinstance(m, nn.BatchNorm2d):
            print('pass')
        else:
            print('warning')
| 6,915 | 33.753769 | 106 | py |
SSKD | SSKD-master/models/ShuffleNetv1.py | '''ShuffleNet in PyTorch.
See the paper "ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices" for more details.
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class ShuffleBlock(nn.Module):
    """Channel shuffle layer: interleaves channels across groups."""

    def __init__(self, groups):
        super(ShuffleBlock, self).__init__()
        self.groups = groups

    def forward(self, x):
        '''Channel shuffle: [N,C,H,W] -> [N,g,C/g,H,W] -> [N,C/g,g,H,w] -> [N,C,H,W]'''
        n, c, h, w = x.size()
        g = self.groups
        grouped = x.view(n, g, c // g, h, w)
        return grouped.permute(0, 2, 1, 3, 4).reshape(n, c, h, w)
class Bottleneck(nn.Module):
    """ShuffleNet unit: grouped 1x1 -> shuffle -> depthwise 3x3 -> grouped 1x1.

    With stride 2 the (avg-pooled) input is concatenated to the branch
    output along channels; with stride 1 it is added.
    """

    def __init__(self, in_planes, out_planes, stride, groups, is_last=False):
        super(Bottleneck, self).__init__()
        self.is_last = is_last
        self.stride = stride

        mid_planes = int(out_planes/4)
        # The stem stage (24 channels in) uses an ungrouped first conv.
        g = 1 if in_planes == 24 else groups
        self.conv1 = nn.Conv2d(in_planes, mid_planes,
                               kernel_size=1, groups=g, bias=False)
        self.bn1 = nn.BatchNorm2d(mid_planes)
        self.shuffle1 = ShuffleBlock(groups=g)
        self.conv2 = nn.Conv2d(mid_planes, mid_planes, kernel_size=3,
                               stride=stride, padding=1,
                               groups=mid_planes, bias=False)
        self.bn2 = nn.BatchNorm2d(mid_planes)
        self.conv3 = nn.Conv2d(mid_planes, out_planes,
                               kernel_size=1, groups=groups, bias=False)
        self.bn3 = nn.BatchNorm2d(out_planes)

        if stride == 2:
            self.shortcut = nn.Sequential(nn.AvgPool2d(3, stride=2, padding=1))
        else:
            self.shortcut = nn.Sequential()

    def forward(self, x):
        branch = F.relu(self.bn1(self.conv1(x)))
        branch = self.shuffle1(branch)
        branch = F.relu(self.bn2(self.conv2(branch)))
        branch = self.bn3(self.conv3(branch))
        skip = self.shortcut(x)
        if self.stride == 2:
            preact = torch.cat([branch, skip], 1)
        else:
            preact = branch + skip
        out = F.relu(preact)
        # is_last blocks additionally return the pre-ReLU activation.
        return (out, preact) if self.is_last else out
class ShuffleNet(nn.Module):
    """ShuffleNet v1 for 32x32 inputs (CIFAR-style): stem conv + 3 bottleneck stages.

    ``forward(x, is_feat=True)`` additionally returns intermediate features for
    knowledge distillation; ``preact=True`` returns the pre-activation versions.
    """
    def __init__(self, cfg, num_classes=10):
        super(ShuffleNet, self).__init__()
        out_planes = cfg['out_planes']
        num_blocks = cfg['num_blocks']
        groups = cfg['groups']
        self.conv1 = nn.Conv2d(3, 24, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(24)
        self.in_planes = 24
        self.layer1 = self._make_layer(out_planes[0], num_blocks[0], groups)
        self.layer2 = self._make_layer(out_planes[1], num_blocks[1], groups)
        self.layer3 = self._make_layer(out_planes[2], num_blocks[2], groups)
        self.linear = nn.Linear(out_planes[2], num_classes)
    def _make_layer(self, out_planes, num_blocks, groups):
        """Build one stage; the first block downsamples (stride 2) and concatenates
        the shortcut, so its conv branch only produces out_planes - in_planes channels."""
        layers = []
        for i in range(num_blocks):
            stride = 2 if i == 0 else 1
            cat_planes = self.in_planes if i == 0 else 0
            layers.append(Bottleneck(self.in_planes, out_planes-cat_planes,
                                     stride=stride,
                                     groups=groups,
                                     is_last=(i == num_blocks - 1)))
            self.in_planes = out_planes
        return nn.Sequential(*layers)
    def get_feat_modules(self):
        # Ordered list of feature-producing modules (used by some distillers).
        feat_m = nn.ModuleList([])
        feat_m.append(self.conv1)
        feat_m.append(self.bn1)
        feat_m.append(self.layer1)
        feat_m.append(self.layer2)
        feat_m.append(self.layer3)
        return feat_m
    def get_bn_before_relu(self):
        raise NotImplementedError('ShuffleNet currently is not supported for "Overhaul" teacher')
    def forward(self, x, is_feat=False, preact=False):
        out = F.relu(self.bn1(self.conv1(x)))
        f0 = out
        # Each stage's last Bottleneck has is_last=True and returns (out, preact).
        out, f1_pre = self.layer1(out)
        f1 = out
        out, f2_pre = self.layer2(out)
        f2 = out
        out, f3_pre = self.layer3(out)
        f3 = out
        out = F.avg_pool2d(out, 4)
        out = out.view(out.size(0), -1)
        f4 = out
        out = self.linear(out)
        if is_feat:
            if preact:
                return [f0, f1_pre, f2_pre, f3_pre, f4], out
            else:
                return [f0, f1, f2, f3, f4], out
        else:
            return out
def ShuffleV1(**kwargs):
    """Build the standard ShuffleNet v1 (g=3) variant used in this repo."""
    config = {
        'out_planes': [240, 480, 960],
        'num_blocks': [4, 8, 4],
        'groups': 3
    }
    return ShuffleNet(config, **kwargs)
if __name__ == '__main__':
    # Smoke test: run a dummy batch through ShuffleV1 and time the forward pass.
    x = torch.randn(2, 3, 32, 32)
    net = ShuffleV1(num_classes=100)
    import time
    a = time.time()
    feats, logit = net(x, is_feat=True, preact=True)
    b = time.time()
    print(b - a)
    for f in feats:
        print(f.shape, f.min().item())
    print(logit.shape)
| 4,732 | 33.05036 | 126 | py |
SSKD | SSKD-master/models/util.py | from __future__ import print_function
import torch.nn as nn
import math
class Paraphraser(nn.Module):
    """Paraphrasing Complex Network: Network Compression via Factor Transfer.

    Autoencoder over teacher features: the encoder compresses channels by
    factor ``k`` into a "factor"; the decoder reconstructs the input feature.
    """
    def __init__(self, t_shape, k=0.5, use_bn=False):
        super(Paraphraser, self).__init__()
        in_channel = t_shape[1]
        out_channel = int(t_shape[1] * k)
        self.encoder = nn.Sequential(
            nn.Conv2d(in_channel, in_channel, 3, 1, 1),
            nn.BatchNorm2d(in_channel) if use_bn else nn.Sequential(),
            nn.LeakyReLU(0.1, inplace=True),
            nn.Conv2d(in_channel, out_channel, 3, 1, 1),
            nn.BatchNorm2d(out_channel) if use_bn else nn.Sequential(),
            nn.LeakyReLU(0.1, inplace=True),
            nn.Conv2d(out_channel, out_channel, 3, 1, 1),
            nn.BatchNorm2d(out_channel) if use_bn else nn.Sequential(),
            nn.LeakyReLU(0.1, inplace=True),
        )
        self.decoder = nn.Sequential(
            nn.ConvTranspose2d(out_channel, out_channel, 3, 1, 1),
            nn.BatchNorm2d(out_channel) if use_bn else nn.Sequential(),
            nn.LeakyReLU(0.1, inplace=True),
            nn.ConvTranspose2d(out_channel, in_channel, 3, 1, 1),
            nn.BatchNorm2d(in_channel) if use_bn else nn.Sequential(),
            nn.LeakyReLU(0.1, inplace=True),
            nn.ConvTranspose2d(in_channel, in_channel, 3, 1, 1),
            nn.BatchNorm2d(in_channel) if use_bn else nn.Sequential(),
            nn.LeakyReLU(0.1, inplace=True),
        )
    def forward(self, f_s, is_factor=False):
        # With is_factor=True, only the compressed representation is returned.
        factor = self.encoder(f_s)
        if is_factor:
            return factor
        rec = self.decoder(factor)
        return factor, rec
class Translator(nn.Module):
    """Factor Transfer translator: maps student features into the teacher's
    factor space (same encoder shape as Paraphraser, channel ratio ``k``)."""
    def __init__(self, s_shape, t_shape, k=0.5, use_bn=True):
        super(Translator, self).__init__()
        in_channel = s_shape[1]
        out_channel = int(t_shape[1] * k)
        self.encoder = nn.Sequential(
            nn.Conv2d(in_channel, in_channel, 3, 1, 1),
            nn.BatchNorm2d(in_channel) if use_bn else nn.Sequential(),
            nn.LeakyReLU(0.1, inplace=True),
            nn.Conv2d(in_channel, out_channel, 3, 1, 1),
            nn.BatchNorm2d(out_channel) if use_bn else nn.Sequential(),
            nn.LeakyReLU(0.1, inplace=True),
            nn.Conv2d(out_channel, out_channel, 3, 1, 1),
            nn.BatchNorm2d(out_channel) if use_bn else nn.Sequential(),
            nn.LeakyReLU(0.1, inplace=True),
        )
    def forward(self, f_s):
        return self.encoder(f_s)
class Connector(nn.Module):
    """Connect for Knowledge Transfer via Distillation of Activation Boundaries Formed by Hidden Neurons.

    Builds one connector per (student, teacher) feature pair: identity when the
    shapes already match, otherwise a ConvReg without ReLU.
    """
    def __init__(self, s_shapes, t_shapes):
        super(Connector, self).__init__()
        self.s_shapes = s_shapes
        self.t_shapes = t_shapes
        self.connectors = nn.ModuleList(self._make_conenctors(s_shapes, t_shapes))
    @staticmethod
    def _make_conenctors(s_shapes, t_shapes):
        assert len(s_shapes) == len(t_shapes), 'unequal length of feat list'
        connectors = []
        for s, t in zip(s_shapes, t_shapes):
            if s[1] == t[1] and s[2] == t[2]:
                # channels and spatial size already match -> pass-through
                connectors.append(nn.Sequential())
            else:
                connectors.append(ConvReg(s, t, use_relu=False))
        return connectors
    def forward(self, g_s):
        # Apply the i-th connector to the i-th student feature map.
        out = []
        for i in range(len(g_s)):
            out.append(self.connectors[i](g_s[i]))
        return out
class ConnectorV2(nn.Module):
    """A Comprehensive Overhaul of Feature Distillation (ICCV 2019).

    One 1x1 conv + BN per feature pair, mapping student channels to teacher
    channels; weights use He initialization.
    """
    def __init__(self, s_shapes, t_shapes):
        super(ConnectorV2, self).__init__()
        self.s_shapes = s_shapes
        self.t_shapes = t_shapes
        self.connectors = nn.ModuleList(self._make_conenctors(s_shapes, t_shapes))
    def _make_conenctors(self, s_shapes, t_shapes):
        assert len(s_shapes) == len(t_shapes), 'unequal length of feat list'
        t_channels = [t[1] for t in t_shapes]
        s_channels = [s[1] for s in s_shapes]
        connectors = nn.ModuleList([self._build_feature_connector(t, s)
                                    for t, s in zip(t_channels, s_channels)])
        return connectors
    @staticmethod
    def _build_feature_connector(t_channel, s_channel):
        C = [nn.Conv2d(s_channel, t_channel, kernel_size=1, stride=1, padding=0, bias=False),
             nn.BatchNorm2d(t_channel)]
        # He (Kaiming) init for conv, unit-scale/zero-shift for BN.
        for m in C:
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
        return nn.Sequential(*C)
    def forward(self, g_s):
        out = []
        for i in range(len(g_s)):
            out.append(self.connectors[i](g_s[i]))
        return out
class ConvReg(nn.Module):
    """Convolutional regression for FitNet.

    Maps a student feature map ``s_shape`` = (N, C, H, W) onto a teacher feature
    map ``t_shape`` with a single conv layer chosen from the spatial ratio:
    2x downsample (strided conv), 2x upsample (transposed conv), or a
    valid-convolution shrink when the student map is at most as small as needed.

    Raises:
        NotImplementedError: for spatial ratios other than the three above.
    """
    def __init__(self, s_shape, t_shape, use_relu=True):
        super(ConvReg, self).__init__()
        self.use_relu = use_relu
        s_N, s_C, s_H, s_W = s_shape
        t_N, t_C, t_H, t_W = t_shape
        if s_H == 2 * t_H:
            self.conv = nn.Conv2d(s_C, t_C, kernel_size=3, stride=2, padding=1)
        elif s_H * 2 == t_H:
            self.conv = nn.ConvTranspose2d(s_C, t_C, kernel_size=4, stride=2, padding=1)
        elif s_H >= t_H:
            # valid conv with kernel sized so output is exactly t_H x t_W
            self.conv = nn.Conv2d(s_C, t_C, kernel_size=(1+s_H-t_H, 1+s_W-t_W))
        else:
            # Fixed: `raise NotImplemented(...)` raised a TypeError because
            # NotImplemented is a sentinel constant, not an exception class.
            raise NotImplementedError('student size {}, teacher size {}'.format(s_H, t_H))
        self.bn = nn.BatchNorm2d(t_C)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        x = self.conv(x)
        if self.use_relu:
            return self.relu(self.bn(x))
        else:
            return self.bn(x)
class Regress(nn.Module):
    """Simple linear regression head for hint-based distillation.

    Flattens the input per sample, applies one linear map, then ReLU.
    """
    def __init__(self, dim_in=1024, dim_out=1024):
        super(Regress, self).__init__()
        self.linear = nn.Linear(dim_in, dim_out)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        flat = x.view(x.shape[0], -1)
        return self.relu(self.linear(flat))
class Embed(nn.Module):
    """Embedding module: flatten -> linear projection -> L2 normalization."""
    def __init__(self, dim_in=1024, dim_out=128):
        super(Embed, self).__init__()
        self.linear = nn.Linear(dim_in, dim_out)
        self.l2norm = Normalize(2)
    def forward(self, x):
        x = x.view(x.shape[0], -1)
        x = self.linear(x)
        # unit-norm output so downstream similarity is cosine-like
        x = self.l2norm(x)
        return x
class LinearEmbed(nn.Module):
    """Project flattened features into an embedding space with one linear layer
    (no normalization)."""
    def __init__(self, dim_in=1024, dim_out=128):
        super(LinearEmbed, self).__init__()
        self.linear = nn.Linear(dim_in, dim_out)

    def forward(self, x):
        return self.linear(x.view(x.shape[0], -1))
class MLPEmbed(nn.Module):
    """Non-linear embedding by a 2-layer MLP with L2-normalized output."""
    def __init__(self, dim_in=1024, dim_out=128):
        super(MLPEmbed, self).__init__()
        # hidden layer is twice the output width
        self.linear1 = nn.Linear(dim_in, 2 * dim_out)
        self.relu = nn.ReLU(inplace=True)
        self.linear2 = nn.Linear(2 * dim_out, dim_out)
        self.l2norm = Normalize(2)
    def forward(self, x):
        x = x.view(x.shape[0], -1)
        x = self.relu(self.linear1(x))
        x = self.l2norm(self.linear2(x))
        return x
class Normalize(nn.Module):
    """L-p normalization along dim 1 (default p=2, i.e. unit Euclidean norm)."""
    def __init__(self, power=2):
        super(Normalize, self).__init__()
        self.power = power

    def forward(self, x):
        denom = x.pow(self.power).sum(1, keepdim=True).pow(1.0 / self.power)
        return x.div(denom)
class Flatten(nn.Module):
    """Flatten every dimension after the batch dimension."""
    def __init__(self):
        super(Flatten, self).__init__()

    def forward(self, feat):
        batch = feat.size(0)
        return feat.view(batch, -1)
class PoolEmbed(nn.Module):
    """Adaptive pool + flatten + linear + L2-normalize, with per-layer presets.

    ``layer`` selects the (pool_size, nChannels) preset for the feature level
    being embedded; layer 4 is already 1x1 so no pooling is added.
    """
    def __init__(self, layer=0, dim_out=128, pool_type='avg'):
        super().__init__()
        if layer == 0:
            pool_size = 8
            nChannels = 16
        elif layer == 1:
            pool_size = 8
            nChannels = 16
        elif layer == 2:
            pool_size = 6
            nChannels = 32
        elif layer == 3:
            pool_size = 4
            nChannels = 64
        elif layer == 4:
            pool_size = 1
            nChannels = 64
        else:
            raise NotImplementedError('layer not supported: {}'.format(layer))
        self.embed = nn.Sequential()
        if layer <= 3:
            if pool_type == 'max':
                self.embed.add_module('MaxPool', nn.AdaptiveMaxPool2d((pool_size, pool_size)))
            elif pool_type == 'avg':
                self.embed.add_module('AvgPool', nn.AdaptiveAvgPool2d((pool_size, pool_size)))
        self.embed.add_module('Flatten', Flatten())
        self.embed.add_module('Linear', nn.Linear(nChannels*pool_size*pool_size, dim_out))
        self.embed.add_module('Normalize', Normalize(2))
    def forward(self, x):
        return self.embed(x)
if __name__ == '__main__':
    # Smoke test: map 3 student feature maps to teacher shapes via ConnectorV2.
    import torch
    g_s = [
        torch.randn(2, 16, 16, 16),
        torch.randn(2, 32, 8, 8),
        torch.randn(2, 64, 4, 4),
    ]
    g_t = [
        torch.randn(2, 32, 16, 16),
        torch.randn(2, 64, 8, 8),
        torch.randn(2, 128, 4, 4),
    ]
    s_shapes = [s.shape for s in g_s]
    t_shapes = [t.shape for t in g_t]
    net = ConnectorV2(s_shapes, t_shapes)
    out = net(g_s)
    for f in out:
        print(f.shape)
| 9,622 | 32.068729 | 107 | py |
SSKD | SSKD-master/models/ShuffleNetv2.py | '''ShuffleNetV2 in PyTorch.
See the paper "ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design" for more details.
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class ShuffleBlock(nn.Module):
    """Channel-shuffle layer for ShuffleNet v2 (default 2 groups)."""
    def __init__(self, groups=2):
        super(ShuffleBlock, self).__init__()
        self.groups = groups

    def forward(self, x):
        """Shuffle channels: [N,C,H,W] -> [N,g,C/g,H,W] -> [N,C/g,g,H,W] -> [N,C,H,W]."""
        batch, channels, height, width = x.size()
        per_group = channels // self.groups
        grouped = x.view(batch, self.groups, per_group, height, width)
        return grouped.permute(0, 2, 1, 3, 4).reshape(batch, channels, height, width)
class SplitBlock(nn.Module):
    """Split a tensor along the channel dimension at ``ratio`` into two parts."""
    def __init__(self, ratio):
        super(SplitBlock, self).__init__()
        self.ratio = ratio

    def forward(self, x):
        split_at = int(x.size(1) * self.ratio)
        left = x[:, :split_at, :, :]
        right = x[:, split_at:, :, :]
        return left, right
class BasicBlock(nn.Module):
    """ShuffleNet v2 basic unit: split channels, transform one half
    (1x1 -> depthwise 3x3 -> 1x1), concat with the untouched half, shuffle.

    If ``is_last`` is True, forward also returns the pre-activation tensor
    (transformed half before ReLU, concatenated with the identity half).
    """
    def __init__(self, in_channels, split_ratio=0.5, is_last=False):
        super(BasicBlock, self).__init__()
        self.is_last = is_last
        self.split = SplitBlock(split_ratio)
        # Only the split-off half goes through the conv branch.
        in_channels = int(in_channels * split_ratio)
        self.conv1 = nn.Conv2d(in_channels, in_channels,
                               kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(in_channels)
        # depthwise 3x3 (groups == channels), no ReLU after per the paper
        self.conv2 = nn.Conv2d(in_channels, in_channels,
                               kernel_size=3, stride=1, padding=1, groups=in_channels, bias=False)
        self.bn2 = nn.BatchNorm2d(in_channels)
        self.conv3 = nn.Conv2d(in_channels, in_channels,
                               kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(in_channels)
        self.shuffle = ShuffleBlock()
    def forward(self, x):
        x1, x2 = self.split(x)
        out = F.relu(self.bn1(self.conv1(x2)))
        out = self.bn2(self.conv2(out))
        preact = self.bn3(self.conv3(out))
        out = F.relu(preact)
        # out = F.relu(self.bn3(self.conv3(out)))
        preact = torch.cat([x1, preact], 1)
        out = torch.cat([x1, out], 1)
        out = self.shuffle(out)
        if self.is_last:
            return out, preact
        else:
            return out
class DownBlock(nn.Module):
    """ShuffleNet v2 downsampling unit: both branches stride-2, outputs concatenated.

    Left branch: depthwise 3x3 (s=2) -> 1x1.  Right branch: 1x1 -> depthwise
    3x3 (s=2) -> 1x1.  Each branch emits out_channels//2 channels.
    """
    def __init__(self, in_channels, out_channels):
        super(DownBlock, self).__init__()
        mid_channels = out_channels // 2
        # left
        self.conv1 = nn.Conv2d(in_channels, in_channels,
                               kernel_size=3, stride=2, padding=1, groups=in_channels, bias=False)
        self.bn1 = nn.BatchNorm2d(in_channels)
        self.conv2 = nn.Conv2d(in_channels, mid_channels,
                               kernel_size=1, bias=False)
        self.bn2 = nn.BatchNorm2d(mid_channels)
        # right
        self.conv3 = nn.Conv2d(in_channels, mid_channels,
                               kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(mid_channels)
        self.conv4 = nn.Conv2d(mid_channels, mid_channels,
                               kernel_size=3, stride=2, padding=1, groups=mid_channels, bias=False)
        self.bn4 = nn.BatchNorm2d(mid_channels)
        self.conv5 = nn.Conv2d(mid_channels, mid_channels,
                               kernel_size=1, bias=False)
        self.bn5 = nn.BatchNorm2d(mid_channels)
        self.shuffle = ShuffleBlock()
    def forward(self, x):
        # left
        out1 = self.bn1(self.conv1(x))
        out1 = F.relu(self.bn2(self.conv2(out1)))
        # right
        out2 = F.relu(self.bn3(self.conv3(x)))
        out2 = self.bn4(self.conv4(out2))
        out2 = F.relu(self.bn5(self.conv5(out2)))
        # concat
        out = torch.cat([out1, out2], 1)
        out = self.shuffle(out)
        return out
class ShuffleNetV2(nn.Module):
    """ShuffleNet v2 for 32x32 inputs; ``net_size`` indexes the width presets
    in the module-level ``configs`` dict.

    ``forward(x, is_feat=True)`` also returns intermediate features for
    knowledge distillation; ``preact=True`` returns pre-activation versions.
    """
    def __init__(self, net_size, num_classes=10):
        super(ShuffleNetV2, self).__init__()
        out_channels = configs[net_size]['out_channels']
        num_blocks = configs[net_size]['num_blocks']
        # self.conv1 = nn.Conv2d(3, 24, kernel_size=3,
        #                        stride=1, padding=1, bias=False)
        self.conv1 = nn.Conv2d(3, 24, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(24)
        self.in_channels = 24
        self.layer1 = self._make_layer(out_channels[0], num_blocks[0])
        self.layer2 = self._make_layer(out_channels[1], num_blocks[1])
        self.layer3 = self._make_layer(out_channels[2], num_blocks[2])
        self.conv2 = nn.Conv2d(out_channels[2], out_channels[3],
                               kernel_size=1, stride=1, padding=0, bias=False)
        self.bn2 = nn.BatchNorm2d(out_channels[3])
        self.linear = nn.Linear(out_channels[3], num_classes)
    def _make_layer(self, out_channels, num_blocks):
        # Each stage: one stride-2 DownBlock followed by num_blocks BasicBlocks.
        layers = [DownBlock(self.in_channels, out_channels)]
        for i in range(num_blocks):
            layers.append(BasicBlock(out_channels, is_last=(i == num_blocks - 1)))
            self.in_channels = out_channels
        return nn.Sequential(*layers)
    def get_feat_modules(self):
        # Ordered list of feature-producing modules (used by some distillers).
        feat_m = nn.ModuleList([])
        feat_m.append(self.conv1)
        feat_m.append(self.bn1)
        feat_m.append(self.layer1)
        feat_m.append(self.layer2)
        feat_m.append(self.layer3)
        return feat_m
    def get_bn_before_relu(self):
        raise NotImplementedError('ShuffleNetV2 currently is not supported for "Overhaul" teacher')
    def forward(self, x, is_feat=False, preact=False):
        out = F.relu(self.bn1(self.conv1(x)))
        # out = F.max_pool2d(out, 3, stride=2, padding=1)
        f0 = out
        # Each stage's last BasicBlock has is_last=True and returns (out, preact).
        out, f1_pre = self.layer1(out)
        f1 = out
        out, f2_pre = self.layer2(out)
        f2 = out
        out, f3_pre = self.layer3(out)
        f3 = out
        out = F.relu(self.bn2(self.conv2(out)))
        out = F.avg_pool2d(out, 4)
        out = out.view(out.size(0), -1)
        f4 = out
        out = self.linear(out)
        if is_feat:
            if preact:
                return [f0, f1_pre, f2_pre, f3_pre, f4], out
            else:
                return [f0, f1, f2, f3, f4], out
        else:
            return out
# Width-multiplier presets for ShuffleNetV2 (keyed by net_size):
# per-stage output channels plus the final 1x1 conv width, and blocks per stage.
configs = {
    0.2: {
        'out_channels': (40, 80, 160, 512),
        'num_blocks': (3, 3, 3)
    },
    0.3: {
        'out_channels': (40, 80, 160, 512),
        'num_blocks': (3, 7, 3)
    },
    0.5: {
        'out_channels': (48, 96, 192, 1024),
        'num_blocks': (3, 7, 3)
    },
    1: {
        'out_channels': (116, 232, 464, 1024),
        'num_blocks': (3, 7, 3)
    },
    1.5: {
        'out_channels': (176, 352, 704, 1024),
        'num_blocks': (3, 7, 3)
    },
    2: {
        'out_channels': (224, 488, 976, 2048),
        'num_blocks': (3, 7, 3)
    }
}
def ShuffleV2(**kwargs):
    """Build ShuffleNetV2 at width multiplier 1.0."""
    return ShuffleNetV2(net_size=1, **kwargs)
if __name__ == '__main__':
    # Smoke test: run a dummy batch through ShuffleV2 and time the forward pass.
    net = ShuffleV2(num_classes=100)
    x = torch.randn(3, 3, 32, 32)
    import time
    a = time.time()
    feats, logit = net(x, is_feat=True, preact=True)
    b = time.time()
    print(b - a)
    for f in feats:
        print(f.shape, f.min().item())
    print(logit.shape)
| 7,074 | 32.530806 | 107 | py |
SSKD | SSKD-master/models/wrn.py | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
"""
Original Author: Wei Yang
"""
__all__ = ['wrn']
class BasicBlock(nn.Module):
    """Wide-ResNet pre-activation basic block: BN-ReLU-Conv x2 with residual.

    When in/out widths differ, the shortcut is a 1x1 conv applied to the
    pre-activated input; otherwise it is the identity.
    """
    def __init__(self, in_planes, out_planes, stride, dropRate=0.0):
        super(BasicBlock, self).__init__()
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.relu1 = nn.ReLU(inplace=True)
        self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(out_planes)
        self.relu2 = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(out_planes, out_planes, kernel_size=3, stride=1,
                               padding=1, bias=False)
        self.droprate = dropRate
        self.equalInOut = (in_planes == out_planes)
        self.convShortcut = (not self.equalInOut) and nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride,
                                                                padding=0, bias=False) or None
    def forward(self, x):
        # Pre-activation: when widths differ, the shortcut also consumes
        # the activated input, so x itself is rebound.
        if not self.equalInOut:
            x = self.relu1(self.bn1(x))
        else:
            out = self.relu1(self.bn1(x))
        out = self.relu2(self.bn2(self.conv1(out if self.equalInOut else x)))
        if self.droprate > 0:
            out = F.dropout(out, p=self.droprate, training=self.training)
        out = self.conv2(out)
        return torch.add(x if self.equalInOut else self.convShortcut(x), out)
class NetworkBlock(nn.Module):
    """A Wide-ResNet stage of ``nb_layers`` blocks.

    Only the first block changes width (in_planes -> out_planes) and applies
    the stage stride; the remaining blocks keep out_planes at stride 1.
    """
    def __init__(self, nb_layers, in_planes, out_planes, block, stride, dropRate=0.0):
        super(NetworkBlock, self).__init__()
        self.layer = self._make_layer(block, in_planes, out_planes, nb_layers, stride, dropRate)

    def _make_layer(self, block, in_planes, out_planes, nb_layers, stride, dropRate):
        layers = []
        for i in range(nb_layers):
            # Conditional expressions replace the old `cond and a or b` idiom,
            # which silently picks the wrong value whenever `a` is falsy (e.g. 0).
            layers.append(block(in_planes if i == 0 else out_planes,
                                out_planes,
                                stride if i == 0 else 1,
                                dropRate))
        return nn.Sequential(*layers)

    def forward(self, x):
        return self.layer(x)
class WideResNet(nn.Module):
    """Wide Residual Network (WRN-depth-widen_factor) for 32x32 inputs.

    depth must satisfy depth = 6n + 4; each of the 3 stages then has n blocks.
    ``forward(x, is_feat=True)`` also returns intermediate features for
    distillation; ``preact=True`` passes f1..f3 through the following BN first.
    """
    def __init__(self, depth, num_classes, widen_factor=1, dropRate=0.0):
        super(WideResNet, self).__init__()
        nChannels = [16, 16*widen_factor, 32*widen_factor, 64*widen_factor]
        assert (depth - 4) % 6 == 0, 'depth should be 6n+4'
        n = (depth - 4) // 6
        block = BasicBlock
        # 1st conv before any network block
        self.conv1 = nn.Conv2d(3, nChannels[0], kernel_size=3, stride=1,
                               padding=1, bias=False)
        # 1st block
        self.block1 = NetworkBlock(n, nChannels[0], nChannels[1], block, 1, dropRate)
        # 2nd block
        self.block2 = NetworkBlock(n, nChannels[1], nChannels[2], block, 2, dropRate)
        # 3rd block
        self.block3 = NetworkBlock(n, nChannels[2], nChannels[3], block, 2, dropRate)
        # global average pooling and classifier
        self.bn1 = nn.BatchNorm2d(nChannels[3])
        self.relu = nn.ReLU(inplace=True)
        self.fc = nn.Linear(nChannels[3], num_classes)
        self.nChannels = nChannels[3]
        # He init for convs; unit-scale/zero-shift BN; zero linear bias.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.bias.data.zero_()
    def get_feat_modules(self):
        # Ordered list of feature-producing modules (used by some distillers).
        feat_m = nn.ModuleList([])
        feat_m.append(self.conv1)
        feat_m.append(self.block1)
        feat_m.append(self.block2)
        feat_m.append(self.block3)
        return feat_m
    def get_bn_before_relu(self):
        # BN layers sitting directly before a ReLU, for "Overhaul" distillation.
        bn1 = self.block2.layer[0].bn1
        bn2 = self.block3.layer[0].bn1
        bn3 = self.bn1
        return [bn1, bn2, bn3]
    def forward(self, x, is_feat=False, preact=False):
        out = self.conv1(x)
        f0 = out
        out = self.block1(out)
        f1 = out
        out = self.block2(out)
        f2 = out
        out = self.block3(out)
        f3 = out
        out = self.relu(self.bn1(out))
        out = F.avg_pool2d(out, 8)
        out = out.view(-1, self.nChannels)
        f4 = out
        out = self.fc(out)
        if is_feat:
            if preact:
                # re-normalize pre-ReLU features via the next stage's first BN
                f1 = self.block2.layer[0].bn1(f1)
                f2 = self.block3.layer[0].bn1(f2)
                f3 = self.bn1(f3)
            return [f0, f1, f2, f3, f4], out
        else:
            return out
def wrn(**kwargs):
    """Construct a Wide Residual Network from explicit depth/width kwargs."""
    return WideResNet(**kwargs)
def wrn_40_2(**kwargs):
    """WRN-40-2: depth 40, widen factor 2."""
    model = WideResNet(depth=40, widen_factor=2, **kwargs)
    return model
def wrn_40_1(**kwargs):
    """WRN-40-1: depth 40, widen factor 1."""
    model = WideResNet(depth=40, widen_factor=1, **kwargs)
    return model
def wrn_16_2(**kwargs):
    """WRN-16-2: depth 16, widen factor 2."""
    model = WideResNet(depth=16, widen_factor=2, **kwargs)
    return model
def wrn_16_1(**kwargs):
    """WRN-16-1: depth 16, widen factor 1."""
    model = WideResNet(depth=16, widen_factor=1, **kwargs)
    return model
if __name__ == '__main__':
    # Smoke test: forward a dummy batch and verify get_bn_before_relu types.
    import torch
    x = torch.randn(2, 3, 32, 32)
    net = wrn_40_2(num_classes=100)
    feats, logit = net(x, is_feat=True, preact=True)
    for f in feats:
        print(f.shape, f.min().item())
    print(logit.shape)
    for m in net.get_bn_before_relu():
        if isinstance(m, nn.BatchNorm2d):
            print('pass')
        else:
            print('warning')
| 5,519 | 31.280702 | 116 | py |
scirepeval | scirepeval-main/scirepeval.py | import argparse
import json
from typing import List, Union
from evaluation.encoders import Model
from evaluation.evaluator import IREvaluator, SupervisedEvaluator, SupervisedTask
from evaluation.few_shot_evaluator import FewShotEvaluator
from evaluation.gpt3_encoder import GPT3Model
from evaluation.instructor import InstructorModel
from reviewer_matching import ReviewerMatchingEvaluator
from evaluation.eval_datasets import SimpleDataset, IRDataset
# Control-code token for each task format; ad-hoc search uses different codes
# for queries vs. candidate documents.
TASK_IDS = {"classification": "[CLF]", "regression": "[RGN]", "proximity": "[PRX]",
            "adhoc_search": {"query": "[QRY]", "candidates": "[PRX]"}}
import pytorch_lightning as pl
# Fix all RNG seeds (incl. dataloader workers) so evaluation runs are reproducible.
pl.seed_everything(42, workers=True)
class SciRepEval:
    """Benchmark driver: reads a JSONL task config, builds the right evaluator
    per task, and writes all metric results to a JSON file."""
    def __init__(self, tasks_config: str = "scirepeval_tasks.jsonl", task_list: List[str] = None,
                 task_formats: List[str] = None, batch_size: int = 32):
        """Load task definitions; optionally filter by task names or task formats.

        Note: task_list takes precedence over task_formats when both are given.
        """
        tasks_dict = dict()
        task_by_formats = dict()
        with open(tasks_config, encoding="utf-8") as f:
            for line in f:
                d = json.loads(line)
                tasks_dict[d["name"]] = d
                if d["type"] not in task_by_formats:
                    task_by_formats[d["type"]] = []
                task_by_formats[d["type"]].append(d["name"])
        if not task_list and not task_formats:
            self.tasks = tasks_dict
        elif task_list:
            self.tasks = {k: tasks_dict[k] for k in task_list}
        elif task_formats:
            self.tasks = dict()
            for task_format in task_formats:
                self.tasks.update({k: tasks_dict[k] for k in task_by_formats[task_format]})
        self.batch_size = batch_size
    def evaluate(self, model: Union[Model, List[Model]], output: str):
        """Run every configured task with the given model(s) and dump results to `output`.

        For each task: set the model's control code, assemble evaluator kwargs
        from the task config, embed the test data (or load cached embeddings),
        then score; few-shot variants reuse the same embeddings.
        """
        final_results = dict()
        if type(model) != list:
            model = [model]
        for task_name, task in self.tasks.items():
            for m in model:
                m.task_id = TASK_IDS[task["type"]]
            kwargs = dict()
            task_data = task["data"]
            if not task_data.get("meta"):
                raise ValueError(f"Task {task_name} has no test metadata")
            if task_data.get("meta"):
                metadata = task_data["meta"]
                # dataset entries are either a plain path/name or a {name, config} dict
                kwargs["meta_dataset"] = metadata if type(metadata) != dict else (metadata["name"], metadata["config"])
            if not task_data.get("test"):
                # fall back to the metadata dataset as the test split
                if type(metadata) == dict:
                    kwargs["test_dataset"] = (metadata["name"], metadata["config"])
                else:
                    raise ValueError(f"Task {task_name} has no test data")
            if task_data.get("test"):
                testdata = task_data["test"]
                kwargs["test_dataset"] = testdata if type(testdata) != dict else (testdata["name"], testdata["config"])
            kwargs["metrics"] = tuple(task["metrics"])
            kwargs["batch_size"] = task["batch_size"] if "batch_size" in task else self.batch_size
            if "fields" in task:
                kwargs["fields"] = task["fields"]
            save_path, load_path = None, None
            if "embeddings" in task:
                save_path = task["embeddings"].get("save")
                load_path = task["embeddings"].get("load")
            few_shot_evaluators = []
            if task["type"] in {"classification", "regression"}:
                subtype = SupervisedTask.CLASSIFICATION if task[
                                                               "type"] == "classification" else SupervisedTask.REGRESSION
                if task.get("multi_label"):
                    subtype = SupervisedTask.MULTILABEL_CLASSIFICATION
                evaluator = SupervisedEvaluator(task_name, subtype, model=model,
                                                **kwargs)
                if task.get("few_shot"):
                    for run in task["few_shot"]:
                        few_shot_evaluators.append(
                            FewShotEvaluator(f"{task_name} {run['sample_size']} shot", subtype, model=model,
                                             sample_size=run["sample_size"], num_iterations=run["iterations"],
                                             **kwargs))
            else:
                # retrieval-style tasks: reviewer matching gets its own evaluator
                if task_name == "Paper-Reviewer Matching":
                    if not task_data.get("reviewers") and not task_data.get("hf_reviewers"):
                        raise ValueError(f"Task {task_name} has no reviewer metadata locally or hf_metadata")
                    if task_data.get("reviewers"):
                        reviewers = task_data["reviewers"]
                        kwargs["reviewer_metadata"] = reviewers if type(reviewers) != dict else (
                            reviewers["name"], reviewers["config"])
                    evaluator = ReviewerMatchingEvaluator(task_name, model=model, **kwargs)
                else:
                    data_class = SimpleDataset if task_data.get("simple_format") else IRDataset
                    evaluator = IREvaluator(task_name, model=model, dataset_class=data_class, **kwargs)
            # reuse cached embeddings when a load path is configured
            embeddings = evaluator.generate_embeddings(save_path) if not load_path else load_path
            results = evaluator.evaluate(embeddings)
            if not few_shot_evaluators:
                final_results[task_name] = results
            else:
                final_results[task_name] = dict()
                final_results[task_name]["complete"] = results
                final_results[task_name]["few_shot"] = []
            for few_shot in few_shot_evaluators:
                final_results[task_name]["few_shot"].append(
                    {"sample_size": few_shot.sample_size, "results": few_shot.evaluate(embeddings)})
        with open(output, "w") as f:
            json.dump(final_results, f, indent=4)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--tasks-config', help='path to the task config file', default="scirepeval_tasks.jsonl")
parser.add_argument('--mtype', help='Model variant to be used (default, pals, adapters, fusion)', default="default")
parser.add_argument('--gpt3-model', help='Name of embedding model in case of using openai api', default=None)
parser.add_argument('--model', '-m', help='HuggingFace model to be used')
parser.add_argument('--batch-size', type=int, default=32, help='batch size')
parser.add_argument('--ctrl-tokens', action='store_true', default=False, help='use control codes for tasks')
parser.add_argument('--adapters-dir', help='path to the adapter checkpoints', default=None)
parser.add_argument('--fusion-dir', help='path to the fusion checkpoints', default=None)
parser.add_argument('--adapters-chkpt', help='hf adapter names keyed on tasks', default=None, type=json.loads)
parser.add_argument('--output', help="path to the output file", default="scirepeval_results.json")
parser.add_argument('--fp16', action='store_true', default=False, help='use floating point 16 precision')
parser.add_argument('--instructor', action='store_true', default=False, help='use an instructor model for eval')
args = parser.parse_args()
adapters_load_from = args.adapters_dir if args.adapters_dir else args.adapters_chkpt
if args.gpt3_model:
model = GPT3Model(embed_model=args.gpt3_model)
elif args.instructor:
model = InstructorModel(args.model)
else:
model = Model(variant=args.mtype, base_checkpoint=args.model, adapters_load_from=adapters_load_from,
fusion_load_from=args.fusion_dir,
use_ctrl_codes=args.ctrl_tokens,
task_id="", all_tasks=["[CLF]", "[QRY]", "[RGN]", "[PRX]"], use_fp16=args.fp16)
evaluator = SciRepEval(tasks_config=args.tasks_config, batch_size=args.batch_size)
evaluator.evaluate(model, args.output)
| 7,865 | 52.510204 | 121 | py |
scirepeval | scirepeval-main/adapter_fusion.py | from typing import List, Optional, Union, Dict
from transformers.adapters import PfeifferConfig
from transformers.adapters import AutoAdapterModel
from transformers.adapters.composition import Fuse
from abc import ABC, abstractmethod
import torch
import os
class AdapterFactory:
    """Factory returning either independent per-task adapters or an adapter-fusion stack."""
    @staticmethod
    def get_adapter(checkpoint_name: str, task_ids: List[str], fuse_adapters: bool,
                    adapters_dir: Union[str, Dict] = None):
        print(task_ids)
        if fuse_adapters:
            return AdapterFusion(checkpoint_name, task_ids, adapters_dir)
        return AdapterEncoder(checkpoint_name, task_ids)
class AbstractAdapter(torch.nn.Module, ABC):
    """Base wrapper around a HuggingFace AutoAdapterModel; subclasses define
    how adapters are activated and persisted."""
    def __init__(self, checkpoint_name):
        super(AbstractAdapter, self).__init__()
        self.model = AutoAdapterModel.from_pretrained(checkpoint_name) # base checkpoint
    @abstractmethod
    def save_pretrained(self, save_path: str):
        self.model.save_all_adapters(save_path)
    def resize_token_embeddings(self, new_num_tokens: Optional[int] = None):
        # Delegate to the underlying HF model (needed after adding ctrl tokens).
        return self.model.resize_token_embeddings(new_num_tokens)
class AdapterEncoder(AbstractAdapter):
    """One independent Pfeiffer adapter per task; the active adapter is switched
    per forward call via ``task_id``."""
    def __init__(self, checkpoint_name, task_ids: List[str], load_as=None):
        super(AdapterEncoder, self).__init__(checkpoint_name)
        # Add (or load) one adapter per task id.
        for t_id in task_ids:
            if not load_as:
                self.model.add_adapter(t_id, config="pfeiffer")
            else:
                # load_as can be a str for a local path, or a dict of names to be loaded from the adapters hub
                if type(load_as) == str:
                    self.model.load_adapter(f"{load_as}/{t_id}/", load_as=t_id)
                else:
                    self.model.load_adapter(load_as[t_id], load_as=t_id)
        # Freeze the base model; only adapter weights stay trainable.
        self.model.train_adapter(adapter_setup=task_ids, train_embeddings=False)
    def forward(self, input_ids, attention_mask, task_id):
        self.model.base_model.set_active_adapters(task_id)
        return self.model(input_ids, attention_mask=attention_mask)
    def save_pretrained(self, save_path: str, adapter_names: List[str] = None):
        # self.model.save_pretrained(save_path)
        save_path = f'{save_path}/adapters/'
        os.makedirs(save_path, exist_ok=True)
        if not adapter_names:
            self.model.save_all_adapters(save_path)
        else:
            for a_name in adapter_names:
                self.model.save_adapter(f"{save_path}/{a_name}/", a_name)
class AdapterFusion(AbstractAdapter):
    """Loads pre-trained task adapters and trains one Fuse composition per task,
    where each task's fusion attends over all adapters with its own first."""
    def __init__(self, checkpoint_name, task_ids: List[str], load_adapters_as: Union[str, dict], fusion_dir: str = None,
                 inference=False):
        super(AdapterFusion, self).__init__(checkpoint_name)
        # load_adapters_as can be a str for a local path with adapters and fusion dirs,
        # or a dict of names to be loaded from the adapters hub; the hub version of
        # single adapters should have the suffix _fusion
        if not fusion_dir:
            # at inference, derive the fusion dir from the adapters path
            fusion_dir = load_adapters_as.replace("/adapters/", "") if inference and type(
                load_adapters_as) == str else None
        load_adapters_as = load_adapters_as.replace("fusion", "adapters") if type(
            load_adapters_as) == str else load_adapters_as
        for t_id in task_ids:
            if type(load_adapters_as) == str and os.path.isdir(load_adapters_as):
                self.model.load_adapter(f"{load_adapters_as}/{t_id}/", load_as=t_id)
            else:
                self.model.load_adapter(load_adapters_as[t_id], load_as=t_id)
        self.fusion_mods_dict = dict()
        for i, t_id in enumerate(task_ids):
            # each task's fusion lists its own adapter first, then the others
            task_fuse = Fuse(*([t_id] + task_ids[:i] + task_ids[i + 1:]))
            self.fusion_mods_dict[t_id] = task_fuse
            if not inference:
                self.model.add_adapter_fusion(task_fuse)
            else:
                if fusion_dir:
                    self.model.load_adapter_fusion(f"{fusion_dir}/{t_id}_fusion/")
                else:
                    self.model.load_adapter_fusion(f"{load_adapters_as[t_id]}_fusion")
        # Freeze adapters and base model; only the fusion layers train.
        self.model.train_adapter_fusion(list(self.fusion_mods_dict.values()))
        print(self.model.active_adapters)
        # self.model.get_input_embeddings().train()
        # self.model.train_adapter(adapter_setup=task_ids, train_embeddings=True)
    def forward(self, input_ids, attention_mask, task_id):
        self.model.base_model.set_active_adapters(self.fusion_mods_dict[task_id])
        return self.model(input_ids, attention_mask=attention_mask)
    def save_pretrained(self, save_path: str):
        # self.model.save_pretrained(save_path)
        from pathlib import Path
        Path(save_path).mkdir(parents=True, exist_ok=True)
        for t_id, t_fuse in self.fusion_mods_dict.items():
            self.model.save_adapter_fusion(f'{save_path}/{t_id}_fusion/', t_fuse)
| 4,905 | 44.850467 | 121 | py |
scirepeval | scirepeval-main/bert_pals.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch BERT model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import json
import math
from typing import List, Optional
import os
import six
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import CrossEntropyLoss, MSELoss
from torch.nn.parameter import Parameter
from transformers.models.bert.modeling_bert import BertPreTrainedModel
from transformers.models.bert.configuration_bert import BertConfig
def gelu(x):
    """Implementation of the gelu activation function.
    For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
    0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))

    Exact erf form: x * Phi(x), where Phi is the standard normal CDF.
    """
    cdf = 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
    return x * cdf
class BertPalConfig(BertConfig):
    """Configuration for PAL/adapter-augmented BERT models.

    Extends the standard ``transformers`` ``BertConfig`` with flags controlling
    how task-specific capacity is added (PALs, low-rank adapters, Houlsby
    adapters, LHUC, top-layer heads) and with the reduced adapter projection
    dimension ``hidden_size_aug``.
    """

    def __init__(self, vocab_size, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
                 intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
                 max_position_embeddings=512, type_vocab_size=16, initializer_range=0.02, pals=False, mult=False,
                 top=False, lhuc=False, houlsby=False, bert_lay_top=False, num_tasks=1, extra_dim=None,
                 hidden_size_aug=204, **kwargs):
        """Constructs BertPalConfig.

        Args:
            vocab_size: Vocabulary size of `inputs_ids` in `BertModel`.
            hidden_size: Size of the encoder layers and the pooler layer.
            num_hidden_layers: Number of hidden layers in the Transformer encoder.
            num_attention_heads: Number of attention heads per attention layer.
            intermediate_size: Size of the feed-forward layer in the encoder.
            hidden_act: Non-linear activation (function or string) in the encoder/pooler.
            hidden_dropout_prob: Dropout probability for fully connected layers.
            attention_probs_dropout_prob: Dropout ratio for attention probabilities.
            max_position_embeddings: Maximum supported sequence length.
            type_vocab_size: Vocabulary size of `token_type_ids`.
            initializer_range: Stddev of the truncated_normal initializer.
            pals: Use Projected Attention Layers as the per-task module.
            mult: Add a per-task module in parallel to each BERT layer.
            top: Add task-specific layers only on top of the encoder.
            lhuc: Apply Learning Hidden Unit Contribution rescaling per task.
            houlsby: Use Houlsby-style serial adapters inside each layer.
            bert_lay_top: When ``top``, use a full BERT layer as the head.
            num_tasks: Number of tasks (one adapter set per task).
            extra_dim: Optional explicit bottleneck size for low-rank adapters.
            hidden_size_aug: Projection size for PAL/adapter modules.
        """
        super().__init__(vocab_size, hidden_size, num_hidden_layers, num_attention_heads, intermediate_size, hidden_act,
                         hidden_dropout_prob, attention_probs_dropout_prob, max_position_embeddings, type_vocab_size,
                         initializer_range, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.hidden_size_aug = hidden_size_aug
        self.pals = pals
        self.extra_dim = extra_dim
        self.houlsby = houlsby
        self.mult = mult
        self.top = top
        self.bert_lay_top = bert_lay_top
        self.lhuc = lhuc
        self.num_tasks = num_tasks

    @classmethod
    def from_json_file(cls, json_file):
        """Constructs a `BertPalConfig` from a json file of parameters."""
        with open(json_file, "r") as reader:
            text = reader.read()
        return cls.from_dict(json.loads(text))

    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        return output

    @classmethod
    def from_dict(cls, json_object):
        """Constructs a `BertPalConfig` from a Python dictionary of parameters."""
        config = BertPalConfig(vocab_size=None)
        # Fix: replaced Python-2-era `six.iteritems` with the native dict iterator.
        for (key, value) in json_object.items():
            config.__dict__[key] = value
        return config

    def to_json_string(self, use_diff: bool = True):
        """Serializes this instance to a JSON string.

        ``use_diff`` is accepted for interface compatibility with the
        ``transformers`` base class but ignored: the full config is always
        serialized, never the diff against defaults.
        """
        return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
class BERTLayerNorm(nn.Module):
    """Layer normalization in the TF style (epsilon inside the square root).

    When ``multi_params`` is given, parameters are sized for the reduced
    adapter dimension ``hidden_size_aug`` instead of ``hidden_size``.
    """

    def __init__(self, config, multi_params=None, variance_epsilon=1e-12):
        super(BERTLayerNorm, self).__init__()
        size = config.hidden_size_aug if multi_params is not None else config.hidden_size
        self.weight = nn.Parameter(torch.ones(size))
        self.bias = nn.Parameter(torch.zeros(size))
        self.variance_epsilon = variance_epsilon

    def forward(self, x):
        mean = x.mean(-1, keepdim=True)
        var = (x - mean).pow(2).mean(-1, keepdim=True)
        normed = (x - mean) / torch.sqrt(var + self.variance_epsilon)
        return self.weight * normed + self.bias
class BERTEmbeddings(nn.Module):
    """Sum of word, position and token-type embeddings, then LayerNorm + dropout."""

    def __init__(self, config):
        super(BERTEmbeddings, self).__init__()
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
        # self.LayerNorm keeps the TF variable name so TensorFlow checkpoints load cleanly.
        self.LayerNorm = BERTLayerNorm(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, input_ids, token_type_ids=None):
        if token_type_ids is None:
            token_type_ids = torch.zeros_like(input_ids)
        seq_length = input_ids.size(1)
        position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)
        position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
        combined = (self.word_embeddings(input_ids)
                    + self.position_embeddings(position_ids)
                    + self.token_type_embeddings(token_type_ids))
        return self.dropout(self.LayerNorm(combined))
class BERTSelfAttention(nn.Module):
    """Multi-head scaled dot-product self-attention (no output projection).

    When ``multi_params`` is given it is interpreted as the number of heads and
    the attention operates in the reduced ``hidden_size_aug`` dimension (as used
    by the PAL modules); otherwise the standard BERT sizes from ``config`` apply.
    """

    def __init__(self, config, multi_params=None):
        super(BERTSelfAttention, self).__init__()
        # NOTE(review): this divisibility check always uses config.hidden_size,
        # even though the multi_params branch below sizes heads from
        # config.hidden_size_aug — confirm this is intended.
        if config.hidden_size % config.num_attention_heads != 0:
            raise ValueError(
                "The hidden size (%d) is not a multiple of the number of attention "
                "heads (%d)" % (config.hidden_size, config.num_attention_heads))
        if multi_params is not None:
            # PAL mode: multi_params is the head count; sizes come from hidden_size_aug.
            self.num_attention_heads = multi_params
            self.attention_head_size = int(config.hidden_size_aug / self.num_attention_heads)
            self.all_head_size = self.num_attention_heads * self.attention_head_size
            hidden_size = config.hidden_size_aug
        else:
            self.num_attention_heads = config.num_attention_heads
            self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
            self.all_head_size = self.num_attention_heads * self.attention_head_size
            hidden_size = config.hidden_size
        self.query = nn.Linear(hidden_size, self.all_head_size)
        self.key = nn.Linear(hidden_size, self.all_head_size)
        self.value = nn.Linear(hidden_size, self.all_head_size)
        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)

    def transpose_for_scores(self, x):
        """Reshape (batch, seq, all_head_size) -> (batch, heads, seq, head_size)."""
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(*new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(self, hidden_states, attention_mask):
        mixed_query_layer = self.query(hidden_states)
        mixed_key_layer = self.key(hidden_states)
        mixed_value_layer = self.value(hidden_states)
        query_layer = self.transpose_for_scores(mixed_query_layer)
        key_layer = self.transpose_for_scores(mixed_key_layer)
        value_layer = self.transpose_for_scores(mixed_value_layer)
        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        # Apply the additive attention mask (precomputed in BertModel.forward:
        # 0.0 for kept positions, -10000.0 for masked ones).
        attention_scores = attention_scores + attention_mask
        # Normalize the attention scores to probabilities.
        attention_probs = nn.Softmax(dim=-1)(attention_scores)
        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs)
        context_layer = torch.matmul(attention_probs, value_layer)
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(*new_context_layer_shape)
        return context_layer
class BERTMultSelfOutput(nn.Module):
    """Residual add + LayerNorm with no projection (used on task-specific paths)."""

    def __init__(self, config, multi_params=None):
        super(BERTMultSelfOutput, self).__init__()
        self.LayerNorm = BERTLayerNorm(config, multi_params)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        dropped = self.dropout(hidden_states)
        return self.LayerNorm(dropped + input_tensor)
class BERTSelfOutput(nn.Module):
    """Projection + dropout + residual LayerNorm after self-attention.

    With ``houlsby`` set, a per-task low-rank adapter (selected by index ``i``)
    is applied to the projected hidden states and added in before the residual.
    """

    def __init__(self, config, multi_params=None, houlsby=False):
        super(BERTSelfOutput, self).__init__()
        if houlsby:
            adapter = BERTLowRank(config)
            self.multi_layers = nn.ModuleList([copy.deepcopy(adapter) for _ in range(config.num_tasks)])
        size = config.hidden_size_aug if multi_params is not None else config.hidden_size
        self.dense = nn.Linear(size, size)
        self.LayerNorm = BERTLayerNorm(config, multi_params)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.houlsby = houlsby

    def forward(self, hidden_states, input_tensor, attention_mask=None, i=0):
        projected = self.dropout(self.dense(hidden_states))
        if self.houlsby:
            projected = projected + self.multi_layers[i](projected, attention_mask)
        return self.LayerNorm(projected + input_tensor)
class BERTAttention(nn.Module):
    """Complete attention sub-layer: self-attention followed by output projection."""

    def __init__(self, config, multi_params=None, houlsby=False):
        super(BERTAttention, self).__init__()
        self.self = BERTSelfAttention(config, multi_params)
        self.output = BERTSelfOutput(config, multi_params, houlsby)

    def forward(self, input_tensor, attention_mask, i=0):
        context = self.self(input_tensor, attention_mask)
        return self.output(context, input_tensor, attention_mask, i=i)
class BERTPals(nn.Module):
    """Projected Attention Layer (PAL): project down to ``hidden_size_aug``,
    run a small multi-head attention, project back up, then apply GELU."""

    def __init__(self, config, extra_dim=None):
        super(BERTPals, self).__init__()
        # Encoder/decoder matrices between hidden_size and the smaller PAL dimension.
        self.aug_dense = nn.Linear(config.hidden_size, config.hidden_size_aug)
        self.aug_dense2 = nn.Linear(config.hidden_size_aug, config.hidden_size)
        # Attention without the final matrix multiply; fixed at 6 heads.
        self.attn = BERTSelfAttention(config, 6)
        self.config = config
        self.hidden_act_fn = gelu

    def forward(self, hidden_states, attention_mask=None):
        down = self.aug_dense(hidden_states)
        attended = self.attn(down, attention_mask)
        up = self.aug_dense2(attended)
        return self.hidden_act_fn(up)
class BERTLowRank(nn.Module):
    """Low-rank adapter: down-project, GELU, up-project (no attention inside).

    The bottleneck width is ``config.extra_dim`` when set, otherwise
    ``config.hidden_size_aug``.
    """

    def __init__(self, config, extra_dim=None):
        super(BERTLowRank, self).__init__()
        bottleneck = config.extra_dim if config.extra_dim else config.hidden_size_aug
        self.aug_dense = nn.Linear(config.hidden_size, bottleneck)
        self.aug_dense2 = nn.Linear(bottleneck, config.hidden_size)
        self.config = config
        self.hidden_act_fn = gelu

    def forward(self, hidden_states, attention_mask=None):
        down = self.aug_dense(hidden_states)
        activated = self.hidden_act_fn(down)
        return self.aug_dense2(activated)
class BERTIntermediate(nn.Module):
    """Feed-forward expansion (hidden_size -> intermediate_size) with GELU."""

    def __init__(self, config):
        super(BERTIntermediate, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        self.config = config
        self.intermediate_act_fn = gelu

    def forward(self, hidden_states):
        return self.intermediate_act_fn(self.dense(hidden_states))
class BERTLhuc(nn.Module):
    """Learning Hidden Unit Contributions (LHUC): per-unit multiplicative
    rescaling of hidden states in the range (0, 2)."""

    def __init__(self, config):
        super(BERTLhuc, self).__init__()
        # Zero init => 2 * sigmoid(0) = 1, i.e. identity rescaling at the start.
        self.lhuc = nn.Parameter(torch.zeros(config.hidden_size))

    def forward(self, hidden_states):
        # Fix: torch.sigmoid replaces the deprecated nn.functional.sigmoid.
        return hidden_states * 2. * torch.sigmoid(self.lhuc)
class BERTOutput(nn.Module):
    """Feed-forward output projection with residual LayerNorm.

    With ``houlsby`` set, a per-task adapter (PAL or low-rank, per
    ``config.pals``) computed from the sub-layer *input* is added in before the
    residual connection.
    """

    def __init__(self, config, houlsby=False):
        super(BERTOutput, self).__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = BERTLayerNorm(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        if houlsby:
            adapter = BERTPals(config) if config.pals else BERTLowRank(config)
            self.multi_layers = nn.ModuleList([copy.deepcopy(adapter) for _ in range(config.num_tasks)])
        self.houlsby = houlsby

    def forward(self, hidden_states, input_tensor, attention_mask=None, i=0):
        out = self.dropout(self.dense(hidden_states))
        if self.houlsby:
            out = out + self.multi_layers[i](input_tensor, attention_mask)
        return self.LayerNorm(out + input_tensor)
class BERTLayer(nn.Module):
    """One transformer block: attention + feed-forward, with optional per-task
    extras — parallel PAL/low-rank modules (``mult``), serial Houlsby adapters,
    and/or LHUC rescaling — selected per task by index ``i``."""

    def __init__(self, config, mult=False, houlsby=False):
        super(BERTLayer, self).__init__()
        self.attention = BERTAttention(config, houlsby=houlsby)
        self.intermediate = BERTIntermediate(config)
        self.output = BERTOutput(config, houlsby=houlsby)
        if config.lhuc:
            # One LHUC scaler per task.
            lhuc = BERTLhuc(config)
            self.multi_lhuc = nn.ModuleList([copy.deepcopy(lhuc) for _ in range(config.num_tasks)])
        if mult:
            # One parallel task module per task: a PAL or a plain low-rank bottleneck.
            if config.pals:
                multi = BERTPals(config)
            else:
                multi = BERTLowRank(config)
            self.multi_layers = nn.ModuleList([copy.deepcopy(multi) for _ in range(config.num_tasks)])
        self.mult = mult
        self.lhuc = config.lhuc
        self.houlsby = houlsby

    def forward(self, hidden_states, attention_mask, i=0):
        attention_output = self.attention(hidden_states, attention_mask, i)
        intermediate_output = self.intermediate(attention_output)
        if self.lhuc and not self.mult:
            # LHUC only: rescale the block output with task i's scaler.
            layer_output = self.output(intermediate_output, attention_output)
            layer_output = self.multi_lhuc[i](layer_output)
        elif self.mult:
            # Parallel adapter: computed from the block *input* and added into
            # the residual stream (optionally LHUC-rescaled first).
            extra = self.multi_layers[i](hidden_states, attention_mask)
            if self.lhuc:
                extra = self.multi_lhuc[i](extra)
            layer_output = self.output(intermediate_output, attention_output + extra)
        elif self.houlsby:
            # Serial Houlsby adapters live inside self.output; forward the task index.
            layer_output = self.output(intermediate_output, attention_output, attention_mask, i)
        else:
            layer_output = self.output(intermediate_output, attention_output)
        return layer_output
class BERTEncoder(nn.Module):
    """Stack of BERTLayers with optional per-task additions.

    Depending on the config, layers get serial Houlsby adapters, parallel
    PAL/low-rank modules (``mult``), or nothing; ``top`` adds task-specific
    capacity only after the shared stack. When PALs are used, the down/up
    projection matrices are created once per task here and shared across all
    layers by rebinding each layer's ``aug_dense``/``aug_dense2`` attributes.
    """

    def __init__(self, config):
        super(BERTEncoder, self).__init__()
        self.config = config
        if config.houlsby:
            # Adjust line below to add PALs etc. to different layers. True means add a PAL.
            self.multis = [True if i < 999 else False for i in range(config.num_hidden_layers)]
            self.layer = nn.ModuleList([BERTLayer(config, houlsby=mult) for mult in self.multis])
        elif config.mult:
            # Adjust line below to add PALs etc. to different layers. True means add a PAL.
            self.multis = [True if i < 999 else False for i in range(config.num_hidden_layers)]
            self.layer = nn.ModuleList([BERTLayer(config, mult=mult) for mult in self.multis])
        else:
            # Plain BERT: identical layers, no task-specific modules.
            layer = BERTLayer(config)
            self.layer = nn.ModuleList([copy.deepcopy(layer) for _ in range(config.num_hidden_layers)])
        if config.top:
            if config.bert_lay_top:
                # One full extra BERT layer per task on top of the shared stack.
                multi = BERTLayer(config)
            else:
                # Projection matrices and attention for adding to the top.
                mult_dense = nn.Linear(config.hidden_size, config.hidden_size_aug)
                self.mult_dense = nn.ModuleList([copy.deepcopy(mult_dense) for _ in range(config.num_tasks)])
                mult_dense2 = nn.Linear(config.hidden_size_aug, config.hidden_size)
                self.mult_dense2 = nn.ModuleList([copy.deepcopy(mult_dense2) for _ in range(config.num_tasks)])
                multi = nn.ModuleList([copy.deepcopy(BERTAttention(config, 12)) for _ in range(6)])
            self.multi_layers = nn.ModuleList([copy.deepcopy(multi) for _ in range(config.num_tasks)])
            self.gelu = gelu
        if config.mult and config.pals:
            dense = nn.Linear(config.hidden_size, config.hidden_size_aug)
            # Shared encoder and decoder across layers
            self.mult_aug_dense = nn.ModuleList([copy.deepcopy(dense) for _ in range(config.num_tasks)])
            dense2 = nn.Linear(config.hidden_size_aug, config.hidden_size)
            self.mult_aug_dense2 = nn.ModuleList([copy.deepcopy(dense2) for _ in range(config.num_tasks)])
            # Rebind each layer's per-task PAL projections to the shared ones above.
            for l, layer in enumerate(self.layer):
                if self.multis[l]:
                    for i, lay in enumerate(layer.multi_layers):
                        lay.aug_dense = self.mult_aug_dense[i]
                        lay.aug_dense2 = self.mult_aug_dense2[i]
        if config.houlsby and config.pals:
            dense = nn.Linear(config.hidden_size, config.hidden_size_aug)
            # Shared encoder and decoder across layers
            self.mult_aug_dense = nn.ModuleList([copy.deepcopy(dense) for _ in range(config.num_tasks)])
            dense2 = nn.Linear(config.hidden_size_aug, config.hidden_size)
            self.mult_aug_dense2 = nn.ModuleList([copy.deepcopy(dense2) for _ in range(config.num_tasks)])
            # NOTE(review): `dense3` below is created but never used or registered;
            # it only consumes RNG draws at construction time. Candidate for removal
            # (kept to preserve seeded-initialization reproducibility).
            dense3 = nn.Linear(config.hidden_size, config.hidden_size_aug)
            # Houlsby adapters sit inside each layer's output module.
            for l, layer in enumerate(self.layer):
                if self.multis[l]:
                    for i, lay in enumerate(layer.output.multi_layers):
                        lay.aug_dense = self.mult_aug_dense[i]
                        lay.aug_dense2 = self.mult_aug_dense2[i]

    def forward(self, hidden_states, attention_mask, i=0):
        """Run all layers for task index ``i``; returns every layer's hidden states."""
        all_encoder_layers = []
        for layer_module in self.layer:
            hidden_states = layer_module(hidden_states, attention_mask, i)
            all_encoder_layers.append(hidden_states)
        if self.config.top:
            # Replace the final layer's output with the task-specific top module's output.
            if self.config.bert_lay_top:
                all_encoder_layers[-1] = self.multi_layers[i](hidden_states, attention_mask)
            else:
                hidden_states = self.mult_dense[i](hidden_states)
                for lay in self.multi_layers[i]:
                    hidden_states = lay(hidden_states, attention_mask)
                all_encoder_layers[-1] = self.mult_dense2[i](hidden_states)
        return all_encoder_layers
class BERTPooler(nn.Module):
    """Pools a sequence by passing the first ([CLS]) token through dense + tanh."""

    def __init__(self, config):
        super(BERTPooler, self).__init__()
        dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.activation = nn.Tanh()
        # Per-task pooling heads are disabled: a single dense layer is shared.
        self.pool = False
        if self.pool:
            self.mult_dense_layers = nn.ModuleList([copy.deepcopy(dense) for _ in range(config.num_tasks)])
        else:
            self.dense = dense
        self.mult = config.mult
        self.top = config.top

    def forward(self, hidden_states, i=0):
        # "Pool" by taking only the hidden state of the first token.
        cls_state = hidden_states[:, 0]
        if (self.mult or self.top) and self.pool:
            pooled = self.mult_dense_layers[i](cls_state)
        else:
            pooled = self.dense(cls_state)
        return self.activation(pooled)
class BertModel(BertPreTrainedModel):
    """BERT model ("Bidirectional Embedding Representations from a Transformer")
    with per-task adapter/PAL support: the extra argument ``i`` in forward()
    selects the task-specific modules throughout the encoder.
    Example usage:
    ```python
    # Already been converted into WordPiece token ids
    input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
    input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
    token_type_ids = torch.LongTensor([[0, 0, 1], [0, 2, 0]])
    config = modeling.BertConfig(vocab_size=32000, hidden_size=512,
        num_hidden_layers=8, num_attention_heads=6, intermediate_size=1024)
    model = modeling.BertModel(config=config)
    all_encoder_layers, pooled_output = model(input_ids, token_type_ids, input_mask)
    ```
    """

    def __init__(self, config: BertPalConfig):
        """Constructor for BertModel.
        Args:
            config: `BertConfig` instance.
        """
        super(BertModel, self).__init__(config)
        self.embeddings = BERTEmbeddings(config)
        self.encoder = BERTEncoder(config)
        self.pooler = BERTPooler(config)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, i=0):
        """Returns (all_encoder_layers, pooled_output) for task index ``i``."""
        if attention_mask is None:
            attention_mask = torch.ones_like(input_ids)
        if token_type_ids is None:
            token_type_ids = torch.zeros_like(input_ids)
        # We create a 3D attention mask from a 2D tensor mask.
        # Sizes are [batch_size, 1, 1, from_seq_length]
        # So we can broadcast to [batch_size, num_heads, to_seq_length, from_seq_length]
        # this attention mask is more simple than the triangular masking of causal attention
        # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
        extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
        # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
        # masked positions, this operation will create a tensor which is 0.0 for
        # positions we want to attend and -10000.0 for masked positions.
        # Since we are adding it to the raw scores before the softmax, this is
        # effectively the same as removing these entirely.
        extended_attention_mask = extended_attention_mask.float()
        extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
        embedding_output = self.embeddings(input_ids, token_type_ids)
        all_encoder_layers = self.encoder(embedding_output, extended_attention_mask, i)
        sequence_output = all_encoder_layers[-1]
        pooled_output = self.pooler(sequence_output, i)
        return all_encoder_layers, pooled_output

    def get_input_embeddings(self):
        # Hook required by BertPreTrainedModel (e.g. for resize_token_embeddings).
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        # Counterpart hook to get_input_embeddings.
        self.embeddings.word_embeddings = value
class BertForMultiTask(nn.Module):
    """BERT model for classification or regression on GLUE tasks (STS-B is treated as a regression task).
    This module is composed of the shared (multi-task) BERT model with one
    linear classification head per task on top of the pooled output.
    """

    def __init__(self, config, tasks):
        super(BertForMultiTask, self).__init__()
        self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # One output head per task; `tasks` lists each task's label count.
        self.classifier = nn.ModuleList([nn.Linear(config.hidden_size, num_labels)
                                         for i, num_labels in enumerate(tasks)])

        def init_weights(module):
            if isinstance(module, (nn.Linear, nn.Embedding)):
                # Slightly different from the TF version which uses truncated_normal for initialization
                # cf https://github.com/pytorch/pytorch/pull/5617
                module.weight.data.normal_(mean=0.0, std=config.initializer_range)
            elif isinstance(module, BERTLayerNorm):
                # Fix: BERTLayerNorm's parameters are named `bias`/`weight` (see its
                # definition above), not TF-style `beta`/`gamma`; the old attribute
                # names raised AttributeError when apply() visited a layernorm.
                module.bias.data.normal_(mean=0.0, std=config.initializer_range)
                module.weight.data.normal_(mean=0.0, std=config.initializer_range)
            if isinstance(module, nn.Linear):
                if module.bias is not None:
                    module.bias.data.zero_()

        self.apply(init_weights)

    def forward(self, input_ids, token_type_ids, attention_mask, task_id, name='cola', labels=None):
        """Returns (loss, logits) when `labels` is given, otherwise logits only."""
        _, pooled_output = self.bert(input_ids, token_type_ids, attention_mask, task_id)
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier[task_id](pooled_output)
        if labels is not None and name != 'sts':
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(logits, labels)
            return loss, logits
        # STS is a regression task: MSE against a scalar target per example.
        elif labels is not None and name == 'sts':
            loss_fct = MSELoss()
            loss = loss_fct(logits, labels.unsqueeze(1))
            return loss, logits
        else:
            return logits
class BertForSequenceClassification(nn.Module):
    """BERT model for classification.
    This module is composed of the BERT model with a linear layer on top of
    the pooled output.
    Example usage:
    ```python
    # Already been converted into WordPiece token ids
    input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
    input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
    token_type_ids = torch.LongTensor([[0, 0, 1], [0, 2, 0]])
    config = BertConfig(vocab_size=32000, hidden_size=512,
        num_hidden_layers=8, num_attention_heads=6, intermediate_size=1024)
    num_labels = 2
    model = BertForSequenceClassification(config, num_labels)
    logits = model(input_ids, token_type_ids, input_mask)
    ```
    """

    def __init__(self, config, num_labels):
        super(BertForSequenceClassification, self).__init__()
        self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, num_labels)

        def init_weights(module):
            if isinstance(module, (nn.Linear, nn.Embedding)):
                # Slightly different from the TF version which uses truncated_normal for initialization
                # cf https://github.com/pytorch/pytorch/pull/5617
                module.weight.data.normal_(mean=0.0, std=config.initializer_range)
            elif isinstance(module, BERTLayerNorm):
                # Fix: BERTLayerNorm exposes `bias`/`weight`, not `beta`/`gamma`;
                # the old names raised AttributeError when apply() visited a layernorm.
                module.bias.data.normal_(mean=0.0, std=config.initializer_range)
                module.weight.data.normal_(mean=0.0, std=config.initializer_range)
            if isinstance(module, nn.Linear):
                if module.bias is not None:
                    module.bias.data.zero_()

        self.apply(init_weights)

    def forward(self, input_ids, token_type_ids, attention_mask, labels=None):
        """Returns (loss, logits) when `labels` is given, otherwise logits only."""
        _, pooled_output = self.bert(input_ids, token_type_ids, attention_mask)
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(logits, labels)
            return loss, logits
        else:
            return logits
class BertForQuestionAnswering(nn.Module):
    """BERT model for Question Answering (span extraction).
    This module is composed of the BERT model with a linear layer on top of
    the sequence output that computes start_logits and end_logits
    Example usage:
    ```python
    # Already been converted into WordPiece token ids
    input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
    input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
    token_type_ids = torch.LongTensor([[0, 0, 1], [0, 2, 0]])
    config = BertConfig(vocab_size=32000, hidden_size=512,
        num_hidden_layers=8, num_attention_heads=6, intermediate_size=1024)
    model = BertForQuestionAnswering(config)
    start_logits, end_logits = model(input_ids, token_type_ids, input_mask)
    ```
    """

    def __init__(self, config):
        super(BertForQuestionAnswering, self).__init__()
        self.bert = BertModel(config)
        # TODO check with Google if it's normal there is no dropout on the token classifier of SQuAD in the TF version
        # self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.qa_outputs = nn.Linear(config.hidden_size, 2)

        def init_weights(module):
            if isinstance(module, (nn.Linear, nn.Embedding)):
                # Slightly different from the TF version which uses truncated_normal for initialization
                # cf https://github.com/pytorch/pytorch/pull/5617
                module.weight.data.normal_(mean=0.0, std=config.initializer_range)
            elif isinstance(module, BERTLayerNorm):
                # Fix: BERTLayerNorm exposes `bias`/`weight`, not `beta`/`gamma`;
                # the old names raised AttributeError when apply() visited a layernorm.
                module.bias.data.normal_(mean=0.0, std=config.initializer_range)
                module.weight.data.normal_(mean=0.0, std=config.initializer_range)
            if isinstance(module, nn.Linear):
                # Guard against bias-less Linear layers, consistent with the
                # sibling classifier heads in this file.
                if module.bias is not None:
                    module.bias.data.zero_()

        self.apply(init_weights)

    def forward(self, input_ids, token_type_ids, attention_mask, start_positions=None, end_positions=None):
        """Returns the averaged span loss when positions are given, else (start_logits, end_logits)."""
        all_encoder_layers, _ = self.bert(input_ids, token_type_ids, attention_mask)
        sequence_output = all_encoder_layers[-1]
        logits = self.qa_outputs(sequence_output)
        start_logits, end_logits = logits.split(1, dim=-1)
        start_logits = start_logits.squeeze(-1)
        end_logits = end_logits.squeeze(-1)
        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, split add a dimension - if not this is a no-op
            start_positions = start_positions.squeeze(-1)
            end_positions = end_positions.squeeze(-1)
            # sometimes the start/end positions are outside our model inputs, we ignore these terms
            ignored_index = start_logits.size(1)
            start_positions.clamp_(0, ignored_index)
            end_positions.clamp_(0, ignored_index)
            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            total_loss = (start_loss + end_loss) / 2
            return total_loss
        else:
            return start_logits, end_logits
class BertForMultipleChoice(nn.Module):
    """BERT model for multiple choice tasks.
    This module is composed of the BERT model with a linear layer on top of
    the pooled output.
    Params:
        `config`: a BertConfig class instance with the configuration to build a new model.
        `num_choices`: the number of classes for the classifier. Default = 2.
    Inputs:
        `input_ids`: a torch.LongTensor of shape [batch_size, num_choices, sequence_length]
            with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts
            `extract_features.py`, `run_classifier.py` and `run_squad.py`)
        `token_type_ids`: an optional torch.LongTensor of shape [batch_size, num_choices, sequence_length]
            with the token types indices selected in [0, 1]. Type 0 corresponds to a `sentence A`
            and type 1 corresponds to a `sentence B` token (see BERT paper for more details).
        `attention_mask`: an optional torch.LongTensor of shape [batch_size, num_choices, sequence_length] with indices
            selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
            input sequence length in the current batch. It's the mask that we typically use for attention when
            a batch has varying length sentences.
        `labels`: labels for the classification output: torch.LongTensor of shape [batch_size]
            with indices selected in [0, ..., num_choices].
    Outputs:
        if `labels` is not `None`:
            Outputs the CrossEntropy classification loss of the output with the labels.
        if `labels` is `None`:
            Outputs the classification logits of shape [batch_size, num_labels].
    Example usage:
    ```python
    # Already been converted into WordPiece token ids
    input_ids = torch.LongTensor([[[31, 51, 99], [15, 5, 0]], [[12, 16, 42], [14, 28, 57]]])
    input_mask = torch.LongTensor([[[1, 1, 1], [1, 1, 0]],[[1,1,0], [1, 0, 0]]])
    token_type_ids = torch.LongTensor([[[0, 0, 1], [0, 1, 0]],[[0, 1, 1], [0, 0, 1]]])
    config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
        num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
    num_choices = 2
    model = BertForMultipleChoice(config, num_choices)
    logits = model(input_ids, token_type_ids, input_mask)
    ```
    """

    def __init__(self, config, num_choices=2):
        super(BertForMultipleChoice, self).__init__()
        self.num_choices = num_choices
        self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # A single scalar score per (example, choice); scores are regrouped per
        # example in forward() before the softmax/cross-entropy.
        self.classifier = nn.Linear(config.hidden_size, 1)

        def init_weights(module):
            if isinstance(module, (nn.Linear, nn.Embedding)):
                # Slightly different from the TF version which uses truncated_normal for initialization
                # cf https://github.com/pytorch/pytorch/pull/5617
                module.weight.data.normal_(mean=0.0, std=config.initializer_range)
            elif isinstance(module, BERTLayerNorm):
                # Fix: BERTLayerNorm exposes `bias`/`weight`, not `beta`/`gamma`;
                # the old names raised AttributeError when apply() visited a layernorm.
                module.bias.data.normal_(mean=0.0, std=config.initializer_range)
                module.weight.data.normal_(mean=0.0, std=config.initializer_range)
            if isinstance(module, nn.Linear):
                if module.bias is not None:
                    module.bias.data.zero_()

        self.apply(init_weights)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None):
        """Returns the loss when `labels` is given, else logits of shape [batch, num_choices]."""
        # Fold the choices dimension into the batch so BERT sees 2-D inputs.
        flat_input_ids = input_ids.view(-1, input_ids.size(-1))
        flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1))
        flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1))
        _, pooled_output = self.bert(flat_input_ids, flat_token_type_ids, flat_attention_mask)
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        reshaped_logits = logits.view(-1, self.num_choices)
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(reshaped_logits, labels)
            return loss
        else:
            return reshaped_logits
class BertPalsEncoder(torch.nn.Module):
    """Wraps a PAL-augmented BertModel as a multi-task text encoder.

    ``config`` is either a path to a BertPalConfig json file or a config
    object. ``checkpoint`` is either a path to a trained PAL state dict or a
    pre-trained model object whose shared weights are copied in while the
    task-specific (aug/mult) weights keep their fresh initialization.
    """

    def __init__(self, config: str, task_ids: List[str], checkpoint):
        super(BertPalsEncoder, self).__init__()
        self.bert_config = BertPalConfig.from_json_file(config) if isinstance(config, str) else config
        self.bert_config.num_tasks = len(task_ids)
        if not isinstance(checkpoint, str):
            # Align the vocab with the pre-trained model we are copying from.
            self.bert_config.vocab_size = checkpoint.config.vocab_size
        self.bert = BertModel(self.bert_config) if isinstance(config, str) else checkpoint
        # Map each task id to the index of its PAL/adapter modules.
        self.task_idx = {task: i for i, task in enumerate(task_ids)}
        print(self.task_idx)

        def init_weights(module):
            if isinstance(module, (torch.nn.Linear, torch.nn.Embedding)):
                # Slightly different from the TF version which uses truncated_normal for initialization
                # cf https://github.com/pytorch/pytorch/pull/5617
                module.weight.data.normal_(mean=0.0, std=self.bert_config.initializer_range)
            elif isinstance(module, BERTLayerNorm):
                module.bias.data.normal_(mean=0.0, std=self.bert_config.initializer_range)
                module.weight.data.normal_(mean=0.0, std=self.bert_config.initializer_range)
            if isinstance(module, torch.nn.Linear):
                if module.bias is not None:
                    module.bias.data.zero_()

        if isinstance(config, str):
            if isinstance(checkpoint, str):
                # Resume: load an already-trained PAL state dict from disk.
                chk = torch.load(checkpoint, map_location='cpu')
                update = {k.replace("bert.", ""): v for k, v in chk.items()}
            else:
                # Fresh PALs: initialize everything, then overwrite the shared
                # (non-adapter) weights from the pre-trained checkpoint. Names
                # containing 'aug'/'mult' are task-specific and keep their init,
                # except the per-task pooler which is seeded from the shared one.
                self.apply(init_weights)
                partial = checkpoint.state_dict()
                model_dict = self.bert.state_dict()
                update = {}
                for n, p in model_dict.items():
                    if 'aug' in n or 'mult' in n:
                        update[n] = p
                        if 'pooler.mult' in n and 'bias' in n:
                            update[n] = partial['pooler.dense.bias']
                        if 'pooler.mult' in n and 'weight' in n:
                            update[n] = partial['pooler.dense.weight']
                    else:
                        update[n] = partial[n]
            self.bert.load_state_dict(update)

    def forward(self, input_ids, attention_mask=None, task_id=None):
        """Return the final-layer hidden states computed on `task_id`'s adapter path."""
        embedding = self.bert(input_ids, attention_mask=attention_mask, i=self.task_idx[task_id])
        return embedding[0][-1]

    def resize_token_embeddings(self, new_num_tokens: Optional[int] = None):
        return self.bert.resize_token_embeddings(new_num_tokens)

    def save_pretrained(self, save_path: str):
        """Save the model weights and config under `save_path`."""
        os.makedirs(save_path, exist_ok=True)
        torch.save(self.bert.state_dict(), f'{save_path}/pytorch_model.bin')
        # Fix: config.save_pretrained already writes config.json and returns None;
        # wrapping it in torch.save() raised TypeError (torch.save needs obj AND f).
        self.bert.config.save_pretrained(save_path)
| 40,279 | 45.728538 | 120 | py |
scirepeval | scirepeval-main/evaluation/gpt3_encoder.py | import os
import openai
import torch
from transformers import GPT2TokenizerFast
class GPT3Model:
    """Embeds texts via the OpenAI embeddings API, one request per input.

    Reads the API key from the OPENAI_API_KEY environment variable.
    NOTE(review): the GPT-2 tokenizer is initialised but never used in this
    class — possibly kept for callers that need token counting; confirm.
    """

    def __init__(self, embed_model: str):
        openai.api_key = os.getenv("OPENAI_API_KEY")
        self.embed_model = embed_model
        self.tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")

    def __call__(self, batch, batch_ids=None):
        """Return a (len(batch), embed_dim) tensor of API embeddings.

        If a request fails (most likely the text exceeds the model's context
        limit), retry once with a crude word-level truncation.
        """
        batch_embed = []
        for iptext in batch:
            try:
                response = openai.Embedding.create(
                    input=iptext,
                    model=self.embed_model
                )
            # BUG FIX: was a bare `except:`, which also swallows
            # KeyboardInterrupt/SystemExit.
            except Exception:
                # ~450 words presumably keeps the request under the token cap
                # — TODO confirm against the embedding model's context size.
                response = openai.Embedding.create(
                    input=" ".join(iptext.split(" ")[:450]),
                    model=self.embed_model
                )
            batch_embed.append(response['data'][0]['embedding'])
        return torch.tensor(batch_embed)
| 1,019 | 31.903226 | 66 | py |
scirepeval | scirepeval-main/evaluation/encoders.py | from typing import Dict, Union, List
from transformers import AutoModel, AutoTokenizer
import os
from bert_pals import BertPalsEncoder, BertPalConfig, BertModel
from adapter_fusion import AdapterEncoder, AdapterFusion
import torch
import logging
logger = logging.getLogger(__name__)
class EncoderFactory:
    """Builds the requested encoder variant: "default", "pals", "adapters" or "fusion".

    Checkpoint arguments that point at a local training-output directory are
    resolved to the relevant sub-directory ("model" / "model/adapters").
    """

    def __init__(self, base_checkpoint: str = None, adapters_load_from: Union[str, Dict] = None,
                 fusion_load_from: str = None, all_tasks: list = None):
        # Guard against None before isdir() (os.path.isdir(None) raises TypeError).
        self.base_checkpoint = f"{base_checkpoint}/model" if base_checkpoint and os.path.isdir(
            base_checkpoint) else base_checkpoint
        self.all_tasks = all_tasks
        # adapters_load_from may also be a dict of pretrained hub adapters — only
        # resolve it as a path when it is a string naming a local directory.
        self.adapters_load_from = f"{adapters_load_from}/model/adapters" if (type(
            adapters_load_from) == str and os.path.isdir(
            adapters_load_from)) else adapters_load_from
        # BUG FIX: the path was formatted unconditionally, so a missing fusion
        # checkpoint (None) became the bogus literal path "None/model".
        self.fusion_load_from = f"{fusion_load_from}/model" if fusion_load_from else None

    def get_encoder(self, variant: str):
        """Instantiate and return the encoder for ``variant``.

        Raises:
            ValueError: if ``variant`` is not one of the supported types.
        """
        if variant == "default":
            return AutoModel.from_pretrained(self.base_checkpoint)
        elif variant == "pals":
            # needs all task names and a local checkpoint path
            if os.path.isdir(self.base_checkpoint):
                return BertPalsEncoder(config=f"{self.base_checkpoint}/config.json", task_ids=self.all_tasks,
                                       checkpoint=f"{self.base_checkpoint}/pytorch_model.bin")
            else:
                pals_config = BertPalConfig.from_pretrained(self.base_checkpoint)
                pals_model = BertModel.from_pretrained(self.base_checkpoint)
                return BertPalsEncoder(config=pals_config, task_ids=self.all_tasks,
                                       checkpoint=pals_model)
        elif variant == "adapters":
            # needs a base model checkpoint and the adapters to be loaded from local path or dict of (task_id,
            # adapter) from adapters hub
            return AdapterEncoder(self.base_checkpoint, self.all_tasks, load_as=self.adapters_load_from)
        elif variant == "fusion":
            # needs a base model and list of adapters/local adapter checkpoint paths to be fused
            return AdapterFusion(self.base_checkpoint, self.all_tasks, load_adapters_as=self.adapters_load_from,
                                 fusion_dir=self.fusion_load_from, inference=True)
        else:
            raise ValueError("Unknown encoder type: {}".format(variant))
class Model:
    """Inference wrapper: tokenizes raw text and returns one embedding per input.

    Supports the plain transformer plus the PALS / adapter / fusion variants,
    optional control codes prepended to the text, and an asymmetric task id
    given as a {"query": ..., "candidates": ...} dict (each ``batch_ids`` item
    is then inspected at index 1 for a "q"/"c" marker).
    """

    def __init__(self, variant: str = "default", base_checkpoint: str = None,
                 adapters_load_from: Union[str, Dict] = None, fusion_load_from: str = None,
                 use_ctrl_codes: bool = False, task_id: Union[str, Dict] = None,
                 all_tasks: list = None, hidden_dim: int = 768, max_len: int = 512, use_fp16=False):
        self.variant = variant
        self.encoder = EncoderFactory(base_checkpoint, adapters_load_from, fusion_load_from, all_tasks).get_encoder(
            variant)
        # BUG FIX: keep the model and its inputs on a single device. Previously
        # the encoder was moved to CUDA only when available, while the inputs
        # were always sent to 'cuda' — crashing on CPU-only machines.
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.encoder.to(self.device)
        self.encoder.eval()
        tokenizer_checkpoint = f"{base_checkpoint}/tokenizer" if os.path.isdir(base_checkpoint) else base_checkpoint
        self.tokenizer = AutoTokenizer.from_pretrained(tokenizer_checkpoint)
        self.use_ctrl_codes = use_ctrl_codes
        # With a control code prepended, position 0 is [CLS] and position 1 is
        # the control-code token, whose embedding is what gets returned.
        self.reqd_token_idx = 0 if not use_ctrl_codes else 1
        self._task_id = task_id
        if self._task_id:
            if use_ctrl_codes:
                logger.info(f"Control code used: {self._task_id}")
            elif variant != "default":
                logger.info(f"Task id used: {self._task_id}")
        self.hidden_dim = hidden_dim
        self.max_length = max_len
        self.use_fp16 = use_fp16

    @property
    def task_id(self):
        return self._task_id

    @task_id.setter
    def task_id(self, value):
        if self.use_ctrl_codes:
            logger.info(f"Control code used: {value}")
        elif self.variant != "default":
            logger.info(f"Task id used: {value}")
        self._task_id = value

    def __call__(self, batch, batch_ids=None):
        """Embed a batch (str or list of str); returns a (batch, hidden_dim) tensor."""

        def append_ctrl_code(batch, batch_ids):
            # Prefix each text with its control code; with a dict task id the
            # code depends on whether the item is a query or a candidate.
            if type(self._task_id) == dict:
                batch = [f"{self.task_id['query']} {text}" if bid[1] == "q" else f"{self.task_id['candidates']} {text}"
                         for text, bid in zip(batch, batch_ids)]
            else:
                batch = [f"{self.task_id} {text}" for text in batch]
            return batch

        batch = [batch] if type(batch) == str else batch
        batch_ids = [] if not batch_ids else batch_ids
        if self.use_ctrl_codes:
            batch = append_ctrl_code(batch, batch_ids)
        input_ids = self.tokenizer(batch, padding=True, truncation=True,
                                   return_tensors="pt", return_token_type_ids=False, max_length=self.max_length)
        # BUG FIX: move inputs to the encoder's device rather than hard-coded 'cuda'.
        input_ids = input_ids.to(self.device)
        if self.variant == "default":
            output = self.encoder(**input_ids)
        elif type(self._task_id) != dict:
            output = self.encoder(task_id=self._task_id, **input_ids)
        else:
            # Mixed query/candidate batch: encode each group with its own task
            # id and scatter the results back into one output tensor.
            x = input_ids["input_ids"]
            output = torch.zeros(x.shape[0], x.shape[1], self.hidden_dim).to(self.device)
            q_idx = torch.tensor([i for i, b in enumerate(batch_ids) if b[1] == "q"])
            c_idx = torch.tensor([i for i, b in enumerate(batch_ids) if b[1] == "c"])
            if not q_idx.shape[0]:
                output = self.encoder(task_id=self._task_id["candidates"], **input_ids)
            elif not c_idx.shape[0]:
                output = self.encoder(task_id=self._task_id["query"], **input_ids)
            else:
                # NOTE(review): assumes the query control code is literally
                # "[QRY]" — confirm against the task configuration.
                for i, v in enumerate(sorted(self._task_id.values())):
                    curr_input_idx = q_idx if v == "[QRY]" else c_idx
                    curr_input = x[curr_input_idx]
                    curr_output = self.encoder(task_id=v, input_ids=curr_input,
                                               attention_mask=input_ids["attention_mask"][curr_input_idx])
                    try:
                        output[curr_input_idx] = curr_output  # adapters return the tensor directly
                    except Exception:
                        output[curr_input_idx] = curr_output.last_hidden_state  # pals
        try:
            embedding = output.last_hidden_state[:, self.reqd_token_idx, :]  # cls token
        except Exception:
            embedding = output[:, self.reqd_token_idx, :]  # cls token
        return embedding.half() if self.use_fp16 else embedding
| 6,499 | 48.618321 | 119 | py |
scirepeval | scirepeval-main/training/pl_training.py | import json
import sys
# setting path
sys.path.append('../')
import argparse
from typing import Dict, Optional, Any
import datasets
import pytorch_lightning as pl
import torch
import torch.nn
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning.loggers import TensorBoardLogger
from pytorch_lightning.utilities.distributed import rank_zero_only
from pytorch_lightning.utilities.distributed import sync_ddp_if_available
from pytorch_lightning.utilities.types import TRAIN_DATALOADERS, EVAL_DATALOADERS, STEP_OUTPUT
from torch.distributed import ReduceOp
from torch.utils.data import DataLoader
from transformers import AdamW, get_linear_schedule_with_warmup
from transformers import AutoTokenizer, AutoModel, AutoConfig
from adapter_fusion import AdapterFactory
from bert_pals import BertPalsEncoder
from mtl_datasets import ClassificationDataset, multi_collate, MultiLabelClassificationDataset, IRDataset, \
CustomChainDataset, TripletDataset, RegressionDataset
from schedulers import InverseSquareRootSchedule, InverseSquareRootScheduleConfig
from strategies import BatchingStrategy
from tasks import TaskFamily, load_tasks
# Fix all RNG seeds (python/numpy/torch, incl. dataloader workers) for reproducibility.
pl.seed_everything(42, workers=True)
def init_weights(modules):
    """Initialise each task head's linear layer: N(0, 0.02) weights, zero bias."""
    for head in modules:
        layer = head.linear
        layer.weight.data.normal_(mean=0.0, std=0.02)
        if layer.bias is not None:
            layer.bias.data.zero_()
# Maps PyTorch Lightning stage names to the dataset split names used on disk.
pl_to_split_map = {"fit": "train", "validate": "dev", "test": "test", "predict": "test"}
class SciRepTrain(pl.LightningModule):
    """Multi-task training module for SciRepEval.

    Wraps a shared transformer encoder (optionally extended with PALS layers,
    task adapters or adapter fusion) plus one head per classification /
    regression task, and trains all tasks jointly on a mixed batch stream.
    """

    def __init__(self, batch_size: int, init_lr: float, peak_lr: float, tokenizer: str, model: str, warmup_steps: int,
                 log_dir: str,
                 use_ctrl_tokens=False,
                 task_dict: Dict[str, TaskFamily] = None,
                 pals_cfg: str = None, adapter_type: str = None, max_len: int = 512, load_adapters_as=None):
        super().__init__()
        self.task_dict = load_tasks() if not task_dict else task_dict
        print(self.task_dict.keys())
        # One trainable head per task that declares one (classification/regression).
        self.heads = torch.nn.ModuleDict(
            {t.name: t.head for t in self.task_dict.values() if t.head}
        )
        self.init_loss = None
        # Fixed task -> index mapping used to address the per-task loss vector.
        self.task_idx = {t: i for i, t in enumerate(self.task_dict)}
        self.loss_wt = torch.ones(len(self.task_dict)).float()
        init_weights(self.heads.values())
        self.warmup_steps = warmup_steps
        self.multi_train = None
        self.multi_test = None
        self.multi_val = None
        self.pals = pals_cfg is not None
        self.adapters = adapter_type is not None
        self.use_ctrl_tokens = use_ctrl_tokens
        # Collect the distinct control tokens across all tasks; a task may carry
        # a single token (str) or a {"query": ..., "candidates": ...} dict.
        spl_ctrl_tokens = set()
        for t in self.task_dict.values():
            if type(t.ctrl_token) == str:
                spl_ctrl_tokens.add(t.ctrl_token)
            else:
                spl_ctrl_tokens.update(t.ctrl_token.values())
        spl_ctrl_tokens = sorted(list(spl_ctrl_tokens))
        task_ids = spl_ctrl_tokens
        self.tokenizer = AutoTokenizer.from_pretrained(tokenizer)
        if self.adapters:
            adapters_dir = f'{log_dir}/model/adapters/' if not load_adapters_as else load_adapters_as
            # load_adapters_as may be a JSON-encoded dict of pretrained adapters.
            try:
                adapters_dir = json.loads(adapters_dir)
            except:
                pass
            self.encoder = AdapterFactory.get_adapter(model, task_ids,
                                                      adapter_type == "fusion", adapters_dir)
        else:
            self.encoder = AutoModel.from_pretrained(model)
            if self.pals:
                self.encoder = BertPalsEncoder(f"bert_pals_config/{pals_cfg}", task_ids, self.encoder)
        if self.use_ctrl_tokens:
            print("Using Control Tokens", spl_ctrl_tokens)
            # Register the control tokens with the tokenizer and grow the
            # embedding matrix accordingly.
            special_tokens_dict = {'additional_special_tokens': spl_ctrl_tokens}
            num_added_toks = self.tokenizer.add_special_tokens(special_tokens_dict)
            self.encoder.resize_token_embeddings(len(self.tokenizer))
        self.batch_size = batch_size
        self.init_lr = init_lr
        self.peak_lr = peak_lr
        self.max_len = max_len
        self.save_hyperparameters(ignore=["task_dict"])

    def forward(self, input_ids, attention_mask=None, token_idx=0, task_id=None):
        """Encode a batch and return the embedding at ``token_idx``.

        ``token_idx`` is 0 for the [CLS] token, or 1 when a control token was
        prepended; ``task_id`` routes PALS/adapter variants to the right task.
        """
        if not self.pals:
            embedding = self.encoder(input_ids, attention_mask=attention_mask) if not self.adapters else self.encoder(
                input_ids,
                attention_mask=attention_mask,
                task_id=task_id)
            return embedding.last_hidden_state[:, token_idx, :]
        else:
            # BertPalsEncoder already returns the hidden-state tensor directly.
            embedding = self.encoder(input_ids, attention_mask=attention_mask, task_id=task_id)
            return embedding[:, token_idx, :]

    def configure_optimizers(self):
        """Prepare optimizer and schedule (linear warmup and decay)"""
        # NOTE(review): both parameter groups use weight_decay=0.0, so the
        # no_decay split currently has no effect — confirm whether decay was
        # intended for the first group.
        no_decay = ["bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [p for n, p in self.named_parameters() if
                           p.requires_grad and not any(nd in n for nd in no_decay)],
                "weight_decay": 0.0,
            },
            {
                "params": [p for n, p in self.named_parameters() if
                           p.requires_grad and any(nd in n for nd in no_decay)],
                "weight_decay": 0.0,
            }
        ]
        optimizer = AdamW(
            optimizer_grouped_parameters, lr=self.init_lr, eps=1e-8
        )
        self.opt = optimizer
        if self.pals or self.adapters:
            # 77500 is a hard-coded total step count for the linear decay —
            # presumably tuned to the full training run; confirm before
            # changing batch size / epochs.
            scheduler = get_linear_schedule_with_warmup(optimizer, self.warmup_steps, 77500)
        else:
            scheduler_config = InverseSquareRootScheduleConfig(warmup_updates=self.warmup_steps,
                                                               warmup_init_lr=self.init_lr,
                                                               lr=self.peak_lr)
            scheduler = InverseSquareRootSchedule(scheduler_config, optimizer)
        return {
            "optimizer": optimizer,
            "lr_scheduler": {
                "scheduler": scheduler,
                "interval": "step",
                "frequency": 1}
        }

    def calc_loss(self, train_batch, batch_idx):
        """Compute the loss of every task present in ``train_batch``.

        Returns a vector of length len(task_dict) holding each task's mean loss
        at its ``task_idx`` position (zero for tasks absent from this batch).
        """
        # NOTE(review): `.cuda()` assumes GPU training — confirm CPU runs are unsupported.
        losses, loss_per_task = [], torch.zeros(len(self.task_dict)).cuda()
        scl = torch.tensor(0.0)
        for name, batch in train_batch.items():
            task = self.task_dict[name]
            # With a control token prepended, the embedding is read at index 1.
            idx = 0 if not self.use_ctrl_tokens else 1
            task_id = task.ctrl_token
            if task.type not in set(["classification", "regression"]):
                # Triplet-style task: (query, positive, negative) encodings.
                query, pos, neg = batch[0][0], batch[0][1], batch[0][2]
                query_ctrl = cand_ctrl = task_id
                if type(task_id) == dict:
                    query_ctrl = task_id["query"]
                    cand_ctrl = task_id["candidates"]
                query_emb, pos_emb, neg_emb = self(query['input_ids'], query['attention_mask'], idx, query_ctrl), self(
                    pos['input_ids'], pos['attention_mask'], idx, cand_ctrl), self(neg['input_ids'],
                                                                                   neg['attention_mask'], idx,
                                                                                   cand_ctrl)
                curr_loss = task.loss(query_emb, pos_emb, neg_emb)
            else:
                x, y = batch[0], batch[1]
                encoding = self(x['input_ids'], x['attention_mask'], idx, task_id)
                logits = self.heads[name](encoding)
                if task.type == "regression":
                    logits = logits.squeeze()
                curr_loss = task.loss(logits, y)
                if task.multi_label:
                    curr_loss = torch.mean(curr_loss, dim=1)
                elif task.contrastive_loss:
                    # Blend cross-entropy with a supervised contrastive term.
                    scl = task.contrastive_loss(encoding, y, self.heads[name].num_labels)
                    curr_loss = 0.1 * curr_loss + 0.9 * scl
            loss_per_task[self.task_idx[name]] = torch.mean(curr_loss)
        return loss_per_task

    def training_step(self, train_batch, batch_idx):
        """Sum the per-task losses into one scalar and log it for this step."""
        loss_per_task = self.calc_loss(train_batch, batch_idx)
        loss = torch.sum(loss_per_task)
        self.log("train_loss", loss, prog_bar=True, on_step=True, on_epoch=True, batch_size=self.batch_size)
        self.log("lr", self.lr_schedulers().get_last_lr()[-1], on_step=True, on_epoch=False, prog_bar=True, logger=True)
        return {"loss": loss}

    def validation_step(self, train_batch, batch_idx) -> Optional[STEP_OUTPUT]:
        """Log the per-task validation losses (summed across DDP ranks) and the total."""
        loss_per_task = self.calc_loss(train_batch, batch_idx)
        # loss_per_task = torch.mul(self.loss_wt.cuda(), loss_per_task)
        loss = torch.sum(loss_per_task)
        # Aggregate the per-task losses across processes before logging them.
        dist_loss_per_task = loss_per_task.clone().data
        dist_loss_per_task = sync_ddp_if_available(dist_loss_per_task, reduce_op=ReduceOp.SUM)
        for task in self.task_dict:
            self.log(f"val_loss_{task}", dist_loss_per_task[self.task_idx[task]], on_step=True, on_epoch=True,
                     prog_bar=False,
                     batch_size=self.batch_size, rank_zero_only=True)
        self.log("val_loss", loss, on_step=True, on_epoch=False, prog_bar=True)
        self.log("avg_val_loss", loss, on_epoch=True, prog_bar=True, sync_dist=True, batch_size=self.batch_size)
        return {"val_loss": loss}

    def load_data(self, split) -> CustomChainDataset:
        """Build the mixed multi-task streaming dataset for ``split`` ("train"/"dev").

        Each task contributes a streaming dataset (local JSON files or the
        HuggingFace hub) and all are chained with proportional batching; the
        result is stored on ``self.multi_train`` / ``self.multi_val``.
        """
        hf_split = "validation" if split == "dev" else "train"
        dataset_list = []
        task_dataset_map = {"classification": ClassificationDataset, "regression": RegressionDataset, "ir": IRDataset}
        for t_name, task in self.task_dict.items():
            data_file = {hf_split: task.data_files[split]} if task.data_files else None
            dataset_name = (task.dataset, hf_split)
            data_src = data_file if data_file else dataset_name
            op_token = task.ctrl_token if self.use_ctrl_tokens else None
            if type(data_src) == dict:
                data = datasets.load_dataset("json", data_files=data_src, streaming=True)[
                    next(iter(data_src.keys()))]
            else:
                data = datasets.load_dataset(**data_src[0], split=data_src[1], streaming=True)
            kwargs = {"data": data, "ctrl_token": op_token, "max_len": self.max_len, "task_name": t_name,
                      "tokenizer": self.tokenizer, "fields": task.input_fields,
                      "sample_size": task.sample_size[split] if type(task.sample_size) == dict else task.sample_size}
            if task.type == "classification":
                kwargs.update({"label_field": task.labels_field, "labels": task.labels})
            elif task.type == "regression":
                kwargs.update({"label_field": task.labels_field})
            if task.multi_label:
                dataset_list.append(MultiLabelClassificationDataset(**kwargs))
            else:
                # Tasks of unknown type default to triplet-style training.
                dataset_list.append(task_dataset_map.get(task.type, TripletDataset)(**kwargs))
        multi_dataset = CustomChainDataset(dataset_list, batch_size=self.batch_size,
                                           device_rank=self.trainer.global_rank, num_devices=self.trainer.world_size,
                                           batching_strategy=BatchingStrategy.MIXED_PROPORTIONAL)
        if split == "train":
            self.multi_train = multi_dataset
        elif split == "dev":
            self.multi_val = multi_dataset

    def setup(self, stage: Optional[str] = None) -> None:
        """Lightning hook: materialize the training dataset before fitting."""
        self.load_data("train")

    def train_dataloader(self) -> TRAIN_DATALOADERS:
        return DataLoader(self.multi_train, batch_size=self.batch_size, collate_fn=multi_collate, num_workers=4,
                          pin_memory=True)

    def val_dataloader(self) -> EVAL_DATALOADERS:
        self.load_data("dev")
        return DataLoader(self.multi_val, batch_size=self.batch_size, collate_fn=multi_collate, num_workers=2)

    @rank_zero_only
    def on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
        """Also export the tokenizer and raw encoder next to the Lightning checkpoint."""
        try:
            logger = self.logger
            log_dir = f'{logger.save_dir}/{logger.name}/{logger.version}/checkpoints'
            self.tokenizer.save_pretrained(f'{log_dir}/tokenizer/')
            self.tokenizer.save_vocabulary(f'{log_dir}/tokenizer/')
            self.encoder.save_pretrained(f'{log_dir}/model')
        except:
            print("Exception encountered while saving, try agin from checkpoint")
# CLI entry point: parse hyperparameters, build the multi-task module and run
# Lightning training with checkpointing on avg_val_loss.
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--tasks-config', help='path to the task config file', default="sample_data/tasks_config.json")
    parser.add_argument('model', help='HuggingFace model to be used')
    parser.add_argument('--tokenizer', help='HuggingFace tokenizer to be used (same as model name if not supplied)',
                        default=None)
    parser.add_argument('--output', help='dir to save checkpoints and finetuned model', default="./lightning_logs/")
    parser.add_argument('version', help='experiment version')
    parser.add_argument('--pals-config', default=None, help='path to config file for PALS architecture')
    parser.add_argument('--adapter-type', default=None, help='type of adapter architecture (single/fusion)')
    parser.add_argument('--adapters-chkpt', default=None,
                        help='Adapters to be loaded either from a directory path or a dictionary of pretrained huggingface adapters with id')
    parser.add_argument('--batch-size', type=int, default=16, help='batch size')
    parser.add_argument('--lr', type=float, default=1e-4, help='initial learning rate')
    parser.add_argument('--peak-lr', type=float, default=5e-5, help='initial learning rate')
    parser.add_argument('--warmup', type=int, default=700, help='number of warmup steps')
    parser.add_argument('--epochs', type=int, default=2, help='number of epochs')
    parser.add_argument('--grad-accum', type=int, default=8, help='grad accumulation steps')
    parser.add_argument('--ctrl-tokens', action='store_true', default=False, help='use control codes for tasks')
    parser.add_argument('--gpu', type=int, default=None, help='number of gpus')
    parser.add_argument('--max-len', type=int, default=512, help='max sequence length')
    parser.add_argument('--val-check_interval', type=float, default=1.0, help='validation loop interval')
    parser.add_argument('--checkpoint', default=None, help='resume from checkpoint path')
    args = parser.parse_args()
    # Head sizes depend on the encoder's hidden size, read from the model config.
    mconfig = AutoConfig.from_pretrained(args.model)
    tasks_dict = load_tasks(args.tasks_config, mconfig.hidden_size)
    log_dir = args.output
    logger = TensorBoardLogger(
        save_dir=log_dir,
        version=args.version,
        name='full_run',
    )
    # NOTE: the ModelCheckpoint `filename` below is a Lightning template — its
    # {epoch}/{avg_val_loss} placeholders must NOT be turned into an f-string.
    filepath = f'{log_dir}/{logger.name}/{logger.version}/checkpoints/'
    checkpoint_callback = ModelCheckpoint(
        dirpath=filepath,
        filename='ep-{epoch}_avg_val_loss-{avg_val_loss:.3f}',
        save_top_k=4,
        verbose=True,
        monitor='avg_val_loss',  # monitors metrics logged by self.log.
        mode='min'
    )
    model = SciRepTrain(batch_size=args.batch_size, init_lr=args.lr,
                        peak_lr=args.peak_lr,
                        tokenizer=args.tokenizer if args.tokenizer else args.model,
                        model=args.model,
                        warmup_steps=args.warmup,
                        use_ctrl_tokens=args.ctrl_tokens, task_dict=tasks_dict, pals_cfg=args.pals_config,
                        adapter_type=args.adapter_type, log_dir=filepath, max_len=args.max_len,
                        load_adapters_as=args.adapters_chkpt)
    hparams = {"gpus": args.gpu, "val_check_interval": args.val_check_interval, "num_sanity_val_steps": 4,
               "max_epochs": args.epochs,
               "accumulate_grad_batches": args.grad_accum, "resume_from_checkpoint": args.checkpoint}
    trainer = pl.Trainer(logger=logger,
                         strategy="ddp" if hparams["gpus"] else None,
                         enable_checkpointing=True,
                         callbacks=[checkpoint_callback],
                         precision=16,
                         **hparams)
    logger.log_hyperparams(hparams)
    logger.log_hyperparams({"tasks": {k: str(v) for k, v in tasks_dict.items()}})
    trainer.fit(model)
| 16,448 | 49.612308 | 141 | py |
scirepeval | scirepeval-main/training/strategies.py | import enum
from abc import ABC, abstractmethod
from typing import Iterable
from torch.utils.data import Dataset
import random
class BatchWrapper(ABC):
    """Interface for strategies that merge several datasets into one example stream."""

    @abstractmethod
    def get_batch_iter(self, datasets: Iterable[Dataset], batch_size: int):
        """Return an iterator over examples drawn from ``datasets``."""
        ...
class SequentialBatching(BatchWrapper):
    """Exhausts each dataset completely before moving on to the next one."""

    def get_batch_iter(self, datasets: Iterable[Dataset], batch_size: int):
        """Yield every example of every dataset, in dataset order."""
        for dataset in datasets:
            yield from dataset
class MixedBatching(BatchWrapper):
    """Interleaves the datasets by drawing each example from a randomly chosen source."""

    def get_batch_iter(self, datasets: Iterable[Dataset], batch_size: int):
        """Yield examples one at a time from random, not-yet-exhausted iterators."""
        active = [iter(ds) for ds in datasets]
        while active:
            chosen = random.choice(active)
            try:
                item = next(chosen)
            except StopIteration:
                # Source drained: stop sampling from it.
                active.remove(chosen)
            else:
                yield item
class ProportionalBatching(BatchWrapper):
    # Round-robins over the datasets, drawing batch_size / num_active examples
    # from each in turn so every still-active dataset contributes roughly
    # equally to each effective batch.
    def get_batch_iter(self, datasets: Iterable[Dataset], batch_size: int):
        iters = list(map(iter, datasets))
        di = 0  # cursor into the list of active iterators
        while iters:
            it = iters[di]
            i = 0
            try:
                # Per-dataset quota, recomputed as datasets drop out.
                rng = round(batch_size / len(iters))
                while i < rng:
                    x = next(it)
                    i += 1
                    yield x
                else:
                    # while/else: runs only when the quota was filled without
                    # StopIteration — advance to the next dataset.
                    di = (di + 1) % len(iters)
            except StopIteration:
                # Dataset exhausted mid-quota: drop it and re-clamp the cursor.
                iters.remove(it)
                if di >= len(iters):
                    di = 0
class TaskBasedBatching(BatchWrapper):
    # Emits one full batch per dataset at a time, cycling over the datasets,
    # so each batch contains examples from a single task.
    def get_batch_iter(self, datasets: Iterable[Dataset], batch_size: int):
        iters = list(map(iter, datasets))
        di = 0  # cursor into the list of active iterators
        while iters:
            it = iters[di]
            i = 0
            try:
                while i < batch_size:
                    x = next(it)
                    i += 1
                    yield x
                else:
                    # while/else: full batch produced without StopIteration —
                    # advance to the next dataset.
                    di = (di + 1) % len(iters)
            except StopIteration:
                # Dataset exhausted mid-batch: drop it and re-clamp the cursor.
                iters.remove(it)
                if di >= len(iters):
                    di = 0
class BatchingStrategy(enum.Enum):
    """Available multi-dataset batching strategies.

    Each member's value is a ready-to-use strategy instance whose
    ``get_batch_iter`` merges several datasets into one example stream.
    """
    # BUG FIX: stray concatenation garbage ("| 2,200 | ...") was fused onto the
    # last member's line, breaking the module — removed.
    SEQUENTIAL = SequentialBatching()
    MIXED_RANDOM = MixedBatching()
    MIXED_PROPORTIONAL = ProportionalBatching()
    TASK_PER_BATCH = TaskBasedBatching()
scirepeval | scirepeval-main/training/mtl_datasets.py | import decimal
from typing import Iterator, Tuple, List, Dict, Union, Any, Iterable
import torch
from torch.utils.data import IterableDataset, DataLoader, ChainDataset, get_worker_info
from torch.utils.data.dataset import T_co, Dataset
from transformers import PreTrainedTokenizer, BatchEncoding, AutoTokenizer
import datasets
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MultiLabelBinarizer
from skmultilearn.model_selection import IterativeStratification
from abc import ABC, abstractmethod
import itertools
from torch.utils.data._utils.collate import default_collate
from collections import defaultdict
from strategies import BatchingStrategy
import random
# Silence HuggingFace datasets' info/progress logging; keep errors only.
datasets.logging.set_verbosity_error()
class AbstractMultiTaskDataset(ABC, IterableDataset):
    """Base class for the streaming per-task datasets.

    Handles sub-sampling of the example stream, building the input text from
    the configured record fields, tokenization and optional control-token
    prefixing. Subclasses implement ``preprocess`` to map one raw record to a
    (task_name, encoded input[, label]) tuple.
    """

    def __init__(self, task_name: str, data: datasets.Dataset, tokenizer: PreTrainedTokenizer,
                 fields: List[str],
                 sample_size, ctrl_token: str, max_len: int):
        self.task_name = task_name
        self.data = data
        self.tokenizer = tokenizer
        # Record fields (e.g. title/abstract) joined with the sep token into the input text.
        self.fields = fields
        # Requested number of examples; -1 means "use the full stream".
        self.sample_size = sample_size
        # Control token (str) or {"query"/"candidates": token} dict prepended to inputs.
        self.ctrl_token = ctrl_token
        self.max_len = max_len
        # Subclasses may shrink this (e.g. IRDataset emits up to 5 triplets per record).
        self._effective_sample_size = sample_size

    def sub_sample(self, json_parse: Iterator[Dict]) -> Iterator:
        """Yield at most ``effective_sample_size`` records from the stream."""
        curr_len = 0
        try:
            for _ in range(self.effective_sample_size):
                curr_len += 1
                yield next(json_parse)
        except StopIteration:
            # BUG FIX: the message previously hard-coded "34,943" as the dataset
            # size; report the actual number of records yielded instead
            # (curr_len is bumped before the failing next(), hence the -1),
            # matching the messages printed by the subclasses.
            print(
                f"Reqd sample size {self.effective_sample_size} greater than {self.task_name} dataset size {curr_len - 1}, using complete dataset")

    @abstractmethod
    def preprocess(self, line: Dict[str, str]) -> Union[
        Tuple[str, BatchEncoding, torch.Tensor], List[Tuple[str, List[BatchEncoding]]]]:
        """Convert one raw record into the task's training sample(s)."""
        pass

    def postprocess_iter(self, curr_iter):
        """Hook for subclasses to transform the preprocessed stream (default: identity)."""
        return curr_iter

    @property
    def effective_sample_size(self):
        return self._effective_sample_size

    @effective_sample_size.setter
    def effective_sample_size(self, val):
        self._effective_sample_size = val

    def __iter__(self) -> Iterator[T_co]:
        json_parse = iter(self.data)
        if self.sample_size == -1:
            map_itr = map(self.preprocess, json_parse)
        else:
            map_itr = map(self.preprocess, self.sub_sample(json_parse))
        return self.postprocess_iter(map_itr)

    def tokenized_input(self, input_data: Union[Dict[str, str], str], ctrl_token_key: str = None) -> BatchEncoding:
        """Tokenize a record (dict of fields) or a plain string into fixed-length tensors.

        Returns a dict with flattened ``input_ids`` and ``attention_mask``.
        """
        text = []
        if type(input_data) == dict:
            for field in self.fields:
                if input_data[field]:
                    # Numeric fields (e.g. year) are stringified before joining.
                    if type(input_data[field]) in set([decimal.Decimal, float]):
                        input_data[field] = str(int(input_data[field]))
                    text.append(input_data[field])
            text = (f" {self.tokenizer.sep_token} ".join(text)).strip()
        else:
            text = input_data
        if self.ctrl_token:
            # Dict-valued ctrl_token holds per-role tokens, selected by key.
            ctrl_token = self.ctrl_token if not ctrl_token_key else self.ctrl_token[ctrl_token_key]
            text = ctrl_token + " " + text
        input_ids = self.tokenizer(text, padding="max_length", truncation=True, return_tensors="pt",
                                   max_length=self.max_len)
        return {"input_ids": input_ids["input_ids"].flatten(), "attention_mask": input_ids["attention_mask"].flatten()}
class ClassificationDataset(AbstractMultiTaskDataset):
    # Streaming dataset for single-label classification tasks; labels maps the
    # raw label string to its class index.
    def __init__(self, task_name: str, data: datasets.Dataset, tokenizer: PreTrainedTokenizer,
                 fields: List[str],
                 label_field: str, labels: Dict[str, int], sample_size=-1, ctrl_token: str = None, max_len: int = 512):
        super().__init__(task_name, data, tokenizer, fields, sample_size, ctrl_token, max_len)
        self.labels = labels
        self.label_field = label_field

    def label_transform(self, label_raw: str) -> Union[int, np.ndarray]:
        # Raw label string -> integer class index.
        return self.labels[label_raw]

    def preprocess(self, line: Dict[str, str]) -> Tuple[str, BatchEncoding, int]:
        # Splits the line into text and label and applies preprocessing to the text
        label = line[self.label_field]
        input_ids = self.tokenized_input(line)
        return self.task_name, input_ids, self.label_transform(label)

    def sub_sample(self, json_parse: Iterator[Dict]) -> Iterator:
        # Stratified sub-sampling: preserve the class distribution in the
        # sampled subset. NOTE: this materializes the whole stream in memory.
        # json_itr_list = itertools.tee(json_parse, 2)
        # json_parse = json_itr_list[0]
        X, y = zip(*[(d, self.labels[d[self.label_field]]) for d in json_parse])
        X, y = np.array(X), np.array(y)
        if X.shape[0] < self.effective_sample_size:
            print(
                f"Reqd sample size {self.effective_sample_size} greater than {self.task_name} dataset size {X.shape[0]}, using complete dataset")
            X_sub = X
        else:
            # Fixed random_state keeps the sampled subset reproducible.
            X_sub, _, _, _ = train_test_split(X, y, train_size=self.effective_sample_size, random_state=42, stratify=y)
        for d in X_sub:
            yield d
class MultiLabelClassificationDataset(ClassificationDataset):
    # Streaming dataset for multi-label classification: labels are binarized
    # into a fixed-order multi-hot vector.
    def __init__(self, task_name: str, data: datasets.Dataset, tokenizer: PreTrainedTokenizer,
                 fields: List[str],
                 label_field: str, labels: Dict[str, int], sample_size=-1, ctrl_token: str = None, max_len: int = 512):
        super().__init__(task_name, data, tokenizer, fields, label_field, labels, sample_size, ctrl_token, max_len)
        # Sort labels so the multi-hot vector has a deterministic column order.
        self.labels = dict(sorted(labels.items()))
        self.mlb = MultiLabelBinarizer()
        self.mlb.fit([list(self.labels.keys())])

    def label_transform(self, label_raw: List[str]) -> Union[int, np.ndarray]:
        # List of label strings -> float multi-hot vector over all known labels.
        return self.mlb.transform([label_raw]).flatten().astype(float)

    def sub_sample(self, json_parse: Iterator[Dict]) -> Iterator:
        # Multi-label stratified sub-sampling via iterative stratification.
        # NOTE: this materializes the whole stream in memory.
        X, y = zip(*[(d, tuple(d[self.label_field])) for d in json_parse])
        X, y = np.array(X), self.mlb.transform(y)
        if X.shape[0] < self.effective_sample_size:
            print(
                f"Reqd sample size {self.effective_sample_size} greater than {self.task_name} dataset size {X.shape[0]}, using complete dataset")
            X_sub = X
        else:
            # Split into (keep, discard) folds of the requested proportion and
            # take the fold whose indices give the sampled subset.
            sub_sample_ratio = self.effective_sample_size / X.shape[0]
            stratifier = IterativeStratification(n_splits=2, order=1,
                                                 sample_distribution_per_fold=[sub_sample_ratio,
                                                                               1 - sub_sample_ratio, ])
            _, indices = next(stratifier.split(X, y))
            X_sub = X[indices]
        for d in X_sub:
            yield d
class IRDataset(AbstractMultiTaskDataset):
    # Streaming dataset for retrieval tasks: each record holds a query and
    # scored candidates, expanded into up to 5 (query, pos, neg) triplets.
    def __init__(self, task_name: str, data: datasets.Dataset, tokenizer: PreTrainedTokenizer,
                 fields: List[str],
                 sample_size=-1, ctrl_token: str = None, max_len: int = 512):
        super().__init__(task_name, data, tokenizer, fields, sample_size, ctrl_token, max_len)
        # Each record yields up to 5 triplets, so read 5x fewer records.
        self.effective_sample_size //= 5

    def preprocess(self, line: Dict[str, str]) -> List[Tuple[str, List[BatchEncoding]]]:
        # Splits the line into text and label and applies preprocessing to the text
        query, candidates = line["query"], line["candidates"]
        # Candidates with a truthy score are positives, the rest negatives.
        pos_candidates, neg_candidates = [c for c in candidates if c["score"]], [c for c in candidates if
                                                                                not c["score"]]
        num_trips = min(5, len(neg_candidates))
        new_pos_candidates = pos_candidates.copy()
        if pos_candidates:
            # Recycle positives until there is one per triplet.
            pos_candidates = itertools.cycle(pos_candidates)
            while len(new_pos_candidates) < num_trips:
                new_pos_candidates.append(next(pos_candidates))
        # With a dict-valued control token, query and candidates get different tokens.
        query_ctrl_key, cand_ctrl_key = None, None
        if type(self.ctrl_token) == dict:
            query_ctrl_key = "query"
            cand_ctrl_key = "candidates"
        tokenized_query = self.tokenized_input(query, query_ctrl_key)
        for pos in new_pos_candidates[:num_trips]:
            neg = neg_candidates.pop()
            tokenized_pos = self.tokenized_input(pos, cand_ctrl_key)
            tokenized_neg = self.tokenized_input(neg, cand_ctrl_key)
            yield (self.task_name, [tokenized_query, tokenized_pos, tokenized_neg])

    def postprocess_iter(self, curr_iter):
        # preprocess is a generator function, so curr_iter yields generators of
        # triplets; accumulate ~1000 triplets at a time and shuffle them so the
        # per-query triplets don't arrive back-to-back.
        # chained_iter = itertools.chain(*curr_iter)
        batched_list = []
        try:
            while True:
                while len(batched_list) < 1000:
                    batched_list += next(curr_iter)
                random.shuffle(batched_list)
                for x in batched_list:
                    yield x
                batched_list.clear()
        except StopIteration:
            # Flush the final, partially-filled buffer.
            random.shuffle(batched_list)
            for x in batched_list:
                yield x
class TripletDataset(AbstractMultiTaskDataset):
    """Streaming dataset for tasks whose records already hold query/pos/neg entries."""

    def __init__(self, task_name: str, data: datasets.Dataset, tokenizer: PreTrainedTokenizer,
                 fields: List[str],
                 sample_size=-1, ctrl_token: str = None, max_len: int = 512):
        super().__init__(task_name, data, tokenizer, fields, sample_size, ctrl_token, max_len)

    def preprocess(self, line: Dict[str, str]) -> Union[
        Tuple[str, BatchEncoding, torch.Tensor], List[Tuple[str, List[BatchEncoding]]]]:
        """Tokenize the query/pos/neg entries of one record into a triplet."""
        encoded = [self.tokenized_input(line[part]) for part in ("query", "pos", "neg")]
        return self.task_name, encoded
class CustomChainDataset(ChainDataset):
    # Chains the per-task datasets via a batching strategy and shards the
    # resulting stream across DDP devices and dataloader workers.
    def __init__(self, datasets: Iterable[Dataset], batch_size, device_rank=0, num_devices=1,
                 batching_strategy=BatchingStrategy.SEQUENTIAL):
        super().__init__(datasets)
        self.batch_size = batch_size
        self.batching = batching_strategy
        self.device_rank = device_rank
        self.num_devices = num_devices
        # One "effective batch" spans all devices; each device takes its slice.
        self.effective_batch_size = batch_size * num_devices

    def iter_slice(self, curr_iter, worker_info):
        # Buffer one effective batch at a time, then yield only the items that
        # belong to this device AND this dataloader worker. `idx` is a running
        # global position, so the device/worker assignment stays consistent
        # across batches.
        curr_batch, idx = dict(), 0
        try:
            while True:
                for _ in range(self.effective_batch_size):
                    curr_batch[idx] = next(curr_iter)
                    idx += 1
                for i, x in curr_batch.items():
                    if (i // self.batch_size) % self.num_devices == self.device_rank:
                        if (i // self.effective_batch_size) % worker_info.num_workers == worker_info.id:
                            yield x
                curr_batch.clear()
        except StopIteration:
            # NOTE(review): a trailing partial batch is dropped — presumably to
            # keep DDP ranks in lockstep; confirm this is intended.
            curr_batch.clear()

    def __iter__(self):
        batch_itr = self.batching.value.get_batch_iter(self.datasets, self.effective_batch_size)
        worker_info = get_worker_info()
        # worker_info is None in the main process (num_workers=0): no slicing.
        if worker_info:
            batch_itr = self.iter_slice(batch_itr, worker_info)
        return batch_itr
class RegressionDataset(AbstractMultiTaskDataset):
    """Streaming dataset for scalar-target regression tasks."""

    def __init__(self, task_name: str, data: datasets.Dataset, tokenizer: PreTrainedTokenizer,
                 fields: List[str],
                 label_field: str, sample_size=-1, ctrl_token: str = None, max_len: int = 512):
        super().__init__(task_name, data, tokenizer, fields, sample_size, ctrl_token, max_len)
        self.label_field = label_field

    def preprocess(self, line: Dict[str, str]) -> Tuple[str, Dict[str, BatchEncoding], Union[int, float]]:
        """Tokenize one record and pair it with its float32 target value."""
        target = np.float32(line[self.label_field])
        encoded = self.tokenized_input(line)
        return self.task_name, encoded, target
def multi_collate(batch: List[Any]) -> Dict[str, List[Any]]:
    """Group a mixed-task batch by task name and collate each group separately.

    Each element of ``batch`` is a tuple whose first item is the task name;
    the remaining items of each tuple are collated with torch's default
    collate function, per task.
    """
    grouped: Dict[str, List[Any]] = defaultdict(list)
    for sample in batch:
        task_name, *rest = sample
        grouped[task_name].append(tuple(rest))
    return {task_name: default_collate(samples) for task_name, samples in grouped.items()}
if __name__ == '__main__':
    # Smoke test: build one dataset per task type and iterate a few batches.
    tokenizer = AutoTokenizer.from_pretrained("allenai/specter")
    # Extra control token prepended to inputs of classification-style tasks.
    tokenizer.add_special_tokens({'additional_special_tokens': ["[CLF]"]})
    with open("sample_data/mesh_descriptors.txt", "r") as f:
        labels = f.readlines()
    # Map label text -> integer id, in file order.
    labels = {l.strip(): i for i, l in enumerate(labels)}
    cls_dataset = ClassificationDataset(task_name="mesh", data=
    datasets.load_dataset("json", data_files="../../scidocs/data/mesh_plus/train.json", streaming=True)["train"],
                                        tokenizer=tokenizer,
                                        fields=["title", "abstract"],
                                        label_field="descriptor", labels=labels, sample_size=400000)
    trip_dataset = IRDataset(task_name="s2and", data=
    datasets.load_dataset("json", data_files="sample_data/s2and_small.json", streaming=True)["train"],
                           tokenizer=tokenizer,
                           fields=["title", "abstract"], sample_size=400000)
    specter_dataset = TripletDataset(task_name="specter", data=
    datasets.load_dataset("json", data_files="../../scidocs/data/specter_triplets/train.json", streaming=True)["train"],
                                    tokenizer=tokenizer,
                                    fields=["title", "abstract"], sample_size=400000)
    search_dataset = IRDataset(task_name="search", data=
    datasets.load_dataset("json", data_files="sample_data/search_small.jsonl", streaming=True)["train"],
                           tokenizer=tokenizer,
                           fields=["title", "abstract", "venue", "year"], sample_size=100)
    with open("sample_data/fos_labels.txt", "r") as f:
        mlc_labels = f.readlines()
    mlc_labels = {l.strip(): i for i, l in enumerate(mlc_labels)}
    ml_cls_dataset = MultiLabelClassificationDataset(task_name="fos", data_src="sample_data/fos_small.json",
                                                     tokenizer=tokenizer,
                                                     fields=["title", "abstract"],
                                                     label_field="labels_text", labels=mlc_labels, sample_size=100,
                                                     ctrl_token="[CLF]")
    batch_size = 16
    # Only the multi-label dataset is chained here; the others above are examples.
    multi_dataset = CustomChainDataset([ml_cls_dataset], batch_size=batch_size,
                                       batching_strategy=BatchingStrategy.MIXED_PROPORTIONAL)
    dataloader = DataLoader(multi_dataset, batch_size=batch_size, collate_fn=multi_collate, num_workers=4)
    for i, data in enumerate(dataloader):
        print(i)
        for task, batch in data.items():
            # Triplet-style tasks index one level deeper into the collated batch.
            d = batch[-1][-1] if task in ("s2and", "specter", "search") else batch[-1]
            print(task, d.shape[0])
            print(batch)
| 15,001 | 47.083333 | 145 | py |
scirepeval | scirepeval-main/training/schedulers.py | from dataclasses import dataclass, field
import torch.optim.optimizer
from fairseq.dataclass import FairseqDataclass
from omegaconf import II
from torch.optim.optimizer import Optimizer
"""Code picked up from fairseq implementation and adapted to suit pytorch lightning"""
@dataclass
class InverseSquareRootScheduleConfig(FairseqDataclass):
    """Hyper-parameters for :class:`InverseSquareRootSchedule`."""
    # Number of updates over which the LR is linearly warmed up.
    warmup_updates: int = field(
        default=4000,
        metadata={"help": "warmup the learning rate linearly for the first N updates"},
    )
    # LR at the start of warmup; a negative value means "derive it from lr".
    warmup_init_lr: float = field(
        default=-1,
        metadata={
            "help": "initial learning rate during warmup phase; default is args.lr"
        },
    )
    # Peak learning rate, interpolated from the fairseq optimization config.
    lr: float = II("params.optimization.lr")
class InverseSquareRootSchedule(torch.optim.lr_scheduler._LRScheduler):
"""Decay the LR based on the inverse square root of the update number.
We also support a warmup phase where we linearly increase the learning rate
from some initial learning rate (``--warmup-init-lr``) until the configured
learning rate (``--lr``). Thereafter we decay proportional to the number of
updates, with a decay factor set to align with the configured learning rate.
During warmup::
lrs = torch.linspace(args.warmup_init_lr, args.lr, args.warmup_updates)
lr = lrs[update_num]
After warmup::
decay_factor = args.lr * sqrt(args.warmup_updates)
lr = decay_factor / sqrt(update_num)
"""
def __init__(self, config: InverseSquareRootScheduleConfig, optimizer: Optimizer):
self.config = config
warmup_end_lr = config.lr
if config.warmup_init_lr < 0:
config.warmup_init_lr = 0 if config.warmup_updates > 0 else warmup_end_lr
# linearly warmup for the first args.warmup_updates
self.lr_step = (warmup_end_lr - config.warmup_init_lr) / config.warmup_updates
# then, decay prop. to the inverse square root of the update number
self.decay_factor = warmup_end_lr * config.warmup_updates ** 0.5
# initial learning rate
self.lr = config.warmup_init_lr
super(InverseSquareRootSchedule, self).__init__(optimizer)
def get_lr(self):
"""Update the learning rate after each update."""
num_updates = self._step_count
# self.optimizer.state[self.optimizer.param_groups[0]["params"][-1]]["step"]
lr_list = []
for g in self.optimizer.param_groups:
if num_updates < self.config.warmup_updates:
lr_list.append(self.config.warmup_init_lr + num_updates * self.lr_step)
else:
lr_list.append(self.decay_factor * num_updates ** -0.5)
return lr_list | 2,683 | 36.802817 | 87 | py |
scirepeval | scirepeval-main/training/tasks.py | from typing import Dict
from torch import nn
import torch
import torch.nn.functional as F
import json
class TaskFamily:
    """Bundles the configuration and runtime objects (loss, head) of one task.

    Either ``dataset`` or ``data_files`` must be supplied; otherwise
    construction raises ValueError.
    """

    def __init__(self, name, loss, type, dataset=None, data_files=None, multi_label=False, input_fields=None,
                 labels_field=None, labels=None, ctrl_token=None, head=None, contrastive_loss=None, sample_size=-1):
        if input_fields is None:
            input_fields = ["title", "abstract"]
        self.name = name
        self.dataset = dataset
        self.data_files = data_files
        self.type = type
        self.multi_label = multi_label
        self.loss = loss
        self.contrastive_loss = contrastive_loss
        self.ctrl_token = ctrl_token
        self.head = head
        self.labels = labels
        self.labels_field = labels_field
        self.input_fields = input_fields
        self.sample_size = sample_size
        if not self.dataset and not self.data_files:
            raise ValueError("Either dataset or data_files must be provided")

    def __str__(self):
        """JSON dump of the static configuration.

        Runtime nn.Module members are stripped first: json.dumps cannot
        serialize them. BUG FIX: the original removed only ``head`` and
        ``loss``, so str() raised TypeError whenever ``contrastive_loss``
        was set.
        """
        obj_dict = self.__dict__.copy()
        for field_name in ("head", "loss", "contrastive_loss"):
            obj_dict.pop(field_name, None)
        return json.dumps(obj_dict)
class TaskHead(nn.Module):
    """Task-specific output head: dropout followed by a linear projection.

    Maps a ``dim``-sized encoding to ``num_labels`` logits. Attribute names
    (``dropout``, ``linear``) are part of the checkpoint layout and must not
    be renamed.
    """

    def __init__(self, num_labels, dim=768):
        super().__init__()
        self.num_labels = num_labels
        self.dim = dim
        self.dropout = nn.Dropout(0.2)
        self.linear = nn.Linear(dim, num_labels)

    def forward(self, encoding):
        """Project an encoding batch to per-task logits."""
        hidden = self.dropout(encoding)
        return self.linear(hidden)
class SCLLoss(nn.Module):
    """Supervised contrastive loss, returned per sample (unreduced).

    For each anchor, same-class samples (excluding the anchor itself) act as
    positives; the log-softmax over cosine similarities is averaged over the
    positive set and negated.
    """

    def __init__(self, temp=0.3):
        super(SCLLoss, self).__init__()
        self.temp = temp  # softmax temperature for the similarity logits

    def forward(self, encoding, y, num_classes):
        """Compute the per-sample SCL loss for a batch of encodings.

        encoding: (batch, dim) float tensor; y: (batch,) int labels.
        """
        norm_encoding = F.normalize(encoding, p=2, dim=1)
        dot_prod = torch.matmul(norm_encoding, norm_encoding.T) / self.temp
        # (batch, batch) mask: y_mask[i, j] == 1 iff y[i] == y[j].
        y_mask = torch.nn.functional.one_hot(y, num_classes=num_classes).T[y]
        # Exclude self-similarity via a zero diagonal. BUG FIX: the mask was
        # hard-coded to torch.device('cuda'), which crashed on CPU runs and
        # mismatched non-default CUDA devices; build it on encoding.device.
        diag_mask = torch.ones(y_mask.shape, device=encoding.device).fill_diagonal_(0)
        den = torch.exp(dot_prod) * diag_mask
        inner = dot_prod - torch.log(torch.sum(den, dim=1) + 1e-10)
        con_loss = inner * y_mask * diag_mask
        scl_loss = -1.0 * torch.sum(con_loss, dim=1) / (torch.sum(y_mask, dim=1))
        return scl_loss
# Triplet will be an object of TaskFamily as no separate head needed
class TripletLoss(nn.Module):
    """Margin-based triplet loss over (query, positive, negative) encodings.

    Behavior matches the SPECTER implementation this file originally copied:
    the query-positive distance is pushed below the query-negative distance
    by at least ``margin``.
    """

    def __init__(self, margin=1.0, distance='l2-norm', reduction='mean'):
        """
        Args:
            margin: slack enforced between positive and negative distances.
                Default: ``1``.
            distance: 'l2-norm', 'cosine' (independent of vector length) or
                'dot' (sensitive to vector length).
            reduction: 'none' | 'mean' | 'sum' applied over the per-triplet
                losses. Default: 'mean'.
        """
        super(TripletLoss, self).__init__()
        self.margin = margin
        self.distance = distance
        self.reduction = reduction

    def forward(self, query, positive, negative):
        if self.distance == 'l2-norm':
            pos_dist = F.pairwise_distance(query, positive)
            neg_dist = F.pairwise_distance(query, negative)
            raw = pos_dist - neg_dist + self.margin
        elif self.distance == 'cosine':  # independent of length
            pos_sim = F.cosine_similarity(query, positive)
            neg_sim = F.cosine_similarity(query, negative)
            raw = neg_sim - pos_sim + self.margin
        elif self.distance == 'dot':  # takes into account the length of vectors
            batch = query.shape[0]
            # batch dot product via bmm on (b,1,d) x (b,d,1)
            pos_sim = torch.bmm(query.unsqueeze(1), positive.unsqueeze(2)).reshape(batch, )
            neg_sim = torch.bmm(query.unsqueeze(1), negative.unsqueeze(2)).reshape(batch, )
            raw = neg_sim - pos_sim + self.margin
        else:
            raise TypeError(f"Unrecognized option for `distance`:{self.distance}")
        losses = F.relu(raw)

        if self.reduction == 'none':
            return losses
        if self.reduction == 'mean':
            return losses.mean()
        if self.reduction == 'sum':
            return losses.sum()
        raise TypeError(f"Unrecognized option for `reduction`:{self.reduction}")
def load_tasks(tasks_config_file: str = "sample_data/tasks_config.json", hidden_size=768) -> Dict[str, TaskFamily]:
    """Build TaskFamily objects (with losses and heads) from a JSON config.

    Classification tasks get a label map, a TaskHead, BCE/CE loss and an
    optional SCL contrastive loss; regression tasks get a 1-output head with
    MSE; every other task type is treated as a triplet task.

    BUG FIXES: removed the redundant function-local ``import json`` (json is
    already imported at module level) and stopped leaking the config file
    handle, which ``json.load(open(...))`` never closed.
    """
    def load_labels(labels_file: str) -> Dict[str, str]:
        # One label per line; map label text -> integer index in file order.
        with open(labels_file, "r") as f:
            return {line.strip(): i for i, line in enumerate(f)}

    task_dict = dict()
    with open(tasks_config_file, "r") as f:
        task_config = json.load(f)
    for task in task_config:
        if task["type"] == "classification":
            task["labels"] = load_labels(task["labels"])
            task["head"] = TaskHead(num_labels=len(task["labels"]), dim=hidden_size)
            if task.get("multi_label"):
                task["loss"] = nn.BCEWithLogitsLoss(reduction="none")
            else:
                task["loss"] = nn.CrossEntropyLoss(reduction="none")
            # "contrastive" is a config-only flag, not a TaskFamily parameter.
            use_contrastive = task.pop("contrastive", False)
            if use_contrastive:
                task["contrastive_loss"] = SCLLoss()
        elif task["type"] == "regression":
            task["head"] = TaskHead(num_labels=1, dim=hidden_size)
            task["loss"] = nn.MSELoss(reduction="none")
        else:
            task["loss"] = TripletLoss(reduction="none")
        task_dict[task["name"]] = TaskFamily(**task)
    return task_dict
| 6,393 | 41.065789 | 152 | py |
vqg-unknown | vqg-unknown-master/test.py | from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from utils.region_extract import *
from utils.utils import *
from src.net import ResNet
from src.net import ImageCaption
import chainer
from chainer import Variable, serializers, cuda
import chainer.functions as F
from nltk.corpus import wordnet as wn
import argparse
import pickle
from skimage import io
def main():
    """Generate a question about the unknown object region of an input image.

    Pipeline: pick a target region, extract ResNet features for the region
    and the whole image, map the top class predictions to a common WordNet
    hypernym, then beam-search a question conditioned on its embedding.
    Returns the highest-scoring generated question string.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-label', '--LABEL_PATH', default='./data/synset_words_caffe_ILSVRC12.txt')
    parser.add_argument('-emb', '--WORD_EMB', default='./data/word_embeddings.pickle')
    parser.add_argument('-id', '--WORD_ID', default='./data/word2id.pickle')
    parser.add_argument('-g', '--GPU_ID', type=int, default=0)
    parser.add_argument('-cnn', '--CNN_PATH', default='./data/resnet_152.caffemodel')
    parser.add_argument('-model', '--MODEL_PATH', default='./data/pretrained_model.h5')
    parser.add_argument('-image', '--IMAGE_PATH', default='./data/test.jpg')
    parser.add_argument('-save', '--SAVE_FIG', default='False')
    parser.add_argument('-hyper', '--HYPER_NUM', type=int, default=2)
    args = parser.parse_args()

    # The flag arrives as a string; anything other than 'False' enables saving.
    save_flag = args.SAVE_FIG != 'False'

    # ImageNet label list: the first token of each line is the synset id.
    label_synset_list = open(args.LABEL_PATH).read().split('\n')
    del label_synset_list[-1]  # drop the empty entry after the trailing newline
    label_list = [x.split()[0] for x in label_synset_list]

    with open(args.WORD_EMB, 'rb') as f:
        target2vec = pickle.load(f)
    with open(args.WORD_ID, 'rb') as f:
        word2id = pickle.load(f)
    id2word = {word2id[x]: x for x in word2id.keys()}
    bos = word2id['<s>']
    eos = word2id['</s>']

    feature_num = 2005
    hidden_num = 1024
    vocab_num = len(word2id)
    attr_num = 5
    max_length = 100

    gpu_device = args.GPU_ID
    cuda.get_device_from_id(gpu_device).use()
    # BUG FIX: `xp` was used below but never defined in this module, which
    # raised a NameError at runtime; bind it to CuPy like the rest of the code.
    xp = cuda.cupy

    cnn_model = ResNet(args.CNN_PATH, 152)
    cnn_model.to_gpu(gpu_device)
    CaptionNet = ImageCaption(vocab_num, attr_num, feature_num, hidden_num)
    serializers.load_hdf5(args.MODEL_PATH, CaptionNet)
    CaptionNet.to_gpu(gpu_device)

    image = io.imread(args.IMAGE_PATH)
    target_region = region_extract(args.IMAGE_PATH, cnn_model)
    x, y, w, h = target_region
    region_image = image[y: y + h, x: x + w, :]

    entire_feature = cuda.to_cpu(extract_feature(image, cnn_model).data)
    region_feature = cuda.to_cpu(extract_feature(region_image, cnn_model).data)
    location_vector = locate_feature(image, target_region)
    concat_feature = np.concatenate([region_feature[0], entire_feature[0], location_vector], axis=0)

    # Top predicted classes for the region, mapped to WordNet noun synsets.
    prob = F.softmax(region_feature)
    label_index = np.argsort(cuda.to_cpu(prob.data)[0])[::-1]
    pred_synset_list = [wn._synset_from_pos_and_offset('n', label_convert(label_list[x])) for x in label_index[:4]]
    synset_list = pred_synset_list[:args.HYPER_NUM + 1]
    common_synset = common_hypernyms(synset_list)
    target_synset = common_synset.name()
    try:
        target_vec = target2vec[target_synset]
    except KeyError:
        # BUG FIX: was a bare `except:`; only a missing-embedding lookup
        # should fall back to the mean embedding.
        target_vec = np.mean(np.asarray(list(target2vec.values())), axis=0)

    target_var = Variable(xp.array([target_vec], dtype=xp.float32))
    feature_var = Variable(xp.array([concat_feature], dtype=xp.float32))

    with chainer.using_config('train', False), chainer.no_backprop_mode():
        CaptionNet.image_init(feature_var)
        candidates = [(CaptionNet, [bos], 0)]
        next_candidates = beam_search(candidates, target_var)
        for i in range(max_length):
            next_candidates = beam_search(next_candidates, target_var)
            if all([x[1][-1] == eos for x in next_candidates]):
                break
    result = [k[1] for k in next_candidates]
    generated_question = []
    for token_ids in result:
        # Strip the <s>/</s> sentinels before joining the tokens.
        tokens = [id2word[token_id] for token_id in token_ids[1:-1]]
        generated_question.append(' '.join(tokens))

    if save_flag:
        fig = plt.figure()
        # BUG FIX: the Figure was overwritten by its own subplot; keep the
        # Axes in a separate variable so both objects stay usable.
        ax = fig.add_subplot(111)
        rect = plt.Rectangle((x, y), w, h, edgecolor='red', fill=False, linewidth=3)
        ax.add_patch(rect)
        ax.imshow(image)
        # Save before show(): show() may flush/clear the canvas on some backends.
        plt.savefig('result.png')
        plt.show()
    return generated_question[0]
if __name__ == '__main__':
    # CLI entry point: run the full pipeline and print the generated question.
    question = main()
    print(question)
| 4,368 | 33.952 | 115 | py |
vqg-unknown | vqg-unknown-master/src/feature_extract.py | import pickle
from skimage import io
from scipy.misc import imresize
import chainer
from chainer import Variable, cuda
from chainer.links.model.vision import resnet
import numpy as np
import argparse
from net import ResNet
xp = cuda.cupy
def region_resize(image, region):
    """Crop a region from the image and resize it to a canonical size bucket.

    The bucket is chosen so its aspect ratio is closest to the region's,
    orienting the long side along the region's long side. Bucket sizes are
    multiples of 32 so features align with the CNN's stride.
    """
    x, y, w, h = region
    ratio_options = np.array([36 / 1, 18 / 2, 12 / 3, 9 / 4, 6 / 6])
    shape_options = np.array([[36, 1], [18, 2], [12, 3], [9, 4], [6, 6]]) * 32
    if w > h:
        pick = np.argmin(np.absolute(ratio_options - w / h))
        new_w, new_h = shape_options[pick]
    else:
        pick = np.argmin(np.absolute(ratio_options - h / w))
        new_h, new_w = shape_options[pick]
    crop = image[y: y + h, x: x + w, :]
    return imresize(crop, (int(new_h), int(new_w)), interp='bicubic', mode='RGB')
def image_resize(image):
    """Resize a full image to the canonical size bucket with the closest
    aspect ratio, orienting the long side along the image's long side."""
    h, w, _ = image.shape
    ratio_options = np.array([144 / 1, 72 / 2, 48 / 3, 36 / 4, 24 / 6, 18 / 8, 16 / 9, 12 / 12])
    shape_options = np.array([[144, 1], [72, 2], [48, 3], [36, 4], [24, 6], [18, 8], [16, 9], [12, 12]]) * 32
    if w > h:
        pick = np.argmin(np.absolute(ratio_options - w / h))
        new_w, new_h = shape_options[pick]
    else:
        pick = np.argmin(np.absolute(ratio_options - h / w))
        new_h, new_w = shape_options[pick]
    return imresize(image, (int(new_h), int(new_w)), interp='bicubic', mode='RGB')
def feature_extract(img, model):
    """Run one image through the ResNet and return its features on the CPU."""
    prepared = np.asarray([resnet.prepare(img, size=None)], dtype=np.float32)
    inp = Variable(cuda.to_gpu(prepared))
    with chainer.using_config("train", False):
        return cuda.to_cpu(model(inp).data)
def main():
    """Extract and pickle concatenated region+image ResNet features per QA id.

    For each record: crop the annotated region, resize both the region and
    the full image to canonical aspect buckets, run both through the ResNet,
    and append a 5-dim normalized region-location vector. Records that fail
    (missing image, bad crop, ...) are skipped.
    """
    parser = argparse.ArgumentParser(description='extract features')
    parser.add_argument('--gpu', '-g', default=0, type=int, help='GPU ID')
    parser.add_argument('--ALL_DATA_PATH', '-all', default='../data/processed_data.pickle')
    parser.add_argument('--MODEL_PATH', '-model', default='../data/resnet_152.caffemodel')
    parser.add_argument('--SAVE_PATH', '-save', default='../data/all_feature.pickle')
    args = parser.parse_args()
    gpu_device = args.gpu
    all_data_path = args.ALL_DATA_PATH
    model_path = args.MODEL_PATH
    save_path = args.SAVE_PATH

    with open(all_data_path, 'rb') as f:
        data = pickle.load(f)
    cuda.get_device_from_id(gpu_device).use()
    model = ResNet(model_path, 152)
    model.to_gpu(gpu_device)

    all_dic = {}
    for each_question in data:
        try:
            image_path = each_question['image_path']
            image = io.imread(image_path)
            if image.ndim == 2:
                # Grayscale image: replicate to 3 channels for the CNN.
                image = np.tile(image[:, :, np.newaxis], (1, 1, 3))
            qa_id = each_question['qa_id']
            crop_region = [each_question['x'], each_question['y'], each_question['w'], each_question['h']]
            h, w, _ = image.shape
            x_region, y_region, w_region, h_region = crop_region
            resize_entire = image_resize(image)
            resize_region = region_resize(image, crop_region)
            entire_feature = feature_extract(resize_entire, model)
            region_feature = feature_extract(resize_region, model)
            concat_feature = np.concatenate((region_feature, entire_feature), axis=1)[0]
            # Region location normalized by image size:
            # top-left, bottom-right, and area ratio.
            x_tl, y_tl, x_br, y_br = x_region, y_region, x_region + w_region, y_region + h_region
            region_inf = np.asarray([x_tl/w, y_tl/h, x_br/w, y_br/h, (w_region * h_region)/(w * h)])
            concat_all = np.concatenate((concat_feature, region_inf), axis=0)
            all_dic[qa_id] = concat_all
        except Exception as e:
            # BUG FIX: was a bare `except:` that silently swallowed everything
            # including SystemExit/KeyboardInterrupt; keep the best-effort
            # skip but only for ordinary errors, and report the failure.
            print('skipping record due to error: {}'.format(e))
            continue

    with open(save_path, 'wb') as f:
        pickle.dump(all_dic, f)
if __name__ == '__main__':
    # CLI entry point: extract features for every record and pickle them.
    main()
| 3,779 | 33.363636 | 110 | py |
pip | pip-main/src/pip/_vendor/rich/_export_format.py | CONSOLE_HTML_FORMAT = """\
<!DOCTYPE html>
<head>
<meta charset="UTF-8">
<style>
{stylesheet}
body {{
color: {foreground};
background-color: {background};
}}
</style>
</head>
<html>
<body>
<pre style="font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><code>{code}</code></pre>
</body>
</html>
"""
CONSOLE_SVG_FORMAT = """\
<svg class="rich-terminal" viewBox="0 0 {width} {height}" xmlns="http://www.w3.org/2000/svg">
<!-- Generated with Rich https://www.textualize.io -->
<style>
@font-face {{
font-family: "Fira Code";
src: local("FiraCode-Regular"),
url("https://cdnjs.cloudflare.com/ajax/libs/firacode/6.2.0/woff2/FiraCode-Regular.woff2") format("woff2"),
url("https://cdnjs.cloudflare.com/ajax/libs/firacode/6.2.0/woff/FiraCode-Regular.woff") format("woff");
font-style: normal;
font-weight: 400;
}}
@font-face {{
font-family: "Fira Code";
src: local("FiraCode-Bold"),
url("https://cdnjs.cloudflare.com/ajax/libs/firacode/6.2.0/woff2/FiraCode-Bold.woff2") format("woff2"),
url("https://cdnjs.cloudflare.com/ajax/libs/firacode/6.2.0/woff/FiraCode-Bold.woff") format("woff");
font-style: bold;
font-weight: 700;
}}
.{unique_id}-matrix {{
font-family: Fira Code, monospace;
font-size: {char_height}px;
line-height: {line_height}px;
font-variant-east-asian: full-width;
}}
.{unique_id}-title {{
font-size: 18px;
font-weight: bold;
font-family: arial;
}}
{styles}
</style>
<defs>
<clipPath id="{unique_id}-clip-terminal">
<rect x="0" y="0" width="{terminal_width}" height="{terminal_height}" />
</clipPath>
{lines}
</defs>
{chrome}
<g transform="translate({terminal_x}, {terminal_y})" clip-path="url(#{unique_id}-clip-terminal)">
{backgrounds}
<g class="{unique_id}-matrix">
{matrix}
</g>
</g>
</svg>
"""
# Font family name referenced by SVG export styling.
_SVG_FONT_FAMILY = "Rich Fira Code"
# Prefix used when generating CSS class names for exported SVGs.
_SVG_CLASSES_PREFIX = "rich-svg"
| 2,100 | 26.285714 | 122 | py |
pip | pip-main/tests/unit/test_req_install.py | import os
import tempfile
from pathlib import Path
import pytest
from pip._vendor.packaging.requirements import Requirement
from pip._internal.exceptions import InstallationError
from pip._internal.req.constructors import (
install_req_from_line,
install_req_from_req_string,
)
from pip._internal.req.req_install import InstallRequirement
class TestInstallRequirementBuildDirectory:
    """Tests for build-directory resolution and local-path link handling."""

    # no need to test symlinks on Windows
    @pytest.mark.skipif("sys.platform == 'win32'")
    def test_tmp_build_directory(self) -> None:
        # when req is None, we can produce a temporary directory
        # Make sure we're handling it correctly with real path.
        requirement = InstallRequirement(None, None)
        tmp_dir = tempfile.mkdtemp("-build", "pip-")
        tmp_build_dir = requirement.ensure_build_location(
            tmp_dir,
            autodelete=False,
            parallel_builds=False,
        )
        # The build dir must live next to the symlink-resolved temp dir.
        assert os.path.dirname(tmp_build_dir) == os.path.realpath(
            os.path.dirname(tmp_dir)
        )
        # are we on a system where /tmp is a symlink
        if os.path.realpath(tmp_dir) != os.path.abspath(tmp_dir):
            assert os.path.dirname(tmp_build_dir) != os.path.dirname(tmp_dir)
        else:
            assert os.path.dirname(tmp_build_dir) == os.path.dirname(tmp_dir)
        os.rmdir(tmp_dir)
        assert not os.path.exists(tmp_dir)

    def test_forward_slash_results_in_a_link(self, tmpdir: Path) -> None:
        install_dir = tmpdir / "foo" / "bar"

        # Just create a file for letting the logic work
        setup_py_path = install_dir / "setup.py"
        os.makedirs(str(install_dir))
        with open(setup_py_path, "w") as f:
            f.write("")

        # A POSIX-style local directory path should parse as a link requirement.
        requirement = install_req_from_line(install_dir.as_posix())
        assert requirement.link is not None
class TestInstallRequirementFrom:
    """Tests for the install_req_from_* constructor helpers."""

    def test_install_req_from_string_invalid_requirement(self) -> None:
        """
        Requirement strings that cannot be parsed by
        packaging.requirements.Requirement raise an InstallationError.
        """
        with pytest.raises(InstallationError) as excinfo:
            install_req_from_req_string("http:/this/is/invalid")

        assert str(excinfo.value) == ("Invalid requirement: 'http:/this/is/invalid'")

    def test_install_req_from_string_without_comes_from(self) -> None:
        """
        Test to make sure that install_req_from_string succeeds
        when called with URL (PEP 508) but without comes_from.
        """
        # Test with a PEP 508 url install string:
        wheel_url = (
            "https://download.pytorch.org/whl/cu90/"
            "torch-1.0.0-cp36-cp36m-win_amd64.whl"
        )
        install_str = "torch@ " + wheel_url
        install_req = install_req_from_req_string(install_str)

        # Both the link and the parsed requirement must carry the URL.
        assert isinstance(install_req, InstallRequirement)
        assert install_req.link is not None
        assert install_req.link.url == wheel_url
        assert install_req.req is not None
        assert install_req.req.url == wheel_url
        assert install_req.comes_from is None
        assert install_req.is_wheel

    def test_install_req_from_string_with_comes_from_without_link(self) -> None:
        """
        Test to make sure that install_req_from_string succeeds
        when called with URL (PEP 508) and comes_from
        does not have a link.
        """
        # Test with a PEP 508 url install string:
        wheel_url = (
            "https://download.pytorch.org/whl/cu90/"
            "torch-1.0.0-cp36-cp36m-win_amd64.whl"
        )
        install_str = "torch@ " + wheel_url

        # Dummy numpy "comes_from" requirement without link:
        comes_from = InstallRequirement(Requirement("numpy>=1.15.0"), comes_from=None)

        # Attempt install from install string comes:
        install_req = install_req_from_req_string(install_str, comes_from=comes_from)
        assert isinstance(install_req, InstallRequirement)
        assert isinstance(install_req.comes_from, InstallRequirement)
        assert install_req.comes_from.link is None
        assert install_req.link is not None
        assert install_req.link.url == wheel_url
        assert install_req.req is not None
        assert install_req.req.url == wheel_url
        assert install_req.is_wheel
| 4,326 | 37.292035 | 86 | py |
pyrebox | pyrebox-master/volatility/volatility/plugins/registry/shimcache.py | # Volatility
# Copyright (C) 2008-2013 Volatility Foundation
# Copyright (C) 2011 Jamie Levy (Gleeda) <jamie@memoryanalysis.net>
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
"""
@author: Jamie Levy (gleeda)
@license: GNU General Public License 2.0
@contact: jamie@memoryanalysis.net
@organization: Volatility Foundation
"""
import volatility.plugins.registry.registryapi as registryapi
from volatility.renderers import TreeGrid
import volatility.debug as debug
import volatility.utils as utils
import volatility.obj as obj
import volatility.plugins.common as common
import volatility.addrspace as addrspace
# Structures taken from the ShimCache Whitepaper: https://blog.mandiant.com/archives/2459
#### SHIMRECS ####
shimrecs_type_xp = {
'ShimRecords' : [ None, {
'Magic' : [ 0x0, ['unsigned int']], #0xDEADBEEF
'NumRecords' : [ 0x8, ['short']],
'Entries' : [0x190, ['array', lambda x: x.NumRecords, ['AppCompatCacheEntry']]],
} ],
}
shimrecs_type_2003vista = {
'ShimRecords' : [ None, {
'Magic' : [ 0x0, ['unsigned int']], #0xBADC0FFE
'NumRecords' : [ 0x4, ['int']],
'Entries' : [0x8, ['array', lambda x: x.NumRecords, ['AppCompatCacheEntry']]],
} ],
}
shimrecs_type_win7 = {
'ShimRecords' : [ None, {
'Magic' : [ 0x0, ['unsigned int']], #0xBADC0FEE
'NumRecords' : [ 0x4, ['int']],
'Entries' : [0x80, ['array', lambda x: x.NumRecords, ['AppCompatCacheEntry']]],
} ],
}
#### APPCOMPAT TYPES ####
appcompat_type_xp_x86 = {
'AppCompatCacheEntry' : [ 0x228, {
'Path' : [ 0x0, ['NullString', dict(length = 0x208, encoding = 'utf8')]],
'LastModified' : [ 0x210, ['WinTimeStamp', dict(is_utc = True)]],
'FileSize': [0x218, ['long long']],
'LastUpdate' : [ 0x220, ['WinTimeStamp', dict(is_utc = True)]],
} ],
}
appcompat_type_2003_x86 = {
'AppCompatCacheEntry' : [ 0x18, {
'Length' : [ 0x0, ['unsigned short']],
'MaximumLength' : [0x2, ['unsigned short']],
'PathOffset' : [ 0x4, ['unsigned int']],
'LastModified' : [ 0x8, ['WinTimeStamp', dict(is_utc = True)]],
'FileSize': [0x10, ['_LARGE_INTEGER']],
} ],
}
appcompat_type_vista_x86 = {
'AppCompatCacheEntry' : [ 0x18, {
'Length' : [ 0x0, ['unsigned short']],
'MaximumLength' : [0x2, ['unsigned short']],
'PathOffset' : [ 0x4, ['unsigned int']],
'LastModified' : [ 0x8, ['WinTimeStamp', dict(is_utc = True)]],
'InsertFlags' : [0x10, ['unsigned int']],
'Flags' : [0x14, ['unsigned int']],
} ],
}
appcompat_type_win7_x86 = {
'AppCompatCacheEntry' : [ 0x20, {
'Length' : [ 0x0, ['unsigned short']],
'MaximumLength' : [0x2, ['unsigned short']],
'PathOffset' : [ 0x4, ['unsigned int']],
'LastModified' : [ 0x8, ['WinTimeStamp', dict(is_utc = True)]],
'InsertFlags' : [0x10, ['unsigned int']],
'ShimFlags' : [0x14, ['unsigned int']],
'BlobSize' : [0x18, ['unsigned int']],
'BlobOffset' : [0x1c, ['unsigned int']],
} ],
}
appcompat_type_2003_x64 = {
'AppCompatCacheEntry' : [ 0x20, {
'Length' : [ 0x0, ['unsigned short']],
'MaximumLength' : [0x2, ['unsigned short']],
'PathOffset' : [ 0x8, ['unsigned long long']],
'LastModified' : [ 0x10, ['WinTimeStamp', dict(is_utc = True)]],
'FileSize': [0x18, ['_LARGE_INTEGER']],
} ],
}
appcompat_type_vista_x64 = {
'AppCompatCacheEntry' : [ 0x20, {
'Length' : [ 0x0, ['unsigned short']],
'MaximumLength' : [0x2, ['unsigned short']],
'PathOffset' : [ 0x8, ['unsigned int']],
'LastModified' : [ 0x10, ['WinTimeStamp', dict(is_utc = True)]],
'InsertFlags' : [0x18, ['unsigned int']],
'Flags' : [0x1c, ['unsigned int']],
} ],
}
appcompat_type_win7_x64 = {
'AppCompatCacheEntry' : [ 0x30, {
'Length' : [ 0x0, ['unsigned short']],
'MaximumLength' : [0x2, ['unsigned short']],
'PathOffset' : [ 0x8, ['unsigned long long']],
'LastModified' : [ 0x10, ['WinTimeStamp', dict(is_utc = True)]],
'InsertFlags' : [0x18, ['unsigned int']],
'ShimFlags' : [0x1c, ['unsigned int']],
'BlobSize' : [0x20, ['unsigned long long']],
'BlobOffset' : [0x28, ['unsigned long long']],
} ],
}
class ShimCacheTypesXPx86(obj.ProfileModification):
    """Registers the XP x86 (NT 5.1, 32-bit) ShimCache vtypes on the profile."""
    before = ['WindowsObjectClasses']
    conditions = {'os': lambda x: x == 'windows',
                  'major': lambda x: x == 5,
                  'minor': lambda x: x == 1,
                  'memory_model': lambda x: x == '32bit'}

    def modification(self, profile):
        profile.vtypes.update(shimrecs_type_xp)
        profile.vtypes.update(appcompat_type_xp_x86)
class ShimCacheTypes2003x86(obj.ProfileModification):
    """Registers the 2003 x86 (NT 5.2, 32-bit) ShimCache vtypes on the profile."""
    before = ['WindowsObjectClasses']
    conditions = {'os': lambda x: x == 'windows',
                  'major': lambda x: x == 5,
                  'minor': lambda x: x == 2,
                  'memory_model': lambda x: x == '32bit'}

    def modification(self, profile):
        profile.vtypes.update(shimrecs_type_2003vista)
        profile.vtypes.update(appcompat_type_2003_x86)
class ShimCacheTypesVistax86(obj.ProfileModification):
    """Registers the Vista x86 (NT 6.0, 32-bit) ShimCache vtypes on the profile."""
    before = ['WindowsObjectClasses']
    conditions = {'os': lambda x: x == 'windows',
                  'major': lambda x: x == 6,
                  'minor': lambda x: x == 0,
                  'memory_model': lambda x: x == '32bit'}

    def modification(self, profile):
        profile.vtypes.update(shimrecs_type_2003vista)
        profile.vtypes.update(appcompat_type_vista_x86)
class ShimCacheTypesWin7x86(obj.ProfileModification):
    """Registers the Win7 x86 (NT 6.1, 32-bit) ShimCache vtypes on the profile."""
    before = ['WindowsObjectClasses']
    conditions = {'os': lambda x: x == 'windows',
                  'major': lambda x: x == 6,
                  'minor': lambda x: x == 1,
                  'memory_model': lambda x: x == '32bit'}

    def modification(self, profile):
        profile.vtypes.update(shimrecs_type_win7)
        profile.vtypes.update(appcompat_type_win7_x86)
class ShimCacheTypes2003x64(obj.ProfileModification):
    """Registers the 2003 x64 (NT 5.2, 64-bit) ShimCache vtypes on the profile."""
    before = ['WindowsObjectClasses']
    conditions = {'os': lambda x: x == 'windows',
                  'major': lambda x: x == 5,
                  'minor': lambda x: x == 2,
                  'memory_model': lambda x: x == '64bit'}

    def modification(self, profile):
        profile.vtypes.update(shimrecs_type_2003vista)
        profile.vtypes.update(appcompat_type_2003_x64)
class ShimCacheTypesVistax64(obj.ProfileModification):
    """Registers the Vista x64 (NT 6.0, 64-bit) ShimCache vtypes on the profile."""
    before = ['WindowsObjectClasses']
    conditions = {'os': lambda x: x == 'windows',
                  'major': lambda x: x == 6,
                  'minor': lambda x: x == 0,
                  'memory_model': lambda x: x == '64bit'}

    def modification(self, profile):
        profile.vtypes.update(shimrecs_type_2003vista)
        profile.vtypes.update(appcompat_type_vista_x64)
class ShimCacheTypesWin7x64(obj.ProfileModification):
    """Registers the Win7 x64 (NT 6.1, 64-bit) ShimCache vtypes on the profile."""
    before = ['WindowsObjectClasses']
    conditions = {'os': lambda x: x == 'windows',
                  'major': lambda x: x == 6,
                  'minor': lambda x: x == 1,
                  'memory_model': lambda x: x == '64bit'}

    def modification(self, profile):
        profile.vtypes.update(shimrecs_type_win7)
        profile.vtypes.update(appcompat_type_win7_x64)
class ShimCache(common.AbstractWindowsCommand):
    """Parses the Application Compatibility Shim Cache registry key"""

    def __init__(self, config, *args, **kwargs):
        self._addrspace = None
        common.AbstractWindowsCommand.__init__(self, config, *args, **kwargs)

    @staticmethod
    def is_valid_profile(profile):
        # Only Windows profiles carry ShimCache registry data.
        return profile.metadata.get('os', 'unknown').lower() == 'windows'

    @staticmethod
    def remove_unprintable(item):
        # Keep printable ASCII characters (plus tab) from the raw path bytes.
        return ''.join([str(c) for c in item if (ord(c) > 31 or ord(c) == 9) and ord(c) <= 126])

    @staticmethod
    def get_entries(addr_space, regapi):
        """Yield (path, last_modified, last_update) tuples from the cache.

        last_update is only populated on XP-format caches; it is None for
        2003/Vista/Win7 formats.
        """
        regapi.reset_current()
        currentcs = regapi.reg_get_currentcontrolset()
        if currentcs == None:
            currentcs = "ControlSet001"
        version = (addr_space.profile.metadata.get('major', 0),
                   addr_space.profile.metadata.get('minor', 0))
        xp = False
        # XP stores the cache under AppCompatibility; later versions under
        # AppCompatCache.
        if version <= (5, 1):
            key = currentcs + "\\Control\\Session Manager\\AppCompatibility"
            xp = True
        else:
            key = currentcs + "\\Control\\Session Manager\\AppCompatCache"
        data_raw = regapi.reg_get_value('system', key, "AppCompatCache")
        # NOTE: `== None` is kept deliberately — volatility APIs may return a
        # NoneObject that compares equal to None without being None.
        if data_raw == None or len(data_raw) < 0x1c:
            debug.warning("No ShimCache data found")
            # BUG FIX: `raise StopIteration` inside a generator becomes a
            # RuntimeError under PEP 479 (Python 3.7+); `return` ends the
            # generator cleanly in both Python 2 and 3.
            return
        bufferas = addrspace.BufferAddressSpace(addr_space.get_config(), data = data_raw)
        shimdata = obj.Object("ShimRecords", offset = 0, vm = bufferas)
        if shimdata == None:
            debug.warning("No ShimCache data found")
            return
        # Valid magics: 0xDEADBEEF (XP), 0xBADC0FFE (2003/Vista), 0xBADC0FEE (Win7).
        if shimdata.Magic not in [0xDEADBEEF, 0xBADC0FFE, 0xBADC0FEE]:
            debug.warning("ShimRecords.Magic value {0:X} is not valid".format(shimdata.Magic))
            return
        for e in shimdata.Entries:
            if xp:
                # XP entries embed the path inline and carry both timestamps.
                yield e.Path, e.LastModified, e.LastUpdate
            else:
                # Later formats reference the path by offset/length in the blob.
                yield ShimCache.remove_unprintable(bufferas.read(int(e.PathOffset), int(e.Length))), e.LastModified, None

    def calculate(self):
        addr_space = utils.load_as(self._config)
        regapi = registryapi.RegistryApi(self._config)
        for entry in self.get_entries(addr_space, regapi):
            yield entry

    def unified_output(self, data):
        # blank header in case there is no shimcache data
        return TreeGrid([("Last Modified", str),
                         ("Last Update", str),
                         ("Path", str),
                         ], self.generator(data))

    def generator(self, data):
        for path, lm, lu in data:
            if lu:
                yield (0, [str(lm), str(lu), str(path).strip()])
            else:
                # Non-XP entries have no LastUpdate column.
                yield (0, [str(lm), "-", str(path).strip()])

    def render_text(self, outfd, data):
        # The header depends on whether entries carry a LastUpdate column,
        # so it is emitted lazily on the first row.
        first = True
        for path, lm, lu in data:
            if lu:
                if first:
                    self.table_header(outfd, [("Last Modified", "30"),
                                              ("Last Update", "30"),
                                              ("Path", ""),
                                              ])
                    first = False
                outfd.write("{0:30} {1:30} {2}\n".format(lm, lu, path))
            else:
                if first:
                    self.table_header(outfd, [("Last Modified", "30"),
                                              ("Path", ""),
                                              ])
                    first = False
                outfd.write("{0:30} {1}\n".format(lm, path))
| 11,646 | 36.938111 | 121 | py |
pyrebox | pyrebox-master/volatility/volatility/plugins/malware/svcscan.py | # Volatility
# Copyright (C) 2007-2013 Volatility Foundation
# Copyright (c) 2010, 2011, 2012 Michael Ligh <michael.ligh@mnin.org>
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
import volatility.utils as utils
import volatility.obj as obj
import volatility.plugins.common as common
import volatility.win32.tasks as tasks
import volatility.debug as debug
import volatility.plugins.registry.registryapi as registryapi
from volatility.renderers import TreeGrid
from volatility.renderers.basic import Address
#--------------------------------------------------------------------------------
# vtypes
#--------------------------------------------------------------------------------
# Bit positions for the _SERVICE_RECORD Type flags bitmap.
SERVICE_TYPE_FLAGS = {
    'SERVICE_KERNEL_DRIVER': 0,
    'SERVICE_FILE_SYSTEM_DRIVER': 1,
    'SERVICE_WIN32_OWN_PROCESS': 4,
    'SERVICE_WIN32_SHARE_PROCESS': 5,
    'SERVICE_INTERACTIVE_PROCESS': 8}

# Numeric service State -> symbolic name.
SERVICE_STATE_ENUM = {
    1: 'SERVICE_STOPPED',
    2: 'SERVICE_START_PENDING',
    3: 'SERVICE_STOP_PENDING',
    4: 'SERVICE_RUNNING',
    5: 'SERVICE_CONTINUE_PENDING',
    6: 'SERVICE_PAUSE_PENDING',
    7: 'SERVICE_PAUSED'}

# Numeric service Start type -> symbolic name.
SERVICE_START_ENUM = {
    0: 'SERVICE_BOOT_START',
    1: 'SERVICE_SYSTEM_START',
    2: 'SERVICE_AUTO_START',
    3: 'SERVICE_DEMAND_START',
    4: 'SERVICE_DISABLED'}

# Base vtype layouts (field -> [offset, type]) for 32-bit service records.
# DriverName and ServiceProcess share an offset: which one is meaningful
# depends on the record's Type (driver vs. process service).
svcscan_base_x86 = {
    '_SERVICE_HEADER': [ None, {
        'Tag': [ 0x0, ['array', 4, ['unsigned char']]],
        'ServiceRecord': [ 0xC, ['pointer', ['_SERVICE_RECORD']]],
        } ],
    '_SERVICE_LIST_ENTRY' : [ 0x8, {
        'Blink' : [ 0x0, ['pointer', ['_SERVICE_RECORD']]],
        'Flink' : [ 0x4, ['pointer', ['_SERVICE_RECORD']]],
        } ],
    '_SERVICE_RECORD' : [ None, {
        'ServiceList' : [ 0x0, ['_SERVICE_LIST_ENTRY']],
        'ServiceName' : [ 0x8, ['pointer', ['String', dict(encoding = 'utf16', length = 512)]]],
        'DisplayName' : [ 0xc, ['pointer', ['String', dict(encoding = 'utf16', length = 512)]]],
        'Order' : [ 0x10, ['unsigned int']],
        'Tag' : [ 0x18, ['array', 4, ['unsigned char']]],
        'DriverName' : [ 0x24, ['pointer', ['String', dict(encoding = 'utf16', length = 256)]]],
        'ServiceProcess' : [ 0x24, ['pointer', ['_SERVICE_PROCESS']]],
        'Type' : [ 0x28, ['Flags', {'bitmap': SERVICE_TYPE_FLAGS}]],
        'State' : [ 0x2c, ['Enumeration', dict(target = 'long', choices = SERVICE_STATE_ENUM)]],
        'Start' : [ 0x44, ['Enumeration', dict(target = 'long', choices = SERVICE_START_ENUM)]],
        } ],
    '_SERVICE_PROCESS' : [ None, {
        'BinaryPath' : [ 0x8, ['pointer', ['String', dict(encoding = 'utf16', length = 256)]]],
        'ProcessId' : [ 0xc, ['unsigned int']],
        } ],
    }

# 64-bit counterparts of the layouts above (wider pointers shift offsets).
svcscan_base_x64 = {
    '_SERVICE_HEADER': [ None, {
        'Tag': [ 0x0, ['array', 4, ['unsigned char']]],
        'ServiceRecord': [ 0x10, ['pointer', ['_SERVICE_RECORD']]],
        } ],
    '_SERVICE_LIST_ENTRY' : [ 0x8, {
        'Blink' : [ 0x0, ['pointer', ['_SERVICE_RECORD']]],
        'Flink' : [ 0x10, ['pointer', ['_SERVICE_RECORD']]],
        } ],
    '_SERVICE_RECORD' : [ None, {
        'ServiceList' : [ 0x0, ['_SERVICE_LIST_ENTRY']],
        'ServiceName' : [ 0x8, ['pointer', ['String', dict(encoding = 'utf16', length = 512)]]],
        'DisplayName' : [ 0x10, ['pointer', ['String', dict(encoding = 'utf16', length = 512)]]],
        'Order' : [ 0x18, ['unsigned int']],
        'Tag' : [ 0x20, ['array', 4, ['unsigned char']]],
        'DriverName' : [ 0x30, ['pointer', ['String', dict(encoding = 'utf16', length = 256)]]],
        'ServiceProcess' : [ 0x30, ['pointer', ['_SERVICE_PROCESS']]],
        'Type' : [ 0x38, ['Flags', {'bitmap': SERVICE_TYPE_FLAGS}]],
        'State' : [ 0x3C, ['Enumeration', dict(target = 'long', choices = SERVICE_STATE_ENUM)]],
        'Start' : [ 0x54, ['Enumeration', dict(target = 'long', choices = SERVICE_START_ENUM)]],
        } ],
    '_SERVICE_PROCESS': [ None, {
        'BinaryPath': [ 0x10, ['pointer', ['String', dict(encoding = 'utf16', length = 256)]]],
        'ProcessId': [ 0x18, ['unsigned int']],
        } ],
    }
#--------------------------------------------------------------------------------
# object Classes
#--------------------------------------------------------------------------------
class _SERVICE_RECORD_LEGACY(obj.CType):
    "Service records for XP/2003 x86 and x64"

    @property
    def Binary(self):
        "Return the binary path for a service"
        # No path in memory for services that aren't running
        # (if needed, query the registry key)
        if str(self.State) != 'SERVICE_RUNNING':
            return obj.NoneObject("No path, service isn't running")
        # Depending on whether the service is for a process
        # or kernel driver, the binary path is stored differently
        # (ServiceProcess and DriverName overlay the same offset).
        if 'PROCESS' in str(self.Type):
            return self.ServiceProcess.BinaryPath.dereference()
        else:
            return self.DriverName.dereference()

    @property
    def Pid(self):
        "Return the process ID for a service"
        # Only process-type services that are currently running have a PID.
        if str(self.State) == 'SERVICE_RUNNING':
            if 'PROCESS' in str(self.Type):
                return self.ServiceProcess.ProcessId
        return obj.NoneObject("Cannot get process ID")

    def is_valid(self):
        "Check some fields for validity"
        # Largest value the Type bitmap can take given the known flag bits.
        type_flags_max = sum([(1 << v) for v in SERVICE_TYPE_FLAGS.values()])
        # NOTE: the chain is deliberately short-circuiting -- the later
        # field reads only happen once the underlying CType is valid.
        return obj.CType.is_valid(self) and self.Order > 0 and \
                self.Order < 0xFFFF and \
                self.State.v() in SERVICE_STATE_ENUM and \
                self.Start.v() in SERVICE_START_ENUM and \
                self.Type.v() < type_flags_max

    def traverse(self):
        # Walk the service list backwards via Blink, starting with this
        # record, stopping at the first invalid/null record.
        rec = self # Include this object in the list
        while rec and rec.is_valid():
            yield rec
            rec = rec.ServiceList.Blink.dereference()
class _SERVICE_RECORD_RECENT(_SERVICE_RECORD_LEGACY):
    "Service records for 2008, Vista, 7 x86 and x64"

    def traverse(self):
        """Generator that walks the singly-linked list"""
        # PrevEntry is supplied by the per-version profile overlays; the
        # Binary/Pid/is_valid logic is inherited from the legacy class.
        if self.is_valid():
            yield self # Include this object in the list
        # Make sure we dereference these pointers, or the
        # is_valid() checks will apply to the pointer and
        # not the _SERVICE_RECORD object as intended.
        rec = self.PrevEntry.dereference()
        while rec and rec.is_valid():
            yield rec
            rec = rec.PrevEntry.dereference()
class _SERVICE_HEADER(obj.CType):
    "Service headers for 2008, Vista, 7 x86 and x64"

    def is_valid(self):
        """Sanity-check this header.

        Valid only when the underlying CType is valid, the record it
        points at is valid, and that record's Order is in range.
        """
        if not obj.CType.is_valid(self):
            return False
        record = self.ServiceRecord
        if not record.is_valid():
            return False
        return record.Order < 0xFFFF
#--------------------------------------------------------------------------------
# profile modifications
#--------------------------------------------------------------------------------
class ServiceBase(obj.ProfileModification):
    """The base applies to XP and 2003 SP0-SP1"""
    before = ['WindowsOverlay', 'WindowsObjectClasses']
    conditions = {'os': lambda x: x == 'windows'}

    def modification(self, profile):
        # Register the legacy record/header classes, the "sErv" signature
        # that SvcScan searches for in services.exe memory, and the default
        # 32-bit vtype layouts (later modifications override as needed).
        profile.object_classes.update({
            '_SERVICE_RECORD': _SERVICE_RECORD_LEGACY,
            '_SERVICE_HEADER': _SERVICE_HEADER,
            })
        profile.merge_overlay({'VOLATILITY_MAGIC': [ None, {
            'ServiceTag': [ 0x0, ['VolatilityMagic', dict(value = "sErv")]]
            }]})
        profile.vtypes.update(svcscan_base_x86)
class ServiceBasex64(obj.ProfileModification):
    """This overrides the base x86 vtypes with x64 vtypes"""
    # Applied only on 64-bit Windows; swaps in the wider record layouts.
    before = ['WindowsOverlay', 'WindowsObjectClasses', 'ServiceBase']
    conditions = {'os': lambda x: x == 'windows',
                  'memory_model': lambda x: x == '64bit'}

    def modification(self, profile):
        profile.vtypes.update(svcscan_base_x64)
class ServiceVista(obj.ProfileModification):
    """Override the base with OC's for Vista, 2008, and 7"""
    # Windows 6.x+ switches to the "serH" header signature and the
    # singly-linked record list handled by _SERVICE_RECORD_RECENT.
    before = ['WindowsOverlay', 'WindowsObjectClasses', 'ServiceBase']
    conditions = {'os': lambda x: x == 'windows',
                  'major': lambda x: x >= 6}

    def modification(self, profile):
        profile.object_classes.update({
            '_SERVICE_RECORD': _SERVICE_RECORD_RECENT,
            })
        profile.merge_overlay({'VOLATILITY_MAGIC': [ None, {
            'ServiceTag': [ 0x0, ['VolatilityMagic', dict(value = "serH")]]
            }]})
class ServiceVistax86(obj.ProfileModification):
    """Override the base with vtypes for x86 Vista, 2008, and 7"""
    before = ['WindowsOverlay', 'WindowsObjectClasses', 'ServiceBase']
    conditions = {'os': lambda x: x == 'windows',
                  'major': lambda x: x == 6,
                  'minor': lambda x: x < 2,
                  'memory_model': lambda x: x == '32bit'}

    def modification(self, profile):
        # 32-bit Vista/2008/7 record offsets; adds the PrevEntry pointer
        # used by _SERVICE_RECORD_RECENT.traverse().
        profile.merge_overlay({'_SERVICE_RECORD': [ None, {
            'PrevEntry': [ 0x0, ['pointer', ['_SERVICE_RECORD']]],
            'ServiceName': [ 0x4, ['pointer', ['String', dict(encoding = 'utf16', length = 512)]]],
            'DisplayName': [ 0x8, ['pointer', ['String', dict(encoding = 'utf16', length = 512)]]],
            'Order': [ 0xC, ['unsigned int']],
            'ServiceProcess': [ 0x1C, ['pointer', ['_SERVICE_PROCESS']]],
            'DriverName': [ 0x1C, ['pointer', ['String', dict(encoding = 'utf16', length = 256)]]],
            'Type' : [ 0x20, ['Flags', {'bitmap': SERVICE_TYPE_FLAGS}]],
            'State': [ 0x24, ['Enumeration', dict(target = 'long', choices = SERVICE_STATE_ENUM)]],
            'Start' : [ 0x3C, ['Enumeration', dict(target = 'long', choices = SERVICE_START_ENUM)]],
            'ServiceList' : [ 0x5C, ['_SERVICE_LIST_ENTRY']],
            }]})
class ServiceVistax64(obj.ProfileModification):
    """Override the base with vtypes for x64 Vista, 2008, and 7"""
    before = ['WindowsOverlay', 'WindowsObjectClasses', 'ServiceBase']
    conditions = {'os': lambda x: x == 'windows',
                  'major': lambda x: x == 6,
                  'minor': lambda x: x < 2,
                  'memory_model': lambda x: x == '64bit'}

    def modification(self, profile):
        # 64-bit Vista/2008/7 record offsets (see x86 variant above).
        profile.merge_overlay({'_SERVICE_RECORD': [ None, {
            'PrevEntry': [ 0x0, ['pointer', ['_SERVICE_RECORD']]],
            'ServiceName': [ 0x8, ['pointer', ['String', dict(encoding = 'utf16', length = 512)]]],
            'DisplayName': [ 0x10, ['pointer', ['String', dict(encoding = 'utf16', length = 512)]]],
            'Order': [ 0x18, ['unsigned int']],
            'ServiceProcess': [ 0x28, ['pointer', ['_SERVICE_PROCESS']]],
            'DriverName': [ 0x28, ['pointer', ['String', dict(encoding = 'utf16', length = 256)]]],
            'Type' : [ 0x30, ['Flags', {'bitmap': SERVICE_TYPE_FLAGS}]],
            'State': [ 0x34, ['Enumeration', dict(target = 'long', choices = SERVICE_STATE_ENUM)]],
            'Start' : [ 0x4C, ['Enumeration', dict(target = 'long', choices = SERVICE_START_ENUM)]],
            'ServiceList' : [ 0x78, ['_SERVICE_LIST_ENTRY']],
            }]})
class Service8x64(obj.ProfileModification):
    """Service structures for Win8/8.1 and Server2012/R2 64-bit"""
    # Win8 changes Tag to a 4-char string and moves PrevEntry to 0x8.
    before = ['WindowsOverlay', 'WindowsObjectClasses', 'ServiceBase', 'ServiceVista']
    conditions = {'os': lambda x: x == 'windows',
                  'major': lambda x: x == 6,
                  'minor': lambda x: x >= 2,
                  'memory_model': lambda x: x == '64bit'}

    def modification(self, profile):
        profile.merge_overlay({
            '_SERVICE_RECORD' : [ None, {
                'Tag' : [ 0x0, ['String', dict(length = 4)]],
                'PrevEntry': [ 0x8, ['pointer', ['_SERVICE_RECORD']]],
                'ServiceName' : [ 0x10, ['pointer', ['String', dict(encoding = 'utf16', length = 512)]]],
                'DisplayName' : [ 0x18, ['pointer', ['String', dict(encoding = 'utf16', length = 512)]]],
                'Order' : [ 0x20, ['unsigned int']],
                'DriverName' : [ 0x38, ['pointer', ['String', dict(encoding = 'utf16', length = 256)]]],
                'ServiceProcess' : [ 0x38, ['pointer', ['_SERVICE_PROCESS']]],
                'Type' : [ 0x40, ['Flags', {'bitmap': SERVICE_TYPE_FLAGS}]],
                'State' : [ 0x44, ['Enumeration', dict(target = 'long', choices = SERVICE_STATE_ENUM)]],
                'Start' : [ 0x5C, ['Enumeration', dict(target = 'long', choices = SERVICE_START_ENUM)]],
                } ],
            '_SERVICE_PROCESS': [ None, {
                'BinaryPath': [ 0x18, ['pointer', ['String', dict(encoding = 'utf16', length = 256)]]],
                'ProcessId': [ 0x20, ['unsigned int']],
                } ],
            })
class Service10_15063x64(obj.ProfileModification):
    """Service structures for Win10 15063 (Creators)"""
    # Record fields moved substantially in build 15063; _SERVICE_PROCESS
    # is inherited from the Win8 x64 modification.
    before = ['WindowsOverlay', 'WindowsObjectClasses', 'ServiceBase', 'ServiceVista', 'Service8x64', 'ServiceVistax64']
    conditions = {'os': lambda x: x == 'windows',
                  'major': lambda x: x == 6,
                  'minor': lambda x: x == 4,
                  'build': lambda x: x >= 15063,
                  'memory_model': lambda x: x == '64bit'}

    def modification(self, profile):
        profile.merge_overlay({
            '_SERVICE_RECORD' : [ None, {
                'PrevEntry': [ 0x10, ['pointer', ['_SERVICE_RECORD']]],
                'ServiceName' : [ 0x38, ['pointer', ['String', dict(encoding = 'utf16', length = 512)]]],
                'DisplayName' : [ 0x40, ['pointer', ['String', dict(encoding = 'utf16', length = 512)]]],
                'Order' : [ 0x20, ['unsigned int']],
                'DriverName' : [ 0xe8, ['pointer', ['String', dict(encoding = 'utf16', length = 256)]]],
                'ServiceProcess' : [ 0xe8, ['pointer', ['_SERVICE_PROCESS']]],
                'Type' : [ 0x48, ['Flags', {'bitmap': SERVICE_TYPE_FLAGS}]],
                'State' : [ 0x4C, ['Enumeration', dict(target = 'long', choices = SERVICE_STATE_ENUM)]],
                'Start' : [ 0x24, ['Enumeration', dict(target = 'long', choices = SERVICE_START_ENUM)]],
                } ],
            })
class Service10_16299x64(obj.ProfileModification):
    """Service structures for Win10 16299 (Fall Creators)"""
    # Only the _SERVICE_PROCESS.ProcessId offset changed in build 16299.
    before = ['WindowsOverlay', 'WindowsObjectClasses', 'ServiceBase', 'ServiceVista', 'Service8x64']
    conditions = {'os': lambda x: x == 'windows',
                  'major': lambda x: x == 6,
                  'minor': lambda x: x == 4,
                  'build': lambda x: x >= 16299,
                  'memory_model': lambda x: x == '64bit'}

    def modification(self, profile):
        profile.merge_overlay({
            '_SERVICE_PROCESS': [ None, {
                'BinaryPath': [ 0x18, ['pointer', ['String', dict(encoding = 'utf16', length = 256)]]],
                'ProcessId': [ 0x28, ['unsigned int']],
                }]})
class Service8x86(obj.ProfileModification):
    """Service structures for Win8/8.1 32-bit"""
    # Uses vtypes.update (full replace), unlike the x64 variant's overlay.
    before = ['WindowsOverlay', 'WindowsObjectClasses', 'ServiceBase', 'ServiceVista']
    conditions = {'os': lambda x: x == 'windows',
                  'major': lambda x: x == 6,
                  'minor': lambda x: x >= 2,
                  'memory_model': lambda x: x == '32bit'}

    def modification(self, profile):
        profile.vtypes.update({
            '_SERVICE_RECORD' : [ None, {
                'Tag' : [ 0x0, ['String', dict(length = 4)]],
                'PrevEntry': [ 0x4, ['pointer', ['_SERVICE_RECORD']]],
                'ServiceName' : [ 0x8, ['pointer', ['String', dict(encoding = 'utf16', length = 512)]]],
                'DisplayName' : [ 0xc, ['pointer', ['String', dict(encoding = 'utf16', length = 512)]]],
                'Order' : [ 0x10, ['unsigned int']],
                'DriverName' : [ 0x24, ['pointer', ['String', dict(encoding = 'utf16', length = 256)]]],
                'ServiceProcess' : [ 0x24, ['pointer', ['_SERVICE_PROCESS']]],
                'Type' : [ 0x28, ['Flags', {'bitmap': SERVICE_TYPE_FLAGS}]],
                'State' : [ 0x2c, ['Enumeration', dict(target = 'long', choices = SERVICE_STATE_ENUM)]],
                'Start' : [ 0x44, ['Enumeration', dict(target = 'long', choices = SERVICE_START_ENUM)]],
                } ],
            '_SERVICE_PROCESS': [ None, {
                'BinaryPath': [ 0xc, ['pointer', ['String', dict(encoding = 'utf16', length = 256)]]],
                'ProcessId': [ 0x10, ['unsigned int']],
                } ],
            })
class Service10_15063x86(obj.ProfileModification):
    """Service structures for Win10 15063 (Creators)"""
    before = ['WindowsOverlay', 'WindowsObjectClasses', 'ServiceBase', 'ServiceVista', 'Service8x86']
    conditions = {'os': lambda x: x == 'windows',
                  'major': lambda x: x == 6,
                  'minor': lambda x: x == 4,
                  'build': lambda x: x >= 15063,
                  'memory_model': lambda x: x == '32bit'}

    def modification(self, profile):
        # 32-bit build-15063 record layout; _SERVICE_PROCESS comes from
        # the Win8 x86 modification.
        profile.vtypes.update({
            '_SERVICE_RECORD' : [ None, {
                'PrevEntry': [ 0xC, ['pointer', ['_SERVICE_RECORD']]],
                'ServiceName' : [ 0x2C, ['pointer', ['String', dict(encoding = 'utf16', length = 512)]]],
                'DisplayName' : [ 0x30, ['pointer', ['String', dict(encoding = 'utf16', length = 512)]]],
                'Order' : [ 0x14, ['unsigned int']],
                'DriverName' : [ 0x9C, ['pointer', ['String', dict(encoding = 'utf16', length = 256)]]],
                'ServiceProcess' : [ 0x9C, ['pointer', ['_SERVICE_PROCESS']]],
                ## needs updating
                'Type' : [ 0x34, ['Flags', {'bitmap': SERVICE_TYPE_FLAGS}]],
                'State' : [ 0x38, ['Enumeration', dict(target = 'long', choices = SERVICE_STATE_ENUM)]],
                'Start' : [ 0x18, ['Enumeration', dict(target = 'long', choices = SERVICE_START_ENUM)]],
                } ],
            })
class Service10_16299x86(obj.ProfileModification):
    """Service structures for Win10 16299 (Fall Creators)"""
    # Only _SERVICE_PROCESS.ProcessId moved in build 16299 (32-bit).
    before = ['WindowsOverlay', 'WindowsObjectClasses', 'ServiceBase', 'ServiceVista', 'Service8x86']
    conditions = {'os': lambda x: x == 'windows',
                  'major': lambda x: x == 6,
                  'minor': lambda x: x == 4,
                  'build': lambda x: x >= 16299,
                  'memory_model': lambda x: x == '32bit'}

    def modification(self, profile):
        profile.merge_overlay({
            '_SERVICE_PROCESS': [ None, {
                'BinaryPath': [ 0xc, ['pointer', ['String', dict(encoding = 'utf16', length = 256)]]],
                'ProcessId': [ 0x14, ['unsigned int']],
                }]})
#--------------------------------------------------------------------------------
# svcscan plugin
#--------------------------------------------------------------------------------
class SvcScan(common.AbstractWindowsCommand):
    "Scan for Windows services"

    def calculate(self):
        """Yield _SERVICE_RECORD objects found in services.exe memory."""
        addr_space = utils.load_as(self._config)
        # Get the version we're analyzing
        version = (addr_space.profile.metadata.get('major', 0),
                   addr_space.profile.metadata.get('minor', 0))
        tag = obj.VolMagic(addr_space).ServiceTag.v()
        # On systems more recent than XP/2003, the serH marker doesn't
        # find *all* services, but the ones it does find have linked
        # lists to the others. We use this variable to track which
        # ones we've seen so as to not yield duplicates.
        records = []
        for task in tasks.pslist(addr_space):
            # We only want the Service Control Manager process
            if str(task.ImageFileName).lower() != "services.exe":
                continue
            # Process AS must be valid
            process_space = task.get_process_address_space()
            if process_space == None:
                continue
            # Find all instances of the record tag
            for address in task.search_process_memory([tag], vad_filter = lambda x: x.Length < 0x40000000):
                if version <= (5, 2):
                    # Windows XP/2003: the tag sits inside the record, so
                    # back up to the record's start.
                    rec = obj.Object("_SERVICE_RECORD", offset = address -
                            addr_space.profile.get_obj_offset('_SERVICE_RECORD', 'Tag'),
                            vm = process_space
                            )
                    # Apply our sanity checks
                    if rec.is_valid():
                        yield rec
                else:
                    # Windows Vista, 2008, and 7: the tag marks a header
                    # that points at the first record.
                    svc_hdr = obj.Object('_SERVICE_HEADER', offset = address,
                            vm = process_space)
                    # Apply our sanity checks
                    if svc_hdr.is_valid():
                        # Since we walk the s-list backwards, if we've seen
                        # an object, then we've also seen all objects that
                        # exist before it, thus we can break at that time.
                        for rec in svc_hdr.ServiceRecord.traverse():
                            if rec in records:
                                break
                            records.append(rec)
                            yield rec

    def render_dot(self, outfd, data):
        """Generate a dot graph of service relationships.

        This currently only works for XP/2003 profiles,
        because the linked list was removed after that.
        """
        ## Collect all the service records from calculate()
        all_services = [d for d in data]
        ## FIX: the original indexed all_services[0] unconditionally and
        ## raised IndexError when the scan found nothing; abort with the
        ## plugin's standard error mechanism instead (debug.error exits).
        if not all_services:
            debug.error("No service records found")
        ## Abort if we're not using the supported profiles
        if all_services[0].obj_vm.profile.metadata.get('major', 0) != 5:
            debug.error("This profile does not support --output=dot format")
        objects = set()
        links = set()
        for svc in all_services:
            label = "{{ {0:#x} \\n {1} \\n {2} \\n F:{3:#x} B:{4:#x} }}".format(
                svc.obj_offset,
                svc.ServiceName.dereference(),
                str(svc.State),
                svc.ServiceList.Flink.v(),
                svc.ServiceList.Blink.v())
            objects.add('"{0:#x}" [label="{1}" shape="record"];\n'.format(
                svc.obj_offset, label))
            ## Check the linked list pointers
            flink = svc.ServiceList.Flink.dereference()
            blink = svc.ServiceList.Blink.dereference()
            if flink.is_valid():
                links.add('"{0:#x}" -> "{1:#x}" [];\n'.format(
                    svc.obj_offset, flink.obj_offset))
            if blink.is_valid():
                links.add('"{0:#x}" -> "{1:#x}" [];\n'.format(
                    svc.obj_offset, blink.obj_offset))
        ## Now write the graph nodes
        outfd.write("digraph svctree { \ngraph [rankdir = \"TB\"];\n")
        for item in objects:
            outfd.write(item)
        for link in links:
            outfd.write(link)
        outfd.write("}\n")

    @staticmethod
    def get_service_info(regapi):
        """Collect per-service registry details (ServiceDll, ImagePath,
        FailureCommand, last-write time), keyed by service name."""
        ccs = regapi.reg_get_currentcontrolset()
        key_name = "{0}\\services".format(ccs)
        info = {}
        for subkey in regapi.reg_get_all_subkeys(hive_name = "system", key = key_name):
            path_value = ""
            dll_value = ""
            failure_value = ""
            image_path = regapi.reg_get_value(hive_name = "system", key = "", value = "ImagePath", given_root = subkey)
            if image_path:
                # this could be REG_SZ or REG_MULTI_SZ
                if isinstance(image_path, list):
                    image_path = image_path[0]
                path_value = utils.remove_unprintable(image_path)
            failure_path = regapi.reg_get_value(hive_name = "system", key = "", value = "FailureCommand", given_root = subkey)
            if failure_path:
                failure_value = utils.remove_unprintable(failure_path)
            for rootkey in regapi.reg_get_all_subkeys(hive_name = "system", key = "", given_root = subkey):
                if rootkey.Name == "Parameters":
                    service_dll = regapi.reg_get_value(hive_name = "system", key = "", value = "ServiceDll", given_root = rootkey)
                    if service_dll != None:
                        dll_value = utils.remove_unprintable(service_dll)
                    break
            last_write = int(subkey.LastWriteTime)
            info[utils.remove_unprintable(str(subkey.Name))] = (dll_value, path_value, failure_value, last_write)
        return info

    def unified_output(self, data):
        # Verbose mode appends the registry-derived columns.
        if self._config.VERBOSE:
            return TreeGrid([("Offset", Address),
                             ("Order", int),
                             ("Start", str),
                             ("PID", int),
                             ("ServiceName", str),
                             ("DisplayName", str),
                             ("ServiceType", str),
                             ("State", str),
                             ("BinaryPath", str),
                             ("ServiceDll", str),
                             ("ImagePath", str),
                             ("FailureCommand", str)],
                            self.generator(data))
        return TreeGrid([("Offset", Address),
                         ("Order", int),
                         ("Start", str),
                         ("PID", int),
                         ("ServiceName", str),
                         ("DisplayName", str),
                         ("ServiceType", str),
                         ("State", str),
                         ("BinaryPath", str)],
                        self.generator(data))

    def generator(self, data):
        if self._config.VERBOSE:
            regapi = registryapi.RegistryApi(self._config)
            info = self.get_service_info(regapi)
        for rec in data:
            if self._config.VERBOSE:
                vals = info.get("{0}".format(rec.ServiceName.dereference()), None)
                yield (0, [Address(rec.obj_offset),
                           int(rec.Order),
                           str(rec.Start),
                           int(rec.Pid),
                           str(rec.ServiceName.dereference() or ""),
                           str(rec.DisplayName.dereference() or ""),
                           str(rec.Type),
                           str(rec.State),
                           str(rec.Binary or ""),
                           str(vals[0] if vals else ""),
                           str(vals[1] if vals else ""),
                           str(vals[2] if vals else "")])
            else:
                yield (0, [Address(rec.obj_offset),
                           int(rec.Order),
                           str(rec.Start),
                           int(rec.Pid),
                           str(rec.ServiceName.dereference() or ""),
                           str(rec.DisplayName.dereference() or ""),
                           str(rec.Type),
                           str(rec.State),
                           str(rec.Binary or "")])

    def render_text(self, outfd, data):
        if self._config.VERBOSE:
            regapi = registryapi.RegistryApi(self._config)
            info = self.get_service_info(regapi)
        for rec in data:
            # This can't possibly look neat in a table with columns...
            outfd.write("Offset: {0:#x}\n".format(rec.obj_offset))
            outfd.write("Order: {0}\n".format(rec.Order))
            outfd.write("Start: {0}\n".format(rec.Start))
            outfd.write("Process ID: {0}\n".format(rec.Pid))
            outfd.write("Service Name: {0}\n".format(rec.ServiceName.dereference()))
            outfd.write("Display Name: {0}\n".format(rec.DisplayName.dereference()))
            outfd.write("Service Type: {0}\n".format(rec.Type))
            outfd.write("Service State: {0}\n".format(rec.State))
            outfd.write("Binary Path: {0}\n".format(rec.Binary))
            if self._config.VERBOSE:
                vals = info.get("{0}".format(rec.ServiceName.dereference()), None)
                if vals:
                    outfd.write("ServiceDll: {0}\n".format(vals[0]))
                    outfd.write("ImagePath: {0}\n".format(vals[1]))
                    outfd.write("FailureCommand: {0}\n".format(vals[2]))
            outfd.write("\n")
| 28,929 | 44.344828 | 130 | py |
pyrebox | pyrebox-master/volatility/volatility/plugins/overlays/windows/vista.py | # Volatility
# Copyright (c) 2008-2013 Volatility Foundation
# Copyright (c) 2008 Brendan Dolan-Gavitt <bdolangavitt@wesleyan.edu>
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
"""
@author: The Volatility Foundation
@license: GNU General Public License 2.0
@contact: awalters@4tphi.net
This file provides support for Windows Vista.
"""
#pylint: disable-msg=C0111
import windows
import volatility.debug as debug #pylint: disable-msg=W0611
import volatility.obj as obj
class _ETHREAD(windows._ETHREAD):
    """A class for Windows 7 ETHREAD objects"""

    def owning_process(self):
        """Return the EPROCESS that owns this thread"""
        owner_ptr = self.Tcb.Process
        return owner_ptr.dereference_as("_EPROCESS")
class _POOL_HEADER(windows._POOL_HEADER):
    """A class for pool headers"""

    @property
    def NonPagedPool(self):
        # Even, non-zero pool type values indicate non-paged pool here.
        pool_type = self.PoolType.v()
        return pool_type > 0 and pool_type % 2 == 0

    @property
    def PagedPool(self):
        # Odd pool type values indicate paged pool.
        return self.PoolType.v() % 2 == 1
class _TOKEN(windows._TOKEN):
    def privileges(self):
        """Generator for privileges.

        @yields a tuple (value, present, enabled, default).
        """
        priv_block = self.Privileges
        for position in range(64):
            mask = 1 << position
            yield (position,
                   priv_block.Present & mask != 0,
                   priv_block.Enabled & mask != 0,
                   priv_block.EnabledByDefault & mask != 0)
class VistaWin7KPCR(obj.ProfileModification):
    # Registers the configurable KPCR magic (config name "KPCR") for all
    # Windows 6.x profiles.
    before = ['WindowsOverlay']
    conditions = {'os' : lambda x: x == 'windows',
                  'major': lambda x: x == 6}

    def modification(self, profile):
        overlay = {'VOLATILITY_MAGIC': [ None, {
            'KPCR' : [ None, ['VolatilityKPCR', dict(configname = "KPCR")]],
            }]}
        profile.merge_overlay(overlay)
class Vistax86DTB(obj.ProfileModification):
    # DTB scan signature ("\x03\x00\x20\x00") for Vista (6.0) 32-bit.
    before = ['WindowsOverlay']
    conditions = {'os': lambda x: x == 'windows',
                  'major': lambda x: x == 6,
                  'minor': lambda x: x == 0,
                  'memory_model': lambda x: x == '32bit',
                  }

    def modification(self, profile):
        overlay = {'VOLATILITY_MAGIC': [ None, {
            'DTBSignature' : [ None, ['VolatilityMagic', dict(value = "\x03\x00\x20\x00")]],
            }]}
        profile.merge_overlay(overlay)
class Vistax64DTB(obj.ProfileModification):
    # DTB scan signature ("\x03\x00\x30\x00") for Vista (6.0) 64-bit.
    before = ['WindowsOverlay', 'Windows64Overlay']
    conditions = {'os': lambda x: x == 'windows',
                  'major': lambda x: x == 6,
                  'minor': lambda x: x == 0,
                  'memory_model': lambda x: x == '64bit',
                  }

    def modification(self, profile):
        overlay = {'VOLATILITY_MAGIC': [ None, {
            'DTBSignature' : [ None, ['VolatilityMagic', dict(value = "\x03\x00\x30\x00")]],
            }]}
        profile.merge_overlay(overlay)
class VistaObjectClasses(obj.ProfileModification):
    # Installs the Vista+ object-class overrides defined earlier in this
    # module (_ETHREAD, _POOL_HEADER, _TOKEN) plus a wchar string class.
    before = ['WindowsOverlay', 'WindowsObjectClasses']
    conditions = {'os': lambda x: x == 'windows',
                  'major': lambda x: x >= 6,
                  }

    def modification(self, profile):
        profile.object_classes.update({'_ETHREAD' : _ETHREAD,
                                       '_POOL_HEADER': _POOL_HEADER,
                                       '_TOKEN': _TOKEN,
                                       'wchar': windows._UNICODE_STRING})
class VistaKDBG(windows.AbstractKDBGMod):
    # KDBG structure size for Vista RTM (6.0).
    before = ['WindowsOverlay']
    conditions = {'os': lambda x : x == 'windows',
                  'major': lambda x: x == 6,
                  'minor': lambda x: x == 0}
    kdbgsize = 0x328
class VistaSP1KDBG(windows.AbstractKDBGMod):
    # KDBG grew slightly from SP1 (build 6001) onward; overrides VistaKDBG.
    before = ['WindowsOverlay', 'VistaKDBG']
    conditions = {'os': lambda x: x == 'windows',
                  'major': lambda x: x == 6,
                  'minor': lambda x: x == 0,
                  'build': lambda x: x >= 6001,
                  }
    kdbgsize = 0x330
class VistaPolicyKey(obj.ProfileModification):
    # Registers "PolEKList" as the PolicyKey magic value for Windows 6.x.
    before = ['WindowsOverlay']
    conditions = {'os': lambda x : x == 'windows',
                  'major': lambda x: x == 6}

    def modification(self, profile):
        overlay = {'VOLATILITY_MAGIC': [ None, {
            'PolicyKey': [0x0, ['VolatilityMagic', dict(value = "PolEKList")]],
            }]}
        profile.merge_overlay(overlay)
class VistaSP0x86Hiber(obj.ProfileModification):
    # Hibernation-file magics (HibrProcPage/HibrEntryCount) for Vista SP0 x86.
    before = ['WindowsOverlay']
    conditions = {'os': lambda x: x == 'windows',
                  'memory_model': lambda x: x == '32bit',
                  'major': lambda x: x == 6,
                  'minor': lambda x: x == 0,
                  'build': lambda x: x == 6000}

    def modification(self, profile):
        overlay = {'VOLATILITY_MAGIC': [ None, {
            'HibrProcPage' : [ None, ['VolatilityMagic', dict(value = 0x4)]],
            'HibrEntryCount' : [ None, ['VolatilityMagic', dict(value = 0xff)]],
            }]}
        profile.merge_overlay(overlay)
class VistaSP1x86Hiber(obj.ProfileModification):
    # Hibernation-file magics for Vista SP1 x86 (build 6001).
    before = ['WindowsOverlay']
    conditions = {'os': lambda x: x == 'windows',
                  'memory_model': lambda x: x == '32bit',
                  'major': lambda x: x == 6,
                  'minor': lambda x: x == 0,
                  'build': lambda x: x == 6001}

    def modification(self, profile):
        overlay = {'VOLATILITY_MAGIC': [ None, {
            'HibrProcPage' : [ None, ['VolatilityMagic', dict(value = 0x1)]],
            'HibrEntryCount' : [ None, ['VolatilityMagic', dict(value = 0xff)]],
            }]}
        profile.merge_overlay(overlay)
class VistaSP2x86Hiber(obj.ProfileModification):
    # Hibernation-file magics for Vista SP2 x86 (build 6002).
    before = ['WindowsOverlay']
    conditions = {'os': lambda x: x == 'windows',
                  'memory_model': lambda x: x == '32bit',
                  'major': lambda x: x == 6,
                  'minor': lambda x: x == 0,
                  'build': lambda x: x == 6002}

    def modification(self, profile):
        overlay = {'VOLATILITY_MAGIC': [ None, {
            'HibrProcPage' : [ None, ['VolatilityMagic', dict(value = 0x1)]],
            'HibrEntryCount' : [ None, ['VolatilityMagic', dict(value = 0x1fe)]],
            }]}
        profile.merge_overlay(overlay)
class VistaSP0x64Hiber(obj.ProfileModification):
    # Hibernation-file magics for Vista SP0 x64 (build 6000).
    before = ['WindowsOverlay']
    conditions = {'os': lambda x: x == 'windows',
                  'memory_model': lambda x: x == '64bit',
                  'major': lambda x: x == 6,
                  'minor': lambda x: x == 0,
                  'build': lambda x: x == 6000}

    def modification(self, profile):
        overlay = {'VOLATILITY_MAGIC': [ None, {
            'HibrProcPage' : [ None, ['VolatilityMagic', dict(value = 0x4)]],
            'HibrEntryCount' : [ None, ['VolatilityMagic', dict(value = 0x7f)]],
            }]}
        profile.merge_overlay(overlay)
class VistaSP1x64Hiber(obj.ProfileModification):
    # Hibernation-file magics for Vista SP1 x64 (build 6001).
    before = ['WindowsOverlay']
    conditions = {'os': lambda x: x == 'windows',
                  'memory_model': lambda x: x == '64bit',
                  'major': lambda x: x == 6,
                  'minor': lambda x: x == 0,
                  'build': lambda x: x == 6001}

    def modification(self, profile):
        overlay = {'VOLATILITY_MAGIC': [ None, {
            'HibrProcPage' : [ None, ['VolatilityMagic', dict(value = 0x1)]],
            'HibrEntryCount' : [ None, ['VolatilityMagic', dict(value = 0x7f)]],
            }]}
        profile.merge_overlay(overlay)
class VistaSP2x64Hiber(obj.ProfileModification):
    # Hibernation-file magics for Vista SP2 x64 (build 6002).
    before = ['WindowsOverlay']
    conditions = {'os': lambda x: x == 'windows',
                  'memory_model': lambda x: x == '64bit',
                  'major': lambda x: x == 6,
                  'minor': lambda x: x == 0,
                  'build': lambda x: x == 6002}

    def modification(self, profile):
        overlay = {'VOLATILITY_MAGIC': [ None, {
            'HibrProcPage' : [ None, ['VolatilityMagic', dict(value = 0x1)]],
            'HibrEntryCount' : [ None, ['VolatilityMagic', dict(value = 0xfe)]],
            }]}
        profile.merge_overlay(overlay)
class VistaSP0x86(obj.Profile):
    """ A Profile for Windows Vista SP0 x86 """
    # Metadata consumed by the profile framework; the structure layouts
    # come from the generated module named in _md_vtype_module.
    _md_major = 6
    _md_minor = 0
    _md_build = 6000
    _md_memory_model = '32bit'
    _md_os = 'windows'
    _md_vtype_module = 'volatility.plugins.overlays.windows.vista_sp0_x86_vtypes'
    _md_product = ["NtProductWinNt"]
class VistaSP0x64(obj.Profile):
    """ A Profile for Windows Vista SP0 x64 """
    # Build 6000, 64-bit; vtypes from the generated module below.
    _md_major = 6
    _md_minor = 0
    _md_build = 6000
    _md_memory_model = '64bit'
    _md_os = 'windows'
    _md_vtype_module = 'volatility.plugins.overlays.windows.vista_sp0_x64_vtypes'
    _md_product = ["NtProductWinNt"]
class VistaSP1x86(obj.Profile):
    """ A Profile for Windows Vista SP1 x86 """
    # Build 6001, 32-bit; vtypes from the generated module below.
    _md_major = 6
    _md_minor = 0
    _md_build = 6001
    _md_memory_model = '32bit'
    _md_os = 'windows'
    _md_vtype_module = 'volatility.plugins.overlays.windows.vista_sp1_x86_vtypes'
    _md_product = ["NtProductWinNt"]
class VistaSP1x64(obj.Profile):
    """ A Profile for Windows Vista SP1 x64 """
    # Build 6001, 64-bit; vtypes from the generated module below.
    _md_major = 6
    _md_minor = 0
    _md_build = 6001
    _md_memory_model = '64bit'
    _md_os = 'windows'
    _md_vtype_module = 'volatility.plugins.overlays.windows.vista_sp1_x64_vtypes'
    _md_product = ["NtProductWinNt"]
class VistaSP2x86(obj.Profile):
""" A Profile for Windows Vista SP2 x86 """
_md_major = 6
_md_minor = 0
_md_build = 6002
_md_memory_model = '32bit'
_md_os = 'windows'
_md_vtype_module = 'volatility.plugins.overlays.windows.vista_sp2_x86_vtypes'
_md_product = ["NtProductWinNt"]
class VistaSP2x64(obj.Profile):
""" A Profile for Windows Vista SP2 x64 """
_md_major = 6
_md_minor = 0
_md_build = 6002
_md_memory_model = '64bit'
_md_os = 'windows'
_md_vtype_module = 'volatility.plugins.overlays.windows.vista_sp2_x64_vtypes'
_md_product = ["NtProductWinNt"]
# Windows Server 2008 shares Vista's kernel builds, so these profiles only
# override the product type used to distinguish server from workstation SKUs.
class Win2008SP1x64(VistaSP1x64):
    """ A Profile for Windows 2008 SP1 x64 """
    _md_product = ["NtProductLanManNt", "NtProductServer"]
class Win2008SP2x64(VistaSP2x64):
    """ A Profile for Windows 2008 SP2 x64 """
    _md_product = ["NtProductLanManNt", "NtProductServer"]
class Win2008SP1x86(VistaSP1x86):
    """ A Profile for Windows 2008 SP1 x86 """
    _md_product = ["NtProductLanManNt", "NtProductServer"]
class Win2008SP2x86(VistaSP2x86):
    """ A Profile for Windows 2008 SP2 x86 """
    _md_product = ["NtProductLanManNt", "NtProductServer"]
| 11,726 | 37.198697 | 100 | py |
pytorch_convNd | pytorch_convNd-master/main.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from convNd import convNd
import time
import matplotlib.pyplot as plt
import sys
torch.backends.cudnn.deterministic = True
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f'Python version: {sys.version}')
print(f'Pytorch version: {torch.__version__}')
# Define 3D tensor to test on
inChans = 8
outChans = 16
x = torch.rand(1, inChans, 21, 21, 21).to(device)
ks = 5
padding = 2
output_padding = 2
# Only zeros allowed by pytorch
padding_mode = 'zeros'
stride = 4
weight = 1
bias = 1
groups = 1
# ConvNd where d = 3
conv = convNd(inChans, outChans, 3, ks, stride, padding, use_bias=True,
is_transposed=False, padding_mode=padding_mode, groups=groups,
kernel_initializer=lambda x: torch.nn.init.constant_(x, weight),
bias_initializer=lambda x: torch.nn.init.constant_(x, bias)).to(device)
# Transposed convolution
convT = convNd(inChans, outChans, 3, ks, stride, padding,
is_transposed=True, groups=groups, output_padding=output_padding,
kernel_initializer=lambda x: torch.nn.init.constant_(x, weight),
bias_initializer=lambda x: torch.nn.init.constant_(x, bias)).to(device)
# Create 3D gt convolutions
convGT = nn.Conv3d(inChans, outChans, ks, stride, padding=padding, bias=True,
padding_mode=padding_mode, groups=groups,)
torch.nn.init.constant_(convGT.weight, weight)
torch.nn.init.constant_(convGT.bias, bias).to(device)
# Transposed
convGTT = nn.ConvTranspose3d(inChans, outChans, ks, stride,
padding=padding, output_padding=output_padding,
padding_mode=padding_mode, groups=groups,)
torch.nn.init.constant_(convGTT.weight, weight)
torch.nn.init.constant_(convGTT.bias, bias).to(device)
convGT = convGT.to(device)
convGTT = convGTT.to(device)
# Run timers to compare with torch implementations
start = torch.cuda.Event(enable_timing=True)
end = torch.cuda.Event(enable_timing=True)
print(' ')
# Convolve with ConvNd
torch.cuda.synchronize()
start.record()
out = conv(x)
end.record()
torch.cuda.synchronize()
print("ConvNd time: " + str(start.elapsed_time(end)))
start.record()
outGT = convGT(x)
end.record()
torch.cuda.synchronize()
print("ConvGT time: " + str(start.elapsed_time(end)))
diff = abs(out-outGT)
print("convND error: " + str((torch.sum(diff)/outGT.sum()).item()) + " %",end='\n\n')
# Convolve with ConvTransposeNd
torch.cuda.synchronize()
start.record()
outT = convT(x)
end.record()
torch.cuda.synchronize()
print("ConvTransposeNd time: " + str(start.elapsed_time(end)))
start.record()
outGTT = convGTT(x)
end.record()
torch.cuda.synchronize()
print("ConvTransposeGT time: " + str(start.elapsed_time(end)))
diff = abs(outT-outGTT)
print("convTransposeND error: " + str((torch.sum(diff)/outGTT.sum()).item()) + " %")
plt.figure(1)
plt.subplot(221)
plt.imshow(x[0,0,:,:,:].sum(2).cpu().data.detach())
plt.title('input')
plt.subplot(222)
plt.imshow(outT[0,:,:,:,:].sum(2).sum(0).cpu().data.detach())
plt.title('transposed convND out')
plt.subplot(223)
plt.imshow(outGTT[0,:,:,:,:].sum(2).sum(0).cpu().data.detach())
plt.title('transposed GT out')
plt.subplot(224)
plt.imshow(diff[0,:,:,:,:].sum(2).sum(0).cpu().data.detach())
plt.title('diff out')
plt.show()
| 3,193 | 27.265487 | 85 | py |
pytorch_convNd | pytorch_convNd-master/convNd.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import Tuple, Callable
import math
class convNd(nn.Module):
    """N-dimensional convolution / transposed convolution.

    Built recursively: the first kernel dimension is handled by an explicit
    Python loop over ``kernel_size[0]`` sub-convolutions of dimensionality
    ``num_dims - 1``; the recursion bottoms out at the native
    ``nn.Conv1d/2d/3d`` (or their transposed counterparts). Scalar
    hyper-parameters are broadcast to one value per dimension.

    ``rank`` is internal bookkeeping: 0 for the user-facing layer, and it
    decreases for recursively created children (only the rank-0 layer pads
    the first spatial dimension and applies the bias).
    """
    def __init__(self, in_channels: int,
                 out_channels: int,
                 num_dims: int,
                 kernel_size: Tuple,
                 stride,
                 padding,
                 is_transposed=False,
                 padding_mode='zeros',
                 output_padding=0,
                 dilation: int = 1,
                 groups: int = 1,
                 rank: int = 0,
                 use_bias: bool = True,
                 bias_initializer: Callable = None,
                 kernel_initializer: Callable = None):
        super(convNd, self).__init__()

        # ---------------------------------------------------------------------
        # Broadcast scalar arguments to per-dimension tuples
        # ---------------------------------------------------------------------
        if not isinstance(kernel_size, Tuple):
            kernel_size = tuple(kernel_size for _ in range(num_dims))
        if not isinstance(stride, Tuple):
            stride = tuple(stride for _ in range(num_dims))
        if not isinstance(padding, Tuple):
            padding = tuple(padding for _ in range(num_dims))
        if not isinstance(output_padding, Tuple):
            output_padding = tuple(output_padding for _ in range(num_dims))
        if not isinstance(dilation, Tuple):
            dilation = tuple(dilation for _ in range(num_dims))

        # Choose the largest native conv we can delegate to. A top-level call
        # with num_dims <= 3 uses the matching native layer directly.
        if rank == 0 and num_dims <= 3:
            max_dims = num_dims - 1
        else:
            max_dims = 3
        if is_transposed:
            self.conv_f = (nn.ConvTranspose1d, nn.ConvTranspose2d, nn.ConvTranspose3d)[max_dims - 1]
        else:
            self.conv_f = (nn.Conv1d, nn.Conv2d, nn.Conv3d)[max_dims - 1]

        assert len(kernel_size) == num_dims, \
            'nD kernel size expected!'
        assert len(stride) == num_dims, \
            'nD stride size expected!'
        assert len(padding) == num_dims, \
            'nD padding size expected!'
        assert len(output_padding) == num_dims, \
            'nD output_padding size expected!'
        # FIX: the original check (sum(dilation) == num_dims) also accepted
        # invalid mixtures such as (0, 2, 1, 1); require every rate to be 1.
        assert all(d == 1 for d in dilation), \
            'Dilation rate other than 1 not yet implemented!'

        # ---------------------------------------------------------------------
        # Store constructor arguments
        # ---------------------------------------------------------------------
        self.rank = rank
        self.is_transposed = is_transposed
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.num_dims = num_dims
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.padding_mode = padding_mode
        self.output_padding = output_padding
        self.dilation = dilation
        self.groups = groups
        self.use_bias = use_bias

        # Bias is a learnable parameter only when requested; the internal
        # sub-convolutions always run bias-free (bias is added once, at the top).
        if use_bias:
            self.bias = nn.Parameter(torch.zeros(out_channels))
        else:
            self.register_parameter('bias', None)

        self.bias_initializer = bias_initializer
        self.kernel_initializer = kernel_initializer
        # Default initializer: uniform with fan-in computed over the FULL
        # nD kernel (the native layers would only see the trailing dims).
        if rank == 0 and self.kernel_initializer is None:
            k = 1 / torch.sqrt(self.in_channels * torch.prod(torch.tensor(self.kernel_size)))
            self.kernel_initializer = lambda x: torch.nn.init.uniform_(x, -k, k)

        # ---------------------------------------------------------------------
        # Construct the (num_dims-1)-D layers, one per entry of kernel dim 0
        # ---------------------------------------------------------------------
        if self.bias_initializer is not None:
            if self.use_bias:
                self.bias_initializer(self.bias)

        # ModuleList keeps the sub-layers registered/trainable.
        self.conv_layers = torch.nn.ModuleList()
        next_dim_len = self.kernel_size[0]
        for _ in range(next_dim_len):
            if self.num_dims - 1 > max_dims:
                # Recurse with one fewer dimension.
                conv_layer = convNd(in_channels=self.in_channels,
                                    out_channels=self.out_channels,
                                    use_bias=self.use_bias,
                                    num_dims=self.num_dims - 1,
                                    rank=self.rank - 1,
                                    is_transposed=self.is_transposed,
                                    kernel_size=self.kernel_size[1:],
                                    stride=self.stride[1:],
                                    groups=self.groups,
                                    dilation=self.dilation[1:],
                                    padding=self.padding[1:],
                                    padding_mode=self.padding_mode,
                                    output_padding=self.output_padding[1:],
                                    kernel_initializer=self.kernel_initializer,
                                    bias_initializer=self.bias_initializer)
            else:
                # Leaf: delegate to the native torch convolution. Bias is
                # disabled here and applied once by the top-level module.
                conv_layer = self.conv_f(in_channels=self.in_channels,
                                         out_channels=self.out_channels,
                                         bias=False,
                                         kernel_size=self.kernel_size[1:],
                                         dilation=self.dilation[1:],
                                         stride=self.stride[1:],
                                         padding=self.padding[1:],
                                         padding_mode=self.padding_mode,
                                         groups=self.groups)
                if self.is_transposed:
                    conv_layer.output_padding = self.output_padding[1:]
                # Initializers are applied at the leaves only; recursive
                # children receive them via their own constructor.
                if self.kernel_initializer is not None:
                    self.kernel_initializer(conv_layer.weight)
            self.conv_layers.append(conv_layer)

    def forward(self, input):
        """Apply the nD (transposed) convolution.

        :param input: tensor of shape (batch, in_channels, d1, ..., d_num_dims)
        :return: tensor of shape (batch, out_channels, o1, ..., o_num_dims)
        """
        if not self.is_transposed:
            padding = list(self.padding)
            # The native sub-convs handle padding of dims 2..n; the first
            # spatial dim is padded manually here, once, by the rank-0 layer.
            if self.rank == 0:
                inputShape = list(input.shape)
                inputShape[2] += 2 * self.padding[0]
                padSize = (0, 0, self.padding[0], self.padding[0])
                padding[0] = 0
                if self.padding_mode == 'zeros':
                    input = F.pad(input.view(input.shape[0], input.shape[1], input.shape[2], -1), padSize, 'constant', 0).view(inputShape)
                else:
                    input = F.pad(input.view(input.shape[0], input.shape[1], input.shape[2], -1), padSize, self.padding_mode).view(inputShape)

        # Shortcut names for batch, channels and spatial sizes.
        (b, c_i) = tuple(input.shape[0:2])
        size_i = tuple(input.shape[2:])
        size_k = self.kernel_size

        if not self.is_transposed:
            # Output size given padding/stride, plus the unstrided output size
            # used below to center the kernel along the first dimension.
            size_o = tuple([math.floor((size_i[x] + 2 * padding[x] - size_k[x]) / self.stride[x] + 1) for x in range(len(size_i))])
            size_ons = tuple([size_i[x] - size_k[x] + 1 for x in range(len(size_i))])
        else:
            size_o = tuple([(size_i[x] - 1) * self.stride[x] - 2 * self.padding[x] + (size_k[x] - 1) + 1 + self.output_padding[x] for x in range(len(size_i))])

        # One accumulator per output frame along the first spatial dim. The
        # shared zeros tensor is never mutated in place: the first write to a
        # frame replaces it (tracked via empty_frames), later writes use +=.
        frame_results = size_o[0] * [torch.zeros((b, self.out_channels) + size_o[1:], device=input.device)]
        empty_frames = size_o[0] * [None]

        # Convolve each kernel frame i with each input frame j.
        for i in range(size_k[0]):
            for j in range(size_i[0]):
                # Map (kernel frame, input frame) -> output frame index.
                if self.is_transposed:
                    out_frame = i + j * self.stride[0] - self.padding[0]
                else:
                    out_frame = j - (i - size_k[0] // 2) - (size_i[0] - size_ons[0]) // 2 - (1 - size_k[0] % 2)
                    # Keep only positions where the kernel center lands on a
                    # stride multiple.
                    k_center_position = out_frame % self.stride[0]
                    out_frame = math.floor(out_frame / self.stride[0])
                    if k_center_position != 0:
                        continue
                if out_frame < 0 or out_frame >= size_o[0]:
                    continue

                # Select input frame j and run the (n-1)-D convolution on it.
                conv_input = input.view(b, c_i, size_i[0], -1)
                conv_input = conv_input[:, :, j, :].view((b, c_i) + size_i[1:])
                frame_conv = self.conv_layers[i](conv_input)

                if empty_frames[out_frame] is None:
                    frame_results[out_frame] = frame_conv
                    empty_frames[out_frame] = 1
                else:
                    frame_results[out_frame] += frame_conv

        result = torch.stack(frame_results, dim=2)

        if self.use_bias:
            resultShape = result.shape
            result = result.view(b, resultShape[1], -1)
            for k in range(self.out_channels):
                result[:, k, :] += self.bias[k]
            return result.view(resultShape)
        else:
            return result
| 10,228 | 45.076577 | 157 | py |
pytorch_convNd | pytorch_convNd-master/mainNd.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from convNd import convNd
import time
import matplotlib.pyplot as plt
torch.backends.cudnn.deterministic = True
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Define 4D tensor to test on
inChans = 1
outChans = 1
x = torch.ones(1, inChans, 5, 5, 5, 5, 5).to(device)
ks = 3
padding = 0
# Only zeros allowed by pytorch
padding_mode = 'zeros'
stride = 2
weight = 1
bias = 0
groups = 1
# ConvNd where d = 5
conv = convNd(inChans, outChans, 5, ks, stride, padding, use_bias=True,
padding_mode=padding_mode, groups=groups,
kernel_initializer=lambda x: torch.nn.init.constant_(x, weight),
bias_initializer=lambda x: torch.nn.init.constant_(x, bias)).to(device)
# Transposed convolution
convT = convNd(inChans, outChans, 5, ks, stride, padding,
groups=groups, is_transposed=True,
kernel_initializer=lambda x: torch.nn.init.constant_(x, weight),
bias_initializer=lambda x: torch.nn.init.constant_(x, bias)).to(device)
# Run timers to compare with torch implementations
start = torch.cuda.Event(enable_timing=True)
end = torch.cuda.Event(enable_timing=True)
print(' ')
# Convolve with ConvNd
torch.cuda.synchronize()
start.record()
out = conv(x)
end.record()
torch.cuda.synchronize()
print("ConvNd time: " + str(start.elapsed_time(end)))
print(out.shape)
# Convolve with ConvTransposeNd
torch.cuda.synchronize()
start.record()
outT = convT(x)
end.record()
torch.cuda.synchronize()
print("ConvTransposeNd time: " + str(start.elapsed_time(end)))
print(outT.shape)
| 1,553 | 26.263158 | 72 | py |
flexible-video-diffusion-modeling | flexible-video-diffusion-modeling-main/setup.py | from setuptools import setup
setup(
    name="improved-diffusion",
    # Single top-level module packaged as-is (no package directory scan).
    py_modules=["improved_diffusion"],
    # Runtime dependencies; blobfile provides local/remote blob I/O.
    install_requires=["blobfile>=1.0.5", "torch", "tqdm"],
)
| 168 | 20.125 | 58 | py |
flexible-video-diffusion-modeling | flexible-video-diffusion-modeling-main/improved_diffusion/rpe.py | import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import torch as th
from .nn import zero_module, normalization, checkpoint
class RPENet(nn.Module):
    """Produce per-head relative positional encodings from pairwise frame
    distances, conditioned on the diffusion-time embedding.

    Output shape: B x T x T x num_heads x (channels // num_heads).
    """
    def __init__(self, channels, num_heads, time_embed_dim):
        super().__init__()
        self.embed_distances = nn.Linear(3, channels)
        self.embed_diffusion_time = nn.Linear(time_embed_dim, channels)
        self.silu = nn.SiLU()
        self.out = nn.Linear(channels, channels)
        # Zero the output projection so the encoding starts as a no-op.
        self.out.weight.data *= 0.
        self.out.bias.data *= 0.
        self.channels = channels
        self.num_heads = num_heads

    def forward(self, temb, relative_distances):
        batch, length, _ = relative_distances.shape
        # Three distance features: log(1+d) for positive and negative
        # offsets, plus a same-frame indicator.
        fwd = th.log(1 + relative_distances.clamp(min=0))
        bwd = th.log(1 + (-relative_distances).clamp(min=0))
        same = (relative_distances == 0).float()
        features = th.stack([fwd, bwd, same], dim=-1)  # B x T x T x 3
        combined = self.embed_diffusion_time(temb).view(batch, length, 1, self.channels) \
            + self.embed_distances(features)  # B x T x T x C
        projected = self.out(self.silu(combined))
        head_dim = self.channels // self.num_heads
        return projected.view(batch, length, length, self.num_heads, head_dim)
class RPE(nn.Module):
    # Based on https://github.com/microsoft/Cream/blob/6fb89a2f93d6d97d2c7df51d600fe8be37ff0db4/iRPE/DeiT-with-iRPE/rpe_vision_transformer.py
    def __init__(self, channels, num_heads, time_embed_dim, use_rpe_net=False):
        """This module handles the relative positional encoding.

        Args:
            channels (int): Number of input channels.
            num_heads (int): Number of attention heads.
            time_embed_dim (int): Size of the diffusion-time embedding passed
                to RPENet (only used when use_rpe_net is True).
            use_rpe_net (bool): If True, encodings come from an RPENet
                conditioned on the diffusion time; otherwise a fixed
                lookup table indexed by pairwise distance is used.
        """
        super().__init__()
        self.num_heads = num_heads
        self.head_dim = channels // self.num_heads
        self.use_rpe_net = use_rpe_net
        if use_rpe_net:
            self.rpe_net = RPENet(channels, num_heads, time_embed_dim)
        else:
            # NOTE(review): `self.beta` is never defined on this class, so
            # constructing with use_rpe_net=False raises AttributeError.
            # The intended value (number of distance buckets per side, as in
            # iRPE) needs to be supplied — confirm before using this branch.
            self.lookup_table_weight = nn.Parameter(
                th.zeros(2 * self.beta + 1,
                         self.num_heads,
                         self.head_dim))

    def get_R(self, pairwise_distances, temb):
        # Returns encodings of shape B x T x T x H x (C/H).
        if self.use_rpe_net:
            return self.rpe_net(temb, pairwise_distances)
        else:
            return self.lookup_table_weight[pairwise_distances]  # BxTxTxHx(C/H)

    def forward(self, x, pairwise_distances, temb, mode):
        if mode == "qk":
            return self.forward_qk(x, pairwise_distances, temb)
        elif mode == "v":
            return self.forward_v(x, pairwise_distances, temb)
        else:
            raise ValueError(f"Unexpected RPE attention mode: {mode}")

    def forward_qk(self, qk, pairwise_distances, temb):
        # qk is either q or k, of shape B x D x H x T x (C/H);
        # the output has shape B x D x H x T x T.
        R = self.get_R(pairwise_distances, temb)
        return th.einsum(  # See Eq. 16 in https://arxiv.org/pdf/2107.14222.pdf
            "bdhtf,btshf->bdhts", qk, R
        )

    def forward_v(self, attn, pairwise_distances, temb):
        # attn has shape B x D x H x T x T;
        # the output has shape B x D x H x T x (C/H).
        R = self.get_R(pairwise_distances, temb)
        # FIX: removed a duplicated th.einsum call whose result was discarded
        # (it doubled the cost of this method for no effect).
        return th.einsum(  # See Eq. 16ish in https://arxiv.org/pdf/2107.14222.pdf
            "bdhts,btshf->bdhtf", attn, R
        )

    def forward_safe_qk(self, x, pairwise_distances, temb):
        # Slow reference implementation of forward_qk, for debugging only.
        R = self.get_R(pairwise_distances, temb)
        # Renamed the last unpacked dim from `F` to avoid shadowing
        # torch.nn.functional (imported as F at module level).
        B, T, _, H, n_feat = R.shape
        D = x.shape[1]
        res = x.new_zeros(B, D, H, T, T)  # attn shape
        for b in range(B):
            for d in range(D):
                for h in range(H):
                    for i in range(T):
                        for j in range(T):
                            res[b, d, h, i, j] = x[b, d, h, i].dot(R[b, i, j, h])
        return res
class RPEAttention(nn.Module):
    # Multi-head self-attention over the frame axis with optional relative
    # positional encodings on queries, keys and values.
    # NOTE(review): if use_rpe_q, use_rpe_k and use_rpe_v are ALL False, the
    # rpe_q/rpe_k/rpe_v attributes are never set, and _forward would raise
    # AttributeError — confirm that configuration is never used.
    # Based on https://github.com/microsoft/Cream/blob/6fb89a2f93d6d97d2c7df51d600fe8be37ff0db4/iRPE/DeiT-with-iRPE/rpe_vision_transformer.py#L42
    def __init__(self, channels, num_heads, use_checkpoint=False,
                 time_embed_dim=None, use_rpe_net=None,
                 use_rpe_q=True, use_rpe_k=True, use_rpe_v=True,
                 ):
        super().__init__()
        self.num_heads = num_heads
        head_dim = channels // num_heads
        # Standard 1/sqrt(d) attention scaling.
        self.scale = head_dim ** -0.5
        self.use_checkpoint = use_checkpoint
        self.qkv = nn.Linear(channels, channels * 3)
        self.proj_out = zero_module(nn.Linear(channels, channels))
        self.norm = normalization(channels)
        if use_rpe_q or use_rpe_k or use_rpe_v:
            assert use_rpe_net is not None
            def make_rpe_func():
                return RPE(
                    channels=channels, num_heads=num_heads,
                    time_embed_dim=time_embed_dim, use_rpe_net=use_rpe_net,
                )
            self.rpe_q = make_rpe_func() if use_rpe_q else None
            self.rpe_k = make_rpe_func() if use_rpe_k else None
            self.rpe_v = make_rpe_func() if use_rpe_v else None
    def forward(self, x, temb, frame_indices, attn_mask=None, attn_weights_list=None):
        # x: B x D x C x T; gradient checkpointing is applied when enabled.
        out, attn = checkpoint(self._forward, (x, temb, frame_indices, attn_mask), self.parameters(), self.use_checkpoint)
        if attn_weights_list is not None:
            B, D, C, T = x.shape
            # Head-averaged |attention| maps are collected for logging only.
            attn_weights_list.append(attn.detach().view(B*D, -1, T, T).mean(dim=1).abs()) # logging attn weights
        return out
    def _forward(self, x, temb, frame_indices, attn_mask):
        B, D, C, T = x.shape
        # Normalize per (B*D) sequence, then restore the 4-D layout.
        x = x.reshape(B*D, C, T)
        x = self.norm(x)
        x = x.view(B, D, C, T)
        x = th.einsum("BDCT -> BDTC", x) # just a permutation
        qkv = self.qkv(x).reshape(B, D, T, 3, self.num_heads, C // self.num_heads)
        qkv = th.einsum("BDTtHF -> tBDHTF", qkv)
        q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple)
        # q, k, v shapes: BxDxHxTx(C/H)
        q *= self.scale
        attn = (q @ k.transpose(-2, -1)) # BxDxHxTxT
        if self.rpe_q is not None or self.rpe_k is not None or self.rpe_v is not None:
            # Signed frame offsets feed the relative positional encodings.
            pairwise_distances = (frame_indices.unsqueeze(-1) - frame_indices.unsqueeze(-2)) # BxTxT
        # relative position on keys
        if self.rpe_k is not None:
            attn += self.rpe_k(q, pairwise_distances, temb=temb, mode="qk")
        # relative position on queries
        if self.rpe_q is not None:
            attn += self.rpe_q(k * self.scale, pairwise_distances, temb=temb, mode="qk").transpose(-1, -2)
        # softmax where all elements with mask==0 can attend to eachother and all with mask==1
        # can attend to eachother (but elements with mask==0 can't attend to elements with mask==1)
        def softmax(w, attn_mask):
            if attn_mask is not None:
                # allowed_interactions[b, i, j] == 1 iff frames i and j have
                # the same mask value; cross-group logits are pushed to -inf.
                allowed_interactions = attn_mask.view(B, 1, T) * attn_mask.view(B, T, 1)
                allowed_interactions += (1-attn_mask.view(B, 1, T)) * (1-attn_mask.view(B, T, 1))
                inf_mask = (1-allowed_interactions)
                inf_mask[inf_mask == 1] = th.inf
                w = w - inf_mask.view(B, 1, 1, T, T) # BxDxHxTxT
            # Softmax in float32 for stability, cast back to the input dtype.
            return th.softmax(w.float(), dim=-1).type(w.dtype)
        attn = softmax(attn, attn_mask)
        out = attn @ v
        # relative position on values
        if self.rpe_v is not None:
            out += self.rpe_v(attn, pairwise_distances, temb=temb, mode="v")
        out = th.einsum("BDHTF -> BDTHF", out).reshape(B, D, T, C)
        out = self.proj_out(out)
        # Residual connection around the whole attention block.
        x = x + out
        x = th.einsum("BDTC -> BDCT", x)
        return x, attn
flexible-video-diffusion-modeling | flexible-video-diffusion-modeling-main/improved_diffusion/frechet_video_distance.py | # Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Minimal Reference implementation for the Frechet Video Distance (FVD).
FVD is a metric for the quality of video generation models. It is inspired by
the FID (Frechet Inception Distance) used for images, but uses a different
embedding to be better suitable for videos.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
import scipy
import scipy.linalg
import six
import tensorflow.compat.v1 as tf
import tensorflow_hub as hub
os.environ["TFHUB_CACHE_DIR"] = f"{os.environ['PWD']}/.tfhub_cache"
######################################################################
## Feature extraction ##
######################################################################
def preprocess(videos, target_resolution):
  """Runs some preprocessing on the videos for I3D model.
  Args:
    videos: <T>[batch_size, num_frames, height, width, depth] The videos to be
      preprocessed. We don't care about the specific dtype of the videos, it can
      be anything that tf.image.resize_bilinear accepts. Values are expected to
      be in the range 0-255.
    target_resolution: (width, height): target video resolution
  Returns:
    videos: <float32>[batch_size, num_frames, height, width, depth]
  """
  videos_shape = videos.shape.as_list()
  # Collapse (batch, frames) so every frame is resized in one batched op.
  all_frames = tf.reshape(videos, [-1] + videos_shape[-3:])
  resized_videos = tf.image.resize_bilinear(all_frames, size=target_resolution)
  target_shape = [videos_shape[0], -1] + list(target_resolution) + [3]
  output_videos = tf.reshape(resized_videos, target_shape)
  # Rescale from [0, 255] to the [-1, 1] range expected by the I3D network.
  scaled_videos = 2. * tf.cast(output_videos, tf.float32) / 255. - 1
  return scaled_videos
def _is_in_graph(tensor_name):
  """Return True iff a tensor with the given name exists in the default graph."""
  graph = tf.get_default_graph()
  try:
    graph.get_tensor_by_name(tensor_name)
  except KeyError:
    return False
  else:
    return True
def create_id3_embedding(videos, batch_size=16):
  """Embeds the given videos using the Inflated 3D Convolution network.
  Downloads the graph of the I3D from tf.hub and adds it to the graph on the
  first call.
  Args:
    videos: <float32>[batch_size, num_frames, height=224, width=224, depth=3].
      Expected range is [-1, 1].
  Returns:
    embedding: <float32>[batch_size, embedding_size]. embedding_size depends
               on the model used.
  Raises:
    ValueError: when a provided embedding_layer is not supported.
  """
  # NOTE(review): `batch_size` must match the static batch dimension of
  # `videos` (enforced by the asserts below) — confirm callers pass it.
  module_spec = "https://tfhub.dev/deepmind/i3d-kinetics-400/1"
  # Making sure that we import the graph separately for
  # each different input video tensor.
  module_name = "fvd_kinetics-400_id3_module_" + six.ensure_str(
      videos.name).replace(":", "_")
  # Graph-time sanity checks on value range and batch size.
  assert_ops = [
      tf.Assert(
          tf.reduce_max(videos) <= 1.001,
          ["max value in frame is > 1", videos]),
      tf.Assert(
          tf.reduce_min(videos) >= -1.001,
          ["min value in frame is < -1", videos]),
      tf.assert_equal(
          tf.shape(videos)[0],
          batch_size, ["invalid frame batch size: ",
                       tf.shape(videos)],
          summarize=6),
  ]
  with tf.control_dependencies(assert_ops):
    videos = tf.identity(videos)
  module_scope = "%s_apply_default/" % module_name
  # To check whether the module has already been loaded into the graph, we look
  # for a given tensor name. If this tensor name exists, we assume the function
  # has been called before and the graph was imported. Otherwise we import it.
  # Note: in theory, the tensor could exist, but have wrong shapes.
  # This will happen if create_id3_embedding is called with a frames_placehoder
  # of wrong size/batch size, because even though that will throw a tf.Assert
  # on graph-execution time, it will insert the tensor (with wrong shape) into
  # the graph. This is why we need the following assert.
  video_batch_size = int(videos.shape[0])
  assert video_batch_size in [batch_size, -1, None], "Invalid batch size"
  tensor_name = module_scope + "RGB/inception_i3d/Mean:0"
  if not _is_in_graph(tensor_name):
    i3d_model = hub.Module(hub.resolve(module_spec), name=module_name)
    i3d_model(videos)
  # gets the kinetics-i3d-400-logits layer
  tensor_name = module_scope + "RGB/inception_i3d/Mean:0"
  tensor = tf.get_default_graph().get_tensor_by_name(tensor_name)
  return tensor
######################################################################
## Frechet distance computation ##
######################################################################
# Adopted from https://github.com/toshas/torch-fidelity/blob/master/torch_fidelity/metric_fid.py
def frechet_statistics_from_features(features):
    """Gaussian statistics of a feature matrix (rows are samples).

    Returns a dict with 'mu' (per-dimension mean) and 'sigma' (covariance).
    """
    return {
        'mu': features.mean(axis=0),
        'sigma': np.cov(features, rowvar=False),
    }
def frechet_statistics_to_frechet_metric(stat_1, stat_2):
    """Frechet distance between two Gaussians given by their statistics.

    Args:
        stat_1, stat_2: dicts with keys 'mu' (mean vector) and 'sigma'
            (covariance matrix), as produced by
            frechet_statistics_from_features.

    Returns:
        float: |mu1 - mu2|^2 + Tr(sigma1 + sigma2 - 2*sqrt(sigma1 sigma2)).
    """
    eps = 1e-6
    mu1, sigma1 = stat_1['mu'], stat_1['sigma']
    mu2, sigma2 = stat_2['mu'], stat_2['sigma']
    # FIX: the shape checks were previously duplicated (once without and once
    # with messages); a single pair covering shape and dtype suffices.
    assert mu1.shape == mu2.shape and mu1.dtype == mu2.dtype, \
        'Training and test mean vectors have different lengths'
    assert sigma1.shape == sigma2.shape and sigma1.dtype == sigma2.dtype, \
        'Training and test covariances have different dimensions'
    mu1 = np.atleast_1d(mu1)
    mu2 = np.atleast_1d(mu2)
    sigma1 = np.atleast_2d(sigma1)
    sigma2 = np.atleast_2d(sigma2)
    diff = mu1 - mu2
    # Product might be almost singular
    covmean, _ = scipy.linalg.sqrtm(sigma1.dot(sigma2), disp=False)
    if not np.isfinite(covmean).all():
        print(
            f'WARNING: fid calculation produces singular product; '
            f'adding {eps} to diagonal of cov estimates'
        )
        offset = np.eye(sigma1.shape[0]) * eps
        # With disp=True, sqrtm returns only the matrix (no error estimate).
        covmean = scipy.linalg.sqrtm((sigma1 + offset).dot(sigma2 + offset), disp=True)
    # Numerical error might give slight imaginary component
    if np.iscomplexobj(covmean):
        if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-3):
            m = np.max(np.abs(covmean.imag))
            assert False, 'Imaginary component {}'.format(m)
        covmean = covmean.real
    tr_covmean = np.trace(covmean)
    return float(diff.dot(diff) + np.trace(sigma1) + np.trace(sigma2) - 2 * tr_covmean)
def fid_features_to_metric(features_1, features_2):
    """FID between two 2-D feature matrices (rows are samples)."""
    for feats in (features_1, features_2):
        assert isinstance(feats, np.ndarray) and feats.ndim == 2
    assert features_1.shape[1] == features_2.shape[1]
    return frechet_statistics_to_frechet_metric(
        frechet_statistics_from_features(features_1),
        frechet_statistics_from_features(features_2),
    )
######################################################################
## Kernel distance computation ##
######################################################################
# Adopted from https://github.com/toshas/torch-fidelity/blob/master/torch_fidelity/metric_kid.py
KEY_METRIC_KID_MEAN = 'kernel_inception_distance_mean'
KEY_METRIC_KID_STD = 'kernel_inception_distance_std'
def mmd2(K_XX, K_XY, K_YY, unit_diagonal=False, mmd_est='unbiased'):
    """Squared MMD estimate from precomputed m x m kernel matrices.

    mmd_est selects the estimator: 'biased', 'unbiased' (default) or
    'u-statistic'. With unit_diagonal=True the diagonals of K_XX/K_YY are
    assumed to be 1 and are not read.
    """
    assert mmd_est in ('biased', 'unbiased', 'u-statistic'), 'Invalid value of mmd_est'
    m = K_XX.shape[0]
    for mat in (K_XX, K_XY, K_YY):
        assert mat.shape == (m, m)

    # Diagonal contributions (dropped by the Kt_* sums below).
    if unit_diagonal:
        diag_X = diag_Y = 1
        sum_diag_X = sum_diag_Y = m
    else:
        diag_X = np.diagonal(K_XX)
        diag_Y = np.diagonal(K_YY)
        sum_diag_X = diag_X.sum()
        sum_diag_Y = diag_Y.sum()

    # Row sums without the diagonal terms.
    Kt_XX_sums = K_XX.sum(axis=1) - diag_X
    Kt_YY_sums = K_YY.sum(axis=1) - diag_Y
    K_XY_sums_0 = K_XY.sum(axis=0)

    Kt_XX_sum = Kt_XX_sums.sum()
    Kt_YY_sum = Kt_YY_sums.sum()
    K_XY_sum = K_XY_sums_0.sum()

    if mmd_est == 'biased':
        estimate = ((Kt_XX_sum + sum_diag_X) / (m * m)
                    + (Kt_YY_sum + sum_diag_Y) / (m * m)
                    - 2 * K_XY_sum / (m * m))
    else:
        estimate = (Kt_XX_sum + Kt_YY_sum) / (m * (m - 1))
        if mmd_est == 'unbiased':
            estimate -= 2 * K_XY_sum / (m * m)
        else:
            estimate -= 2 * (K_XY_sum - np.trace(K_XY)) / (m * (m - 1))
    return estimate
def polynomial_kernel(X, Y, degree=3, gamma=None, coef0=1):
    """Polynomial kernel matrix: (gamma * X Y^T + coef0) ** degree.

    gamma defaults to 1 / n_features (taken from X's second dimension).
    """
    if gamma is None:
        gamma = 1.0 / X.shape[1]
    return (X @ Y.T * gamma + coef0) ** degree
def polynomial_mmd(features_1, features_2, degree, gamma, coef0):
    """Unbiased squared-MMD estimate between two feature sets under a
    polynomial kernel with the given parameters."""
    def _k(a, b):
        # Kernel matrix with the shared hyper-parameters applied.
        return polynomial_kernel(a, b, degree=degree, gamma=gamma, coef0=coef0)
    return mmd2(_k(features_1, features_1),
                _k(features_1, features_2),
                _k(features_2, features_2))
def kid_features_to_metric(features_1, features_2,
                           kid_subsets=100, kid_subset_size=1000,
                           kid_degree=3, kid_gamma=None, kid_coef0=1,
                           rng_seed=2020, verbose=False):
    # Kernel Inception Distance: mean/std of polynomial-kernel MMD^2 over
    # `kid_subsets` random subsets of `kid_subset_size` samples each.
    # Default arguments from https://github.com/toshas/torch-fidelity/blob/a5cba01a8edc2b0f5303570e13c0f48eb6e96819/torch_fidelity/defaults.py
    assert isinstance(features_1, np.ndarray) and features_1.ndim == 2
    assert isinstance(features_2, np.ndarray) and features_2.ndim == 2
    assert features_1.shape[1] == features_2.shape[1]
    n_samples_1, n_samples_2 = len(features_1), len(features_2)
    assert \
        n_samples_1 >= kid_subset_size and n_samples_2 >= kid_subset_size,\
        f'KID subset size {kid_subset_size} cannot be smaller than the number of samples (input_1: {n_samples_1}, '\
        f'input_2: {n_samples_2}). Consider using "kid_subset_size" kwarg or "--kid-subset-size" command line key to '\
        f'proceed.'
    mmds = np.zeros(kid_subsets)
    # Fixed seed keeps the subset draws (and hence the metric) reproducible.
    rng = np.random.RandomState(rng_seed)
    for i in range(kid_subsets):
        f1 = features_1[rng.choice(n_samples_1, kid_subset_size, replace=False)]
        f2 = features_2[rng.choice(n_samples_2, kid_subset_size, replace=False)]
        o = polynomial_mmd(
            f1,
            f2,
            kid_degree,
            kid_gamma,
            kid_coef0,
        )
        mmds[i] = o
    # NOTE(review): `verbose` is accepted but unused — confirm intended.
    out = {
        KEY_METRIC_KID_MEAN: float(np.mean(mmds)),
        KEY_METRIC_KID_STD: float(np.std(mmds)),
    }
    return out
return out | 11,237 | 36.211921 | 142 | py |
flexible-video-diffusion-modeling | flexible-video-diffusion-modeling-main/improved_diffusion/resample.py | from abc import ABC, abstractmethod
import numpy as np
import torch as th
import torch.distributed as dist
def create_named_schedule_sampler(name, diffusion):
    """
    Create a ScheduleSampler from a library of pre-defined samplers.

    :param name: the name of the sampler.
    :param diffusion: the diffusion object to sample for.
    :raises NotImplementedError: if the name is not recognized.
    """
    if name == "uniform":
        return UniformSampler(diffusion)
    if name == "loss-second-moment":
        return LossSecondMomentResampler(diffusion)
    raise NotImplementedError(f"unknown schedule sampler: {name}")
class ScheduleSampler(ABC):
    """
    A distribution over timesteps in the diffusion process, intended to reduce
    variance of the objective.

    By default, samplers perform unbiased importance sampling: each drawn
    timestep is paired with the weight 1 / (N * p(t)) so the objective's mean
    is unchanged. Subclasses may override sample() to reweight differently.
    """

    @abstractmethod
    def weights(self):
        """
        Get a numpy array of weights, one per diffusion step.
        The weights needn't be normalized, but must be positive.
        """

    def sample(self, batch_size, device):
        """
        Importance-sample timesteps for a batch.

        :param batch_size: the number of timesteps.
        :param device: the torch device to save to.
        :return: a tuple (timesteps, weights):
                 - timesteps: a tensor of timestep indices.
                 - weights: a tensor of weights to scale the resulting losses.
        """
        raw = self.weights()
        probs = raw / np.sum(raw)
        drawn = np.random.choice(len(probs), size=(batch_size,), p=probs)
        # Importance weights keep the loss estimator unbiased.
        importance = 1 / (len(probs) * probs[drawn])
        timesteps = th.from_numpy(drawn).long().to(device)
        weights = th.from_numpy(importance).float().to(device)
        return timesteps, weights
class UniformSampler(ScheduleSampler):
    """Sampler that gives every diffusion timestep the same weight."""

    def __init__(self, diffusion):
        self.diffusion = diffusion
        # One unit weight per diffusion step; normalized inside sample().
        self._weights = np.ones(diffusion.num_timesteps)

    def weights(self):
        return self._weights
class LossAwareSampler(ScheduleSampler):
    # Base class for samplers whose weights adapt to observed training losses.
    def update_with_local_losses(self, local_ts, local_losses):
        """
        Update the reweighting using losses from a model.

        Call this method from each rank with a batch of timesteps and the
        corresponding losses for each of those timesteps.
        This method will perform synchronization to make sure all of the ranks
        maintain the exact same reweighting.

        :param local_ts: an integer Tensor of timesteps.
        :param local_losses: a 1D Tensor of losses.
        """
        # First, gather every rank's batch size so we know how much padding
        # each gathered buffer needs.
        batch_sizes = [
            th.tensor([0], dtype=th.int32, device=local_ts.device)
            for _ in range(dist.get_world_size())
        ]
        dist.all_gather(
            batch_sizes,
            th.tensor([len(local_ts)], dtype=th.int32, device=local_ts.device),
        )
        # Pad all_gather batches to be the maximum batch size.
        batch_sizes = [x.item() for x in batch_sizes]
        max_bs = max(batch_sizes)
        # NOTE(review): the gather buffers are sized max_bs while the input
        # tensors keep their local size; torch.distributed.all_gather requires
        # equal sizes across ranks — this appears to rely on all ranks using
        # the same batch size. TODO confirm.
        timestep_batches = [th.zeros(max_bs).to(local_ts) for bs in batch_sizes]
        loss_batches = [th.zeros(max_bs).to(local_losses) for bs in batch_sizes]
        dist.all_gather(timestep_batches, local_ts)
        dist.all_gather(loss_batches, local_losses)
        # Flatten the gathered buffers, trimming each back to its true length.
        timesteps = [
            x.item() for y, bs in zip(timestep_batches, batch_sizes) for x in y[:bs]
        ]
        losses = [x.item() for y, bs in zip(loss_batches, batch_sizes) for x in y[:bs]]
        self.update_with_all_losses(timesteps, losses)

    @abstractmethod
    def update_with_all_losses(self, ts, losses):
        """
        Update the reweighting using losses from a model.

        Sub-classes should override this method to update the reweighting
        using losses from the model.

        This method directly updates the reweighting without synchronizing
        between workers. It is called by update_with_local_losses from all
        ranks with identical arguments. Thus, it should have deterministic
        behavior to maintain state across workers.

        :param ts: a list of int timesteps.
        :param losses: a list of float losses, one per timestep.
        """
class LossSecondMomentResampler(LossAwareSampler):
    """
    Samples timesteps proportionally to the square root of the second moment
    of each timestep's recent losses, mixed with a small uniform probability
    so every timestep stays reachable.
    """

    def __init__(self, diffusion, history_per_term=10, uniform_prob=0.001):
        self.diffusion = diffusion
        self.history_per_term = history_per_term
        self.uniform_prob = uniform_prob
        # Rolling window of the last `history_per_term` losses per timestep.
        self._loss_history = np.zeros(
            [diffusion.num_timesteps, history_per_term], dtype=np.float64
        )
        # `np.int` was deprecated in NumPy 1.20 and removed in 1.24; use the
        # explicit fixed-width integer type instead.
        self._loss_counts = np.zeros([diffusion.num_timesteps], dtype=np.int64)

    def weights(self):
        if not self._warmed_up():
            # Sample uniformly until every timestep has a full loss history.
            return np.ones([self.diffusion.num_timesteps], dtype=np.float64)
        weights = np.sqrt(np.mean(self._loss_history ** 2, axis=-1))
        weights /= np.sum(weights)
        weights *= 1 - self.uniform_prob
        weights += self.uniform_prob / len(weights)
        return weights

    def update_with_all_losses(self, ts, losses):
        for t, loss in zip(ts, losses):
            if self._loss_counts[t] == self.history_per_term:
                # Shift out the oldest loss term.
                self._loss_history[t, :-1] = self._loss_history[t, 1:]
                self._loss_history[t, -1] = loss
            else:
                self._loss_history[t, self._loss_counts[t]] = loss
                self._loss_counts[t] += 1

    def _warmed_up(self):
        # True once every timestep has accumulated a full history window.
        return (self._loss_counts == self.history_per_term).all()
| 5,689 | 35.709677 | 87 | py |
flexible-video-diffusion-modeling | flexible-video-diffusion-modeling-main/improved_diffusion/carla_regressor.py | from email.mime import base
import os
from pathlib import Path
import argparse
import numpy as np
import torch as th
from torchvision import transforms
import torchvision
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
import copy
import wandb
from improved_diffusion.script_util import str2bool
import multiprocessing as mp
from functools import partial
def video_to_image(fname, video_path, frame_path):
    """Split one saved video (and its coordinate track) into per-frame files.

    :param fname: video file name, e.g. ``video_0.pt``.
    :param video_path: directory holding ``video_*.pt`` / ``coords_*.npy``.
    :param frame_path: output directory for the individual ``.npy`` frames.
    """
    coords_fname = fname.replace('.pt', '.npy').replace('video_', 'coords_')
    frames = th.load(video_path / fname).numpy()
    positions = np.load(video_path / coords_fname)
    print("Processing video:", str(video_path / fname))
    for idx, (frame, position) in enumerate(zip(frames, positions)):
        out_frame = fname.replace('.pt', f'_frame_{idx}.npy')
        out_coord = coords_fname.replace('.npy', f'_frame_{idx}.npy')
        np.save(frame_path / out_frame, frame)
        np.save(frame_path / out_coord, position)
def get_cell(coord):
    """Map a 2-D world coordinate to its flattened 10x10 grid-cell index.

    Returns the (usually single-element) array of non-zero histogram bins.
    """
    grid_range = [[-10, 400], [-10, 400]]  # range is specific to Carla Town01
    hist, _, _ = np.histogram2d([coord[0]], [coord[1]], bins=10, range=grid_range)
    occupied = hist.reshape(-1).nonzero()[0]
    return occupied
class CarlaRegressorDataset(th.utils.data.Dataset):
    """Per-frame dataset for the Carla coordinate regressor.

    Yields (image, 2-D target coordinate, grid cell) triples.
    """

    def __init__(self, train, path, transforms=None):
        """
        Uses video directory like
        ```
        dataset/
            video_0.pt
            coords_0.npy
            ...
        ```
        Videos in this directory are split and saved into a subfolder like:
        ```
        dataset/
            individual-frames/
                train/
                    video_0_frame_0.npy
                    coords_0_frame_0.npy
                    video_0_frame_1.npy
                    ...
                test/
                    ...
        ```
        """
        super().__init__()
        self.train = train
        self.transforms = transforms
        self.video_path = Path(path)
        self.path = self.video_path / 'individual-frames' / ("train" if train else "test")
        video_split_path = self.video_path / f"video_{'train' if train else 'test'}.csv"
        self.video_fnames = [line.rstrip('\n').split('/')[-1] for line in open(video_split_path, 'r').readlines() if '.pt' in line]
        self.videos_to_images()
        self.paths = list(self.path.glob("video_*.npy"))

    def videos_to_images(self):
        """Split videos into per-frame files (no-op if already done)."""
        self.path.mkdir(exist_ok=True, parents=True)
        # Count already-split videos by their first frame file.
        videos_done = len(list(self.path.glob("video_*_frame_0.npy")))
        if videos_done == len(self.video_fnames):
            return
        try:
            n_cpus = len(os.sched_getaffinity(0))
        except (AttributeError, OSError):
            # sched_getaffinity is Linux-only; fall back to the CPU count.
            n_cpus = mp.cpu_count()
        # Close the pool once the work is done (the original leaked it).
        with mp.Pool(processes=n_cpus) as pool:
            pool.map(
                partial(video_to_image, video_path=self.video_path, frame_path=self.path),
                self.video_fnames
            )

    def __len__(self):
        return len(self.paths)

    def __getitem__(self, idx):
        # Load the frame image and its coordinate target.
        img_path = self.paths[idx]
        target_path = str(img_path).replace('video', 'coords')
        img = np.load(img_path)
        target = np.load(target_path)
        target = target[[0, 1]]  # use only 2D position
        if self.transforms is not None:
            img = self.transforms(img)
        return img, target, get_cell(target)
# NOTE(review): these three module-level flags appear unused — train() reads
# its configuration from parse_train_args() instead. Kept for compatibility.
with_classifier = False
model = 'resnet18'
with_transforms = True
# Base preprocessing: tensor conversion plus ImageNet mean/std normalization
# (the convention used by the pretrained torchvision backbones below).
base_data_transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
def make_dataloaders(data_dir, with_transforms, batch_size):
    # Build shuffled train/test DataLoaders over the per-frame Carla dataset.
    # with_transforms adds train-time augmentation (blur + color jitter)
    # before the base normalization; the test loader always uses the base
    # transform only.
    if with_transforms:
        train_transform = transforms.Compose([
            transforms.ToPILImage(),
            transforms.GaussianBlur(kernel_size=(5, 9), sigma=(0.1, 5)),
            transforms.ColorJitter(brightness=.1, hue=.1),
            base_data_transform,
        ])
    else:
        train_transform = base_data_transform
    train_dataset = CarlaRegressorDataset(train=True, path=data_dir, transforms=train_transform)
    test_dataset = CarlaRegressorDataset(train=False, path=data_dir, transforms=base_data_transform)
    make_loader = lambda dataset: th.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=True, num_workers=2)
    train_loader = make_loader(train_dataset)
    test_loader = make_loader(test_dataset)
    return {'train': train_loader, 'test': test_loader}
class MultiHeadEfficientNet_b7(nn.Module):
    """EfficientNet-B7 backbone with 100 per-cell 2-D regression heads."""

    @staticmethod
    def make_last_layer(in_dim, out_dim):
        """Dropout + linear projection used for each regression head."""
        return nn.Sequential(
            nn.Dropout(p=0.5, inplace=False),
            nn.Linear(in_dim, out_dim))

    def __init__(self, pretrained=True):
        super().__init__()
        self.efficientnet_b7 = torchvision.models.efficientnet_b7(pretrained=pretrained)
        # Drop the classification head so the backbone emits 2560-d features.
        self.efficientnet_b7.classifier = nn.Identity()
        self.regressors = nn.ModuleList([self.make_last_layer(2560, 2) for i in range(100)])

    def forward(self, inputs, cells):
        """Embed each input and apply the regression head chosen by its cell."""
        features = self.efficientnet_b7(inputs)
        per_sample = [self.regressors[cell](features[i]) for i, cell in enumerate(cells)]
        return th.stack(per_sample)
class MultiHeadResNet152(nn.Module):
    """ResNet-152 backbone with one 2-D linear regression head per grid cell."""

    def __init__(self, pretrained=True):
        super().__init__()
        self.resnet = torchvision.models.resnet152(pretrained=pretrained)
        in_features = self.resnet.fc.in_features
        # Replace the final FC layer so the backbone exposes raw features.
        self.resnet.fc = nn.Identity()
        self.regressors = nn.ModuleList([nn.Linear(in_features, 2) for i in range(100)])

    def forward(self, inputs, cells):
        """Embed each input and regress with the head selected by its cell."""
        features = self.resnet(inputs)
        per_sample = [self.regressors[cell](features[i]) for i, cell in enumerate(cells)]
        return th.stack(per_sample)
def get_resnet152_classifier(pretrained=True):
    """ResNet-152 with its final layer replaced by a 100-way cell classifier."""
    classifier = torchvision.models.resnet152(pretrained=pretrained)
    classifier.fc = nn.Linear(classifier.fc.in_features, 100)
    return classifier
def get_efficientnet_b7_classifier(pretrained=True):
    """EfficientNet-B7 with its classifier replaced by a 100-way cell head."""
    classifier = torchvision.models.efficientnet_b7(pretrained=pretrained)
    classifier.classifier = nn.Linear(2560, 100)
    return classifier
def set_up_model(is_classifier, model_name, device, pretrained=True):
    """Build the requested classifier or multi-head regressor on `device`.

    :param is_classifier: build the 100-way cell classifier instead of the
        multi-headed coordinate regressor.
    :param model_name: 'resnet152' or 'efficientnet_b7'.
    :raises ValueError: for any other model name.
    """
    if model_name == 'resnet152':
        builder = get_resnet152_classifier if is_classifier else MultiHeadResNet152
    elif model_name == 'efficientnet_b7':
        builder = get_efficientnet_b7_classifier if is_classifier else MultiHeadEfficientNet_b7
    else:
        raise ValueError('Unknown model')
    return builder(pretrained=pretrained).to(device)
def parse_train_args():
    """Parse command-line options for the regressor/classifier training run."""
    parser = argparse.ArgumentParser()
    add = parser.add_argument
    add('--model', type=str, default='resnet152', help='Model to use')
    add('--is_classifier', type=str2bool, default=False, help='Train classifier vs the multi-headed regressor.')
    add('--data_dir', type=str, default=None)
    add('--with_transforms', type=str2bool, default=True)
    add('--lr', type=float, default=0.001, help='Learning rate')
    add('--num_epochs', type=int, default=25, help='Number of epochs')
    add('--batch_size', type=int, default=4, help='Batch size')
    add('--device', type=str, default='cuda:0', help='Device to use')
    return parser.parse_args()
def train():
    """Train the Carla cell classifier or coordinate regressor.

    Reads configuration from the command line, logs to wandb, keeps the
    weights with the best test loss, and returns the best model.
    """
    args = parse_train_args()
    wandb.init(project=os.environ['WANDB_PROJECT'], entity=os.environ['WANDB_ENTITY'],
               config=args)
    args.device = th.device("cuda:0" if th.cuda.is_available() else "cpu")
    model = set_up_model(args.is_classifier, args.model, args.device)
    optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=0.9)
    scheduler = lr_scheduler.StepLR(optimizer, step_size=7, gamma=0.1)  # Decay LR by a factor of 0.1 every 7 epochs
    dataloaders = make_dataloaders(args.data_dir, args.with_transforms, args.batch_size)
    best_model_wts = copy.deepcopy(model.state_dict())
    # np.float was removed in NumPy 1.24; the builtin float is equivalent here.
    best_loss = float('inf')
    for epoch in range(args.num_epochs):
        losses = {}
        # Each epoch has a training and validation phase
        for phase in ['train', 'test']:
            if phase == 'train':
                model.train()  # Set model to training mode
            else:
                model.eval()  # Set model to evaluate mode
            running_loss = 0.0
            # Iterate over data.
            for inputs, coords, cells in dataloaders[phase]:
                inputs = inputs.to(args.device)
                coords = coords.to(args.device).float()
                cells = cells.to(args.device)
                optimizer.zero_grad()
                with th.set_grad_enabled(phase == 'train'):
                    outputs = model(inputs) if args.is_classifier else model(inputs, cells)
                    if args.is_classifier:
                        # NOTE(review): BCELoss over sigmoid outputs with
                        # integer cell indices as targets looks wrong for a
                        # 100-way classifier — CrossEntropyLoss over the cell
                        # index is the usual formulation. Left unchanged to
                        # preserve behavior; verify before use.
                        loss = nn.BCELoss()(nn.Sigmoid()(outputs), cells)
                    else:
                        loss = nn.MSELoss()(outputs, coords)
                    # backward + optimize only if in training phase
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()
                running_loss += loss.item() * inputs.size(0)
            if phase == 'train':
                scheduler.step()
            epoch_loss = running_loss / len(dataloaders[phase].dataset)
            losses[phase] = epoch_loss
            # deep copy the model
            if phase == 'test' and epoch_loss < best_loss:
                best_loss = epoch_loss
                best_model_wts = copy.deepcopy(model.state_dict())
                th.save(model.state_dict(), os.path.join(wandb.run.dir, f"model_{epoch}.pth"))
                wandb.save(os.path.join(wandb.run.dir, f"model_{epoch}.pth"))
        wandb.log({f"{k}_loss": v for k, v in losses.items()})
    print(f'Best val loss: {best_loss:4f}')
    # load best model weights
    model.load_state_dict(best_model_wts)
    return model
def load_classifier_regressor_like_paper(classifier_path, regressor_path, device):
    """Load the pretrained ResNet-152 cell classifier / multi-head regressor
    pair from checkpoint files and put both in eval mode on `device`."""
    classifier = set_up_model(is_classifier=True, model_name='resnet152', device=device, pretrained=False)
    regressor = set_up_model(is_classifier=False, model_name='resnet152', device=device, pretrained=False)
    for network, checkpoint in ((classifier, classifier_path), (regressor, regressor_path)):
        network.load_state_dict(th.load(checkpoint))
        network.eval()
    return classifier.to(device), regressor.to(device)
@th.no_grad()
def predict_coord_batch(frames, classifier, regressor):
    """Predict coordinates for one batch: classify each frame's grid cell,
    then regress with the matching head. The result is returned on the
    frames' original device."""
    source_device = frames.device
    model_device = next(classifier.parameters()).device
    batch = frames.to(model_device)
    predicted_cells = classifier(batch).argmax(dim=1)
    return regressor(batch, predicted_cells).to(source_device)
def predict_coords(frames, classifier, regressor, batch_size):
    # Predict coordinates for all frames, consuming them in chunks of
    # batch_size to bound memory; chunk results are concatenated in order.
    coords = []
    while len(frames) > 0:
        some_frames = frames[:batch_size]
        frames = frames[batch_size:]
        coords.append(predict_coord_batch(some_frames, classifier, regressor))
    return th.cat(coords, dim=0) | 11,511 | 35.546032 | 132 | py |
flexible-video-diffusion-modeling | flexible-video-diffusion-modeling-main/improved_diffusion/losses.py | """
Helpers for various likelihood-based losses. These are ported from the original
Ho et al. diffusion models codebase:
https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/utils.py
"""
import numpy as np
import torch as th
def normal_kl(mean1, logvar1, mean2, logvar2):
    """
    Compute the KL divergence between two gaussians.

    Shapes are automatically broadcasted, so batches can be compared to
    scalars, among other use cases.
    """
    ref = next((a for a in (mean1, logvar1, mean2, logvar2) if isinstance(a, th.Tensor)), None)
    assert ref is not None, "at least one argument must be a Tensor"
    # Force variances to be Tensors. Broadcasting helps convert scalars to
    # Tensors, but it does not work for th.exp().
    logvar1, logvar2 = [
        lv if isinstance(lv, th.Tensor) else th.tensor(lv).to(ref)
        for lv in (logvar1, logvar2)
    ]
    var_ratio = th.exp(logvar1 - logvar2)
    scaled_sq_diff = ((mean1 - mean2) ** 2) * th.exp(-logvar2)
    return 0.5 * (-1.0 + logvar2 - logvar1 + var_ratio + scaled_sq_diff)
def approx_standard_normal_cdf(x):
    """
    A fast approximation of the cumulative distribution function of the
    standard normal (tanh-based, as in Ho et al.'s diffusion codebase).
    """
    cubic = x + 0.044715 * th.pow(x, 3)
    return 0.5 * (1.0 + th.tanh(np.sqrt(2.0 / np.pi) * cubic))
def discretized_gaussian_log_likelihood(x, *, means, log_scales):
    """
    Compute the log-likelihood of a Gaussian distribution discretizing to a
    given image.

    :param x: the target images. It is assumed that this was uint8 values,
              rescaled to the range [-1, 1].
    :param means: the Gaussian mean Tensor.
    :param log_scales: the Gaussian log stddev Tensor.
    :return: a tensor like x of log probabilities (in nats).
    """
    assert x.shape == means.shape == log_scales.shape
    centered_x = x - means
    inv_stdv = th.exp(-log_scales)
    # CDF evaluated at the upper and lower edges of the 1/255-wide pixel bin.
    plus_in = inv_stdv * (centered_x + 1.0 / 255.0)
    cdf_plus = approx_standard_normal_cdf(plus_in)
    min_in = inv_stdv * (centered_x - 1.0 / 255.0)
    cdf_min = approx_standard_normal_cdf(min_in)
    # Clamp before taking logs to avoid -inf from numerically-zero masses.
    log_cdf_plus = th.log(cdf_plus.clamp(min=1e-12))
    log_one_minus_cdf_min = th.log((1.0 - cdf_min).clamp(min=1e-12))
    cdf_delta = cdf_plus - cdf_min
    # Edge pixels (x = -1 or x = 1) integrate the entire tail rather than a
    # single bin.
    log_probs = th.where(
        x < -0.999,
        log_cdf_plus,
        th.where(x > 0.999, log_one_minus_cdf_min, th.log(cdf_delta.clamp(min=1e-12))),
    )
    assert log_probs.shape == x.shape
    return log_probs
| 2,534 | 31.5 | 109 | py |
flexible-video-diffusion-modeling | flexible-video-diffusion-modeling-main/improved_diffusion/rng_util.py | import random
import torch as th
import numpy as np
def set_random_seed(seed):
    """Seed the Python, torch (CPU and all CUDA devices) and numpy RNGs.

    Each generator gets a distinct offset of the base seed so their streams
    are not seeded identically.
    """
    random.seed(seed + 0)
    th.manual_seed(seed + 1)
    th.cuda.manual_seed_all(seed + 2)
    np.random.seed(seed + 3)
def get_random_state():
    """Snapshot the state of every RNG touched by set_random_seed.

    Returns a dict with keys "python", "torch", "cuda" and "numpy".
    """
    snapshot = {}
    snapshot["python"] = random.getstate()
    snapshot["torch"] = th.get_rng_state()
    snapshot["cuda"] = th.cuda.get_rng_state_all()
    snapshot["numpy"] = np.random.get_state()
    return snapshot
def set_random_state(state):
    """Restore RNG states previously captured by get_random_state."""
    random.setstate(state["python"])
    th.set_rng_state(state["torch"])
    th.cuda.set_rng_state_all(state["cuda"])
    np.random.set_state(state["numpy"])
class RNG():
    """A context manager encapsulating an independent RNG stream for python,
    numpy and torch (CPU + CUDA).

    Inside the ``with`` block the global RNGs use this object's state; on exit
    the surrounding global state is restored and this object's state is saved,
    so repeated uses of one RNG object continue its own stream.
    """
    def __init__(self, seed=None, state=None):
        # Start from the current global state, then (within our own context,
        # so the surrounding stream is untouched) optionally reseed or
        # overwrite it.
        self.state = get_random_state()
        with self:
            if seed is not None:
                set_random_seed(seed)
            elif state is not None:
                set_random_state(state)
    def __enter__(self):
        # Swap in: remember the surrounding state, install ours.
        self.external_state = get_random_state()
        set_random_state(self.state)
    def __exit__(self, *args):
        # Swap out: persist our (advanced) state, restore the surrounding one.
        self.state = get_random_state()
        set_random_state(self.external_state)
    def get_state(self):
        return self.state
    def set_state(self, state):
        self.state = state
class rng_decorator():
    """Decorator that runs the wrapped function inside an isolated RNG seeded
    with ``seed``, restoring the caller's RNG streams afterwards."""
    def __init__(self, seed):
        self.seed = seed
    def __call__(self, f):
        # A fresh RNG(seed) is created per call, so every invocation sees the
        # same deterministic stream.
        def wrapped_f(*args, **kwargs):
            with RNG(self.seed):
                return f(*args, **kwargs)
        return wrapped_f | 1,449 | 20.969697 | 48 | py |
flexible-video-diffusion-modeling | flexible-video-diffusion-modeling-main/improved_diffusion/test_util.py | import os
from filelock import FileLock
from pathlib import Path
import torch as th
import numpy as np
from PIL import Image
import imageio
class Protect(FileLock):
    """File-based lock preventing race conditions on access to ``path``.

    The FileLock path is inferred automatically: the lock file is created
    next to the protected file as ``<name>.lock``.
    """
    def __init__(self, path, timeout=2, **kwargs):
        target = Path(path)
        super().__init__(target.parent / f"{target.name}.lock", timeout=timeout, **kwargs)
def get_model_results_path(args):
    """
    Given arguments passed to an evaluation run, returns the path to the results path.
    The path has the format "results/<checkpoint_dir_subpath>/checkpoint name" where
    <checkpoint_dir_subpath> is the subset of checkpoint path after ".*checkpoint.*/"
    For example, if "/scratch/video-diffusion/saeids-checkpoints/abcdefg/ema_latest.pt"
    is the checkpoint path, the result path will be
    "results/abcdefg/ema_latest_<checkpoint_step>/". In this path, <checkpoint_step> is the
    training step of the checkpoint and will only be added if the checkpoint path ends with
    "latest", since otherwise the checkpoint name itself ends with the step number.
    If args.eval_dir is not None, this function does nothing and returns the same path.
    args is expected to have the following attributes:
    - use_ddim
    - timesptep_respacing
    - outdir
    """
    # Extract the diffusion sampling arguments string (DDIM/respacing)
    postfix = ""
    if args.use_ddim:
        postfix += "_ddim"
    if args.timestep_respacing != "":
        postfix += "_" + f"respace{args.timestep_respacing}"
    # Create the output directory (if does not exist)
    if args.eval_dir is None:
        checkpoint_path = Path(args.checkpoint_path)
        name = f"{checkpoint_path.stem}"
        if name.endswith("latest"):
            # "latest" checkpoints carry no step in their name, so read the
            # training step out of the checkpoint file itself.
            checkpoint_step = th.load(args.checkpoint_path, map_location="cpu")["step"]
            name += f"_{checkpoint_step}"
        if postfix != "":
            name += postfix
        # Keep only the path components after the directory whose name
        # contains "checkpoint".
        path = None
        for idx, x in enumerate(checkpoint_path.parts):
            if "checkpoint" in x:
                path = Path(*(checkpoint_path.parts[idx+1:]))
                break
        assert path is not None
        return Path("results") / path.parent / name
    else:
        return Path(args.eval_dir)
def get_eval_run_identifier(args):
    """Build the run-identifier string for an evaluation configuration.

    Format: [trainset_]<scheme>[_optimal-<x>]_<max_frames>_<max_latent_frames>_<T>_<n_obs>
    """
    parts = [args.sampling_scheme]
    if getattr(args, "optimality", None) is not None:
        parts.append(f"optimal-{args.optimality}")
    parts.append(f"{args.max_frames}_{args.max_latent_frames}_{args.T}_{args.n_obs}")
    identifier = "_".join(parts)
    if getattr(args, "dataset_partition", None) == "train":
        identifier = "trainset_" + identifier
    return identifier
################################################################################
# Visualization functions #
################################################################################
def mark_as_observed(images, color=(255, 0, 0)):
    """Draw a 1-pixel colored border (at rows/cols 1 and -2) on each image,
    marking it as an observed frame. Mutates `images` in place.

    :param images: array/tensor shaped (..., C, H, W).
    :param color: per-channel border values. Changed from a mutable list
        default to a tuple; callers may still pass any sequence.
    """
    for channel, value in enumerate(color):
        images[..., channel, :, 1:2] = value
        images[..., channel, 1:2, :] = value
        images[..., channel, :, -2:-1] = value
        images[..., channel, -2:-1, :] = value
def tensor2pil(tensor, drange=[0, 1]):
    """Given a tensor of shape (Bx)3xwxh with pixel values in drange, returns a PIL image
    of the tensor. Returns a list of images if the input tensor is a batch.

    Args:
        tensor: A tensor of shape (Bx)3xwxh
        drange (list, optional): Range of pixel values in the input tensor. Defaults to [0,1].
    """
    assert tensor.ndim in (3, 4)
    if tensor.ndim == 3:
        # Promote a single image to a batch of one and unwrap the result.
        return tensor2pil(tensor.unsqueeze(0), drange=drange)[0]
    lo, hi = drange
    arrays = tensor.cpu().numpy().transpose([0, 2, 3, 1])  # BCHW -> BHWC
    arrays = ((arrays - lo) / (hi - lo) * 255).astype(np.uint8)
    return [Image.fromarray(arr) for arr in arrays]
def tensor2gif(tensor, path, drange=[0, 1], random_str=""):
frames = tensor2pil(tensor, drange=drange)
tmp_path = f"/tmp/tmp_{random_str}.png"
res = []
for frame in frames:
frame.save(tmp_path)
res.append(imageio.imread(tmp_path))
imageio.mimsave(path, res)
def tensor2mp4(tensor, path, drange=[0, 1], random_str=""):
gif_path = f"/tmp/tmp_{random_str}.gif"
tensor2gif(tensor, path=gif_path, drange=drange, random_str=random_str)
os.system(f"ffmpeg -y -hide_banner -loglevel error -i {gif_path} -r 10 -movflags faststart -pix_fmt yuv420p -vf \"scale=trunc(iw/2)*2:trunc(ih/2)*2\" {path}") | 4,757 | 41.864865 | 162 | py |
flexible-video-diffusion-modeling | flexible-video-diffusion-modeling-main/improved_diffusion/video_datasets.py | import os
import numpy as np
import torch as th
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import ToTensor
from pathlib import Path
import shutil
from mpi4py import MPI
from .test_util import Protect
# Root directories of the supported video datasets (relative to the repo or
# to DATA_ROOT when that environment variable is set).
video_data_paths_dict = {
    "minerl": "datasets/minerl_navigate-torch",
    "mazes_cwvae": "datasets/gqn_mazes-torch",
    "carla_no_traffic": "datasets/carla/no-traffic",
}
# Default video length (number of frames) per dataset.
default_T_dict = {
    "minerl": 500,
    "mazes_cwvae": 300,
    "carla_no_traffic": 1000,
}
# Default square frame resolution per dataset.
default_image_size_dict = {
    "minerl": 64,
    "mazes_cwvae": 64,
    "carla_no_traffic": 128,
}
def load_data(dataset_name, batch_size, T=None, deterministic=False, num_workers=1, return_dataset=False):
    """Build the training dataset for `dataset_name` and return either the
    Dataset itself or an infinite, MPI-sharded loader generator over it.

    :param dataset_name: "minerl", "mazes_cwvae" or "carla_no_traffic".
    :param batch_size: loader batch size (unused when return_dataset=True).
    :param T: video length; defaults to the dataset's default.
    :param deterministic: if True, the loader does not shuffle.
    :param num_workers: DataLoader worker count.
    :param return_dataset: if True, return the Dataset instead of a loader.

    NOTE(review): the original contained ``yield from`` directly in this
    function, making it a generator function, so ``return dataset`` could
    never actually hand the dataset back. The infinite-loader part now lives
    in a helper so both return paths work.
    """
    data_path = video_data_paths_dict[dataset_name]
    T = default_T_dict[dataset_name] if T is None else T
    shard = MPI.COMM_WORLD.Get_rank()
    num_shards = MPI.COMM_WORLD.Get_size()
    if dataset_name == "minerl":
        data_path = os.path.join(data_path, "train")
        dataset = MineRLDataset(data_path, shard=shard, num_shards=num_shards, T=T)
    elif dataset_name == "mazes_cwvae":
        data_path = os.path.join(data_path, "train")
        dataset = GQNMazesDataset(data_path, shard=shard, num_shards=num_shards, T=T)
    elif dataset_name == "carla_no_traffic":
        dataset = CarlaDataset(train=True, path=data_path, shard=shard, num_shards=num_shards, T=T)
    else:
        raise Exception("no dataset", dataset_name)
    if return_dataset:
        return dataset
    return _infinite_loader(dataset, batch_size, deterministic, num_workers)


def _infinite_loader(dataset, batch_size, deterministic, num_workers):
    # Cycle forever over the dataset, reshuffling each pass (unless
    # deterministic) and dropping incomplete trailing batches.
    loader = DataLoader(
        dataset, batch_size=batch_size, shuffle=(not deterministic), num_workers=num_workers, drop_last=True
    )
    while True:
        yield from loader
def get_train_dataset(dataset_name, T=None):
    """Return the training Dataset object (not a loader) for `dataset_name`.

    Fixed: this must call load_data with return_dataset=True — the loader
    arguments below are dummies (num_workers=None would not make a valid
    DataLoader), so only the dataset-returning path is meaningful.
    """
    return load_data(
        dataset_name, return_dataset=True, T=T,
        batch_size=None, deterministic=None, num_workers=None
    )
def get_test_dataset(dataset_name, T=None):
    # Build the held-out test split for the given dataset and mark it as a
    # test dataset (so fixed-start subsequences are used; see
    # BaseDataset.get_video_subsequence).
    if dataset_name == "mazes":
        raise Exception('Deprecated dataset.')
    # DATA_ROOT, when set, points at a local cache the datasets are copied to.
    data_root = Path(os.environ["DATA_ROOT"] if "DATA_ROOT" in os.environ and os.environ["DATA_ROOT"] != "" else ".")
    data_path = data_root / video_data_paths_dict[dataset_name]
    T = default_T_dict[dataset_name] if T is None else T
    if dataset_name == "minerl":
        data_path = os.path.join(data_path, "test")
        dataset = MineRLDataset(data_path, shard=0, num_shards=1, T=T)
    elif dataset_name == "mazes_cwvae":
        data_path = os.path.join(data_path, "test")
        dataset = GQNMazesDataset(data_path, shard=0, num_shards=1, T=T)
    elif dataset_name == "carla_no_traffic":
        dataset = CarlaDataset(train=False, path=data_path, shard=0, num_shards=1, T=T)
    else:
        raise Exception("no dataset", dataset_name)
    dataset.set_test()
    return dataset
class BaseDataset(Dataset):
    """ The base class for our video datasets. It is used for datasets where each video is stored under <dataset_root_path>/<split>
    as a single file. This class provides the ability of caching the dataset items in a temporary directory (if
    specified as an environment variable DATA_ROOT) as the items are read. In other words, every time an item is
    retrieved from the dataset, it will try to load it from the temporary directory first. If it is not found, it
    will be first copied from the original location.

    This class provides a default implementation for __len__ as the number of file in the dataset's original directory.
    It also provides the following two helper functions:
    - cache_file: Given a path to a dataset file, makes sure the file is copied to the temporary directory. Does
      nothing unless DATA_ROOT is set.
    - get_video_subsequence: Takes a video and a video length as input. If the video length is smaller than the
      input video's length, it returns a random subsequence of the video. Otherwise, it returns the whole video.

    A child class should implement the following methods:
    - getitem_path: Given an index, returns the path to the video file.
    - loaditem: Given a path to a video file, loads and returns the video.
    - postprocess_video: Given a video, performs any postprocessing on the video.

    Args:
        path (str): path to the dataset split
    """
    def __init__(self, path, T):
        super().__init__()
        self.T = T  # clip length (frames) returned by __getitem__
        self.path = Path(path)
        self.is_test = False
    def __len__(self):
        # Counts files in the *source* directory, not the local cache.
        path = self.get_src_path(self.path)
        return len(list(path.iterdir()))
    def __getitem__(self, idx):
        path = self.getitem_path(idx)
        self.cache_file(path)
        try:
            video = self.loaditem(path)
        except Exception as e:
            # Surface which file failed before re-raising.
            print(f"Failed on loading {path}")
            raise e
        video = self.postprocess_video(video)
        # The empty dict is a placeholder for per-item conditioning kwargs.
        return self.get_video_subsequence(video, self.T), {}
    def getitem_path(self, idx):
        raise NotImplementedError
    def loaditem(self, path):
        raise NotImplementedError
    def postprocess_video(self, video):
        raise NotImplementedError
    def cache_file(self, path):
        # Given a path to a dataset item, makes sure that the item is cached in the temporary directory.
        # The copy is guarded by a file lock so concurrent workers don't race.
        if not path.exists():
            path.parent.mkdir(parents=True, exist_ok=True)
            src_path = self.get_src_path(path)
            with Protect(path):
                shutil.copyfile(str(src_path), str(path))
    @staticmethod
    def get_src_path(path):
        """ Returns the source path to a file. This function is mainly used to handle SLURM_TMPDIR on ComputeCanada.
        If DATA_ROOT is defined as an environment variable, the datasets are copied to it as they are accessed. This function is called
        when we need the source path from a given path under DATA_ROOT.
        """
        if "DATA_ROOT" in os.environ and os.environ["DATA_ROOT"] != "":
            # Verify that the path is under the data root.
            data_root = Path(os.environ["DATA_ROOT"])
            assert data_root in path.parents, f"Expected dataset item path ({path}) to be located under the data root ({data_root})."
            src_path = Path(*path.parts[len(data_root.parts):]) # drops the data_root part from the path, to get the relative path to the source file.
            return src_path
        return path
    def set_test(self):
        # Switch to test mode: get_video_subsequence then always starts at 0.
        self.is_test = True
        print('setting test mode')
    def get_video_subsequence(self, video, T):
        if T is None:
            return video
        if T < len(video):
            # Take a subsequence of the video.
            start_i = 0 if self.is_test else np.random.randint(len(video) - T + 1)
            video = video[start_i:start_i+T]
        assert len(video) == T
        return video
class CarlaDataset(BaseDataset):
    """CARLA driving videos, one ``.pt`` tensor file per video; the train/test
    split is defined by a ``video_{train,test}.csv`` file listing the videos.
    """
    def __init__(self, train, path, shard, num_shards, T):
        super().__init__(path=path, T=T)
        self.split_path = self.path / f"video_{'train' if train else 'test'}.csv"
        self.cache_file(self.split_path)
        self.fnames = [line.rstrip('\n').split('/')[-1] for line in open(self.split_path, 'r').readlines() if '.pt' in line]
        # Shard the file list across distributed workers.
        self.fnames = self.fnames[shard::num_shards]
        print(f"Loading {len(self.fnames)} files (Carla dataset).")
    def loaditem(self, path):
        return th.load(path)
    def getitem_path(self, idx):
        return self.path / self.fnames[idx]
    def postprocess_video(self, video):
        # Channels-last -> channels-first, rescaled from [0, 255] to [-1, 1].
        return -1 + 2 * (video.permute(0, 3, 1, 2).float()/255)
    def __len__(self):
        return len(self.fnames)
class GQNMazesDataset(BaseDataset):
    """ based on https://github.com/iShohei220/torch-gqn/blob/master/gqn_dataset.py .
    """
    def __init__(self, path, shard, num_shards, T):
        # Fixed copy-pasted assertion messages that referred to MineRL.
        assert shard == 0, "Distributed training is not supported by the GQN Mazes dataset yet."
        assert num_shards == 1, "Distributed training is not supported by the GQN Mazes dataset yet."
        super().__init__(path=path, T=T)
    def getitem_path(self, idx):
        return self.path / f"{idx}.npy"
    def loaditem(self, path):
        return np.load(path)
    def postprocess_video(self, video):
        # Convert uint8 frames to float tensors and rescale to [-1, 1].
        byte_to_tensor = lambda x: ToTensor()(x)
        video = th.stack([byte_to_tensor(frame) for frame in video])
        video = 2 * video - 1
        return video
class MineRLDataset(BaseDataset):
    """MineRL Navigate videos stored as one ``<idx>.npy`` array per video."""
    def __init__(self, path, shard, num_shards, T):
        assert shard == 0, "Distributed training is not supported by the MineRL dataset yet."
        assert num_shards == 1, "Distributed training is not supported by the MineRL dataset yet."
        super().__init__(path=path, T=T)
    def getitem_path(self, idx):
        return self.path / f"{idx}.npy"
    def loaditem(self, path):
        return np.load(path)
    def postprocess_video(self, video):
        # Map each uint8 frame to a float tensor in [-1, 1].
        as_tensors = [ToTensor()(frame) for frame in video]
        return 2 * th.stack(as_tensors) - 1
| 9,109 | 38.267241 | 150 | py |
flexible-video-diffusion-modeling | flexible-video-diffusion-modeling-main/improved_diffusion/sampling_schemes.py | import numpy as np
import torch as th
try:
    import lpips

    class LpipsEmbedder(lpips.LPIPS):
        """ Similar to lpips.LPIPS, but takes a single batch of images and returns an embedding
        such that the LPIPS distance between two images is the squared distance between these embeddings.
        Whereas lpips.LPIPS takes two images and directly returns the LPIPS distance.
        """
        def not_spatial_average(self, in_tens, keepdim=True):
            # Flatten each feature map and scale by 1/sqrt(H*W), so summed
            # squared differences between embeddings reproduce LPIPS' spatial
            # averaging.
            B, C, H, W = in_tens.shape
            reshaped = in_tens.view(B, C*H*W, 1, 1) if keepdim else in_tens.view(B, C*H*W)
            return reshaped / (H*W)**0.5
        def scale_by_proj_weights(self, proj, x):
            # Fold the linear-probe weights into the features.
            # NOTE(review): taking **0.5 assumes the probe weights are
            # non-negative — verify against the lpips implementation.
            conv = proj.model[-1]
            proj_weights = conv.weight
            return (proj_weights**0.5) * x
        def forward(self, x):
            outs = self.net.forward(self.scaling_layer(x))
            feats = {}
            for kk in range(self.L):
                feats[kk] = lpips.normalize_tensor(outs[kk])
            res = [self.not_spatial_average(self.scale_by_proj_weights(self.lins[kk], feats[kk]), keepdim=True)
                   for kk in range(self.L)]
            return th.cat(res, dim=1)
except ImportError:
    # lpips is an optional dependency; only adaptive schemes need it.
    print("Could not import lpips. Adaptive sampling schemes will not work.")
class SamplingSchemeBase:
    def __init__(self, video_length: int, num_obs: int, max_frames: int, step_size: int, optimal_schedule_path=None):
        """ Sampling scheme base class. It provides an iterator that returns
        the indices of the frames that should be observed and the frames that should be generated.

        Args:
            video_length (int): Length of the videos.
            num_obs (int): Number of frames that are observed from the beginning of the video.
            max_frames (int): Maximum number of frames (observed or latent) that can be passed to the model in one shot.
            step_size (int): Number of frames to generate in each step.
            optimal_schedule_path (str): If you want to run this sampling scheme with an optimal schedule,
                pass in the path to the optimal schedule file here. Then, it will choose the observed
                frames based on the optimal schedule file. Otherwise, it will behave normally.
                The optimal schedule file is a .pt file containing a dictionary from step number to the
                list of frames that should be observed in that step.
        """
        # NOTE(review): self.typename is not set here — it is expected to be
        # provided by subclasses; confirm against the rest of the class.
        print_str = f"Inferring using the sampling scheme \"{self.typename}\""
        if optimal_schedule_path is not None:
            print_str += f", and the optimal schedule stored at {optimal_schedule_path}."
        else:
            print_str += "."
        print(print_str)
        self._video_length = video_length
        self._max_frames = max_frames
        self._num_obs = num_obs
        # Frames already available: the first num_obs frames are observed.
        self._done_frames = set(range(self._num_obs))
        self._obs_frames = list(range(self._num_obs))
        self._step_size = step_size
        self.optimal_schedule = None if optimal_schedule_path is None else th.load(optimal_schedule_path)
        self._current_step = 0  # Counts the number of steps.
        # When set (externally) to an int, __next__ replicates the index
        # lists into a batch of that size.
        self.B = None
def get_unconditional_indices(self):
return list(range(self._max_frames))
def __next__(self):
# Check if the video is fully generated.
if self.is_done():
raise StopIteration
unconditional = False
if self._num_obs == 0 and self._current_step == 0:
# Handles unconditional sampling by sampling a batch of self._max_frame latent frames in the first
# step, then proceeding as usual in the next steps.
obs_frame_indices = []
latent_frame_indices = self.get_unconditional_indices()
unconditional = True
else:
# Get the next indices from the function overloaded by each sampling scheme.
obs_frame_indices, latent_frame_indices = self.next_indices()
# If using the optimal schedule, overwrite the observed frames with the optimal schedule.
if self.optimal_schedule is not None:
if self._current_step not in self.optimal_schedule:
print(f"WARNING: optimal observations for prediction step #{self._current_step} was not found in the saved optimal schedule.")
obs_frame_indices = []
else:
obs_frame_indices = self.optimal_schedule[self._current_step]
# Type checks. Both observed and latent indices should be lists.
assert isinstance(obs_frame_indices, list) and isinstance(latent_frame_indices, list)
# Make sure the observed frames are either osbserved or already generated before
for idx in obs_frame_indices:
assert idx in self._done_frames, f"Attempting to condition on frame {idx} while it is not generated yet.\nGenerated frames: {self._done_frames}\nObserving: {obs_frame_indices}\nGenerating: {latent_frame_indices}"
assert np.all(np.array(latent_frame_indices) < self._video_length)
self._done_frames.update([idx for idx in latent_frame_indices if idx not in self._done_frames])
if unconditional:
# Allows the unconditional sampling to continue the next sampling stages as if it was a conditional model.
self._obs_frames = latent_frame_indices
self._current_step += 1
if self.B is not None:
obs_frame_indices = [obs_frame_indices]*self.B
latent_frame_indices = [latent_frame_indices]*self.B
return obs_frame_indices, latent_frame_indices
def is_done(self):
return len(self._done_frames) >= self._video_length
def __iter__(self):
self.step = 0
return self
def next_indices(self):
raise NotImplementedError
@property
def typename(self):
return type(self).__name__
def set_videos(self, videos):
self.B = len(videos)
class Autoregressive(SamplingSchemeBase):
    """Condition on the most recent completed frames, then generate the next
    `step_size` frames immediately after them."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
    def next_indices(self):
        if not self._done_frames:
            # Nothing observed or generated yet: start with a window of latents.
            return [], list(range(self._max_frames))
        n_obs = self._max_frames - self._step_size
        obs_frame_indices = sorted(self._done_frames)[-n_obs:]
        start = obs_frame_indices[-1] + 1
        stop = min(start + self._step_size, self._video_length)
        return obs_frame_indices, list(range(start, stop))
class LongRangeAutoregressive(SamplingSchemeBase):
    """Autoregressive generation that conditions on a mix of the newest
    completed frames and the latest of the originally observed frames."""
    def next_indices(self):
        budget = self._max_frames - self._step_size
        # Half of the conditioning budget goes to the most recently completed frames.
        chosen = set(sorted(self._done_frames)[-(budget // 2):])
        # Fill the remainder with observed frames, newest first.
        for frame in sorted(self._obs_frames, reverse=True):
            chosen.add(frame)
            if len(chosen) == budget:
                break
        obs_frame_indices = sorted(chosen)
        start = max(self._done_frames) + 1
        stop = min(start + self._step_size, self._video_length)
        return obs_frame_indices, list(range(start, stop))
class HierarchyNLevel(SamplingSchemeBase):
    """N-level hierarchical sampling: fill the video on a coarse evenly-spaced
    grid first, then on progressively finer grids until every frame is done."""
    def get_unconditional_indices(self):
        # First unconditional step: an evenly spaced grid across the whole video.
        self.current_level = 1
        self.last_sampled_idx = self._video_length - 1
        return list(int(i) for i in np.linspace(0, self._video_length-1, self._max_frames))
    @property
    def N(self):
        # Number of hierarchy levels; fixed by the subclasses built in get_hierarchy_n_level().
        raise NotImplementedError
    @property
    def sample_every(self):
        # Stride between latent frames at the current level; largest at level 1,
        # shrinking geometrically towards level N.
        sample_every_on_level_1 = (self._video_length - len(self._obs_frames)) / (self._step_size-1)
        return int(sample_every_on_level_1 ** ((self.N-self.current_level)/(self.N-1)))
    def next_indices(self):
        if len(self._done_frames) == 0:
            # Unconditional start: coarse grid over the full video.
            self.current_level = 1
            self.last_sampled_idx = self._video_length - 1
            return [], list(int(i) for i in np.linspace(0, self._video_length-1, self._max_frames))
        if len(self._done_frames) == len(self._obs_frames):
            # Nothing generated yet (only observations): begin at the coarsest level.
            self.current_level = 1
            self.last_sampled_idx = max(self._obs_frames)
        n_to_condition_on = self._max_frames - self._step_size
        n_to_sample = self._step_size
        # select the grid of latent_frame_indices (shifting by 1 to avoid already-existing frames)
        idx = self.last_sampled_idx + self.sample_every
        if len([i for i in range(idx, self._video_length) if i not in self._done_frames]) == 0:
            # reset if there are no frames left to sample after idx
            self.current_level += 1
            self.last_sampled_idx = 0
            idx = min(i for i in range(self._video_length) if i not in self._done_frames) - 1 + self.sample_every
        if self.current_level == 1:
            latent_frame_indices = list(int(i) for i in np.linspace(max(self._obs_frames)+1, self._video_length-0.001, n_to_sample))
        else:
            latent_frame_indices = []
            while len(latent_frame_indices) < n_to_sample and idx < self._video_length:
                if idx not in self._done_frames:
                    latent_frame_indices.append(idx)
                    idx += self.sample_every
                elif idx in self._done_frames:
                    idx += 1
        # observe any frames in between latent frames
        obs_frame_indices = [i for i in range(min(latent_frame_indices), max(latent_frame_indices)) if i in self._done_frames]
        obs_before_and_after = n_to_condition_on - len(obs_frame_indices)
        if obs_before_and_after < 2: # reduce step_size if necessary to ensure conditioning before + after latents
            if self._step_size == 1:
                raise Exception('Cannot condition before and after even with step size of 1')
            sample_every = self.sample_every  # NOTE(review): this local is never used
            self._step_size -= 1
            result = self.next_indices()
            self._step_size += 1
            return result
        max_n_after = obs_before_and_after // 2
        # observe `n_after` frames afterwards
        obs_frame_indices.extend([i for i in range(max(latent_frame_indices)+1, self._video_length) if i in self._done_frames][:max_n_after])
        n_before = n_to_condition_on - len(obs_frame_indices)
        # observe `n_before` frames before...
        if self.current_level == 1:
            obs_frame_indices.extend(list(np.linspace(0, max(self._obs_frames)+0.999, n_before).astype(np.int32)))
        else:
            obs_frame_indices.extend([i for i in range(min(latent_frame_indices)-1, -1, -1) if i in self._done_frames][:n_before])
        self.last_sampled_idx = max(latent_frame_indices)
        return obs_frame_indices, latent_frame_indices
    @property
    def typename(self):
        # e.g. "Hierarchy-3" for the subclasses produced by get_hierarchy_n_level(3).
        return f"{super().typename}-{self.N}"
class AdaptiveSamplingSchemeBase(SamplingSchemeBase):
    """Base class for schemes that pick conditioning frames adaptively using
    LPIPS feature distances between video frames."""
    def embed(self, indices):
        # NOTE(review): a fresh LpipsEmbedder is built on every call; confirm
        # whether caching it would be safe before using this in tight loops.
        net = LpipsEmbedder(net='alex', spatial=False).to(self.videos.device)
        embs = [net(self.videos[:, i]) for i in indices]
        return th.stack(embs, dim=1)
    def set_videos(self, videos):
        # Keep the raw videos so frames can be embedded and compared.
        self.videos = videos
    def select_obs_indices(self, possible_next_indices, n, always_selected=(0,)):
        """Greedy farthest-point selection of `n` frames per batch item in LPIPS space."""
        B = len(self.videos)
        embs = self.embed(possible_next_indices)
        batch_selected_indices = []
        for b in range(B):
            min_distances_from_selected = [np.inf for _ in possible_next_indices]
            selected_indices = [possible_next_indices[always_selected[0]]]
            selected_embs = [embs[b, always_selected[0]]] # always begin with next possible index
            for i in range(1, n):
                # update min_distances_from_selected
                for f, dist in enumerate(min_distances_from_selected):
                    dist_to_newest = ((selected_embs[-1] - embs[b][f])**2).sum().cpu().item()
                    min_distances_from_selected[f] = min(dist, dist_to_newest)
                if i < len(always_selected):
                    # Mandatory picks come first, regardless of distance.
                    best_index = always_selected[i]
                else:
                    # select one with maximum min_distance_from_selected
                    best_index = np.argmax(min_distances_from_selected)
                selected_indices.append(possible_next_indices[best_index])
                selected_embs.append(embs[b, best_index])
            batch_selected_indices.append(selected_indices)
        return batch_selected_indices
    def __next__(self):
        """Like SamplingSchemeBase.__next__, but obs indices are per-batch-item lists."""
        if self._num_obs == 0 and self._current_step == 0:
            # Unconditional sampling
            obs_frame_indices, latent_frame_indices = super().__next__()
            return [obs_frame_indices for _ in range(len(self.videos))], [latent_frame_indices for _ in range(len(self.videos))]
        # Check if the video is fully generated.
        if self.is_done():
            raise StopIteration
        # Get the next indices from the function overloaded by each sampling scheme.
        obs_frame_indices, latent_frame_indices = self.next_indices()
        # Type checks. Both observed and latent indices should be lists.
        assert isinstance(obs_frame_indices, list) and isinstance(latent_frame_indices, list)
        # Make sure the observed frames are either observed or already generated before
        for idx in np.array(obs_frame_indices).flatten():
            assert idx in self._done_frames, f"Attempting to condition on frame {idx} while it is not generated yet.\nGenerated frames: {self._done_frames}\nObserving: {obs_frame_indices}\nGenerating: {latent_frame_indices}"
        assert np.all(np.array(latent_frame_indices) < self._video_length)
        self._done_frames.update([idx for idx in latent_frame_indices if idx not in self._done_frames])
        self._current_step += 1
        return obs_frame_indices, [latent_frame_indices]*len(obs_frame_indices)
class AdaptiveAutoregressive(AdaptiveSamplingSchemeBase):
    """Autoregressive sampling whose conditioning frames are chosen adaptively
    (LPIPS farthest-point selection over all completed frames)."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
    def next_indices(self):
        if not self._done_frames:
            # First step: nothing to condition on; one empty list per video.
            return [[] for _ in self.videos], list(range(self._max_frames))
        start = max(self._done_frames) + 1
        stop = min(start + self._step_size, self._video_length)
        latent_frame_indices = list(range(start, stop))
        candidates = sorted(self._done_frames, reverse=True)
        obs_frame_indices = self.select_obs_indices(candidates, self._max_frames - self._step_size)
        return obs_frame_indices, latent_frame_indices
class AdaptiveHierarchyNLevel(AdaptiveSamplingSchemeBase, HierarchyNLevel):
    """Hierarchy-N sampling whose extra conditioning frames are chosen adaptively.

    The latent-frame grid logic mirrors HierarchyNLevel.next_indices; only the
    choice of conditioning frames differs: after pinning the closest completed
    frames around the latent span, the remaining conditioning budget is filled
    by LPIPS-based farthest-point selection (select_obs_indices).
    """
    def next_indices(self):
        if len(self._done_frames) == 0:
            # Unconditional start: coarse grid over the full video.
            self.current_level = 1
            self.last_sampled_idx = self._video_length - 1
            return [], list(int(i) for i in np.linspace(0, self._video_length-1, self._max_frames))
        if len(self._done_frames) == len(self._obs_frames):
            # Nothing generated yet (only observations): begin at the coarsest level.
            self.current_level = 1
            self.last_sampled_idx = max(self._obs_frames)
        n_to_condition_on = self._max_frames - self._step_size
        n_to_sample = self._step_size
        # select the grid of latent_frame_indices (shifting by 1 to avoid already-existing frames)
        idx = self.last_sampled_idx + self.sample_every
        if len([i for i in range(idx, self._video_length) if i not in self._done_frames]) == 0:
            # reset if there are no frames left to sample after idx
            self.current_level += 1
            self.last_sampled_idx = 0
            idx = min(i for i in range(self._video_length) if i not in self._done_frames) - 1 + self.sample_every
        if self.current_level == 1:
            latent_frame_indices = list(int(i) for i in np.linspace(max(self._obs_frames)+1, self._video_length-0.001, n_to_sample))
        else:
            latent_frame_indices = []
            while len(latent_frame_indices) < n_to_sample and idx < self._video_length:
                if idx not in self._done_frames:
                    latent_frame_indices.append(idx)
                    idx += self.sample_every
                elif idx in self._done_frames:
                    idx += 1
        # observe any frames in between latent frames
        obs_frame_indices = [i for i in range(min(latent_frame_indices), max(latent_frame_indices)) if i in self._done_frames]
        obs_before_and_after = n_to_condition_on - len(obs_frame_indices)
        if obs_before_and_after < 2:  # reduce step_size if necessary to ensure conditioning before + after latents
            if self._step_size == 1:
                raise Exception('Cannot condition before and after even with step size of 1')
            # Retry with a smaller step size, then restore it.
            self._step_size -= 1
            result = self.next_indices()
            self._step_size += 1
            return result
        # observe closest frame before and after latent frames
        i = min(latent_frame_indices)
        while i not in self._done_frames:
            i -= 1
        obs_frame_indices.append(i)
        # actually closest two before...
        i -= 1
        while i not in self._done_frames:
            i -= 1
        obs_frame_indices.append(i)
        i = max(latent_frame_indices)
        while i not in self._done_frames and i < self._video_length:
            i += 1
        if i < self._video_length:
            obs_frame_indices.append(i)
        # select which other frames to use adaptively
        possible_next_indices = list(self._done_frames)
        always_selected = [possible_next_indices.index(i) for i in obs_frame_indices]
        obs_frame_indices = self.select_obs_indices(
            possible_next_indices=possible_next_indices, n=n_to_condition_on, always_selected=always_selected,
        )
        self.last_sampled_idx = max(latent_frame_indices)
        return obs_frame_indices, latent_frame_indices
def get_hierarchy_n_level(n):
    """Return a HierarchyNLevel subclass whose level count ``N`` is fixed to ``n``.

    The class name is kept as "Hierarchy" so the ``typename`` property (which
    uses ``type(self).__name__``) is unchanged.
    """
    return type("Hierarchy", (HierarchyNLevel,), {"N": n})
def get_adaptive_hierarchy_n_level(n):
    """Return an AdaptiveHierarchyNLevel subclass whose level count ``N`` is
    fixed to ``n`` (class name kept as "AdaptiveHierarchy" for ``typename``)."""
    return type("AdaptiveHierarchy", (AdaptiveHierarchyNLevel,), {"N": n})
# Registry of sampling schemes selectable by name (e.g. via a command-line flag).
sampling_schemes = {
    'autoreg': Autoregressive,
    'long-range': LongRangeAutoregressive,
    'hierarchy-2': get_hierarchy_n_level(2),
    'hierarchy-3': get_hierarchy_n_level(3),
    'hierarchy-4': get_hierarchy_n_level(4),
    'hierarchy-5': get_hierarchy_n_level(5),
    'adaptive-autoreg': AdaptiveAutoregressive,
    'adaptive-hierarchy-2': get_adaptive_hierarchy_n_level(2),
    'adaptive-hierarchy-3': get_adaptive_hierarchy_n_level(3),
} | 18,909 | 46.393484 | 224 | py |
flexible-video-diffusion-modeling | flexible-video-diffusion-modeling-main/improved_diffusion/nn.py | """
Various utilities for neural networks.
"""
import math
import torch as th
import torch.nn as nn
# PyTorch 1.7 has SiLU, but we support PyTorch 1.5.
class SiLU(nn.Module):
    """SiLU/Swish activation, ``x * sigmoid(x)`` (kept here to support PyTorch < 1.7)."""
    def forward(self, x):
        return th.sigmoid(x).mul(x)
class GroupNorm32(nn.GroupNorm):
    """GroupNorm that always computes in float32, then casts back to the input dtype."""
    def forward(self, x):
        orig_dtype = x.dtype
        normed = super().forward(x.float())
        return normed.type(orig_dtype)
def conv_nd(dims, *args, **kwargs):
    """
    Create a 1D, 2D, or 3D convolution module.
    :param dims: 1, 2 or 3 — the dimensionality of the convolution.
    :raises ValueError: for any other value of ``dims``.
    """
    conv_cls = {1: nn.Conv1d, 2: nn.Conv2d, 3: nn.Conv3d}.get(dims)
    if conv_cls is None:
        raise ValueError(f"unsupported dimensions: {dims}")
    return conv_cls(*args, **kwargs)
def linear(*args, **kwargs):
    """Create a fully-connected (``nn.Linear``) module; arguments pass straight through."""
    return nn.Linear(*args, **kwargs)
def avg_pool_nd(dims, *args, **kwargs):
    """
    Create a 1D, 2D, or 3D average pooling module.
    :param dims: 1, 2 or 3 — the dimensionality of the pooling.
    :raises ValueError: for any other value of ``dims``.
    """
    pool_cls = {1: nn.AvgPool1d, 2: nn.AvgPool2d, 3: nn.AvgPool3d}.get(dims)
    if pool_cls is None:
        raise ValueError(f"unsupported dimensions: {dims}")
    return pool_cls(*args, **kwargs)
def update_ema(target_params, source_params, rate=0.99):
    """
    Update target parameters towards the source parameters with an exponential
    moving average, in place: ``targ <- rate * targ + (1 - rate) * src``.
    :param target_params: the target parameter sequence (mutated).
    :param source_params: the source parameter sequence.
    :param rate: the EMA rate (closer to 1 means slower).
    """
    blend = 1 - rate
    for target, source in zip(target_params, source_params):
        target.detach().mul_(rate).add_(source, alpha=blend)
def zero_module(module):
    """Zero every parameter of *module* in place and return the module."""
    for param in module.parameters():
        param.detach().zero_()
    return module
def scale_module(module, scale):
    """Multiply every parameter of *module* by *scale* in place and return the module."""
    for param in module.parameters():
        param.detach().mul_(scale)
    return module
def mean_flat(tensor, mask=None):
    """
    Take the mean over all non-batch dimensions.
    If *mask* is given, the tensor is multiplied by it first; note the mean is
    still taken over every element, so masked-out entries contribute zeros to
    the numerator but still count in the denominator.
    """
    if mask is not None:
        tensor = tensor * mask
    non_batch_dims = tuple(range(1, tensor.ndim))
    return tensor.mean(dim=non_batch_dims)
def normalization(channels):
    """
    Make a standard normalization layer: a 32-group GroupNorm computed in float32.
    :param channels: number of input channels.
    :return: an nn.Module for normalization.
    """
    num_groups = 32
    return GroupNorm32(num_groups, channels)
def timestep_embedding(timesteps, dim, max_period=10000):
    """
    Create sinusoidal timestep embeddings.
    :param timesteps: a 1-D Tensor of N indices, one per batch element.
                      These may be fractional.
    :param dim: the dimension of the output.
    :param max_period: controls the minimum frequency of the embeddings.
    :return: an [N x dim] Tensor of positional embeddings.
    """
    half = dim // 2
    # Geometric sequence of frequencies from 1 down to ~1/max_period.
    exponents = th.arange(start=0, end=half, dtype=th.float32) / half
    freqs = th.exp(-math.log(max_period) * exponents).to(device=timesteps.device)
    angles = timesteps[:, None].float() * freqs[None]
    embedding = th.cat([th.cos(angles), th.sin(angles)], dim=-1)
    if dim % 2:
        # Odd dim: pad with a zero column so the output has exactly `dim` features.
        embedding = th.cat([embedding, th.zeros_like(embedding[:, :1])], dim=-1)
    return embedding
def checkpoint(func, inputs, params, flag):
    """
    Evaluate a function without caching intermediate activations, allowing for
    reduced memory at the expense of extra compute in the backward pass.
    :param func: the function to evaluate.
    :param inputs: the argument sequence to pass to `func`.
    :param params: a sequence of parameters `func` depends on but does not
                   explicitly take as arguments.
    :param flag: if False, disable gradient checkpointing.
    """
    if not flag:
        return func(*inputs)
    args = tuple(inputs) + tuple(params)
    return CheckpointFunction.apply(func, len(inputs), *args)
class CheckpointFunction(th.autograd.Function):
    """Custom autograd Function implementing gradient checkpointing: forward
    runs without grad tracking; backward re-runs the function to recompute
    activations before differentiating."""
    @staticmethod
    def forward(ctx, run_function, length, *args):
        ctx.run_function = run_function
        # First `length` args are tensor inputs; the rest are parameters the
        # function depends on (kept so autograd can return grads for them).
        ctx.input_tensors = list(args[:length])
        ctx.input_params = list(args[length:])
        with th.no_grad():
            output_tensors = ctx.run_function(*ctx.input_tensors)
        return output_tensors
    @staticmethod
    def backward(ctx, *output_grads):
        ctx.input_tensors = [x.detach().requires_grad_(True) for x in ctx.input_tensors]
        with th.enable_grad():
            # Fixes a bug where the first op in run_function modifies the
            # Tensor storage in place, which is not allowed for detach()'d
            # Tensors.
            shallow_copies = [x.view_as(x) for x in ctx.input_tensors]
            output_tensors = ctx.run_function(*shallow_copies)
        input_grads = th.autograd.grad(
            output_tensors,
            ctx.input_tensors + ctx.input_params,
            output_grads,
            allow_unused=True,
        )
        del ctx.input_tensors
        del ctx.input_params
        del output_tensors
        # Two leading Nones: no grads for the non-tensor args (run_function, length).
        return (None, None) + input_grads
| 5,087 | 28.410405 | 88 | py |
flexible-video-diffusion-modeling | flexible-video-diffusion-modeling-main/improved_diffusion/fp16_util.py | """
Helpers to train with 16-bit precision.
"""
import torch.nn as nn
from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors
def convert_module_to_f16(l):
    """
    Convert primitive (conv) modules to float16 in place; other modules are untouched.
    Intended for use with ``module.apply(convert_module_to_f16)``.
    """
    if not isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Conv3d)):
        return
    l.weight.data = l.weight.data.half()
    l.bias.data = l.bias.data.half()
def convert_module_to_f32(l):
    """
    Convert primitive (conv) modules back to float32 in place, undoing
    convert_module_to_f16(); other modules are untouched.
    """
    if not isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Conv3d)):
        return
    l.weight.data = l.weight.data.float()
    l.bias.data = l.bias.data.float()
def make_master_params(model_params):
    """
    Copy model parameters into a single flattened full-precision master
    parameter, returned as a one-element list.
    """
    detached = [param.detach().float() for param in model_params]
    master = nn.Parameter(_flatten_dense_tensors(detached))
    master.requires_grad = True
    return [master]
def model_grads_to_master_grads(model_params, master_params):
    """
    Copy the gradients from the model parameters into the master parameters
    produced by make_master_params().
    """
    grads = [param.grad.data.detach().float() for param in model_params]
    master_params[0].grad = _flatten_dense_tensors(grads)
def master_params_to_model_params(model_params, master_params):
    """
    Copy the master parameter data back into the model parameters.
    """
    # Materialize the sequence first: if a generator were passed, it would be
    # consumed by unflatten_master_params and leave nothing to copy into.
    model_params = list(model_params)
    unflattened = unflatten_master_params(model_params, master_params)
    for param, master_param in zip(model_params, unflattened):
        param.detach().copy_(master_param)
def unflatten_master_params(model_params, master_params):
    """Split the flat master parameter back into tensors shaped like *model_params*."""
    return _unflatten_dense_tensors(master_params[0].detach(), model_params)
def zero_grad(model_params):
    """Detach and zero the ``.grad`` of every parameter that has one."""
    # Taken from https://pytorch.org/docs/stable/_modules/torch/optim/optimizer.html#Optimizer.add_param_group
    for param in model_params:
        if param.grad is None:
            continue
        param.grad.detach_()
        param.grad.zero_()
| 2,282 | 28.649351 | 114 | py |
flexible-video-diffusion-modeling | flexible-video-diffusion-modeling-main/improved_diffusion/unet.py | from abc import abstractmethod
import math
import numpy as np
import torch as th
import torch.nn as nn
import torch.nn.functional as F
from .fp16_util import convert_module_to_f16, convert_module_to_f32
from .nn import (
SiLU,
conv_nd,
linear,
avg_pool_nd,
zero_module,
normalization,
timestep_embedding,
checkpoint,
)
from .rpe import RPEAttention
class TimestepBlock(nn.Module):
    """
    Any module where forward() takes timestep embeddings as a second argument.
    """
    # Abstract interface; implemented in this file by ResBlock.
    @abstractmethod
    def forward(self, x, emb):
        """
        Apply the module to `x` given `emb` timestep embeddings.
        """
class TimestepEmbedAttnThingsSequential(nn.Sequential, TimestepBlock):
    """
    A sequential module that passes extra things to the children that
    support it as an extra input.
    """
    def forward(self, x, emb, attn_mask, T=1, frame_indices=None, attn_weights_list=None):
        """Run each child, forwarding only the kwargs its type accepts.

        :param x: input features.
        :param emb: timestep embedding (passed to TimestepBlock children).
        :param attn_mask: attention mask (passed to FactorizedAttentionBlock children).
        :param T: number of frames folded into the batch dimension.
        """
        for layer in self:
            if isinstance(layer, TimestepBlock):
                # Timestep-conditioned layers only need the embedding.
                # (Previously a redundant `kwargs['emb'] = emb` re-assigned the
                # same key on the next line; removed.)
                kwargs = dict(emb=emb)
            elif isinstance(layer, FactorizedAttentionBlock):
                kwargs = dict(
                    temb=emb,
                    attn_mask=attn_mask,
                    T=T,
                    frame_indices=frame_indices,
                    attn_weights_list=attn_weights_list,
                )
            else:
                kwargs = {}
            x = layer(x, **kwargs)
        return x
class Upsample(nn.Module):
    """
    An upsampling layer with an optional convolution.
    :param channels: channels in the inputs and outputs.
    :param use_conv: a bool determining if a convolution is applied.
    :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
                 upsampling occurs in the inner-two dimensions.
    """
    def __init__(self, channels, use_conv, dims=2):
        super().__init__()
        self.channels = channels
        self.use_conv = use_conv
        self.dims = dims
        if use_conv:
            self.conv = conv_nd(dims, channels, channels, 3, padding=1)
    def forward(self, x):
        assert x.shape[1] == self.channels
        if self.dims == 3:
            # 3D input: keep the first (temporal) size, double the inner two dims.
            size = (x.shape[2], x.shape[3] * 2, x.shape[4] * 2)
            x = F.interpolate(x, size, mode="nearest")
        else:
            x = F.interpolate(x, scale_factor=2, mode="nearest")
        return self.conv(x) if self.use_conv else x
class Downsample(nn.Module):
    """
    A downsampling layer with an optional convolution.
    :param channels: channels in the inputs and outputs.
    :param use_conv: a bool determining if a convolution is applied.
    :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
                 downsampling occurs in the inner-two dimensions.
    """
    def __init__(self, channels, use_conv, dims=2):
        super().__init__()
        self.channels = channels
        self.use_conv = use_conv
        self.dims = dims
        # For 3D (video) inputs, never downsample the first (temporal) dimension.
        stride = 2 if dims != 3 else (1, 2, 2)
        if use_conv:
            self.op = conv_nd(dims, channels, channels, 3, stride=stride, padding=1)
        else:
            # BUG FIX: this previously called avg_pool_nd(stride), which passed
            # `stride` as the `dims` argument and gave the pooling module no
            # kernel size at all, crashing whenever use_conv=False. Build the
            # pool for the correct dimensionality with matching kernel/stride.
            self.op = avg_pool_nd(dims, kernel_size=stride, stride=stride)
    def forward(self, x):
        assert x.shape[1] == self.channels
        return self.op(x)
class ResBlock(TimestepBlock):
    """
    A residual block that can optionally change the number of channels.
    :param channels: the number of input channels.
    :param emb_channels: the number of timestep embedding channels.
    :param dropout: the rate of dropout.
    :param out_channels: if specified, the number of out channels.
    :param use_conv: if True and out_channels is specified, use a spatial
        convolution instead of a smaller 1x1 convolution to change the
        channels in the skip connection.
    :param dims: determines if the signal is 1D, 2D, or 3D.
    :param use_checkpoint: if True, use gradient checkpointing on this module.
    """
    def __init__(
        self,
        channels,
        emb_channels,
        dropout,
        out_channels=None,
        use_conv=False,
        use_scale_shift_norm=False,
        dims=2,
        use_checkpoint=False,
    ):
        super().__init__()
        self.channels = channels
        self.emb_channels = emb_channels
        self.dropout = dropout
        self.out_channels = out_channels or channels
        self.use_conv = use_conv
        self.use_checkpoint = use_checkpoint
        self.use_scale_shift_norm = use_scale_shift_norm
        self.in_layers = nn.Sequential(
            normalization(channels),
            SiLU(),
            conv_nd(dims, channels, self.out_channels, 3, padding=1),
        )
        # Projects the timestep embedding to (scale, shift) pairs when
        # use_scale_shift_norm is on, otherwise to a plain additive bias.
        self.emb_layers = nn.Sequential(
            SiLU(),
            linear(
                emb_channels,
                2 * self.out_channels if use_scale_shift_norm else self.out_channels,
            ),
        )
        self.out_layers = nn.Sequential(
            normalization(self.out_channels),
            SiLU(),
            nn.Dropout(p=dropout),
            # Zero-initialized so the residual branch starts as an identity.
            zero_module(
                conv_nd(dims, self.out_channels, self.out_channels, 3, padding=1)
            ),
        )
        if self.out_channels == channels:
            self.skip_connection = nn.Identity()
        elif use_conv:
            self.skip_connection = conv_nd(
                dims, channels, self.out_channels, 3, padding=1
            )
        else:
            self.skip_connection = conv_nd(dims, channels, self.out_channels, 1)
    def forward(self, x, emb):
        """
        Apply the block to a Tensor, conditioned on a timestep embedding.
        :param x: an [N x C x ...] Tensor of features.
        :param emb: an [N x emb_channels] Tensor of timestep embeddings.
        :return: an [N x C x ...] Tensor of outputs.
        """
        return checkpoint(
            self._forward, (x, emb), self.parameters(), self.use_checkpoint
        )
    def _forward(self, x, emb):
        h = self.in_layers(x)
        emb_out = self.emb_layers(emb).type(h.dtype)
        # Broadcast the embedding over the spatial dimensions.
        while len(emb_out.shape) < len(h.shape):
            emb_out = emb_out[..., None]
        if self.use_scale_shift_norm:
            # FiLM-style conditioning: normalize, then scale and shift.
            out_norm, out_rest = self.out_layers[0], self.out_layers[1:]
            scale, shift = th.chunk(emb_out, 2, dim=1)
            h = out_norm(h) * (1 + scale) + shift
            h = out_rest(h)
        else:
            h = h + emb_out
            h = self.out_layers(h)
        return self.skip_connection(x) + h
class FactorizedAttentionBlock(nn.Module):
    """Factorized space-time attention: attend over the time axis (with
    relative position embeddings), then over the spatial axes."""
    def __init__(self, channels, num_heads, use_rpe_net, time_embed_dim=None, use_checkpoint=False):
        super().__init__()
        # Spatial attention uses no relative position embeddings.
        self.spatial_attention = RPEAttention(
            channels=channels, num_heads=num_heads, use_checkpoint=use_checkpoint,
            use_rpe_q=False, use_rpe_k=False, use_rpe_v=False,
        )
        self.temporal_attention = RPEAttention(
            channels=channels, num_heads=num_heads, use_checkpoint=use_checkpoint,
            time_embed_dim=time_embed_dim, use_rpe_net=use_rpe_net,
        )
    def forward(self, x, attn_mask, temb, T, frame_indices=None, attn_weights_list=None):
        # x is [B*T, C, H, W]; T is the number of frames folded into the batch dim.
        BT, C, H, W = x.shape
        B = BT//T
        # reshape to have T in the last dimension because that's what we attend over
        x = x.view(B, T, C, H, W).permute(0, 3, 4, 2, 1)  # B, H, W, C, T
        x = x.reshape(B, H*W, C, T)
        x = self.temporal_attention(x,
                                    temb,
                                    frame_indices,
                                    attn_mask=attn_mask.flatten(start_dim=2).squeeze(dim=2),  # B x T
                                    attn_weights_list=None if attn_weights_list is None else attn_weights_list['temporal'],)
        # Now we attend over the spatial dimensions by reshaping the input
        x = x.view(B, H, W, C, T).permute(0, 4, 3, 1, 2)  # B, T, C, H, W
        x = x.reshape(B, T, C, H*W)
        x = self.spatial_attention(x,
                                   temb,
                                   frame_indices=None,
                                   attn_weights_list=None if attn_weights_list is None else attn_weights_list['spatial'])
        # Restore the [B*T, C, H, W] layout expected by the conv stack.
        x = x.reshape(BT, C, H, W)
        return x
class UNetVideoModel(nn.Module):
"""
The full UNet model with attention and timestep embedding.
:param in_channels: channels in the input Tensor.
:param model_channels: base channel count for the model.
:param out_channels: channels in the output Tensor.
:param num_res_blocks: number of residual blocks per downsample.
:param attention_resolutions: a collection of downsample rates at which
attention will take place. May be a set, list, or tuple.
For example, if this contains 4, then at 4x downsampling, attention
will be used.
:param dropout: the dropout probability.
:param channel_mult: channel multiplier for each level of the UNet.
:param conv_resample: if True, use learned convolutions for upsampling and
downsampling.
:param dims: determines if the signal is 1D, 2D, or 3D.
:param use_checkpoint: use gradient checkpointing to reduce memory usage.
:param num_heads: the number of attention heads in each attention layer.
"""
def __init__(
self,
in_channels,
model_channels,
out_channels,
num_res_blocks,
attention_resolutions,
image_size=None,
dropout=0,
channel_mult=(1, 2, 4, 8),
conv_resample=True,
dims=2,
use_checkpoint=False,
num_heads=1,
num_heads_upsample=-1,
use_scale_shift_norm=False,
use_rpe_net=False,
):
super().__init__()
if num_heads_upsample == -1:
num_heads_upsample = num_heads
self.in_channels = in_channels + 1
self.model_channels = model_channels
self.out_channels = out_channels
self.num_res_blocks = num_res_blocks
self.attention_resolutions = attention_resolutions
self.dropout = dropout
self.channel_mult = channel_mult
self.conv_resample = conv_resample
self.use_checkpoint = use_checkpoint
self.num_heads = num_heads
self.num_heads_upsample = num_heads_upsample
self.use_rpe_net = use_rpe_net
time_embed_dim = model_channels * 4
self.time_embed = nn.Sequential(
linear(model_channels, time_embed_dim),
SiLU(),
linear(time_embed_dim, time_embed_dim),
)
self.input_blocks = nn.ModuleList(
[
TimestepEmbedAttnThingsSequential(
conv_nd(dims, self.in_channels, model_channels, 3, padding=1)
)
]
)
input_block_chans = [model_channels]
ch = model_channels
ds = 1
for level, mult in enumerate(channel_mult):
for _ in range(num_res_blocks):
layers = [
ResBlock(
ch,
time_embed_dim,
dropout,
out_channels=mult * model_channels,
dims=dims,
use_checkpoint=use_checkpoint,
use_scale_shift_norm=use_scale_shift_norm,
)
]
ch = mult * model_channels
if ds in attention_resolutions:
layers.append(
FactorizedAttentionBlock(
ch, use_checkpoint=use_checkpoint, num_heads=num_heads, use_rpe_net=use_rpe_net, time_embed_dim=time_embed_dim,
)
)
self.input_blocks.append(TimestepEmbedAttnThingsSequential(*layers))
input_block_chans.append(ch)
if level != len(channel_mult) - 1:
self.input_blocks.append(
TimestepEmbedAttnThingsSequential(Downsample(ch, conv_resample, dims=dims))
)
input_block_chans.append(ch)
ds *= 2
self.middle_block = TimestepEmbedAttnThingsSequential(
ResBlock(
ch,
time_embed_dim,
dropout,
dims=dims,
use_checkpoint=use_checkpoint,
use_scale_shift_norm=use_scale_shift_norm,
),
FactorizedAttentionBlock(ch, use_checkpoint=use_checkpoint, num_heads=num_heads, use_rpe_net=use_rpe_net, time_embed_dim=time_embed_dim),
ResBlock(
ch,
time_embed_dim,
dropout,
dims=dims,
use_checkpoint=use_checkpoint,
use_scale_shift_norm=use_scale_shift_norm,
),
)
self.output_blocks = nn.ModuleList([])
for level, mult in list(enumerate(channel_mult))[::-1]:
for i in range(num_res_blocks + 1):
layers = [
ResBlock(
ch + input_block_chans.pop(),
time_embed_dim,
dropout,
out_channels=model_channels * mult,
dims=dims,
use_checkpoint=use_checkpoint,
use_scale_shift_norm=use_scale_shift_norm,
)
]
ch = model_channels * mult
if ds in attention_resolutions:
layers.append(
FactorizedAttentionBlock(
ch,
use_checkpoint=use_checkpoint,
num_heads=num_heads_upsample,
use_rpe_net=use_rpe_net,
time_embed_dim=time_embed_dim,
)
)
if level and i == num_res_blocks:
layers.append(Upsample(ch, conv_resample, dims=dims))
ds //= 2
self.output_blocks.append(TimestepEmbedAttnThingsSequential(*layers))
self.out = nn.Sequential(
normalization(ch),
SiLU(),
zero_module(conv_nd(dims, model_channels, out_channels, 3, padding=1)),
)
def convert_to_fp16(self):
"""
Convert the torso of the model to float16.
"""
self.input_blocks.apply(convert_module_to_f16)
self.middle_block.apply(convert_module_to_f16)
self.output_blocks.apply(convert_module_to_f16)
def convert_to_fp32(self):
"""
Convert the torso of the model to float32.
"""
self.input_blocks.apply(convert_module_to_f32)
self.middle_block.apply(convert_module_to_f32)
self.output_blocks.apply(convert_module_to_f32)
@property
def inner_dtype(self):
"""
Get the dtype used by the torso of the model.
"""
return next(self.input_blocks.parameters()).dtype
def forward(self, x, *, x0, timesteps, frame_indices=None,
obs_mask=None, latent_mask=None, return_attn_weights=False
):
"""
Apply the model to an input batch.
:param x: an [N x C x ...] Tensor of inputs.
:param timesteps: a 1-D batch of timesteps.
:param y: an [N] Tensor of labels, if class-conditional.
:return: an [N x C x ...] Tensor of outputs.
"""
B, T, C, H, W = x.shape
timesteps = timesteps.view(B, 1).expand(B, T)
attn_mask = (obs_mask + latent_mask).clip(max=1)
# add channel to indicate obs
indicator_template = th.ones_like(x[:, :, :1, :, :])
obs_indicator = indicator_template * obs_mask
x = th.cat([x*(1-obs_mask) + x0*obs_mask,
obs_indicator],
dim=2,
)
x = x.reshape(B*T, self.in_channels, H, W)
timesteps = timesteps.reshape(B*T)
hs = []
emb = self.time_embed(timestep_embedding(timesteps, self.model_channels))
h = x.type(self.inner_dtype)
attns = {'spatial': [], 'temporal': [], 'mixed': []} if return_attn_weights else None
for layer, module in enumerate(self.input_blocks):
h = module(h, emb, attn_mask, T=T, attn_weights_list=attns, frame_indices=frame_indices)
hs.append(h)
h = self.middle_block(h, emb, attn_mask, T=T, attn_weights_list=attns, frame_indices=frame_indices)
for module in self.output_blocks:
cat_in = th.cat([h, hs.pop()], dim=1)
h = module(cat_in, emb, attn_mask, T=T, attn_weights_list=attns, frame_indices=frame_indices)
h = h.type(x.dtype)
out = self.out(h)
return out.view(B, T, self.out_channels, H, W), attns
def get_feature_vectors(self, x, timesteps, y=None):
    """
    Apply the model and return all of the intermediate tensors.

    :param x: an [N x C x ...] Tensor of inputs.
    :param timesteps: a 1-D batch of timesteps.
    :param y: unused; kept for interface compatibility.
    :return: a dict with the following keys:
             - 'down': a list of hidden state tensors from downsampling.
             - 'middle': the tensor of the output of the lowest-resolution
               block in the model.
             - 'up': a list of hidden state tensors from upsampling.
    """
    # NOTE(review): unlike forward(), the blocks here are called without
    # attn_mask / T / frame_indices — confirm this method is still compatible
    # with the factorized attention modules used in this file.
    hs = []
    emb = self.time_embed(timestep_embedding(timesteps, self.model_channels))
    result = dict(down=[], up=[])
    h = x.type(self.inner_dtype)
    for module in self.input_blocks:
        h = module(h, emb)
        hs.append(h)
        result["down"].append(h.type(x.dtype))
    h = self.middle_block(h, emb)
    result["middle"] = h.type(x.dtype)
    for module in self.output_blocks:
        # U-Net skip connection.
        cat_in = th.cat([h, hs.pop()], dim=1)
        h = module(cat_in, emb)
        result["up"].append(h.type(x.dtype))
    # Fixed: the original return line had dataset-export residue
    # ("| 18,242 | 36.004057 | 149 | py |") fused onto it, which is a
    # syntax error; only the bare return belongs here.
    return result
flexible-video-diffusion-modeling | flexible-video-diffusion-modeling-main/improved_diffusion/gaussian_diffusion.py | """
This code started out as a PyTorch port of Ho et al's diffusion models:
https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/diffusion_utils_2.py
Docstrings have been added, as well as DDIM sampling and a new collection of beta schedules.
"""
import enum
import math
import numpy as np
import torch as th
from .nn import mean_flat
from .losses import normal_kl, discretized_gaussian_log_likelihood
def get_named_beta_schedule(schedule_name, num_diffusion_timesteps):
    """
    Look up a pre-defined beta schedule by name.

    The schedule library consists of schedules which remain similar in the
    limit of many diffusion steps. Names may be added, but must not be
    removed or changed, to keep backwards compatibility.

    :param schedule_name: "linear" or "cosine".
    :param num_diffusion_timesteps: number of betas to produce.
    :return: a 1-D float64 numpy array of betas.
    :raises NotImplementedError: for an unknown schedule name.
    """
    if schedule_name == "linear":
        # Ho et al.'s linear schedule, rescaled so that any step count
        # behaves like the original 1000-step schedule.
        scale = 1000 / num_diffusion_timesteps
        return np.linspace(
            scale * 0.0001, scale * 0.02, num_diffusion_timesteps, dtype=np.float64
        )
    if schedule_name == "cosine":
        squared_cosine = lambda t: math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2
        return betas_for_alpha_bar(num_diffusion_timesteps, squared_cosine)
    raise NotImplementedError(f"unknown beta schedule: {schedule_name}")
def betas_for_alpha_bar(num_diffusion_timesteps, alpha_bar, max_beta=0.999):
    """
    Create a beta schedule that discretizes the given alpha_t_bar function,
    which defines the cumulative product of (1-beta) over time from t = [0,1].

    :param num_diffusion_timesteps: the number of betas to produce.
    :param alpha_bar: a lambda that takes an argument t from 0 to 1 and
                      produces the cumulative product of (1-beta) up to that
                      part of the diffusion process.
    :param max_beta: the maximum beta to use; use values lower than 1 to
                     prevent singularities.
    :return: a 1-D numpy array of betas.
    """
    T = num_diffusion_timesteps
    # beta_i = 1 - abar((i+1)/T) / abar(i/T), capped at max_beta.
    return np.array([
        min(1 - alpha_bar((i + 1) / T) / alpha_bar(i / T), max_beta)
        for i in range(T)
    ])
class ModelMeanType(enum.Enum):
    """
    Which quantity the model's (mean) output head predicts.
    """

    PREVIOUS_X = enum.auto()  # the model predicts x_{t-1}
    START_X = enum.auto()  # the model predicts x_0
    EPSILON = enum.auto()  # the model predicts epsilon
class ModelVarType(enum.Enum):
    """
    What is used as the model's output variance.

    The LEARNED_RANGE option has been added to allow the model to predict
    values between FIXED_SMALL and FIXED_LARGE, making its job easier.
    """

    LEARNED = enum.auto()  # model outputs log-variance directly
    FIXED_SMALL = enum.auto()  # use the clipped posterior variance
    FIXED_LARGE = enum.auto()  # use the betas as variance
    LEARNED_RANGE = enum.auto()  # model interpolates between small and large
class LossType(enum.Enum):
    """Which training objective to use, and how the VB part is rescaled."""

    MSE = enum.auto()  # use raw MSE loss (and KL when learning variances)
    RESCALED_MSE = enum.auto()  # use raw MSE loss (with RESCALED_KL when learning variances)
    KL = enum.auto()  # use the variational lower-bound
    RESCALED_KL = enum.auto()  # like KL, but rescale to estimate the full VLB

    def is_vb(self):
        """True for the purely variational-bound objectives."""
        return self in (LossType.KL, LossType.RESCALED_KL)
class GaussianDiffusion:
"""
Utilities for training and sampling diffusion models.
Ported directly from here, and then adapted over time to further experimentation.
https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/diffusion_utils_2.py#L42
:param betas: a 1-D numpy array of betas for each diffusion timestep,
starting at T and going to 1.
:param model_mean_type: a ModelMeanType determining what the model outputs.
:param model_var_type: a ModelVarType determining how variance is output.
:param loss_type: a LossType determining the loss function to use.
:param rescale_timesteps: if True, pass floating point timesteps into the
model so that they are always scaled like in the
original paper (0 to 1000).
"""
def __init__(
    self,
    *,
    betas,
    model_mean_type,
    model_var_type,
    loss_type,
    rescale_timesteps=False,
):
    """
    Precompute every per-timestep coefficient needed by q(.) and p(.).

    :param betas: a 1-D array of betas for each diffusion timestep.
    :param model_mean_type: a ModelMeanType for what the model outputs.
    :param model_var_type: a ModelVarType for how variance is output.
    :param loss_type: a LossType for the training objective.
    :param rescale_timesteps: if True, scale timesteps fed to the model
        to the 0..1000 range of the original paper.
    """
    self.model_mean_type = model_mean_type
    self.model_var_type = model_var_type
    self.loss_type = loss_type
    self.rescale_timesteps = rescale_timesteps
    # Use float64 for accuracy.
    betas = np.array(betas, dtype=np.float64)
    self.betas = betas
    assert len(betas.shape) == 1, "betas must be 1-D"
    assert (betas > 0).all() and (betas <= 1).all()
    self.num_timesteps = int(betas.shape[0])
    alphas = 1.0 - betas
    self.alphas_cumprod = np.cumprod(alphas, axis=0)
    # Shifted cumulative products: abar_{t-1} (prepend 1) and abar_{t+1} (append 0).
    self.alphas_cumprod_prev = np.append(1.0, self.alphas_cumprod[:-1])
    self.alphas_cumprod_next = np.append(self.alphas_cumprod[1:], 0.0)
    assert self.alphas_cumprod_prev.shape == (self.num_timesteps,)
    # calculations for diffusion q(x_t | x_{t-1}) and others
    self.sqrt_alphas_cumprod = np.sqrt(self.alphas_cumprod)
    self.sqrt_one_minus_alphas_cumprod = np.sqrt(1.0 - self.alphas_cumprod)
    self.log_one_minus_alphas_cumprod = np.log(1.0 - self.alphas_cumprod)
    self.sqrt_recip_alphas_cumprod = np.sqrt(1.0 / self.alphas_cumprod)
    self.sqrt_recipm1_alphas_cumprod = np.sqrt(1.0 / self.alphas_cumprod - 1)
    # calculations for posterior q(x_{t-1} | x_t, x_0)
    self.posterior_variance = (
        betas * (1.0 - self.alphas_cumprod_prev) / (1.0 - self.alphas_cumprod)
    )
    # log calculation clipped because the posterior variance is 0 at the
    # beginning of the diffusion chain.
    self.posterior_log_variance_clipped = np.log(
        np.append(self.posterior_variance[1], self.posterior_variance[1:])
    )
    self.posterior_mean_coef1 = (
        betas * np.sqrt(self.alphas_cumprod_prev) / (1.0 - self.alphas_cumprod)
    )
    self.posterior_mean_coef2 = (
        (1.0 - self.alphas_cumprod_prev)
        * np.sqrt(alphas)
        / (1.0 - self.alphas_cumprod)
    )
def q_mean_variance(self, x_start, t):
    """
    Get the distribution q(x_t | x_0).

    :param x_start: the [N x C x ...] tensor of noiseless inputs.
    :param t: the number of diffusion steps (minus 1). Here, 0 means one step.
    :return: A tuple (mean, variance, log_variance), all of x_start's shape.
    """
    shape = x_start.shape
    scale = _extract_into_tensor(self.sqrt_alphas_cumprod, t, shape)
    return (
        scale * x_start,
        _extract_into_tensor(1.0 - self.alphas_cumprod, t, shape),
        _extract_into_tensor(self.log_one_minus_alphas_cumprod, t, shape),
    )
def q_sample(self, x_start, t, noise=None):
    """
    Diffuse the data for a given number of diffusion steps.

    In other words, sample from q(x_t | x_0).

    :param x_start: the initial data batch.
    :param t: the number of diffusion steps (minus 1). Here, 0 means one step.
    :param noise: if specified, the split-out normal noise.
    :return: A noisy version of x_start.
    """
    if noise is None:
        noise = th.randn_like(x_start)
    assert noise.shape == x_start.shape
    # x_t = sqrt(abar_t) * x_0 + sqrt(1 - abar_t) * eps
    return (
        _extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start
        + _extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape)
        * noise
    )
def q_posterior_mean_variance(self, x_start, x_t, t):
    """
    Compute the mean and variance of the diffusion posterior:
        q(x_{t-1} | x_t, x_0)

    :return: (posterior_mean, posterior_variance, posterior_log_variance_clipped).
    """
    assert x_start.shape == x_t.shape
    # mean = coef1 * x_0 + coef2 * x_t, with coefficients precomputed in __init__.
    posterior_mean = (
        _extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start
        + _extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t
    )
    posterior_variance = _extract_into_tensor(self.posterior_variance, t, x_t.shape)
    posterior_log_variance_clipped = _extract_into_tensor(
        self.posterior_log_variance_clipped, t, x_t.shape
    )
    assert (
        posterior_mean.shape[0]
        == posterior_variance.shape[0]
        == posterior_log_variance_clipped.shape[0]
        == x_start.shape[0]
    )
    return posterior_mean, posterior_variance, posterior_log_variance_clipped
def p_mean_variance(
    self, model, x, t, clip_denoised=True, denoised_fn=None, model_kwargs=None, return_attn_weights=False,
):
    """
    Apply the model to get p(x_{t-1} | x_t), as well as a prediction of
    the initial x, x_0.

    :param model: the model, which takes a signal and a batch of timesteps
                  as input and returns (output, attention_weights).
    :param x: the [N x C x ...] tensor at time t.
    :param t: a 1-D Tensor of timesteps.
    :param clip_denoised: if True, clip the denoised signal into [-1, 1].
    :param denoised_fn: if not None, a function which applies to the
        x_start prediction before it is used to sample. Applies before
        clip_denoised.
    :param model_kwargs: if not None, a dict of extra keyword arguments to
        pass to the model. This can be used for conditioning.
    :param return_attn_weights: forwarded to the model to request attention maps.
    :return: a dict with the following keys:
             - 'mean': the model mean output.
             - 'variance': the model variance output.
             - 'log_variance': the log of 'variance'.
             - 'pred_xstart': the prediction for x_0.
             - 'attn': the attention weights returned by the model.
    """
    if model_kwargs is None:
        model_kwargs = {}
    B, C = x.shape[:2]
    assert t.shape == (B,)
    model_output, attn_weights = model(x, self._scale_timesteps(t), return_attn_weights=return_attn_weights, **model_kwargs)
    if self.model_var_type in [ModelVarType.LEARNED, ModelVarType.LEARNED_RANGE]:
        # Model emits 2*C channels: mean-related output plus variance values.
        assert model_output.shape == (B, C * 2, *x.shape[2:])
        model_output, model_var_values = th.split(model_output, C, dim=1)
        if self.model_var_type == ModelVarType.LEARNED:
            model_log_variance = model_var_values
            model_variance = th.exp(model_log_variance)
        else:
            min_log = _extract_into_tensor(
                self.posterior_log_variance_clipped, t, x.shape
            )
            max_log = _extract_into_tensor(np.log(self.betas), t, x.shape)
            # The model_var_values is [-1, 1] for [min_var, max_var].
            frac = (model_var_values + 1) / 2
            model_log_variance = frac * max_log + (1 - frac) * min_log
            model_variance = th.exp(model_log_variance)
    else:
        model_variance, model_log_variance = {
            # for fixedlarge, we set the initial (log-)variance like so
            # to get a better decoder log likelihood.
            ModelVarType.FIXED_LARGE: (
                np.append(self.posterior_variance[1], self.betas[1:]),
                np.log(np.append(self.posterior_variance[1], self.betas[1:])),
            ),
            ModelVarType.FIXED_SMALL: (
                self.posterior_variance,
                self.posterior_log_variance_clipped,
            ),
        }[self.model_var_type]
        model_variance = _extract_into_tensor(model_variance, t, x.shape)
        model_log_variance = _extract_into_tensor(model_log_variance, t, x.shape)
    def process_xstart(x):
        # Optional denoising hook, then optional clamp to the data range.
        if denoised_fn is not None:
            x = denoised_fn(x)
        if clip_denoised:
            return x.clamp(-1, 1)
        return x
    if self.model_mean_type == ModelMeanType.PREVIOUS_X:
        pred_xstart = process_xstart(
            self._predict_xstart_from_xprev(x_t=x, t=t, xprev=model_output)
        )
        model_mean = model_output
    elif self.model_mean_type in [ModelMeanType.START_X, ModelMeanType.EPSILON]:
        if self.model_mean_type == ModelMeanType.START_X:
            pred_xstart = process_xstart(model_output)
        else:
            pred_xstart = process_xstart(
                self._predict_xstart_from_eps(x_t=x, t=t, eps=model_output)
            )
        # The p(x_{t-1}|x_t) mean is the diffusion posterior mean given pred_xstart.
        model_mean, _, _ = self.q_posterior_mean_variance(
            x_start=pred_xstart, x_t=x, t=t
        )
    else:
        raise NotImplementedError(self.model_mean_type)
    assert (
        model_mean.shape == model_log_variance.shape == pred_xstart.shape == x.shape
    )
    return {
        "mean": model_mean,
        "variance": model_variance,
        "log_variance": model_log_variance,
        "pred_xstart": pred_xstart,
        "attn": attn_weights,
    }
def _predict_xstart_from_eps(self, x_t, t, eps):
    """Recover x_0 from x_t and the predicted noise eps."""
    assert x_t.shape == eps.shape
    coef_xt = _extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape)
    coef_eps = _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape)
    return coef_xt * x_t - coef_eps * eps
def _predict_xstart_from_xprev(self, x_t, t, xprev):
    """Recover x_0 from x_t and a predicted x_{t-1}, inverting the posterior mean."""
    assert x_t.shape == xprev.shape
    return (  # (xprev - coef2*x_t) / coef1
        _extract_into_tensor(1.0 / self.posterior_mean_coef1, t, x_t.shape) * xprev
        - _extract_into_tensor(
            self.posterior_mean_coef2 / self.posterior_mean_coef1, t, x_t.shape
        )
        * x_t
    )
def _predict_eps_from_xstart(self, x_t, t, pred_xstart):
    """Inverse of _predict_xstart_from_eps: derive eps from x_t and a predicted x_0."""
    return (
        _extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t
        - pred_xstart
    ) / _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape)
def _scale_timesteps(self, t):
    """Optionally rescale timesteps to the 0..1000 range of the original paper."""
    if not self.rescale_timesteps:
        return t
    return t.float() * (1000.0 / self.num_timesteps)
def p_sample(
    self, model, x, t, clip_denoised=True, denoised_fn=None, model_kwargs=None, return_attn_weights=False
):
    """
    Sample x_{t-1} from the model at the given timestep.

    :param model: the model to sample from.
    :param x: the current tensor at x_{t-1}.
    :param t: the value of t, starting at 0 for the first diffusion step.
    :param clip_denoised: if True, clip the x_start prediction to [-1, 1].
    :param denoised_fn: if not None, a function which applies to the
        x_start prediction before it is used to sample.
    :param model_kwargs: if not None, a dict of extra keyword arguments to
        pass to the model. This can be used for conditioning.
    :param return_attn_weights: forwarded to p_mean_variance / the model.
    :return: a dict containing the following keys:
             - 'sample': a random sample from the model.
             - 'pred_xstart': a prediction of x_0.
             - 'attn': attention weights from the model.
    """
    out = self.p_mean_variance(
        model,
        x,
        t,
        clip_denoised=clip_denoised,
        denoised_fn=denoised_fn,
        model_kwargs=model_kwargs,
        return_attn_weights=return_attn_weights,
    )
    noise = th.randn_like(x)
    nonzero_mask = (
        (t != 0).float().view(-1, *([1] * (len(x.shape) - 1)))
    )  # no noise when t == 0
    sample = out["mean"] + nonzero_mask * th.exp(0.5 * out["log_variance"]) * noise
    return {"sample": sample, "pred_xstart": out["pred_xstart"], "attn": out["attn"]}
def p_sample_loop(
    self,
    model,
    shape,
    noise=None,
    clip_denoised=True,
    denoised_fn=None,
    model_kwargs=None,
    device=None,
    progress=False,
    latent_mask=None,
    return_attn_weights=False,
):
    """
    Generate samples from the model.

    :param model: the model module.
    :param shape: the shape of the samples, (N, C, H, W).
    :param noise: if specified, the noise from the encoder to sample.
                  Should be of the same shape as `shape`.
    :param clip_denoised: if True, clip x_start predictions to [-1, 1].
    :param denoised_fn: if not None, a function which applies to the
        x_start prediction before it is used to sample.
    :param model_kwargs: if not None, a dict of extra keyword arguments to
        pass to the model. This can be used for conditioning.
    :param device: if specified, the device to create the samples on.
                   If not specified, use a model parameter's device.
    :param progress: if True, show a tqdm progress bar.
    :param latent_mask: forwarded to the progressive sampler.
    :param return_attn_weights: if True, also accumulate attention maps,
        averaged within each quartile of the diffusion process.
    :return: a tuple (samples, attns) where samples is a non-differentiable
        batch and attns maps "attn/q{quartile}-{key}" tags to averaged maps.
    """
    final = None
    attns = {}
    for neg_t, sample in enumerate(self.p_sample_loop_progressive(
        model,
        shape,
        noise=noise,
        clip_denoised=clip_denoised,
        denoised_fn=denoised_fn,
        model_kwargs=model_kwargs,
        device=device,
        progress=progress,
        latent_mask=latent_mask,
        return_attn_weights=return_attn_weights,
    )):
        if return_attn_weights:  # average attn_weights over quartiles of diffusion process
            # neg_t counts up while the diffusion timestep t counts down.
            t = self.num_timesteps - neg_t - 1
            quartile = (4*t)//self.num_timesteps
            for key, attn_t in sample["attn"].items():
                if len(attn_t) == 0:
                    continue
                tag = f"attn/q{quartile}-{key}"
                if tag not in attns:
                    attns[tag] = 0
                largest_attn_shape = attn_t[0][0].shape  # due to U-net structure
                for attn_layer in attn_t:
                    B = shape[0]
                    # Average over the heads/frames folded into the batch dim.
                    attn_layer = attn_layer.view(B, attn_layer.shape[0]//B, *attn_layer.shape[1:]).mean(dim=1)
                    if 'temporal' in key:
                        reshaped = attn_layer
                    else:
                        # Upsample coarser maps to the largest map's shape so
                        # layers can be summed together.
                        reshaped = th.nn.functional.interpolate(
                            attn_layer.unsqueeze(0), size=largest_attn_shape,
                            mode='nearest',
                        ).squeeze(0)
                        reshaped = reshaped / reshaped.mean() * attn_layer.mean()  # renormalise
                    attns[tag] = attns[tag] + reshaped/(self.num_timesteps/4)
        final = sample
    return final["sample"], attns
def p_sample_loop_progressive(
    self,
    model,
    shape,
    noise=None,
    clip_denoised=True,
    denoised_fn=None,
    model_kwargs=None,
    device=None,
    progress=False,
    latent_mask=None,
    return_attn_weights=False,
):
    """
    Generate samples from the model and yield intermediate samples from
    each timestep of diffusion.

    Arguments are the same as p_sample_loop().
    Returns a generator over dicts, where each dict is the return value of
    p_sample().

    NOTE(review): latent_mask is accepted but not used in this body —
    confirm whether it should be forwarded (e.g. via model_kwargs).
    """
    if device is None:
        device = next(model.parameters()).device
    assert isinstance(shape, (tuple, list))
    if noise is not None:
        img = noise
    else:
        img = th.randn(*shape, device=device)
    # Iterate timesteps from T-1 down to 0.
    indices = list(range(self.num_timesteps))[::-1]
    if progress:
        # Lazy import so that we don't depend on tqdm.
        from tqdm.auto import tqdm
        indices = tqdm(indices)
    for i in indices:
        t = th.tensor([i] * shape[0], device=device)
        with th.no_grad():
            out = self.p_sample(
                model,
                img,
                t,
                clip_denoised=clip_denoised,
                denoised_fn=denoised_fn,
                model_kwargs=model_kwargs,
                return_attn_weights=return_attn_weights,
            )
            yield out
            img = out["sample"]
def ddim_sample(  # NOTE latent_mask etc. is not implemented for DDIM
    self,
    model,
    x,
    t,
    clip_denoised=True,
    denoised_fn=None,
    model_kwargs=None,
    eta=0.0,
):
    """
    Sample x_{t-1} from the model using DDIM.

    Same usage as p_sample(). eta=0 gives the deterministic DDIM update.
    """
    out = self.p_mean_variance(
        model,
        x,
        t,
        clip_denoised=clip_denoised,
        denoised_fn=denoised_fn,
        model_kwargs=model_kwargs,
    )
    # Usually our model outputs epsilon, but we re-derive it
    # in case we used x_start or x_prev prediction.
    eps = self._predict_eps_from_xstart(x, t, out["pred_xstart"])
    alpha_bar = _extract_into_tensor(self.alphas_cumprod, t, x.shape)
    alpha_bar_prev = _extract_into_tensor(self.alphas_cumprod_prev, t, x.shape)
    sigma = (
        eta
        * th.sqrt((1 - alpha_bar_prev) / (1 - alpha_bar))
        * th.sqrt(1 - alpha_bar / alpha_bar_prev)
    )
    # Equation 12.
    noise = th.randn_like(x)
    mean_pred = (
        out["pred_xstart"] * th.sqrt(alpha_bar_prev)
        + th.sqrt(1 - alpha_bar_prev - sigma ** 2) * eps
    )
    nonzero_mask = (
        (t != 0).float().view(-1, *([1] * (len(x.shape) - 1)))
    )  # no noise when t == 0
    sample = mean_pred + nonzero_mask * sigma * noise
    return {"sample": sample, "pred_xstart": out["pred_xstart"]}
def ddim_reverse_sample(
    self,
    model,
    x,
    t,
    clip_denoised=True,
    denoised_fn=None,
    model_kwargs=None,
    eta=0.0,
):
    """
    Sample x_{t+1} from the model using DDIM reverse ODE.

    Only the deterministic (eta == 0) path is supported.
    """
    assert eta == 0.0, "Reverse ODE only for deterministic path"
    out = self.p_mean_variance(
        model,
        x,
        t,
        clip_denoised=clip_denoised,
        denoised_fn=denoised_fn,
        model_kwargs=model_kwargs,
    )
    # Usually our model outputs epsilon, but we re-derive it
    # in case we used x_start or x_prev prediction.
    eps = (
        _extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x.shape) * x
        - out["pred_xstart"]
    ) / _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x.shape)
    alpha_bar_next = _extract_into_tensor(self.alphas_cumprod_next, t, x.shape)
    # Equation 12. reversed
    mean_pred = (
        out["pred_xstart"] * th.sqrt(alpha_bar_next)
        + th.sqrt(1 - alpha_bar_next) * eps
    )
    return {"sample": mean_pred, "pred_xstart": out["pred_xstart"]}
def ddim_sample_loop(
    self,
    model,
    shape,
    noise=None,
    clip_denoised=True,
    denoised_fn=None,
    model_kwargs=None,
    device=None,
    progress=False,
    eta=0.0,
):
    """
    Generate samples from the model using DDIM.

    Same usage as p_sample_loop().
    """
    last = None
    # Exhaust the progressive sampler; the final yield holds the x_0 sample.
    for step_out in self.ddim_sample_loop_progressive(
        model,
        shape,
        noise=noise,
        clip_denoised=clip_denoised,
        denoised_fn=denoised_fn,
        model_kwargs=model_kwargs,
        device=device,
        progress=progress,
        eta=eta,
    ):
        last = step_out
    return last["sample"]
def ddim_sample_loop_progressive(
    self,
    model,
    shape,
    noise=None,
    clip_denoised=True,
    denoised_fn=None,
    model_kwargs=None,
    device=None,
    progress=False,
    eta=0.0,
):
    """
    Use DDIM to sample from the model and yield intermediate samples from
    each timestep of DDIM.

    Same usage as p_sample_loop_progressive().
    """
    if device is None:
        device = next(model.parameters()).device
    assert isinstance(shape, (tuple, list))
    if noise is not None:
        img = noise
    else:
        img = th.randn(*shape, device=device)
    # Iterate timesteps from T-1 down to 0.
    indices = list(range(self.num_timesteps))[::-1]
    if progress:
        # Lazy import so that we don't depend on tqdm.
        from tqdm.auto import tqdm
        indices = tqdm(indices)
    for i in indices:
        t = th.tensor([i] * shape[0], device=device)
        with th.no_grad():
            out = self.ddim_sample(
                model,
                img,
                t,
                clip_denoised=clip_denoised,
                denoised_fn=denoised_fn,
                model_kwargs=model_kwargs,
                eta=eta,
            )
            yield out
            img = out["sample"]
def _vb_terms_bpd(
    self, model, x_start, x_t, t, clip_denoised=True, model_kwargs=None, latent_mask=None,
):
    """
    Get a term for the variational lower-bound.

    The resulting units are bits (rather than nats, as one might expect).
    This allows for comparison to other papers.

    :param latent_mask: optional mask passed to mean_flat so only latent
        frames contribute to the reduction.
    :return: a dict with the following keys:
             - 'output': a shape [N] tensor of NLLs or KLs.
             - 'pred_xstart': the x_0 predictions.
    """
    true_mean, _, true_log_variance_clipped = self.q_posterior_mean_variance(
        x_start=x_start, x_t=x_t, t=t
    )
    out = self.p_mean_variance(
        model, x_t, t, clip_denoised=clip_denoised, model_kwargs=model_kwargs
    )
    # KL(q(x_{t-1}|x_t,x_0) || p(x_{t-1}|x_t)), converted from nats to bits.
    kl = normal_kl(
        true_mean, true_log_variance_clipped, out["mean"], out["log_variance"]
    )
    kl = mean_flat(kl, mask=latent_mask) / np.log(2.0)
    decoder_nll = -discretized_gaussian_log_likelihood(
        x_start, means=out["mean"], log_scales=0.5 * out["log_variance"]
    )
    assert decoder_nll.shape == x_start.shape
    decoder_nll = mean_flat(decoder_nll, mask=latent_mask) / np.log(2.0)
    # At the first timestep return the decoder NLL,
    # otherwise return KL(q(x_{t-1}|x_t,x_0) || p(x_{t-1}|x_t))
    output = th.where((t == 0), decoder_nll, kl)
    return {"output": output, "pred_xstart": out["pred_xstart"]}
def training_losses(self, model, x_start, t, model_kwargs=None, noise=None, latent_mask=None, eval_mask=None):
    """
    Compute training losses for a single timestep.

    :param model: the model to evaluate loss on; returns (output, attn).
    :param x_start: the [N x C x ...] tensor of inputs.
    :param t: a batch of timestep indices.
    :param model_kwargs: if not None, a dict of extra keyword arguments to
        pass to the model. This can be used for conditioning.
    :param noise: if specified, the specific Gaussian noise to try to remove.
    :param latent_mask: mask applied when reducing the training MSE.
    :param eval_mask: mask applied when reducing the "eval-mse" diagnostic.
    :return: a dict with the key "loss" containing a tensor of shape [N].
             Some mean or variance settings may also have other keys.
    """
    if model_kwargs is None:
        model_kwargs = {}
    if noise is None:
        noise = th.randn_like(x_start)
    x_t = self.q_sample(x_start, t, noise=noise)
    terms = {}
    if self.loss_type == LossType.KL or self.loss_type == LossType.RESCALED_KL:
        terms["loss"] = self._vb_terms_bpd(
            model=model,
            x_start=x_start,
            x_t=x_t,
            t=t,
            clip_denoised=False,
            model_kwargs=model_kwargs,
        )["output"]
        if self.loss_type == LossType.RESCALED_KL:
            terms["loss"] *= self.num_timesteps
    elif self.loss_type == LossType.MSE or self.loss_type == LossType.RESCALED_MSE:
        model_output, _ = model(x_t, timesteps=self._scale_timesteps(t), **model_kwargs)
        if self.model_var_type in [
            ModelVarType.LEARNED,
            ModelVarType.LEARNED_RANGE,
        ]:
            B, C = x_t.shape[:2]
            assert model_output.shape == (B, C * 2, *x_t.shape[2:])
            model_output, model_var_values = th.split(model_output, C, dim=1)
            # Learn the variance using the variational bound, but don't let
            # it affect our mean prediction.
            # Fix: the frozen "model" must mirror the real model's calling
            # convention — p_mean_variance calls model(x, t,
            # return_attn_weights=..., **model_kwargs) and unpacks
            # (output, attn). The previous lambda accepted no keyword
            # arguments and returned a bare tensor, which raised a
            # TypeError whenever this branch ran.
            frozen_out = th.cat([model_output.detach(), model_var_values], dim=1)
            terms["vb"] = self._vb_terms_bpd(
                model=lambda *args, r=frozen_out, **kwargs: (r, None),
                x_start=x_start,
                x_t=x_t,
                t=t,
                clip_denoised=False,
            )["output"]
            if self.loss_type == LossType.RESCALED_MSE:
                # Divide by 1000 for equivalence with initial implementation.
                # Without a factor of 1/1000, the VB term hurts the MSE term.
                terms["vb"] *= self.num_timesteps / 1000.0
        # The regression target depends on what the model is parameterized
        # to predict.
        target = {
            ModelMeanType.PREVIOUS_X: self.q_posterior_mean_variance(
                x_start=x_start, x_t=x_t, t=t
            )[0],
            ModelMeanType.START_X: x_start,
            ModelMeanType.EPSILON: noise,
        }[self.model_mean_type]
        assert model_output.shape == target.shape == x_start.shape
        terms["mse"] = mean_flat((target - model_output) ** 2, mask=latent_mask)
        terms["eval-mse"] = mean_flat((target - model_output) ** 2, mask=eval_mask)
        if "vb" in terms:
            terms["loss"] = terms["mse"] + terms["vb"]
        else:
            terms["loss"] = terms["mse"]
    else:
        raise NotImplementedError(self.loss_type)
    return terms
def _prior_bpd(self, x_start, latent_mask=None):
    """
    Get the prior KL term for the variational lower-bound, measured in
    bits-per-dim.

    This term can't be optimized, as it only depends on the encoder.

    :param x_start: the [N x C x ...] tensor of inputs.
    :param latent_mask: optional mask passed to mean_flat.
    :return: a batch of [N] KL values (in bits), one per batch element.
    """
    batch_size = x_start.shape[0]
    # Compare q(x_T | x_0) against the standard-normal prior.
    t = th.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device)
    qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t)
    kl_prior = normal_kl(
        mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0
    )
    return mean_flat(kl_prior, mask=latent_mask) / np.log(2.0)
def calc_bpd_loop_subsampled(self, model, x_start, clip_denoised=True, model_kwargs=None, latent_mask=None, t_seq=None):
    """
    Compute the entire variational lower-bound, measured in bits-per-dim,
    as well as other related quantities.

    :param model: the model to evaluate loss on.
    :param x_start: the [N x C x ...] tensor of inputs.
    :param clip_denoised: if True, clip denoised samples.
    :param model_kwargs: if not None, a dict of extra keyword arguments to
        pass to the model. This can be used for conditioning.
    :param t_seq: the timesteps to evaluate; defaults to all of them,
        from T-1 down to 0. May also be a 2-D numpy array where each row
        holds the timesteps for one batch item.
    :return: a dict containing the following keys:
             - total_bpd: the total variational lower-bound, per batch element.
             - prior_bpd: the prior term in the lower-bound.
             - vb: an [N x T] tensor of terms in the lower-bound.
             - xstart_mse: an [N x T] tensor of x_0 MSEs for each timestep.
             - mse: an [N x T] tensor of epsilon MSEs for each timestep.
    """
    device = x_start.device
    batch_size = x_start.shape[0]
    if t_seq is None:
        t_seq = list(range(self.num_timesteps))[::-1]
    vb = []
    xstart_mse = []
    mse = []
    # A hacky way to handle computing bpd with a random subset of the timesteps,
    # where each row of t_seq is timesteps to one batch item.
    is_2d_t_seq = False
    if isinstance(t_seq, np.ndarray) and t_seq.ndim == 2:
        is_2d_t_seq = True
        t_seq = t_seq.transpose()
    for t in t_seq:
        if is_2d_t_seq:
            # t is a per-batch-item vector of timesteps.
            t_batch = th.tensor(t, device=device)
        else:
            t_batch = th.tensor([t] * batch_size, device=device)
        noise = th.randn_like(x_start)
        x_t = self.q_sample(x_start=x_start, t=t_batch, noise=noise)
        # Calculate VLB term at the current timestep
        with th.no_grad():
            out = self._vb_terms_bpd(
                model,
                x_start=x_start,
                x_t=x_t,
                t=t_batch,
                clip_denoised=clip_denoised,
                model_kwargs=model_kwargs,
                latent_mask=latent_mask,
            )
        vb.append(out["output"])
        xstart_mse.append(mean_flat((out["pred_xstart"] - x_start) ** 2, mask=latent_mask))
        eps = self._predict_eps_from_xstart(x_t, t_batch, out["pred_xstart"])
        mse.append(mean_flat((eps - noise) ** 2, mask=latent_mask))
    vb = th.stack(vb, dim=1)
    xstart_mse = th.stack(xstart_mse, dim=1)
    mse = th.stack(mse, dim=1)
    prior_bpd = self._prior_bpd(x_start, latent_mask=latent_mask)
    total_bpd = vb.sum(dim=1) + prior_bpd
    return {
        "total_bpd": total_bpd,
        "prior_bpd": prior_bpd,
        "vb": vb,
        "xstart_mse": xstart_mse,
        "mse": mse,
    }
def calc_bpd_loop(self, model, x_start, clip_denoised=True, model_kwargs=None, latent_mask=None):
    """Compute the full VLB in bits-per-dim using every timestep, T-1 down to 0."""
    all_timesteps_reversed = list(range(self.num_timesteps))[::-1]
    return self.calc_bpd_loop_subsampled(
        model=model,
        x_start=x_start,
        clip_denoised=clip_denoised,
        model_kwargs=model_kwargs,
        latent_mask=latent_mask,
        t_seq=all_timesteps_reversed,
    )
def _extract_into_tensor(arr, timesteps, broadcast_shape):
"""
Extract values from a 1-D numpy array for a batch of indices.
:param arr: the 1-D numpy array.
:param timesteps: a tensor of indices into the array to extract.
:param broadcast_shape: a larger shape of K dimensions with the batch
dimension equal to the length of timesteps.
:return: a tensor of shape [batch_size, 1, ...] where the shape has K dims.
"""
res = th.from_numpy(arr).to(device=timesteps.device)[timesteps].float()
while len(res.shape) < len(broadcast_shape):
res = res[..., None]
return res.expand(broadcast_shape) | 35,019 | 38.304153 | 129 | py |
flexible-video-diffusion-modeling | flexible-video-diffusion-modeling-main/improved_diffusion/train_util.py | import copy
import functools
import os
import wandb
import blobfile as bf
import glob
from pathlib import Path
from time import time
import numpy as np
import torch as th
import torch.distributed as dist
from torch.nn.parallel.distributed import DistributedDataParallel as DDP
from torch.optim import AdamW
from . import dist_util
from .logger import logger
from .fp16_util import (
make_master_params,
master_params_to_model_params,
model_grads_to_master_grads,
unflatten_master_params,
zero_grad,
)
from .nn import update_ema
from .resample import LossAwareSampler, UniformSampler
from .rng_util import rng_decorator, RNG
# For ImageNet experiments, this was a good default value.
# We found that the lg_loss_scale quickly climbed to
# 20-21 within the first ~1K steps of training.
INITIAL_LOG_LOSS_SCALE = 20.0
class TrainLoop:
def __init__(
    self,
    *,
    model,
    diffusion,
    data,
    batch_size,
    microbatch,
    lr,
    ema_rate,
    log_interval,
    save_interval,
    resume_checkpoint,
    use_fp16,
    fp16_scale_growth,
    schedule_sampler,
    weight_decay,
    lr_anneal_steps,
    sample_interval,
    pad_with_random_frames,
    max_frames,
    args,
):
    """
    Set up model/EMA/optimizer state, resume from checkpoints if requested,
    and wrap the model in DDP when CUDA is available.
    """
    self.args = args
    self.model = model
    self.diffusion = diffusion
    self.data = data
    self.batch_size = batch_size
    # microbatch <= 0 disables microbatching (one microbatch == full batch).
    self.microbatch = microbatch if microbatch > 0 else batch_size
    self.lr = lr
    # ema_rate may be a single float or a comma-separated string of rates.
    self.ema_rate = (
        [ema_rate]
        if isinstance(ema_rate, float)
        else [float(x) for x in ema_rate.split(",")]
    )
    self.log_interval = log_interval
    self.save_interval = save_interval
    self.resume_checkpoint = resume_checkpoint
    self.use_fp16 = use_fp16
    self.fp16_scale_growth = fp16_scale_growth
    self.schedule_sampler = schedule_sampler or UniformSampler(diffusion)
    self.weight_decay = weight_decay
    self.lr_anneal_steps = lr_anneal_steps
    self.sample_interval = sample_interval
    self.pad_with_random_frames = pad_with_random_frames
    # Fixed RNG so the visualisation batch is the same across runs/ranks.
    with RNG(0):
        self.vis_batch = next(self.data)[0][:2]
    self.max_frames = max_frames
    self.step = 0
    self.global_batch = self.batch_size * dist.get_world_size()
    self.model_params = list(self.model.parameters())
    self.master_params = self.model_params
    self.lg_loss_scale = INITIAL_LOG_LOSS_SCALE
    self.sync_cuda = th.cuda.is_available()
    self._load_and_sync_parameters()
    if self.use_fp16:
        self._setup_fp16()
    self.opt = AdamW(self.master_params, lr=self.lr, weight_decay=self.weight_decay)
    if self.args.resume_id != '':
        self._load_optimizer_state()
        # Model was resumed, either due to a restart or a checkpoint
        # being specified at the command line.
        self.ema_params = [
            self._load_ema_parameters(rate) for rate in self.ema_rate
        ]
    else:
        self.ema_params = [
            copy.deepcopy(self.master_params) for _ in range(len(self.ema_rate))
        ]
    if th.cuda.is_available():
        self.use_ddp = True
        self.ddp_model = DDP(
            self.model,
            device_ids=[dist_util.dev()],
            output_device=dist_util.dev(),
            broadcast_buffers=False,
            bucket_cap_mb=128,
            find_unused_parameters=False,
        )
    else:
        if dist.get_world_size() > 1:
            print(
                "Distributed training requires CUDA. "
                "Gradients will not be synchronized properly!"
            )
        self.use_ddp = False
        self.ddp_model = self.model
    if dist.get_rank() == 0:
        logger.logkv("num_parameters", sum(p.numel() for p in model.parameters()), distributed=False)
def _load_and_sync_parameters(self):
    """
    Load model weights from a resume checkpoint, if one is configured,
    and set self.step from the checkpoint filename.

    NOTE(review): despite the name, no cross-rank broadcast happens here —
    confirm each rank loads the same checkpoint.
    """
    resume_checkpoint = find_resume_checkpoint(self.args) or self.resume_checkpoint
    if resume_checkpoint:
        self.step = parse_resume_step_from_filename(resume_checkpoint)
        print(f"loading model from checkpoint: {resume_checkpoint}...")
        self.model.load_state_dict(
            dist_util.load_state_dict(
                resume_checkpoint, map_location=dist_util.dev()
            )['state_dict']
        )
def _load_ema_parameters(self, rate):
    """
    Return EMA parameters for the given rate, loaded from the EMA
    checkpoint next to the resume checkpoint when present; otherwise a
    deep copy of the current master parameters.
    """
    ema_params = copy.deepcopy(self.master_params)
    main_checkpoint = find_resume_checkpoint(self.args) or self.resume_checkpoint
    ema_checkpoint = find_ema_checkpoint(main_checkpoint, self.step, rate)
    if ema_checkpoint:
        print(f"loading EMA from checkpoint: {ema_checkpoint}...")
        state_dict = dist_util.load_state_dict(
            ema_checkpoint, map_location=dist_util.dev()
        )['state_dict']
        # Convert the flat state dict into the master-params layout.
        ema_params = self._state_dict_to_master_params(state_dict)
    return ema_params
    def _load_optimizer_state(self):
        """Restore optimizer state from ``optNNNNNN.pt`` next to the resumed model."""
        main_checkpoint = find_resume_checkpoint(self.args) or self.resume_checkpoint
        opt_checkpoint = bf.join(
            bf.dirname(main_checkpoint), f"opt{self.step:06}.pt"
        )
        if bf.exists(opt_checkpoint):
            print(f"loading optimizer state from checkpoint: {opt_checkpoint}")
            state_dict = dist_util.load_state_dict(
                opt_checkpoint, map_location=dist_util.dev()
            )
            self.opt.load_state_dict(state_dict)
    def _setup_fp16(self):
        """Switch to mixed precision: fp32 master params, fp16 model weights."""
        self.master_params = make_master_params(self.model_params)
        self.model.convert_to_fp16()
    def sample_some_indices(self, max_indices, T):
        """Sample up to ``max_indices`` evenly spaced frame indices in [0, T).

        Draws a random count ``s``, a log-uniform spacing ``scale``, and a
        random offset ``pos``; retries recursively if any index falls outside
        [0, T).
        """
        s = th.randint(low=1, high=max_indices+1, size=())
        # Bound scale so that scale*(s-1) stays strictly below T.
        max_scale = T / (s-0.999)
        scale = np.exp(np.random.rand() * np.log(max_scale))
        pos = th.rand(()) * (T - scale*(s-1))
        indices = [int(pos+i*scale) for i in range(s)]
        # do some recursion if we have somehow failed to satisfy the constraints
        if all(i<T and i>=0 for i in indices):
            return indices
        else:
            print('warning: sampled invalid indices', [int(pos+i*scale) for i in range(s)], 'trying again')
            return self.sample_some_indices(max_indices, T)
    def sample_all_masks(self, batch1, batch2=None, gather=True, set_masks={'obs': (), 'latent': ()}):
        """Sample per-video observation/latent frame masks for training.

        For each video: first fills the latent mask, then repeatedly adds
        index groups to either mask (chosen at random) until no more fit
        within the ``max_frames`` budget. ``set_masks`` can override the first
        rows with fixed masks (used for logging). When ``gather`` is True, the
        masked frames are gathered into a compact batch via
        ``prepare_training_batch``.

        NOTE(review): the mutable dict default is shared across calls; it is
        only read here, so this is harmless.
        """
        N = self.max_frames
        B, T, *_ = batch1.shape
        # Masks have shape (B, T, 1, 1, 1) so they broadcast over C, H, W.
        masks = {k: th.zeros_like(batch1[:, :, :1, :1, :1]) for k in ['obs', 'latent']}
        for obs_row, latent_row in zip(*[masks[k] for k in ['obs', 'latent']]):
            latent_row[self.sample_some_indices(max_indices=N, T=T)] = 1.
            while True:
                mask = obs_row if th.rand(()) < 0.5 else latent_row
                indices = th.tensor(self.sample_some_indices(max_indices=N, T=T))
                taken = (obs_row[indices] + latent_row[indices]).view(-1)
                indices = indices[taken == 0] # remove indices that are already used in a mask
                # Stop once the remaining budget cannot hold this group.
                if len(indices) > N - sum(obs_row) - sum(latent_row):
                    break
                mask[indices] = 1.
        if len(set_masks['obs']) > 0: # set_masks allow us to choose informative masks for logging
            for k in masks:
                set_values = set_masks[k]
                n_set = min(len(set_values), len(masks[k]))
                masks[k][:n_set] = set_values[:n_set]
        any_mask = (masks['obs'] + masks['latent']).clip(max=1)
        if not gather:
            return batch1, masks['obs'], masks['latent']
        batch, (obs_mask, latent_mask), frame_indices =\
            self.prepare_training_batch(
                any_mask, batch1, batch2, (masks['obs'], masks['latent'])
            )
        return batch, frame_indices, obs_mask, latent_mask
    def prepare_training_batch(self, mask, batch1, batch2, tensors):
        """
        Prepare training batch by selecting frames from batch1 according to mask, appending uniformly sampled frames
        from batch2, and selecting the corresponding elements from tensors (usually obs_mask and latent_mask).

        Returns (new_batch, new_tensors, indices) where indices maps each
        gathered slot back to its frame index in the original video.
        """
        B, T, *_ = mask.shape
        mask = mask.view(B, T) # remove unit C, H, W dims
        # With padding enabled every instance is padded up to max_frames;
        # otherwise the batch is trimmed to the largest per-instance count.
        effective_T = self.max_frames if self.pad_with_random_frames else mask.sum(dim=1).max().int()
        indices = th.zeros_like(mask[:, :effective_T], dtype=th.int64)
        new_batch = th.zeros_like(batch1[:, :effective_T])
        new_tensors = [th.zeros_like(t[:, :effective_T]) for t in tensors]
        for b in range(B):
            instance_T = mask[b].sum().int()
            indices[b, :instance_T] = mask[b].nonzero().flatten()
            # Padding slots point at uniformly random frames (or frame 0).
            indices[b, instance_T:] = th.randint_like(indices[b, instance_T:], high=T) if self.pad_with_random_frames else 0
            new_batch[b, :instance_T] = batch1[b][mask[b]==1]
            new_batch[b, instance_T:] = (batch1 if batch2 is None else batch2)[b][indices[b, instance_T:]]
            for new_t, t in zip(new_tensors, tensors):
                new_t[b, :instance_T] = t[b][mask[b]==1]
                new_t[b, instance_T:] = t[b][indices[b, instance_T:]]
        return new_batch, new_tensors, indices
    def run_loop(self):
        """Main training loop: step, log, checkpoint, and periodically sample.

        Runs until ``lr_anneal_steps`` is reached (or forever if it is falsy).
        """
        last_sample_time = None
        while (
            not self.lr_anneal_steps
            or self.step < self.lr_anneal_steps
        ):
            self.run_step()
            if self.step % self.log_interval == 0:
                logger.dumpkvs()
            if self.step % self.save_interval == 0:
                self.save()
                # Run for a finite amount of time in integration tests.
                if os.environ.get("DIFFUSION_TRAINING_TEST", "") and self.step > 0:
                    return
            # Also sample once early (step 5) as a smoke test of the pipeline.
            if self.sample_interval is not None and self.step != 0 and (self.step % self.sample_interval == 0 or self.step == 5):
                if last_sample_time is not None:
                    logger.logkv('timing/time_between_samples', time()-last_sample_time)
                self.log_samples()
                last_sample_time = time()
            self.step += 1
        # Save the last checkpoint if it wasn't already saved.
        if (self.step - 1) % self.save_interval != 0:
            self.save()
    def run_step(self):
        """One optimization step: forward/backward then fp16 or fp32 update."""
        t0 = time()
        self.forward_backward()
        if self.use_fp16:
            self.optimize_fp16()
        else:
            self.optimize_normal()
        self.log_step()
        logger.logkv("timing/step_time", time() - t0)
    def forward_backward(self):
        """Accumulate gradients over microbatches of one (or two) data batches.

        ``batch2`` supplies the random padding frames when
        ``pad_with_random_frames`` is enabled. Gradient sync is deferred via
        ``no_sync`` until the last microbatch so DDP all-reduces only once.
        """
        zero_grad(self.model_params)
        batch1 = next(self.data)[0]
        batch2 = next(self.data)[0] if self.pad_with_random_frames else None
        for i in range(0, batch1.shape[0], self.microbatch):
            micro1 = batch1[i : i + self.microbatch]
            micro2 = batch2[i:i + self.microbatch] if batch2 is not None else None
            micro, frame_indices, obs_mask, latent_mask = self.sample_all_masks(micro1, micro2)
            micro = micro.to(dist_util.dev())
            frame_indices = frame_indices.to(dist_util.dev())
            obs_mask = obs_mask.to(dist_util.dev())
            latent_mask = latent_mask.to(dist_util.dev())
            last_batch = (i + self.microbatch) >= batch1.shape[0]
            t, weights = self.schedule_sampler.sample(micro.shape[0], dist_util.dev())
            compute_losses = functools.partial(
                self.diffusion.training_losses,
                self.ddp_model,
                micro,
                t,
                model_kwargs={'frame_indices': frame_indices, 'obs_mask': obs_mask,
                              'latent_mask': latent_mask, 'x0': micro},
                # With random padding, every non-observed slot is treated as latent.
                latent_mask=(1-obs_mask) if self.pad_with_random_frames else latent_mask,
                eval_mask=latent_mask,
            )
            if last_batch or not self.use_ddp:
                losses = compute_losses()
            else:
                with self.ddp_model.no_sync():
                    losses = compute_losses()
            if isinstance(self.schedule_sampler, LossAwareSampler):
                self.schedule_sampler.update_with_local_losses(
                    t, losses["loss"].detach()
                )
            loss = (losses["loss"] * weights).mean()
            log_loss_dict(
                self.diffusion, t, {k: v * weights for k, v in losses.items()}
            )
            if self.use_fp16:
                # Scale the loss up to keep fp16 gradients in representable range.
                loss_scale = 2 ** self.lg_loss_scale
                (loss * loss_scale).backward()
            else:
                loss.backward()
    def optimize_fp16(self):
        """fp16 update with dynamic loss scaling.

        Skips the step (and shrinks the scale) when any gradient is non-finite;
        otherwise unscales gradients, steps the optimizer on the master params,
        updates EMAs, copies masters back into the fp16 model, and grows the
        loss scale.
        """
        if any(not th.isfinite(p.grad).all() for p in self.model_params):
            self.lg_loss_scale -= 1
            print(f"Found NaN, decreased lg_loss_scale to {self.lg_loss_scale}")
            return
        model_grads_to_master_grads(self.model_params, self.master_params)
        # Undo the 2**lg_loss_scale factor applied in forward_backward.
        self.master_params[0].grad.mul_(1.0 / (2 ** self.lg_loss_scale))
        self._log_grad_norm()
        self._anneal_lr()
        self.opt.step()
        for rate, params in zip(self.ema_rate, self.ema_params):
            update_ema(params, self.master_params, rate=rate)
        master_params_to_model_params(self.model_params, self.master_params)
        self.lg_loss_scale += self.fp16_scale_growth
    def optimize_normal(self):
        """Standard fp32 update: log grad norm, anneal LR, step, update EMAs."""
        self._log_grad_norm()
        self._anneal_lr()
        self.opt.step()
        for rate, params in zip(self.ema_rate, self.ema_params):
            update_ema(params, self.master_params, rate=rate)
    def _log_grad_norm(self):
        """Log the global L2 norm over all master-parameter gradients."""
        sqsum = 0.0
        for p in self.master_params:
            sqsum += (p.grad ** 2).sum().item()
        logger.logkv_mean("grad_norm", np.sqrt(sqsum))
def _anneal_lr(self):
if not self.lr_anneal_steps:
return
frac_done = self.step / self.lr_anneal_steps
lr = self.lr * (1 - frac_done)
for param_group in self.opt.param_groups:
param_group["lr"] = lr
    def log_step(self):
        """Log step counter and total samples seen (step * global batch)."""
        logger.logkv("step", self.step)
        logger.logkv("samples", (self.step + 1) * self.global_batch)
        if self.use_fp16:
            logger.logkv("lg_loss_scale", self.lg_loss_scale)
    def save(self):
        """Save model, each EMA copy, and the optimizer state (rank 0 only).

        Files are written under ``get_blob_logdir(self.args)`` as
        ``model{step}.pt`` / ``ema_{rate}_{step}.pt`` / ``opt{step}.pt``.
        All ranks meet at the final barrier.
        """
        if dist.get_rank() == 0:
            Path(get_blob_logdir(self.args)).mkdir(parents=True, exist_ok=True)
        def save_checkpoint(rate, params):
            if dist.get_rank() == 0:
                print(f"saving model {rate}...")
                if not rate:
                    filename = f"model{self.step:06d}.pt"
                else:
                    filename = f"ema_{rate}_{self.step:06d}.pt"
                # Persist run config and step alongside the weights so
                # checkpoints are self-describing.
                to_save = {
                    "state_dict": self._master_params_to_state_dict(params),
                    "config": self.args.__dict__,
                    "step": self.step
                }
                with bf.BlobFile(bf.join(get_blob_logdir(self.args), filename), "wb") as f:
                    th.save(to_save, f)
        save_checkpoint(0, self.master_params)
        for rate, params in zip(self.ema_rate, self.ema_params):
            save_checkpoint(rate, params)
        if dist.get_rank() == 0:
            with bf.BlobFile(
                bf.join(get_blob_logdir(self.args), f"opt{self.step:06d}.pt"),
                "wb",
            ) as f:
                th.save(self.opt.state_dict(), f)
        dist.barrier()
    def _master_params_to_state_dict(self, master_params):
        """Convert master params back into a model ``state_dict``."""
        if self.use_fp16:
            # In fp16 mode the masters are stored flattened; unflatten to
            # per-parameter tensors first.
            master_params = unflatten_master_params(
                self.model.parameters(), master_params
            )
        state_dict = self.model.state_dict()
        for i, (name, _value) in enumerate(self.model.named_parameters()):
            assert name in state_dict
            state_dict[name] = master_params[i]
        return state_dict
    def _state_dict_to_master_params(self, state_dict):
        """Inverse of ``_master_params_to_state_dict``: state_dict -> masters."""
        params = [state_dict[name] for name, _ in self.model.named_parameters()]
        if self.use_fp16:
            return make_master_params(params)
        else:
            return params
    @rng_decorator(seed=0)
    def log_samples(self):
        """Generate and log sample videos to wandb using the first EMA weights.

        Rank 0 temporarily swaps EMA weights into the model, samples the
        visualization batch with fixed obs/latent masks, logs the videos, and
        restores the original weights. Deterministic via ``rng_decorator``.
        """
        if dist.get_rank() == 0:
            sample_start = time()
            self.model.eval()
            orig_state_dict = copy.deepcopy(self.model.state_dict())
            self.model.load_state_dict(copy.deepcopy(self._master_params_to_state_dict(self.ema_params[0])))
            print("sampling...")
            # construct simple masks for our vis batch
            obs_mask = th.zeros_like(self.vis_batch[:, :, :1, :1, :1])
            latent_mask = obs_mask.clone()
            n_obs = self.max_frames // 3
            # Video 0: observe the first n_obs frames, predict up to max_frames.
            obs_mask[0, :n_obs] = 1.
            latent_mask[0, n_obs:self.max_frames] = 1.
            if self.batch_size > 1:
                # Video 1: same pattern but with strided (spaced-out) frames.
                spacing = len(self.vis_batch[0]) // self.max_frames
                obs_mask[1, :n_obs*spacing:spacing] = 1.
                latent_mask[1, n_obs*spacing:self.max_frames*spacing:spacing] = 1.
            batch, frame_indices, obs_mask, latent_mask = self.sample_all_masks(
                self.vis_batch, None, gather=True,
                set_masks={'obs': obs_mask, 'latent': latent_mask}
            )
            # NOTE(review): `attn` is unused below.
            samples, attn = self.diffusion.p_sample_loop(
                self.model,
                batch.shape,
                clip_denoised=True,
                model_kwargs={
                    'frame_indices': frame_indices.to(dist_util.dev()),
                    'x0': batch.to(dist_util.dev()),
                    'obs_mask': obs_mask.to(dist_util.dev()),
                    'latent_mask': latent_mask.to(dist_util.dev())},
                latent_mask=latent_mask,
                return_attn_weights=True,
            )
            # Keep observed frames from the data, sampled frames from the model.
            samples = samples.cpu() * latent_mask + batch * obs_mask
            _mark_as_observed(samples[:, :n_obs])
            # Map from [-1, 1] to uint8 [0, 255] for video logging.
            samples = ((samples + 1) * 127.5).clamp(0, 255).to(th.uint8).cpu().numpy()
            for i, video in enumerate(samples):
                logger.logkv(f'video-{i}', wandb.Video(video), distributed=False)
            logger.logkv("timing/sampling_time", time() - sample_start, distributed=False)
            # restore model to original state
            self.model.train()
            self.model.load_state_dict(orig_state_dict)
            print("finished sampling")
        dist.barrier()
def _mark_as_observed(images, color=[1., -1., -1.]):
for i, c in enumerate(color):
images[..., i, :, 1:2] = c
images[..., i, 1:2, :] = c
images[..., i, :, -2:-1] = c
images[..., i, -2:-1, :] = c
def parse_resume_step_from_filename(filename):
    """
    Parse filenames of the form path/to/modelNNNNNN.pt, where NNNNNN is the
    checkpoint's number of steps. Returns 0 when the name does not match.
    """
    _, marker, suffix = filename.rpartition("model")
    if not marker:
        # "model" never occurs in the filename.
        return 0
    step_str = suffix.split(".", 1)[0]
    try:
        return int(step_str)
    except ValueError:
        # Whatever follows "model" is not an integer.
        return 0
def get_blob_logdir(args):
    """Return this run's checkpoint directory: ``checkpoints/<run id>``.

    Uses ``args.resume_id`` when resuming, otherwise the id of the active
    wandb run. The top-level "checkpoints" directory must already exist.
    """
    root_dir = "checkpoints"
    assert os.path.exists(root_dir), "Must create directory 'checkpoints'"
    if len(args.resume_id) > 0:
        run_id = args.resume_id
    else:
        run_id = wandb.run.id
    return os.path.join(root_dir, run_id)
def find_resume_checkpoint(args):
    """Return the newest ``model*.pt`` checkpoint for ``args.resume_id``.

    Returns None when no resume id is set or no checkpoints exist yet.
    """
    # On your infrastructure, you may want to override this to automatically
    # discover the latest checkpoint on your blob storage, etc.
    if not args.resume_id:
        return
    ckpts = glob.glob(os.path.join(get_blob_logdir(args), "model*.pt"))
    if len(ckpts) == 0:
        return None
    # Map step number -> filename, then pick the highest step.
    iters_fnames = {int(Path(fname).stem.replace('model', '')): fname for fname in ckpts}
    return iters_fnames[max(iters_fnames.keys())]
def find_ema_checkpoint(main_checkpoint, step, rate):
    """Return the EMA checkpoint path matching ``step`` and ``rate``, or None.

    Looks for ``ema_{rate}_{step}.pt`` in the same directory as
    ``main_checkpoint``.
    """
    if main_checkpoint is None:
        return None
    filename = f"ema_{rate}_{(step):06d}.pt"
    path = bf.join(bf.dirname(main_checkpoint), filename)
    if bf.exists(path):
        return path
    return None
def log_loss_dict(diffusion, ts, losses):
    """Log mean of each loss term, plus per-quartile means over timesteps.

    :param diffusion: diffusion object providing ``num_timesteps``.
    :param ts: tensor of sampled timesteps, one per batch element.
    :param losses: dict mapping loss name to a per-element loss tensor.
    """
    for key, values in losses.items():
        logger.logkv_mean(key, values.mean().item())
        # Log the quantiles (four quartiles, in particular).
        for sub_t, sub_loss in zip(ts.cpu().numpy(), values.detach().cpu().numpy()):
            quartile = int(4 * sub_t / diffusion.num_timesteps)
            logger.logkv_mean(f"{key}_q{quartile}", sub_loss)
| 20,596 | 38.685934 | 129 | py |
flexible-video-diffusion-modeling | flexible-video-diffusion-modeling-main/improved_diffusion/respace.py | import numpy as np
import torch as th
from .gaussian_diffusion import GaussianDiffusion
def space_timesteps(num_timesteps, section_counts):
    """
    Create a list of timesteps to use from an original diffusion process,
    given the number of timesteps we want to take from equally-sized portions
    of the original process.

    For example, if there's 300 timesteps and the section counts are [10,15,20]
    then the first 100 timesteps are strided to be 10 timesteps, the second 100
    are strided to be 15 timesteps, and the final 100 are strided to be 20.

    If the stride is a string starting with "ddim", then the fixed striding
    from the DDIM paper is used, and only one section is allowed.

    :param num_timesteps: the number of diffusion steps in the original
                          process to divide up.
    :param section_counts: either a list of numbers, or a string containing
                           comma-separated numbers, indicating the step count
                           per section. As a special case, use "ddimN" where N
                           is a number of steps to use the striding from the
                           DDIM paper.
    :return: a set of diffusion steps from the original process to use.
    """
    if isinstance(section_counts, str):
        if section_counts.startswith("ddim"):
            desired_count = int(section_counts[len("ddim") :])
            # Search for an integer stride producing exactly desired_count steps.
            for i in range(1, num_timesteps):
                if len(range(0, num_timesteps, i)) == desired_count:
                    return set(range(0, num_timesteps, i))
            # Bug fix: the error previously reported num_timesteps instead of
            # the requested number of steps (desired_count).
            raise ValueError(
                f"cannot create exactly {desired_count} steps with an integer stride"
            )
        section_counts = [int(x) for x in section_counts.split(",")]
    size_per = num_timesteps // len(section_counts)
    extra = num_timesteps % len(section_counts)  # first `extra` sections get one more step
    start_idx = 0
    all_steps = []
    for i, section_count in enumerate(section_counts):
        size = size_per + (1 if i < extra else 0)
        if size < section_count:
            raise ValueError(
                f"cannot divide section of {size} steps into {section_count}"
            )
        if section_count <= 1:
            frac_stride = 1
        else:
            # Fractional stride so both endpoints of the section are used.
            frac_stride = (size - 1) / (section_count - 1)
        cur_idx = 0.0
        taken_steps = []
        for _ in range(section_count):
            taken_steps.append(start_idx + round(cur_idx))
            cur_idx += frac_stride
        all_steps += taken_steps
        start_idx += size
    return set(all_steps)
class SpacedDiffusion(GaussianDiffusion):
    """
    A diffusion process which can skip steps in a base diffusion process.

    :param use_timesteps: a collection (sequence or set) of timesteps from the
                          original diffusion process to retain.
    :param kwargs: the kwargs to create the base diffusion process.
    """
    def __init__(self, use_timesteps, **kwargs):
        self.use_timesteps = set(use_timesteps)
        # Maps each shortened-process timestep to its original-process index.
        self.timestep_map = []
        self.original_num_steps = len(kwargs["betas"])
        base_diffusion = GaussianDiffusion(**kwargs) # pylint: disable=missing-kwoa
        last_alpha_cumprod = 1.0
        new_betas = []
        for i, alpha_cumprod in enumerate(base_diffusion.alphas_cumprod):
            if i in self.use_timesteps:
                # Recompute betas so the retained steps form a consistent
                # process: 1 - beta = alphas_cumprod[i] / alphas_cumprod[prev].
                new_betas.append(1 - alpha_cumprod / last_alpha_cumprod)
                last_alpha_cumprod = alpha_cumprod
                self.timestep_map.append(i)
        kwargs["betas"] = np.array(new_betas)
        super().__init__(**kwargs)
    def p_mean_variance(
        self, model, *args, **kwargs
    ): # pylint: disable=signature-differs
        # Wrap the model so timesteps are translated back to original indices.
        return super().p_mean_variance(self._wrap_model(model), *args, **kwargs)
    def training_losses(
        self, model, *args, **kwargs
    ): # pylint: disable=signature-differs
        return super().training_losses(self._wrap_model(model), *args, **kwargs)
    def _wrap_model(self, model):
        if isinstance(model, _WrappedModel):
            return model
        return _WrappedModel(
            model, self.timestep_map, self.rescale_timesteps, self.original_num_steps
        )
    def _scale_timesteps(self, t):
        # Scaling is done by the wrapped model.
        return t
class _WrappedModel:
def __init__(self, model, timestep_map, rescale_timesteps, original_num_steps):
self.model = model
self.timestep_map = timestep_map
self.rescale_timesteps = rescale_timesteps
self.original_num_steps = original_num_steps
def __call__(self, x, timesteps, **kwargs):
ts = timesteps
map_tensor = th.tensor(self.timestep_map, device=ts.device, dtype=ts.dtype)
new_ts = map_tensor[ts]
if self.rescale_timesteps:
new_ts = new_ts.float() * (1000.0 / self.original_num_steps)
return self.model(x, timesteps=new_ts, **kwargs) | 4,953 | 38.951613 | 85 | py |
flexible-video-diffusion-modeling | flexible-video-diffusion-modeling-main/improved_diffusion/dist_util.py | """
Helpers for distributed training.
"""
import io
import os
import socket
import blobfile as bf
from mpi4py import MPI
import torch as th
import torch.distributed as dist
# Change this to reflect your cluster layout.
# The GPU for a given rank is (rank % GPUS_PER_NODE).
GPUS_PER_NODE = 8
SETUP_RETRY_COUNT = 3
def setup_dist():
    """
    Setup a distributed process group.

    Uses MPI to agree on the master address/port and rank/world-size, then
    initializes torch.distributed with NCCL (GPU) or gloo (CPU). No-op when
    a process group is already initialized.
    """
    if dist.is_initialized():
        return
    comm = MPI.COMM_WORLD
    backend = "gloo" if not th.cuda.is_available() else "nccl"
    if backend == "gloo":
        hostname = "localhost"
    else:
        hostname = socket.gethostbyname(socket.getfqdn())
    # Rank 0 broadcasts its hostname and a free port to all other ranks.
    os.environ["MASTER_ADDR"] = comm.bcast(hostname, root=0)
    os.environ["RANK"] = str(comm.rank)
    os.environ["WORLD_SIZE"] = str(comm.size)
    port = comm.bcast(_find_free_port(), root=0)
    os.environ["MASTER_PORT"] = str(port)
    dist.init_process_group(backend=backend, init_method="env://")
def dev():
    """
    Get the device to use for th.distributed.

    Maps MPI rank to a local GPU index modulo GPUS_PER_NODE; falls back to CPU.
    """
    if th.cuda.is_available():
        return th.device(f"cuda:{MPI.COMM_WORLD.Get_rank() % GPUS_PER_NODE}")
    return th.device("cpu")
def load_state_dict(path, **kwargs):
    """
    Load a PyTorch file without redundant fetches across MPI ranks.

    Rank 0 reads the raw bytes once and broadcasts them; every rank then
    deserializes from memory. ``kwargs`` are forwarded to ``th.load``.
    """
    if MPI.COMM_WORLD.Get_rank() == 0:
        with bf.BlobFile(path, "rb") as f:
            data = f.read()
    else:
        data = None
    data = MPI.COMM_WORLD.bcast(data)
    return th.load(io.BytesIO(data), **kwargs)
def sync_params(params):
    """
    Synchronize a sequence of Tensors across ranks from rank 0.
    """
    for p in params:
        with th.no_grad():
            dist.broadcast(p, 0)
def _find_free_port():
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(("", 0))
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
return s.getsockname()[1]
finally:
s.close()
| 1,956 | 22.578313 | 77 | py |
flexible-video-diffusion-modeling | flexible-video-diffusion-modeling-main/scripts/video_make_mp4.py | import torch as th
import numpy as np
from argparse import ArgumentParser
from pathlib import Path
import uuid
import argparse
import json
from improved_diffusion.video_datasets import get_test_dataset
from improved_diffusion.test_util import mark_as_observed, tensor2gif, tensor2mp4
from improved_diffusion.script_util import str2bool
if __name__ == "__main__":
    # Stitch saved sample videos (and optionally the ground truth) into a
    # single gif/mp4 grid: rows = seeds (+ GT), columns = test videos.
    parser = ArgumentParser()
    parser.add_argument("--eval_dir", type=str, required=True)
    parser.add_argument("--out_dir", type=str, default=None)
    parser.add_argument("--add_gt", type=str2bool, default=True)
    parser.add_argument("--do_n", type=int, default=1)
    parser.add_argument("--n_seeds", type=int, default=1)
    parser.add_argument("--obs_length", type=int, default=0,
                        help="Number of observed images. If positive, marks the first obs_length frames in output gifs by a red border.")
    parser.add_argument("--format", type=str, default="gif", choices=["gif", "mp4", "avi"])
    args = parser.parse_args()
    if args.add_gt:
        # Model config tells us which dataset the samples came from.
        model_args_path = Path(args.eval_dir) / "model_config.json"
        with open(model_args_path, "r") as f:
            model_args = argparse.Namespace(**json.load(f))
        dataset = get_test_dataset(model_args.dataset)
    out_dir = (Path(args.out_dir) if args.out_dir is not None else Path(args.eval_dir)) / "videos"
    out_dir.mkdir(exist_ok=True)
    out_path = out_dir / f"{args.do_n}_{args.n_seeds}.{args.format}"
    videos = []
    for data_idx in range(args.do_n):
        if args.add_gt:
            # Rescale ground truth from [-1, 1] to uint8 [0, 255].
            gt_drange = [-1, 1]
            gt_video, _ = dataset[data_idx]
            gt_video = (gt_video.numpy() - gt_drange[0]) / (gt_drange[1] - gt_drange[0]) * 255
            gt_video = gt_video.astype(np.uint8)
            mark_as_observed(gt_video)
            videos.append([gt_video])
        else:
            videos.append([])
        seed = 0
        done = 0
        while done < args.n_seeds:
            filename = Path(args.eval_dir) / "samples" / f"sample_{data_idx:04d}-{seed}.npy"
            print(filename)
            # NOTE(review): only PermissionError is skipped here; a missing
            # file (FileNotFoundError) would crash -- confirm intended.
            try:
                video = np.load(filename)
                mark_as_observed(video[:args.obs_length])
                videos[-1].append(video)
                done += 1
            except PermissionError:
                pass
            seed += 1
            assert seed < 100, f'Not enough seeds for idx {data_idx} (found {done} after trying {seed} seeds)'
        # Stack this video's seeds vertically (height axis).
        videos[-1] = np.concatenate(videos[-1], axis=-2)
    # Stack different test videos horizontally (width axis).
    video = np.concatenate(videos, axis=-1)
    random_str = uuid.uuid4()
    if args.format == "gif":
        tensor2gif(th.tensor(video), out_path, drange=[0, 255], random_str=random_str)
    elif args.format == "mp4":
        print(th.tensor(video).shape, th.tensor(video).dtype)
        tensor2mp4(th.tensor(video), out_path, drange=[0, 255], random_str=random_str)
    else:
        raise ValueError(f"Unknown format {args.format}")
    print(f"Saved to {out_path}")
print(f"Saved to {out_path}") | 2,986 | 40.486111 | 137 | py |
flexible-video-diffusion-modeling | flexible-video-diffusion-modeling-main/scripts/video_fvd.py | import torch as th
import numpy as np
import argparse
import os
from pathlib import Path
import json
from collections import defaultdict
import tensorflow.compat.v1 as tf
# Metrics
from improved_diffusion.video_datasets import get_test_dataset
import improved_diffusion.frechet_video_distance as fvd
from improved_diffusion import test_util
tf.disable_eager_execution() # Required for our FVD computation code
class SampleDataset(th.utils.data.Dataset):
    """Dataset over generated sample videos stored as uint8 ``.npy`` files.

    Item ``i`` is read from ``<samples_path>/sample_IIII-<sample_idx>.npy``,
    rescaled from [0, 255] into [-1, 1], and returned as (float32 tensor, {}).
    """

    def __init__(self, samples_path, sample_idx, length):
        self.samples_path = Path(samples_path)
        self.sample_idx = sample_idx
        self.length = length

    def __len__(self):
        return self.length

    def __getitem__(self, idx):
        filename = f"sample_{idx:04d}-{self.sample_idx}.npy"
        video = np.load(self.samples_path / filename).astype(np.float32)
        # Map pixel values from [0, 255] into [-1, 1].
        video = -1 + 2 * video / 255
        return th.tensor(video).type(th.float32), {}
class FVD:
    """Wraps the TF1 I3D feature extractor used for FVD computation.

    Builds a fixed-batch-size placeholder graph once and keeps a live session;
    ``extract_features`` pads smaller batches up to ``batch_size``.
    """
    def __init__(self, batch_size, T, frame_shape):
        self.batch_size = batch_size
        # frame_shape is (H, W, C); input is uint8 video of shape B,T,H,W,C.
        self.vid = tf.placeholder("uint8", [self.batch_size, T, *frame_shape])
        self.vid_feature_vec = fvd.create_id3_embedding(fvd.preprocess(self.vid, (224, 224)), batch_size=self.batch_size)
        self.sess = tf.Session()
        self.sess.run(tf.global_variables_initializer())
        self.sess.run(tf.tables_initializer())
    def extract_features(self, vid):
        """Return I3D embeddings for a BxTxCxHxW uint8 video batch."""
        def pad_along_axis(array: np.ndarray, target_length: int, axis: int = 0) -> np.ndarray:
            # From here: https://stackoverflow.com/questions/19349410/how-to-pad-with-zeros-a-tensor-along-some-axis-python
            pad_size = target_length - array.shape[axis]
            if pad_size <= 0:
                return array
            npad = [(0, 0)] * array.ndim
            npad[axis] = (0, pad_size)
            return np.pad(array, pad_width=npad, mode='constant', constant_values=0)
        # vid is expected to have a shape of BxTxCxHxW
        B = vid.shape[0]
        vid = np.moveaxis(vid, 2, 4) # B, T, H, W, C
        # Zero-pad to the fixed graph batch size, then drop padded rows after.
        vid = pad_along_axis(vid, target_length=self.batch_size, axis=0)
        features = self.sess.run(self.vid_feature_vec, feed_dict={self.vid: vid})
        features = features[:B]
        return features
    @staticmethod
    def compute_fvd(vid1_features, vid2_features):
        """Compute the Frechet distance between two feature sets."""
        return fvd.fid_features_to_metric(vid1_features, vid2_features)
def compute_fvd(test_dataset, sample_dataset, T, num_videos, batch_size=16):
    """Compute FVD between real test videos and generated samples.

    Both datasets must contain exactly ``num_videos`` items yielding videos in
    [-1, 1]; features are extracted batch-wise with the I3D model.
    """
    _, C, H, W = test_dataset[0][0].shape
    fvd_handler = FVD(batch_size=batch_size, T=T, frame_shape=[H, W, C])
    test_loader = th.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=False, drop_last=False)
    sample_loader = th.utils.data.DataLoader(sample_dataset, batch_size=batch_size, shuffle=False, drop_last=False)
    assert len(test_dataset) == num_videos, f"{len(test_dataset)} != {num_videos}"
    assert len(sample_dataset) == num_videos, f"{len(sample_dataset)} != {num_videos}"
    with tf.Graph().as_default():
        all_test_features = []
        all_pred_features = []
        for (test_batch, _), (sample_batch, _) in zip(test_loader, sample_loader):
            scale = lambda x: ((x.numpy()+1)*255/2).astype(np.uint8) # scale from [-1, 1] to [0, 255]
            test_batch = scale(test_batch)
            sample_batch = scale(sample_batch)
            test_features = fvd_handler.extract_features(test_batch)
            sample_features = fvd_handler.extract_features(sample_batch)
            all_test_features.append(test_features)
            all_pred_features.append(sample_features)
        all_test_features = np.concatenate(all_test_features, axis=0)
        all_pred_features = np.concatenate(all_pred_features, axis=0)
        fvd = fvd_handler.compute_fvd(all_pred_features, all_test_features)
    return fvd
if __name__ == "__main__":
    # Compute FVD between generated samples in eval_dir and the test set,
    # caching the result as a text file so reruns are free.
    parser = argparse.ArgumentParser()
    parser.add_argument("--eval_dir", type=str, required=True)
    parser.add_argument("--num_videos", type=int, default=None,
                        help="Number of generated samples per test video.")
    parser.add_argument("--batch_size", type=int, default=None,
                        help="Batch size for extracting video features the I3D model.")
    parser.add_argument("--sample_idx", type=int, default=0)
    args = parser.parse_args()
    save_path = Path(args.eval_dir) / f"fvd-{args.num_videos}-{args.sample_idx}.txt"
    if save_path.exists():
        fvd = np.loadtxt(save_path).squeeze()
        print(f"FVD already computed: {fvd}")
        exit()
    # Load model args
    model_args_path = Path(args.eval_dir) / "model_config.json"
    with open(model_args_path, "r") as f:
        model_args = argparse.Namespace(**json.load(f))
    # Set batch size given dataset if not specified
    if args.batch_size is None:
        args.batch_size = {'mazes_cwvae': 16, 'minerl': 8, 'carla_no_traffic': 4}[model_args.dataset]
    # Prepare datasets
    sample_dataset = SampleDataset(samples_path=(Path(args.eval_dir) / "samples"), sample_idx=args.sample_idx, length=args.num_videos)
    test_dataset = th.utils.data.Subset(
        dataset=get_test_dataset(dataset_name=model_args.dataset, T=model_args.T),
        indices=list(range(args.num_videos)),
    )
    fvd = compute_fvd(test_dataset, sample_dataset, T=model_args.T, num_videos=args.num_videos, batch_size=args.batch_size)
    np.savetxt(save_path, np.array([fvd]))
    print(f"FVD: {fvd}")
flexible-video-diffusion-modeling | flexible-video-diffusion-modeling-main/scripts/video_train.py | """
Train a diffusion model on images.
"""
import os
import sys
import argparse
import wandb
import torch.distributed as dist
from improved_diffusion import dist_util
from improved_diffusion.video_datasets import load_data, default_T_dict, default_image_size_dict
from improved_diffusion.resample import create_named_schedule_sampler
from improved_diffusion.script_util import (
model_and_diffusion_defaults,
create_model_and_diffusion,
args_to_dict,
add_dict_to_argparser,
)
from improved_diffusion.train_util import TrainLoop
from improved_diffusion.logger import logger
# Configure wandb before any wandb import side effects: "--unobserve" puts
# the run in dryrun (offline) mode and optionally redirects the wandb dir.
os.environ["MY_WANDB_DIR"] = "none"
if "--unobserve" in sys.argv:
    sys.argv.remove("--unobserve")
    os.environ["WANDB_MODE"] = "dryrun"
    if "WANDB_DIR_DRYRUN" in os.environ:
        os.environ["MY_WANDB_DIR"] = os.environ["WANDB_DIR_DRYRUN"]
def init_wandb(config, id):
    """Initialize wandb on rank 0 and log node-count info.

    :param config: run config (argparse namespace) stored with the wandb run.
    :param id: existing wandb run id to resume, or None for a fresh run.
        (NOTE(review): parameter shadows the ``id`` builtin, but renaming it
        would break keyword callers.)
    """
    if dist.get_rank() != 0:
        return
    wandb_dir = os.environ.get("MY_WANDB_DIR", "none")
    if wandb_dir == "none":
        # Fall back to wandb's default directory.
        wandb_dir = None
    wandb.init(entity=os.environ['WANDB_ENTITY'],
               project=os.environ['WANDB_PROJECT'],
               config=config, dir=wandb_dir, id=id)
    print(f"Wandb run id: {wandb.run.id}")
    num_nodes = 1
    if "SLURM_JOB_NODELIST" in os.environ:
        assert "SLURM_JOB_NUM_NODES" in os.environ
        num_nodes = int(os.environ['SLURM_JOB_NUM_NODES'])
        print(f"Node list: {os.environ['SLURM_JOB_NODELIST']}")
    logger.logkv("num_nodes", num_nodes)
    print(f"Number of nodes: {num_nodes}")
def num_available_cores():
    """Best-effort count of CPU cores usable by this process (at least 1).

    Mirrors the heuristic in PyTorch's DataLoader:
    https://pytorch.org/docs/stable/_modules/torch/utils/data/dataloader.html#DataLoader
    Prefers the scheduler affinity mask when available, then os.cpu_count(),
    and finally 1 when nothing can be determined.
    """
    suggested = None
    if hasattr(os, 'sched_getaffinity'):
        try:
            suggested = len(os.sched_getaffinity(0))
        except Exception:
            # Affinity can be unsupported or denied on some platforms.
            suggested = None
    if suggested is None:
        suggested = os.cpu_count()
    return suggested or 1
def main():
    """Entry point: parse args, set up distributed training, and run TrainLoop."""
    args = create_argparser().parse_args()
    if args.num_workers == -1:
        # Set the number of workers automatically.
        args.num_workers = max(num_available_cores() - 1, 1)
        print(f"num_workers is not specified. It is automatically set to \"number of cores - 1\" = {args.num_workers}")
    # Set T and image size
    video_length = default_T_dict[args.dataset]
    default_T = video_length
    default_image_size = default_image_size_dict[args.dataset]
    args.T = default_T if args.T == -1 else args.T
    args.image_size = default_image_size
    dist_util.setup_dist()
    resume = bool(args.resume_id)
    init_wandb(config=args, id=args.resume_id if resume else None)
    print("creating model and diffusion...")
    model, diffusion = create_model_and_diffusion(
        **args_to_dict(args, model_and_diffusion_defaults().keys())
    )
    model.to(dist_util.dev())
    schedule_sampler = create_named_schedule_sampler(args.schedule_sampler, diffusion)
    print("creating data loader...")
    data = load_data(
        dataset_name=args.dataset,
        batch_size=args.batch_size,
        T=args.T,
        num_workers=args.num_workers,
    )
    print("training...")
    TrainLoop(
        model=model,
        diffusion=diffusion,
        data=data,
        batch_size=args.batch_size,
        microbatch=args.microbatch,
        lr=args.lr,
        ema_rate=args.ema_rate,
        log_interval=args.log_interval,
        save_interval=args.save_interval,
        resume_checkpoint=args.resume_checkpoint,
        use_fp16=args.use_fp16,
        fp16_scale_growth=args.fp16_scale_growth,
        schedule_sampler=schedule_sampler,
        weight_decay=args.weight_decay,
        lr_anneal_steps=args.lr_anneal_steps,
        sample_interval=args.sample_interval,
        pad_with_random_frames=args.pad_with_random_frames,
        max_frames=args.max_frames,
        args=args,
    ).run_loop()
def create_argparser():
defaults = dict(
dataset="",
schedule_sampler="uniform",
lr=1e-4,
weight_decay=0.0,
lr_anneal_steps=0,
batch_size=1,
microbatch=-1, # -1 disables microbatches
ema_rate="0.9999", # comma-separated list of EMA values
log_interval=10,
save_interval=50000,
resume_checkpoint="",
use_fp16=False,
fp16_scale_growth=1e-3,
resume_id='', # set this to a previous run's wandb id to resume training
num_workers=-1,
pad_with_random_frames=True,
max_frames=20,
T=-1,
sample_interval=50000,
)
defaults.update(model_and_diffusion_defaults())
parser = argparse.ArgumentParser()
add_dict_to_argparser(parser, defaults)
return parser
if __name__ == "__main__":
main()
| 4,890 | 31.177632 | 122 | py |
flexible-video-diffusion-modeling | flexible-video-diffusion-modeling-main/scripts/video_to_world_coords.py | import argparse
from pathlib import Path
import random
import numpy as np
import torch as th
from improved_diffusion.carla_regressor import load_classifier_regressor_like_paper, predict_coords, base_data_transform
from improved_diffusion.test_util import Protect, get_model_results_path, get_eval_run_identifier
if __name__ == "__main__":
    # Run the CARLA classifier+regressor over sampled (or dataset) videos and
    # save predicted world coordinates as .npy files, skipping finished ones.
    parser = argparse.ArgumentParser()
    parser.add_argument("--classifier_path", type=str)
    parser.add_argument("--regressor_path", type=str)
    parser.add_argument("--eval_dir", type=str, default=None)
    parser.add_argument("--dataset_dir", type=str, default=None, help="If given, will run on dataset instead of samples in eval_dir.")
    parser.add_argument("--batch_size", type=int, default=8, help="Batch size for CARLA regressor.")
    parser.add_argument("--device", default="cuda" if th.cuda.is_available() else "cpu")
    args = parser.parse_args()
    # Load CARLA regressor
    classifier, regressor = load_classifier_regressor_like_paper(args.classifier_path, args.regressor_path, args.device)
    # Videos to do
    doing_dataset = args.dataset_dir is not None
    if doing_dataset:
        assert '..' not in args.dataset_dir, "Don't use .. in dataset_dir"
        dataset_coords_dir = Path('results') / args.dataset_dir / 'coords'
        dataset_coords_dir.mkdir(parents=True, exist_ok=True)
        paths_to_do = list(Path(args.dataset_dir).glob('video_*.pt'))
    else:
        # Prepare samples directory
        args.eval_dir = Path(args.eval_dir)
        (args.eval_dir / 'coords').mkdir(parents=True, exist_ok=True)
        print(f"Saving samples to {args.eval_dir / 'coords'}")
        paths_to_do = list((args.eval_dir / 'samples').glob('*.npy'))
    random.shuffle(paths_to_do) # shuffle them to make parallelism work better
    for path in paths_to_do:
        if doing_dataset:
            coords_path = dataset_coords_dir / (path.stem+'.npy')
        else:
            coords_path = args.eval_dir / 'coords' / path.name
        # Skip videos whose coords were already computed (possibly by a
        # concurrently running copy of this script).
        if coords_path.exists():
            continue
        print(f"Predicting coords for {path} at {coords_path}.")
        if doing_dataset:
            raw_video = np.array(th.load(path))
        else:
            raw_video = np.load(path)
        raw_video = raw_video.transpose(0, 2, 3, 1) # put channel dim to end, as expected for np array
        video = th.stack([base_data_transform(frame) for frame in raw_video])
        coords = predict_coords(video, classifier, regressor, args.batch_size)
        np.save(coords_path, coords)
flexible-video-diffusion-modeling | flexible-video-diffusion-modeling-main/scripts/video_sample.py | """
Generate a large batch of image samples from a model and save them as a large
numpy array. This can be used to produce samples for FID evaluation.
"""
import argparse
from operator import is_
import os
import json
from pathlib import Path
from PIL import Image
import numpy as np
import torch as th
from improved_diffusion import dist_util
from improved_diffusion.script_util import (
model_and_diffusion_defaults,
create_model_and_diffusion,
args_to_dict,
str2bool,
)
from improved_diffusion.test_util import get_model_results_path, get_eval_run_identifier, Protect
from improved_diffusion.sampling_schemes import sampling_schemes
from improved_diffusion.video_datasets import get_test_dataset
@th.no_grad()
def sample_video(args, model, diffusion, batch, just_get_indices=False):
    """
    Complete the videos in `batch` by sampling all frames beyond the first
    `args.n_obs` observed ones with the diffusion model, stage by stage,
    following the configured sampling scheme.

    batch has a shape of BxTxCxHxW where
    B: batch size
    T: video length
    CxWxH: image size

    :param just_get_indices: if True, skip the network entirely and gather
        ground-truth frames instead (used to visualise the sampling scheme).
    :return: (samples, indices_used) where `samples` has the same shape as
        `batch` (on CPU) and `indices_used` lists the (observed, latent)
        frame indices chosen at every stage.
    """
    B, T, C, H, W = batch.shape
    samples = th.zeros_like(batch)
    # Copy the observed prefix; all other frames start as zeros and are filled in below.
    samples[:, :args.n_obs] = batch[:, :args.n_obs]
    # Initialise sampling scheme
    optimal_schedule_path = None if args.optimality is None else args.eval_dir / "optimal_schedule.pt"
    frame_indices_iterator = iter(sampling_schemes[args.sampling_scheme](
        video_length=T, num_obs=args.n_obs,
        max_frames=args.max_frames, step_size=args.max_latent_frames,
        optimal_schedule_path=optimal_schedule_path,
    ))
    indices_used = []
    while True:
        # Adaptive schemes inspect the partially-completed videos to choose the next frames.
        frame_indices_iterator.set_videos(samples.to(args.device))  # ignored for non-adaptive sampling schemes
        try:
            obs_frame_indices, latent_frame_indices = next(frame_indices_iterator)
        except StopIteration:
            break
        print(f"Conditioning on {sorted(obs_frame_indices)} frames, predicting {sorted(latent_frame_indices)}.")
        # Prepare network's input: observed indices first, then latent ones.
        frame_indices = th.cat([th.tensor(obs_frame_indices), th.tensor(latent_frame_indices)], dim=1).long()
        x0 = th.stack([samples[i, fi] for i, fi in enumerate(frame_indices)], dim=0).clone()
        # obs_mask is 1 at observed positions, 0 at latent ones; latent_mask is the complement.
        obs_mask = th.cat([th.ones_like(th.tensor(obs_frame_indices)),
                           th.zeros_like(th.tensor(latent_frame_indices))], dim=1).view(B, -1, 1, 1, 1).float()
        latent_mask = 1 - obs_mask
        if just_get_indices:
            # Gather ground-truth frames instead of sampling (visualisation mode).
            local_samples = th.stack([batch[i, ind] for i, ind in enumerate(frame_indices)])
        else:
            # Prepare masks
            print(f"{'Frame indices':20}: {frame_indices[0].cpu().numpy()}.")
            print(f"{'Observation mask':20}: {obs_mask[0].cpu().int().numpy().squeeze()}")
            print(f"{'Latent mask':20}: {latent_mask[0].cpu().int().numpy().squeeze()}")
            print("-" * 40)
            # Move tensors to the correct device
            x0, obs_mask, latent_mask, frame_indices = (t.to(args.device) for t in [x0, obs_mask, latent_mask, frame_indices])
            # Run the network
            local_samples, _ = diffusion.p_sample_loop(
                model, x0.shape, clip_denoised=args.clip_denoised,
                model_kwargs=dict(frame_indices=frame_indices,
                                  x0=x0,
                                  obs_mask=obs_mask,
                                  latent_mask=latent_mask),
                latent_mask=latent_mask,
                return_attn_weights=False)
            print('local samples', local_samples.min(), local_samples.max())
        # Fill in the generated frames
        # The latent frames are the trailing -len(li): entries of each local sample.
        for i, li in enumerate(latent_frame_indices):
            samples[i, li] = local_samples[i, -len(li):].cpu()
        indices_used.append((obs_frame_indices, latent_frame_indices))
    return samples, indices_used
def main(args, model, diffusion, dataset):
    """Sample completions for every requested dataset index, skipping output files that already exist."""
    remaining = list(args.indices)
    while remaining:
        chunk, remaining = remaining[:args.batch_size], remaining[args.batch_size:]
        out_paths = [args.eval_dir / "samples" / f"sample_{i:04d}-{args.sample_idx}.npy" for i in chunk]
        needs_sampling = [not p.exists() for p in out_paths]
        if not any(needs_sampling):
            print(f"Nothing to do for the batches {min(chunk)} - {max(chunk)}, sample #{args.sample_idx}.")
            continue
        batch = th.stack([dataset[i][0] for i in chunk])
        samples, _ = sample_video(args, model, diffusion, batch)
        # Map pixel values from [-1, 1] to [0, 255] and quantise to uint8.
        lo, hi = -1, 1
        samples = ((samples.numpy() - lo) / (hi - lo) * 255).astype(np.uint8)
        for sample, path, missing in zip(samples, out_paths, needs_sampling):
            if missing:
                np.save(path, sample)
                print(f"*** Saved {path} ***")
def visualise(args, model, diffusion, dataset):
    """
    Render the sampling scheme (which frames are observed/generated at each
    stage) as a colour-coded PNG strip, without running the network.

    batch has a shape of BxTxCxHxW where
    B: batch size
    T: video length
    CxWxH: image size
    """
    is_adaptive = "adaptive" in args.sampling_scheme
    # Adaptive schemes may pick different indices per video, so visualise a whole batch;
    # non-adaptive schemes are identical across videos, so one is enough.
    bs = args.batch_size if is_adaptive else 1
    batch = next(iter(th.utils.data.DataLoader(dataset, batch_size=bs, shuffle=False, drop_last=False)))[0]
    # just_get_indices=True: only collect the per-stage frame indices, no sampling.
    _, indices = sample_video(args, model, diffusion, batch, just_get_indices=True)

    def visualise_obs_lat_sequence(sequence, index):
        """ if index is None, expects sequence to be a list of tuples of form (list, list)
        if index is given, expects sequence to be a list of tuples of form (list of lists (from which index i is taken), list of lists (from which index i is taken))
        """
        vis = []
        # Frames already available: the observed prefix, plus everything generated so far.
        exist_indices = list(range(args.n_obs))
        for obs_frame_indices, latent_frame_indices in sequence:
            obs_frame_indices, latent_frame_indices = obs_frame_indices[index], latent_frame_indices[index]
            exist_indices.extend(latent_frame_indices)
            # One row of T colour cells for this sampling stage.
            new_layer = th.zeros((args.T, 3)).int()
            border_colour = th.tensor([0, 0, 0]).int()
            not_sampled_colour = th.tensor([255, 255, 255]).int()
            exist_colour = th.tensor([50, 50, 50]).int()
            obs_colour = th.tensor([50, 50, 255]).int()
            latent_colour = th.tensor([255, 69, 0]).int()
            new_layer = th.zeros((args.T, 3)).int()
            # Paint in increasing precedence: unsampled -> existing -> observed -> latent.
            new_layer[:, :] = not_sampled_colour
            new_layer[exist_indices, :] = exist_colour
            new_layer[obs_frame_indices, :] = obs_colour
            new_layer[latent_frame_indices, :] = latent_colour
            # Upscale horizontally and insert thin border lines between cells.
            scale = 4
            new_layer = new_layer.repeat_interleave(scale+1, dim=0)
            new_layer[::(scale+1)] = border_colour
            new_layer = th.cat([new_layer, new_layer[:1]], dim=0)
            # Repeat the row vertically, with the last copy turned into a border line.
            vis.extend([new_layer.clone() for _ in range(scale+1)])
            vis[-1][:] = border_colour
        # Prepend a border row (vis[-1] is a border after the loop) and stack into an image.
        vis = th.stack([vis[-1], *vis])
        if not is_adaptive:
            assert index == 0
        fname = f"vis_{args.sampling_scheme}_sampling-{args.T}-given-{args.n_obs}_{args.max_latent_frames}-{args.max_frames}-chunks"
        if args.optimality is not None:
            fname += f"_optimal-{args.optimality}"
        if is_adaptive:
            fname += f"_index-{index}"
        else:
            assert index == 0
        fname += '.png'
        dir = Path("visualisations")
        dir.mkdir(parents=True, exist_ok=True)
        Image.fromarray(vis.numpy().astype(np.uint8)).save(dir / fname)
        print(f"Saved to {str(dir / fname)}")

    # One visualisation per video in the batch (a single one for non-adaptive schemes).
    for i in range(len(batch)):
        visualise_obs_lat_sequence(indices, i)
if __name__ == "__main__":
    # Command-line entry point: load a trained checkpoint and sample (or visualise) videos.
    parser = argparse.ArgumentParser()
    parser.add_argument("checkpoint_path", type=str)
    parser.add_argument("--sampling_scheme", required=True, choices=sampling_schemes.keys())
    parser.add_argument("--batch_size", type=int, default=8)
    parser.add_argument("--eval_dir", type=str, default=None)
    parser.add_argument("--n_obs", type=int, default=36, help="Number of observed frames at the beginning of the video. The rest are sampled.")
    parser.add_argument("--T", type=int, default=None, help="Length of the videos. If not specified, it will be inferred from the dataset.")
    parser.add_argument("--max_frames", type=int, default=None,
                        help="Denoted K in the paper. Maximum number of (observed or latent) frames input to the model at once. Defaults to what the model was trained with.")
    parser.add_argument("--max_latent_frames", type=int, default=None, help="Number of frames to sample in each stage. Defaults to max_frames/2.")
    parser.add_argument("--start_index", type=int, default=0)
    parser.add_argument("--stop_index", type=int, default=None)
    parser.add_argument("--use_ddim", type=str2bool, default=False)
    parser.add_argument("--timestep_respacing", type=str, default="")
    parser.add_argument("--clip_denoised", type=str2bool, default=True)
    parser.add_argument("--sample_idx", type=int, default=0, help="Sampled images will have this specific index. Used for sampling multiple videos with the same observations.")
    parser.add_argument("--just_visualise", action='store_true', help="Make visualisation of sampling scheme instead of doing it.")
    parser.add_argument("--optimality", type=str, default=None,
                        choices=["linspace-t", "random-t", "linspace-t-force-nearby", "random-t-force-nearby"],
                        help="Type of optimised sampling scheme to use for choosing observed frames. By default uses non-optimized sampling scheme. The optimal indices should be computed before use via video_optimal_schedule.py.")
    parser.add_argument("--device", default="cuda" if th.cuda.is_available() else "cpu")
    args = parser.parse_args()
    # Prepare which indices to sample (for unconditional generation index does nothing except change file name)
    if args.stop_index is None:
        # assume we're in a slurm batch job, set start and stop_index accordingly
        if "SLURM_ARRAY_TASK_ID" in os.environ:
            task_id = int(os.environ["SLURM_ARRAY_TASK_ID"])
        else:
            print("Warning: assuming we're not in a slurm batch job, only sampling first batch.")
            task_id = 0
        args.start_index = task_id * args.batch_size
        args.stop_index = (task_id + 1) * args.batch_size
    args.indices = list(range(args.start_index, args.stop_index))
    print(f"Sampling for indices {args.start_index} to {args.stop_index}.")
    # Load the checkpoint (state dictionary and config)
    data = dist_util.load_state_dict(args.checkpoint_path, map_location="cpu")
    state_dict = data["state_dict"]
    model_args = data["config"]
    # Sampling-time options override the stored training config.
    model_args.update({"use_ddim": args.use_ddim,
                       "timestep_respacing": args.timestep_respacing})
    model_args = argparse.Namespace(**model_args)
    model, diffusion = create_model_and_diffusion(
        **args_to_dict(model_args, model_and_diffusion_defaults().keys())
    )
    model.load_state_dict(state_dict)
    model = model.to(args.device)
    model.eval()
    args.image_size = model_args.image_size
    # Fall back to the training-time defaults when not given on the command line.
    if args.max_frames is None:
        args.max_frames = model_args.max_frames
    if args.max_latent_frames is None:
        args.max_latent_frames = args.max_frames // 2
    # Load the dataset (to get observations from)
    dataset = get_test_dataset(dataset_name=model_args.dataset, T=args.T)
    args.T = dataset.T
    if args.just_visualise:
        visualise(args, model, diffusion, dataset)
        exit()
    # Prepare samples directory
    args.eval_dir = get_model_results_path(args) / get_eval_run_identifier(args)
    (args.eval_dir / 'samples').mkdir(parents=True, exist_ok=True)
    print(f"Saving samples to {args.eval_dir / 'samples'}")
    # Store model configs in a JSON file
    json_path = args.eval_dir / "model_config.json"
    if not json_path.exists():
        with Protect(json_path):  # avoids race conditions
            with open(json_path, "w") as f:
                json.dump(vars(model_args), f, indent=4)
            print(f"Saved model config at {json_path}")
    main(args, model, diffusion, dataset)
| 12,061 | 48.03252 | 230 | py |
flexible-video-diffusion-modeling | flexible-video-diffusion-modeling-main/datasets/minerl.py | import tensorflow as tf
import tensorflow_datasets as tfds
import minerl_navigate
import os
import numpy as np
from pathlib import Path
if __name__ == "__main__":
    # Convert the TFDS MineRL-Navigate video dataset into per-video .npy files
    # laid out as <script_dir>/minerl_navigate-torch/{train,test}/<idx>.npy.
    data_dir = Path(os.path.dirname(os.path.abspath(__file__)))
    orig_dataset = 'minerl_navigate'
    torch_dataset_path = data_dir / f'{orig_dataset}-torch'
    torch_dataset_path.mkdir(exist_ok=True)
    for split in ['train', 'test']:
        torch_split_path = torch_dataset_path / split
        torch_split_path.mkdir(exist_ok=True)
        # shuffle_files=False keeps the enumeration (and hence file names) deterministic.
        ds = tfds.load("minerl_navigate", data_dir=str(data_dir), shuffle_files=False)[split]
        for cnt, item in enumerate(ds):
            video = item["video"].numpy()
            np.save(torch_split_path / f"{cnt}.npy", video)
        # NOTE(review): `cnt` is the last 0-based index, so this reports one fewer
        # than the number of scenes, and raises NameError if the split is empty.
        print(f' [-] {cnt} scenes in the {split} dataset')
| 804 | 31.2 | 93 | py |
flexible-video-diffusion-modeling | flexible-video-diffusion-modeling-main/datasets/mazes.py | """gqn_mazes dataset."""
"""This implementation is based on https://github.com/saeidnp/cwvae/blob/master/datasets/gqn_mazes/gqn_mazes.py and
https://github.com/iShohei220/torch-gqn/blob/c0156c72f4e63ca6523ab8d9a6f6b3ce9e0e391d/dataset/convert2torch.py"""
import tensorflow as tf
import tensorflow_datasets as tfds
import numpy as np
from pathlib import Path
_DESCRIPTION = """
# GQN Mazes Dataset
References:
```
@article{saxena2021clockworkvae,
title={Clockwork Variational Autoencoders},
author={Saxena, Vaibhav and Ba, Jimmy and Hafner, Danijar},
journal={arXiv preprint arXiv:2102.09532},
year={2021},
}
```
```
@article {Eslami1204,
title = {Neural scene representation and rendering},
author = {Eslami, S. M. Ali and Jimenez Rezende, Danilo and Besse, Frederic and Viola, Fabio and Morcos, Ari S. and Garnelo, Marta and Ruderman, Avraham and Rusu, Andrei A. and Danihelka, Ivo and Gregor, Karol and Reichert, David P. and Buesing, Lars and Weber, Theophane and Vinyals, Oriol and Rosenbaum, Dan and Rabinowitz, Neil and King, Helen and Hillier, Chloe and Botvinick, Matt and Wierstra, Daan and Kavukcuoglu, Koray and Hassabis, Demis},
doi = {10.1126/science.aar6170},
publisher = {American Association for the Advancement of Science},
URL = {https://science.sciencemag.org/content/360/6394/1204},
journal = {Science},
year = {2018},
}
```
"""
_CITATION = """
@article{saxena2021clockwork,
title={Clockwork Variational Autoencoders},
author={Vaibhav Saxena and Jimmy Ba and Danijar Hafner},
year={2021},
eprint={2102.09532},
archivePrefix={arXiv},
primaryClass={cs.CV}
}
"""
_DOWNLOAD_URL = "https://archive.org/download/gqn_mazes/gqn_mazes.zip"
class GqnMazes(tfds.core.GeneratorBasedBuilder):
    """DatasetBuilder for GQN Mazes dataset.

    Downloads the archive from archive.org and exposes one feature, "video":
    a variable-length sequence of 64x64 RGB frames read from .mp4 files.
    """

    VERSION = tfds.core.Version("1.0.0")
    RELEASE_NOTES = {
        "1.0.0": "Initial release.",
    }

    def _info(self) -> tfds.core.DatasetInfo:
        """Returns the dataset metadata."""
        return tfds.core.DatasetInfo(
            builder=self,
            description=_DESCRIPTION,
            features=tfds.features.FeaturesDict(
                {
                    # Variable number of 64x64 RGB frames per video.
                    "video": tfds.features.Video(shape=(None, 64, 64, 3)),
                }
            ),
            supervised_keys=None,
            homepage="https://archive.org/details/gqn_mazes",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: tfds.download.DownloadManager):
        """Returns SplitGenerators."""
        # The extracted archive contains train/ and test/ directories of .mp4 files.
        path = dl_manager.download_and_extract(_DOWNLOAD_URL)
        return {
            "train": self._generate_examples(path / "train"),
            "test": self._generate_examples(path / "test"),
        }

    def _generate_examples(self, path):
        """Yields examples."""
        # The file path doubles as the unique example key.
        for f in path.glob("*.mp4"):
            yield str(f), {
                "video": str(f.resolve()),
            }
def _process_seq(seq):
    """Add a leading batch axis and rescale pixel values to floats in [0, 1]."""
    batched = tf.expand_dims(seq, 0)
    return tf.cast(batched, tf.float32) / 255.0
if __name__ == "__main__":
    # `os` was missing from this module's imports (only tf/tfds/np/pathlib are
    # imported above), so the line below raised NameError at runtime.
    import os

    # Convert the TFDS GQN-Mazes video dataset into per-video .npy files
    # laid out as <script_dir>/gqn_mazes-torch/{train,test}/<idx>.npy.
    data_dir = Path(os.path.dirname(os.path.abspath(__file__)))
    orig_dataset = 'gqn_mazes'
    torch_dataset_path = data_dir / f'{orig_dataset}-torch'
    torch_dataset_path.mkdir(exist_ok=True)
    for split in ['train', 'test']:
        torch_split_path = torch_dataset_path / split
        torch_split_path.mkdir(exist_ok=True)
        # shuffle_files=False keeps the enumeration (and hence file names) deterministic.
        ds = tfds.load("gqn_mazes", data_dir=str(data_dir), shuffle_files=False)[split]
        cnt = -1  # keeps the log line below well-defined even for an empty split
        for cnt, item in enumerate(ds):
            video = item["video"].numpy()
            np.save(torch_split_path / f"{cnt}.npy", video)
        # NOTE(review): `cnt` is the last 0-based index (one fewer than the scene count).
        print(f' [-] {cnt} scenes in the {split} dataset')
print(f' [-] {cnt} scenes in the {split} dataset') | 3,718 | 32.504505 | 450 | py |
dbcsr | dbcsr-master/src/acc/libsmm_acc/predict/predict_train.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
####################################################################################################
# Copyright (C) by the DBCSR developers group - All rights reserved #
# This file is part of the DBCSR library. #
# #
# For information on the license, see the LICENSE file. #
# For further information please visit https://dbcsr.cp2k.org #
# SPDX-License-Identifier: GPL-2.0+ #
####################################################################################################
import os
import sys
import datetime
import json
import random
import numpy as np
import pandas as pd
import xgboost as xgb
import dask.dataframe as dd
import matplotlib.pyplot as plt
import argparse
from predict_helpers import (
safe_pickle,
safe_pickle_load,
plot_choice_goodness,
plot_performance_gains,
plot_scaled_performance_gains,
plot_absolute_performance_gain,
plot_relative_performance_gain,
performance_gain,
)
sys.path.append("../")
from kernels.smm_predict import to_tuple, to_string # noqa: E402
# Horizontal rule used to delimit sections in console output and the log file.
visual_separator = (
    "\n----------------------------------------------------------------------------"
)
# ===============================================================================
def main(
    datadir,
    destdir,
    algo,
    model_args,
    nrows,
    prefitted_model_folder,
    run_intermediate_evaluation,
):
    """
    Train a Machine Learning model on autotuning data to predict a kernel's performance given
    its template parameters

    :param datadir: directory holding the parquet training data and reference-performance JSON files
    :param destdir: directory under which this run's log folder is created
    :param algo: kernel algorithm name (e.g. "tiny", "small", "medium", "largeDB1", "largeDB2")
    :param model_args: dict of model options ("model", "splits", "ntrees", "njobs")
    :param nrows: number of data rows to read (forwarded to read_data and logged)
    :param prefitted_model_folder: if non-empty, reload a previously fitted model from this folder
        instead of training; its stored options override `algo`/`model_args`/`nrows`
    :param run_intermediate_evaluation: if True, refit to the full dataset even when a
        refit model already exists in `prefitted_model_folder`
    """
    # ===============================================================================
    # Create folder to store results of this training and start a log
    folder, log_file, log = get_log_folder(prefitted_model_folder, destdir, algo)
    # ===============================================================================
    # Override algorithm option if working on a pre-fitted model, and log program options
    log += print_and_log(visual_separator)
    algo, model_args, nrows, log = dump_or_load_options(
        algo, model_args, prefitted_model_folder, nrows, folder, log
    )
    # ===============================================================================
    # Get maximum and baseline performances
    (
        max_performances,
        max_performances_algo,
        max_performances_ref,
        baseline_performances_algo,
    ) = get_reference_performances(datadir, algo)
    # ===============================================================================
    # Read data
    log += print_and_log(visual_separator)
    X, X_mnk, Y, log, data_nrows = read_data(algo, datadir, nrows, folder, log)
    # ===============================================================================
    # AT THIS POINT, WE MOVE FROM DASK (out-of-memory dataframes) TO PANDAS
    # ===============================================================================
    log += print_and_log("[moving to pandas] Compute X ...")
    X = X.compute()
    log += print_and_log("[moving to pandas] Compute Y ...")
    Y = Y.compute()
    log += print_and_log("[moving to pandas] Compute X_mnk ...")
    X_mnk = X_mnk.compute()
    log += print_and_log("[moving to pandas] Done")
    # ===============================================================================
    # Get or train partial model (i.e. trained on the "training" part of the data, not the entire dataset)
    log += print_and_log(visual_separator)
    if len(prefitted_model_folder) == 0:  # train a model
        log += print_and_log("\nPreparing to fit model...")
        (
            X_train,
            Y_train,
            X_mnk_train,
            X_test,
            Y_test,
            X_mnk_test,
            model_partial,
            log,
        ) = train_model(X, X_mnk, Y, algo, model_args, folder, log)
    else:  # load pre-trained model
        log += print_and_log(
            "\nReading partial pre-fitted partial model from " + prefitted_model_folder
        )
        (
            X_train,
            Y_train,
            X_mnk_train,
            X_test,
            Y_test,
            X_mnk_test,
            model_partial,
            log,
        ) = fetch_pre_trained_model_partial(
            X, X_mnk, Y, model_args, prefitted_model_folder, log
        )
    # ===============================================================================
    # Evaluate partial model
    if model_partial is not None:
        log = evaluate_model(
            model_partial,
            X_train,
            X_mnk_train,
            Y_train,
            X_test,
            X_mnk_test,
            Y_test,
            max_performances_ref,
            max_performances_algo,
            baseline_performances_algo,
            data_nrows,
            log,
            folder,
        )
    # ===============================================================================
    # Refit to the entire dataset
    # Get or train model fit on the entire dataset (i.e. not just on the "training" part of the data)
    model_file = os.path.join(prefitted_model_folder, "feature_tree_refit.p")
    if (
        run_intermediate_evaluation
        or len(prefitted_model_folder) == 0
        or not os.path.exists(model_file)
    ):
        log += print_and_log(visual_separator)
        log += print_and_log("\nRefit to the entire dataset:")
        # Concatenate train and test partitions back into the full dataset.
        X = X_train.append(X_test, ignore_index=True)
        X_mnk = X_mnk_train.append(X_mnk_test, ignore_index=True)
        Y = Y_train.append(Y_test, ignore_index=True)
        model_partial.fit(X, Y)
        model = (
            model_partial  # This model is fit on the entire dataset, it is not partial
        )
        results_file = os.path.join(folder, "feature_tree_refit.p")
        safe_pickle([X.columns.values, model], results_file)
    else:
        log += print_and_log(
            "\nReading pre-fitted model from " + prefitted_model_folder
        )
        X, model, log = fetch_pre_trained_model(prefitted_model_folder, X, log)
    # ===============================================================================
    # Evaluate refit-model
    # No held-out test set here: the model was refit on all data.
    log = evaluate_model(
        model,
        X,
        X_mnk,
        Y,
        None,
        None,
        None,
        max_performances_ref,
        max_performances_algo,
        baseline_performances_algo,
        data_nrows,
        log,
        folder,
    )
    # ===============================================================================
    # Print log
    log += print_and_log(visual_separator)
    with open(log_file, "w") as f:
        f.write(log)
# ===============================================================================
# Model hyperparameters
# Per-algorithm tuned hyper-parameters: `scikit_*` keys feed the scikit-learn tree
# models, `xgboost_*` keys feed the XGBoost models (see the get_* helpers below).
optimized_hyperparameters = {
    # chosen by hyperparameter optimization. The optimal parameter depends on the GPU, the data ...
    # the values below are the average of the optimal value for the P100 and the V100
    "tiny": {
        "scikit_max_depth": 16,
        "scikit_min_samples_leaf": 2,
        "scikit_min_samples_split": 15,
        "xgboost_max_depth": 12,
        "xgboost_learning_rate": 0.1,
        "xgboost_n_estimators": 100,
    },
    "small": {
        "scikit_max_depth": 16,
        "scikit_min_samples_leaf": 2,
        "scikit_min_samples_split": 15,
        "xgboost_max_depth": 14,
        "xgboost_learning_rate": 0.1,
        "xgboost_n_estimators": 170,
    },
    "medium": {
        "scikit_max_depth": 18,
        "scikit_min_samples_leaf": 2,
        "scikit_min_samples_split": 13,
        "xgboost_max_depth": 14,
        "xgboost_learning_rate": 0.1,
        "xgboost_n_estimators": 140,
    },
    "largeDB1": {
        "scikit_max_depth": 18,
        "scikit_min_samples_leaf": 2,
        "scikit_min_samples_split": 15,
        "xgboost_max_depth": 14,
        "xgboost_learning_rate": 0.1,
        "xgboost_n_estimators": 170,
    },
    "largeDB2": {
        "scikit_max_depth": 18,
        "scikit_min_samples_leaf": 2,
        "scikit_min_samples_split": 15,
        "xgboost_max_depth": 14,
        "xgboost_learning_rate": 0.1,
        "xgboost_n_estimators": 170,
    },
}
# ===============================================================================
# Printing and dumping helpers
def get_log_folder(prefitted_model_folder, destination_folder, algo):
    """Create a unique log folder for this run in which logs, plots etc. will be stored.

    :param prefitted_model_folder: if non-empty, reuse this folder (a new timestamped
        log file is created inside it); otherwise a fresh timestamped folder is made
    :param destination_folder: parent directory for newly created folders ("." = cwd)
    :param algo: algorithm name, used as a sub-folder component
    :return: (folder, log_file, log) - the folder path, the log file path, and the
        log string accumulated so far
    """
    if len(prefitted_model_folder) == 0:
        # Create a new folder for this model
        file_signature = datetime.datetime.now().strftime("%Y-%m-%d--%H-%M")
        folder_name = os.path.join(
            "model_selection", os.path.join(algo, file_signature)
        )
        if destination_folder != ".":
            folder = os.path.join(destination_folder, folder_name)
        else:
            folder = folder_name
        if not os.path.exists(folder):
            while True:  # loop until we've created a folder
                try:
                    os.makedirs(folder)
                    break
                except FileExistsError:
                    time_stamp_seconds = datetime.datetime.now().strftime("-%S")
                    new_folder = folder + time_stamp_seconds
                    print(
                        "Folder {} exists already. Trying to create folder {}.".format(
                            folder, new_folder
                        )
                    )
                    folder = new_folder
        # Bug fix: compute the log file path only AFTER the retry loop above, which
        # may have renamed `folder`; previously the log file could point into a
        # different run's pre-existing folder on a name collision.
        log_file = os.path.join(folder, "log.txt")
    else:
        # If loading a pre-fitted model, use this pre-fitted model's folder as a log folder, but create a new log file
        folder = prefitted_model_folder
        log_file_signature = datetime.datetime.now().strftime("%Y-%m-%d--%H-%M")
        log_file = os.path.join(folder, "log_" + log_file_signature + ".txt")
    # Log folder and file
    log = ""
    log += print_and_log("\nLogging to:")
    log += print_and_log("\t" + folder)
    log += print_and_log("\t" + log_file)
    return folder, log_file, log
def dump_or_load_options(algo, model_args, prefitted_model, nrows, folder, log):
    """Persist the program options to the run folder, or restore them from a
    pre-fitted model's folder, then log them.

    :return: (algo, model_args, nrows, log), possibly overridden by the stored options
    """
    options_file_name = os.path.join(folder, "options.json")
    pgm_options = {"folder": folder, "algo": algo, "nrows": nrows, **model_args}
    if len(prefitted_model) == 0:
        # Training from scratch: record the options so a later run can reload them.
        print("Dump options to", options_file_name)
        with open(options_file_name, "w") as f:
            json.dump(pgm_options, f)
    else:
        # Re-using a pre-fitted model: its stored options take precedence.
        print("Read options from", options_file_name)
        with open(options_file_name, "r") as f:
            pgm_options = json.load(f)
        algo = pgm_options["algo"]
        model_args = {key: pgm_options[key] for key in ("model", "splits", "ntrees", "njobs")}
        nrows = pgm_options["nrows"]
    # Log options
    log += print_and_log("Predict-train running with options:")
    for opt, opt_val in pgm_options.items():
        log += print_and_log("{:<15}: {}".format(opt, opt_val))
    return algo, model_args, nrows, log
def print_and_log(msg):
    """Print `msg` and return it (stringified), prefixed with a newline, for log accumulation."""
    text = msg if isinstance(msg, str) else str(msg)
    print(text)
    return "\n" + text
def dask_to_pandas(*dfs):
"""Convert training data dask -> pandas"""
pd_dfs = [df.compute() for df in dfs]
return pd_dfs[0] if len(pd_dfs) == 1 else pd_dfs
def pandas_to_dask(*dfs):
    """Convert training data pandas -> dask; a single input yields a single frame, not a list."""
    converted = [dd.from_pandas(frame, npartitions=3) for frame in dfs]
    if len(converted) == 1:
        return converted[0]
    return converted
# ===============================================================================
# Custom loss functions and scorers
def perf_loss(y_true, y_pred, top_k, X_mnk, scaled=True):
    """
    Compute the relative performance losses per mnk if one were to measure the top-k best predicted sets of parameters
    and pick the best out of this top-k

    :param y_true: ground truth performances (performance scaled between 0 and 1)
    :param y_pred: estimated performances (performance scaled between 0 and 1)
    :param top_k: number of top performances to measure
    :param X_mnk: corresponding mnks
    :return: perf_losses: array of relative performance losses (in %), one array element per mnk
    """
    assert len(y_true.index) == y_pred.flatten().size
    assert len(y_true.index) == len(X_mnk.index)

    losses = list()
    for mnk in np.unique(X_mnk["mnk"].values):
        # Rows belonging to this (m, n, k) triplet
        rows = np.where(X_mnk == mnk)[0].tolist()
        assert len(rows) > 0, "idx_mnk is empty"
        true_mnk = y_true.iloc[rows]
        pred_mnk = y_pred[rows]

        # Index (or indices) of the top-k best predicted performances
        if top_k == 1:
            best_pred_idx = np.argmax(pred_mnk)
        else:
            best_pred_idx = np.argpartition(-pred_mnk, top_k)[:top_k]

        # Best true performance among the top-k predicted candidates
        chosen = np.amax(true_mnk.iloc[best_pred_idx])

        if scaled:
            # Performances are already scaled to [0, 1], where 1 is the per-mnk maximum
            loss = 1.0 - chosen
        else:
            best_true = float(true_mnk.max(axis=0))
            assert best_true >= 0, "Found non-positive value for maxperf: " + str(best_true)
            loss = (best_true - chosen) / best_true

        # Relative performance loss incurred by using model-predicted parameters instead of autotuned ones [%]
        losses.append(100 * loss)

    return losses
def worse_rel_perf_loss_of_k(y_true, y_pred, top_k, X_mnk, scaled=True):
    """Worst (largest) per-mnk relative performance loss, in percent."""
    return float(np.max(np.asarray(perf_loss(y_true, y_pred, top_k, X_mnk, scaled))))
def mean_rel_perf_loss_of_k(y_true, y_pred, top_k, X_mnk, scaled=True):
    """Mean per-mnk relative performance loss, in percent."""
    return float(np.mean(np.asarray(perf_loss(y_true, y_pred, top_k, X_mnk, scaled))))
def worse_case_scorer(estimator, X, y, top_k):
    """
    :param estimator: the model that should be evaluated
    :param X: validation data
    :param y: ground truth target for X
    :return: score: a floating point number that quantifies the estimator prediction quality on X, with reference to y
    """
    # Keep the mnk column aside: it identifies the kernel but is not a model feature.
    # Bug fix: was `dd.DataFrame()`, which cannot be constructed without arguments;
    # at scoring time X is a pandas frame, so build a pandas frame here.
    mnk = pd.DataFrame()
    mnk["mnk"] = X["mnk"].copy()
    # Bug fix: was `X.drop(["mnk"].values, axis=1)` - Python lists have no `.values`
    # attribute, so that raised AttributeError on every call.
    y_pred = estimator.predict(X.drop(["mnk"], axis=1))
    score = worse_rel_perf_loss_of_k(y, y_pred, top_k, mnk)
    return (
        -score
    )  # by scikit-learn convention, higher numbers are better, so the value should be negated
def worse_case_scorer_top1(estimator, X, y):
    """Worst-case scorer restricted to the single best-predicted parameter set (top-1)."""
    return worse_case_scorer(estimator, X, y, top_k=1)
def mean_scorer(estimator, X, y, top_k):
    """
    :param estimator: the model that should be evaluated
    :param X: validation data
    :param y: ground truth target for X
    :return: score: a floating point number that quantifies the estimator prediction quality on X, with reference to y
    """
    # Keep the mnk column aside: it identifies the kernel but is not a model feature.
    # Bug fix: was `dd.DataFrame()`, which cannot be constructed without arguments;
    # at scoring time X is a pandas frame, so build a pandas frame here.
    mnk = pd.DataFrame()
    mnk["mnk"] = X["mnk"].copy()
    # Bug fix: was `X.drop(["mnk"].values, axis=1)` - Python lists have no `.values`
    # attribute, so that raised AttributeError on every call.
    y_pred = estimator.predict(X.drop(["mnk"], axis=1))
    score = mean_rel_perf_loss_of_k(y, y_pred, top_k, mnk)
    return (
        -score
    )  # by scikit-learn convention, higher numbers are better, so the value should be negated
def mean_scorer_top1(estimator, X, y):
    """Mean scorer restricted to the single best-predicted parameter set (top-1)."""
    return mean_scorer(estimator, X, y, top_k=1)
# ===============================================================================
# Read and prepare data
def get_reference_performances(folder, algo):
import json
maxperf_file = os.path.join(folder, "max_performances.json")
with open(maxperf_file) as f:
max_performances = json.load(f)
maxperf_file = os.path.join(folder, "max_performances_by_algo.json")
with open(maxperf_file) as f:
max_performances_algo = json.load(f)[algo]
max_performances_ref = max_performances
baseline_file = os.path.join(folder, "baseline_performances_by_algo.json")
with open(baseline_file) as f:
baseline_performances_algo = json.load(f)[algo]
return (
max_performances,
max_performances_algo,
max_performances_ref,
baseline_performances_algo,
)
def read_data(algo, read_from, nrows, folder, log):
    """
    Load the training data for `algo` from its parquet file (as dask dataframes).

    :param algo: algorithm name; selects the file training_data_<algo>.parquet
    :param read_from: directory containing the parquet file
    :param nrows: NOTE(review): not referenced in this body - presumably a
        row-limit hook; confirm against callers
    :param folder: NOTE(review): not referenced in this body
    :param log: log string to append to
    :return: (X, X_mnk, Y, log, nrows_data) - feature frame, mnk-label frame,
        target frame ("perf_scaled"), the accumulated log, and the row count
    """
    parquet_data_file = os.path.join(read_from, "training_data_" + algo + ".parquet")
    log += print_and_log("\nRead data from " + parquet_data_file)
    # ===============================================================================
    # Get 'X'
    # Columns that are targets/labels, not predictors, and must not leak into X.
    cols_to_ignore = [
        "perf_scaled",
        "mnk",
        "perf (Gflop/s)",
        "perf_scaled_by_algo",
        "perf_squared",
    ]
    X = dd.read_parquet(parquet_data_file)
    cols_to_drop = set(cols_to_ignore).intersection(set(X.columns.values))
    log += print_and_log("\nDropping following columns from X:\n" + str(cols_to_drop))
    X = X.drop(cols_to_drop, axis=1)
    log += print_and_log(
        "X : {:>8,} x {:>8,} ({:>2.2} MB)".format(
            len(X), len(X.columns), sys.getsizeof(X) / 10**6
        )
    )
    log += print_and_log("Head:")
    log += print_and_log(X.head())
    n_features = len(list(X.columns))
    predictor_names = X.columns.values
    log += print_and_log("\nPredictor variables: (" + str(n_features) + ")")
    for i, p in enumerate(predictor_names):
        log += print_and_log("\t{:2}) {}".format(i + 1, p))
    # ===============================================================================
    # Get 'Y'
    log += print_and_log("\nRead Y")
    Y = dd.read_parquet(parquet_data_file, columns=["perf_scaled"])
    log += print_and_log(
        "Y : {:>8,} ({:>2.2} MB)".format(len(Y), sys.getsizeof(Y) / 10**6)
    )
    log += print_and_log("Head:")
    log += print_and_log(Y.head())
    # ===============================================================================
    # Get 'X_mnk'
    log += print_and_log("\nRead X_mnk")
    X_mnk = dd.read_parquet(parquet_data_file, columns=["mnk"])
    nrows_data = len(X_mnk.index)
    log += print_and_log(
        "X_mnk : {:>8,} ({:>2.2} MB)".format(nrows_data, sys.getsizeof(X_mnk) / 10**6)
    )
    log += print_and_log("Head:")
    log += print_and_log(X_mnk.head())
    log += print_and_log("# unique mnks:")
    log += print_and_log(str(X_mnk["mnk"].nunique().compute()) + "\n")
    return X, X_mnk, Y, log, nrows_data
# ===============================================================================
# Predictive modelling
def get_hyperparameter_grid(algo, model_name, n_features):
    """Return the hyper-parameter search grid for the given algorithm and model family.

    :param algo: algorithm name ("tiny", "small", "medium", or largeDB variants)
    :param model_name: model identifier; must contain "scikit" or "xgb"
    :param n_features: number of predictor features (bounds the tree depth ranges)
    :raises AssertionError: if `model_name` matches neither model family
    """
    if "scikit" in model_name:  # a scikit-learn tree model
        if algo == "medium":
            depths = [10, 13, 16, 18, 21, 24]
            splits = [2, 8, 12, 18]
            leaves = [2, 8, 12, 18]
        elif algo == "tiny":
            depths = list(range(4, int(2 * n_features) + 1, 1))
            splits = list(range(1, 26, 1))
            leaves = list(range(1, 26, 1))
        elif algo == "small":
            depths = list(range(4, int(2 * n_features) + 1, 3))
            splits = [2, 5, 8, 13, 18]
            leaves = [2, 5, 8, 13, 18]
        else:  # largeDB1,2
            depths = list(range(4, int(2 * n_features) + 1, 3))
            splits = list(range(2, 21, 3))
            leaves = list(range(2, 21, 3))
        # GridSearch addresses the wrapped estimator through this prefix.
        prefix = model_name + "__estimator__"
        return {
            prefix + "max_depth": depths,
            prefix + "min_samples_split": splits,
            prefix + "min_samples_leaf": leaves,
        }
    elif "xgb" in model_name:  # an XGBoost model
        if algo == "medium":
            depths = [16, 13]
            estimators = [100, 140]
            rates = [0.1]
        elif algo == "tiny":
            depths = list(range(10, n_features + 2, 1))
            estimators = list(range(30, 160, 20))
            rates = [i / 10 for i in range(1, 5)]
        elif algo == "small":
            depths = list(range(10, min(20, n_features + 2), 4))
            estimators = list(range(50, 200, 30))
            rates = [0.1, 0.3]
        else:  # largeDB1,2
            depths = list(range(10, min(20, n_features + 2), 4))
            estimators = list(range(50, 200, 30))
            rates = [0.1, 0.3]
        return {
            "max_depth": depths,
            "learning_rate": rates,
            "n_estimators": estimators,
        }
    raise AssertionError("Cannot recognize model: " + model_name)
def get_scikit_DecisionTree_model(algo):
    """Return a scikit-learn decision-tree regressor wrapped in an eli5
    PermutationImportance estimator, plus its model name.

    Hyper-parameters are read from the module-level `optimized_hyperparameters`
    table for the given algorithm.
    """
    from sklearn.tree import DecisionTreeRegressor

    hp = optimized_hyperparameters[algo]
    tree = DecisionTreeRegressor(
        criterion="mse",
        splitter="best",
        min_samples_split=hp["scikit_min_samples_split"],
        min_samples_leaf=hp["scikit_min_samples_leaf"],
        max_depth=hp["scikit_max_depth"],
        max_features=None,
        max_leaf_nodes=None,
    )
    # Wrap the tree so feature selection can use permutation importances.
    from eli5.sklearn import PermutationImportance

    wrapped = PermutationImportance(tree, cv=None)
    return wrapped, "scikit-Decision_Tree"
def get_scikit_RandomForest_model(algo, njobs, ntrees):
    """Return a scikit-learn random-forest regressor and its model name.

    :param njobs: parallel jobs for the forest
    :param ntrees: number of estimators
    """
    from sklearn.ensemble import RandomForestRegressor

    hp = optimized_hyperparameters[algo]
    forest = RandomForestRegressor(
        criterion="mse",
        n_estimators=ntrees,
        min_samples_split=hp["scikit_min_samples_split"],
        min_samples_leaf=hp["scikit_min_samples_leaf"],
        max_depth=hp["scikit_max_depth"],
        bootstrap=True,
        max_features="sqrt",
        n_jobs=njobs,
    )
    return forest, "scikit-Random_Forest"
def get_xgb_DecisionTree_model(algo, njobs, ntrees):
    """Return an XGBoost gradient-boosted-tree regressor (exact tree method)
    and its model name. `ntrees` is unused here; the estimator count comes
    from the optimized hyper-parameters."""
    hp = optimized_hyperparameters[algo]
    regressor = xgb.XGBRegressor(
        max_depth=hp["xgboost_max_depth"],
        learning_rate=hp["xgboost_learning_rate"],
        n_estimators=hp["xgboost_n_estimators"],
        tree_method="exact",
        verbosity=2,
        objective="reg:squarederror",
        booster="gbtree",
        n_jobs=njobs,
    )
    return regressor, "xgb-Decision_Tree"
def get_xgb_DecisionTree_dask_model(algo, njobs, ntrees):
    """Return a Dask-backed XGBoost regressor for out-of-memory training,
    plus its model name. `ntrees` is unused here."""
    from dask_ml.xgboost import XGBRegressor_dask

    hp = optimized_hyperparameters[algo]
    regressor = XGBRegressor_dask(
        max_depth=hp["xgboost_max_depth"],
        learning_rate=hp["xgboost_learning_rate"],
        n_estimators=hp["xgboost_n_estimators"],
        tree_method="exact",
        verbosity=2,
        objective="reg:squarederror",
        booster="gbtree",
        n_jobs=njobs,
    )
    return regressor, "xgb-Decision_Tree_dask"
def get_xgb_DecisionTree_GPU_model(algo, njobs, ntrees):
    """Return an XGBoost regressor using the GPU histogram tree method,
    plus its model name. `ntrees` is unused here."""
    hp = optimized_hyperparameters[algo]
    regressor = xgb.XGBRegressor(
        max_depth=hp["xgboost_max_depth"],
        learning_rate=hp["xgboost_learning_rate"],
        n_estimators=hp["xgboost_n_estimators"],
        tree_method="gpu_hist",
        verbosity=2,
        objective="reg:squarederror",
        booster="gbtree",
        n_jobs=njobs,
    )
    return regressor, "xgb-Decision_Tree_GPU"
def get_xgb_RandomForest_model(algo, njobs, ntrees):
    """Return an XGBoost random-forest regressor and its model name.

    :param ntrees: number of trees grown in parallel (num_parallel_tree)
    """
    hp = optimized_hyperparameters[algo]
    forest = xgb.XGBRFRegressor(
        max_depth=hp["xgboost_max_depth"],
        learning_rate=hp["xgboost_learning_rate"],
        n_estimators=hp["xgboost_n_estimators"],
        tree_method="exact",
        nthread=njobs,
        subsample=0.5,
        colsample_bynode=0.8,
        num_parallel_tree=ntrees,
        verbosity=2,
        objective="reg:squarederror",
    )
    return forest, "xgb-Random_Forest"
def get_model(model_to_train, algo, njobs, ntrees):
    """Instantiate the requested predictive model.

    :param model_to_train: one of "DT", "RF", "xgb-DT", "xgb-DT-dask",
        "xgb-DT-GPU", "xgb-RF"
    :param algo: algorithm name used to look up optimized hyper-parameters
    :param njobs: parallel jobs for the model
    :param ntrees: number of trees (forest models)
    :return: (model, model_name) tuple
    :raises AssertionError: for an unrecognized model_to_train
    """
    if model_to_train == "DT":
        model, model_name = get_scikit_DecisionTree_model(algo)
    elif model_to_train == "RF":
        model, model_name = get_scikit_RandomForest_model(algo, njobs, ntrees)
    elif model_to_train == "xgb-DT":
        model, model_name = get_xgb_DecisionTree_model(algo, njobs, ntrees)
    elif model_to_train == "xgb-DT-dask":
        model, model_name = get_xgb_DecisionTree_dask_model(algo, njobs, ntrees)
    elif model_to_train == "xgb-DT-GPU":
        model, model_name = get_xgb_DecisionTree_GPU_model(algo, njobs, ntrees)
    elif model_to_train == "xgb-RF":
        model, model_name = get_xgb_RandomForest_model(algo, njobs, ntrees)
    else:
        # Bug fix: the previous message listed only "DT, RF" although six
        # options are supported.
        raise AssertionError(
            "Cannot recognize model: "
            + model_to_train
            + ". Options: DT, RF, xgb-DT, xgb-DT-dask, xgb-DT-GPU, xgb-RF"
        )
    return model, model_name
def get_train_test_partition(to_partition, test, train=None):
    """
    Perform train/test partition
    :param to_partition: sequence of objects to partition
    :param test: ndarray, test-indices
    :param train (optional): ndarray
    :return: flat list with, for each input frame, its train-rows frame
        followed by its test-rows frame
    """
    if train is None:  # derive train indices as the complement of the test indices
        universe = set(range(len(to_partition[0].index)))
        train = list(universe - set(test))
    print(
        "About to partition into train (len: {:,}) / test (len: {:,})".format(
            len(train), len(test)
        )
    )
    partitioned = list()
    for frame in to_partition:
        # train rows: used for hyper-parameter optimization (via CV) and training
        partitioned.append(frame.iloc[train, :])
        # test rows: used for evaluation of the 'selected/final' model
        partitioned.append(frame.iloc[test, :])
    print("Returning object of length: {}".format(len(partitioned)))
    return partitioned
def train_model(X, X_mnk, Y, algo, model_options, folder, log):
    """Train (and optionally hyper-parameter-optimize) a predictive model.

    Splits the data into train/test by mnk-group, selects features via
    SelectFromModel, fits the model, and pickles the result.

    :param X: predictor DataFrame
    :param X_mnk: DataFrame with a single "mnk" column (group labels)
    :param Y: target DataFrame
    :param algo: algorithm name
    :param model_options: dict with keys "model", "splits", "ntrees", "njobs",
        "hyperparameter_optimization"
    :param folder: output folder for the pickled model and plots
    :param log: accumulated log string
    :return: (X_train, Y_train, X_mnk_train, X_test, Y_test, X_mnk_test,
        trained model, log)
    """
    # ===============================================================================
    # Get options
    results_file = os.path.join(folder, "feature_tree.p")
    # ===============================================================================
    # Testing splitter (train/test-split), grouped by mnk so that all rows of
    # one kernel size land on the same side of the split
    from sklearn.model_selection import GroupShuffleSplit
    cv = GroupShuffleSplit(n_splits=2, test_size=0.2)
    train_test_splits = cv.split(X, Y, groups=X_mnk["mnk"])
    train, test = next(train_test_splits)
    (
        X_train,
        X_test,
        Y_train,
        Y_test,
        X_mnk_train,
        X_mnk_test,
    ) = get_train_test_partition([X, Y, X_mnk], test, train)
    plot_train_test_partition(test, train, X_mnk, folder)
    log += print_and_log(
        "\nComplete train/test split, total size="
        + str(X.shape)
        + ", test size="
        + str(X_test.shape)
        + ", train_size="
        + str(X_train.shape)
    )
    del X, X_mnk, Y  # free memory
    predictor_names = X_train.columns.values
    # ===============================================================================
    # Predictive model
    model_to_train = model_options["model"]
    model, model_name = get_model(
        model_to_train, algo, model_options["njobs"], model_options["ntrees"]
    )
    log += print_and_log(
        "\nStart tune/train for model " + model_name + " with parameters:"
    )
    log += print_and_log(model)
    # ===============================================================================
    # Cross-validation splitter (train/validation-split)
    test_size = 0.3
    cv = GroupShuffleSplit(n_splits=model_options["splits"], test_size=test_size)
    # ===============================================================================
    # Feature selection: SelectFromModel
    from sklearn.feature_selection import SelectFromModel
    feature_importance_threshold = (
        0.0005  # only remove the features with VERY little importance
    )
    # NOTE(review): setting `model.cv` here only affects wrappers that read a
    # `cv` attribute (e.g. PermutationImportance); plain regressors ignore it.
    model.cv = cv.split(X_train.values, Y_train.values, groups=X_mnk_train.values)
    model.fit(X_train.values, Y_train.values)
    model_fs = SelectFromModel(
        model, threshold=feature_importance_threshold, max_features=None, prefit=True
    )
    print(model_fs)
    model.cv = None
    # ===============================================================================
    # Info on feature selection
    all_feature_names = X_train.columns.values.tolist()
    feature_support = model_fs.get_support()
    features_importances = model.feature_importances_
    feature_name_importance = zip(
        all_feature_names, features_importances, feature_support
    )
    feature_name_importance = sorted(
        feature_name_importance, key=lambda x: x[1], reverse=True
    )
    log += print_and_log(visual_separator)
    n_selected_features = np.sum(feature_support)
    log += print_and_log("Optimal number of features : {}".format(n_selected_features))
    # Selected features
    log += print_and_log("\nFeatures:")
    selected_features = list()
    selected_feature_importances = list()
    for i, (feat_name, feat_imp, feat_in) in enumerate(feature_name_importance):
        in_or_out = "accepted" if feat_in else " x rejected"
        log += print_and_log(
            "{:>2}) {:<40}, imp: {:>1.3f} {}".format(
                i + 1, feat_name, feat_imp, in_or_out
            )
        )
        if feat_in:
            selected_features.append(feat_name)
            selected_feature_importances.append(feat_imp)
    plot_feature_importance(features_importances, all_feature_names, folder)
    # Drop non-selected features
    features_to_drop = [f for f in predictor_names if f not in selected_features]
    X_train = X_train.drop(features_to_drop, axis=1)
    X_test = X_test.drop(features_to_drop, axis=1)
    n_features = len(X_train.columns)
    # ===============================================================================
    # Fit
    out_of_memory_computation = "dask" in model_options["model"]
    if out_of_memory_computation:
        X_train, Y_train = pandas_to_dask(X_train, Y_train)
    if model_options["hyperparameter_optimization"]:
        # Hyperparameter Optimization
        param_grid = get_hyperparameter_grid(algo, model_name, n_features)
        if param_grid is None:
            raise AssertionError("param_grid object is None. Please implement!")
        # At this point, we "cheat"/"take a shortcut" in 2 ways:
        # - we split into train/test partitions using the simple default splitter, not one that is aware of mnk-groups
        # - we use an overall MSE scorer, not one that looks at the performance loss of predicted mnks wrt. autotuned
        if out_of_memory_computation:
            from dask_ml.model_selection import GridSearchCV
            gds_pars = {
                "estimator": model,
                "param_grid": param_grid,
                "cv": model_options["splits"],
                "refit": True,
                "n_jobs": 1,
            }
        else:
            from sklearn.model_selection import GridSearchCV
            gds_pars = {
                "estimator": model,
                "param_grid": param_grid,
                "cv": model_options["splits"],
                "refit": True,
                "n_jobs": 1,
                "verbose": 2,
            }
        gds = GridSearchCV(**gds_pars)
        log += print_and_log(visual_separator)
        log += print_and_log("\nStart hyperparameter optimization & training ... :\n")
        log += print_and_log("Hyper-parameter grid:")
        for par, values in param_grid.items():
            log += print_and_log("\t" + par + ": " + str(values))
        log += print_and_log("\n")
        gds.fit(X_train.values, Y_train.values)
        log += print_and_log("... done")
        describe_hpo(gds, log, folder)
        model = gds.best_estimator_
    else:
        # Fit with the 'best guess' hyper-parameters
        log += print_and_log(visual_separator)
        log += print_and_log("\nStart fitting model with predictors:\n")
        for i, p in enumerate(X_train.columns.values):
            log += print_and_log("\t{:>2}) {}".format(i + 1, p))
        model.fit(X_train, Y_train)
    safe_pickle([X_train.columns.values, model, test], results_file)
    log += print_and_log("\nCompleted fit, wrote results to " + results_file)
    log += print_and_log(visual_separator)
    return_model = model
    # Return: drop the "mnk" group label from the predictor frames
    if "mnk" in X_train.columns.values:
        X_train.drop("mnk", axis=1, inplace=True)
    if "mnk" in X_test.columns.values:
        # Bug fix: this branch previously dropped from X_train (copy-paste
        # error), leaving the "mnk" column in X_test.
        X_test.drop("mnk", axis=1, inplace=True)
    if out_of_memory_computation:
        X_train, Y_train = dask_to_pandas(X_train, Y_train)
    return X_train, Y_train, X_mnk_train, X_test, Y_test, X_mnk_test, return_model, log
def fetch_pre_trained_model(model_path_folder, X, log):
    """Load a fully re-fitted model from disk and drop from X every feature
    the model does not use.

    :return: (reduced X, model, log)
    """
    model_path = os.path.join(model_path_folder, "feature_tree_refit.p")
    print("fetched pre-trained model from: {}".format(model_path))
    features, model = safe_pickle_load(model_path)
    print("Pickled variables:\nfeatures:{}\nmodel:{}".format(features, model))
    log += print_and_log("\nDrop non-selected features")
    kept = set(features)
    to_drop = [col for col in X.columns.values.tolist() if col not in kept]
    X.drop(to_drop, axis=1, inplace=True)
    return X, model, log
def fetch_pre_trained_model_partial(X, X_mnk, Y, model_options, model_path_folder, log):
    """Load a partially pre-trained model (with its selected features and test
    indices) and reproduce the corresponding train/test split of (X, X_mnk, Y).

    :return: (X_train, Y_train, X_mnk_train, X_test, Y_test, X_mnk_test,
        model, log)
    """
    # Load pre-trained model, selected features and indices of the test set
    model_path = os.path.join(model_path_folder, "feature_tree.p")
    print("fetched partial pre-trained model from: {}".format(model_path))
    features, model, test_indices = safe_pickle_load(model_path)
    print(
        "Pickled stuff:\nfeatures:{}\nmodel:{}\ntest_indices:{}".format(
            features, model, test_indices
        )
    )
    # "mnk" is a grouping label, not a predictor
    if "mnk" in features:
        features.remove("mnk")
    log += print_and_log("\nPerform train/test split")
    (
        X_train,
        X_test,
        Y_train,
        Y_test,
        X_mnk_train,
        X_mnk_test,
    ) = get_train_test_partition([X, Y, X_mnk], test_indices)
    log += print_and_log(
        "\nComplete train/test split, total size="
        + str(X.shape)
        + ", test size="
        + str(X_test.shape)
        + ", train_size="
        + str(X_train.shape)
    )
    log += print_and_log("\nDrop non-selected features")
    kept = set(features)
    drop_cols = [col for col in X_train.columns.values.tolist() if col not in kept]
    X_train.drop(drop_cols, axis=1, inplace=True)
    X_test.drop(drop_cols, axis=1, inplace=True)
    # Dask-backed models train out-of-memory
    if "dask" in model_options["model"]:
        X_train, Y_train = pandas_to_dask(X_train, Y_train)
    return X_train, Y_train, X_mnk_train, X_test, Y_test, X_mnk_test, model, log
# ===============================================================================
# Describe and evaluate model
def describe_hpo(gs, log, folder):
    """Log and persist the results of a hyper-parameter grid search.

    :param gs: a fitted GridSearchCV instance
    :param folder: output directory for the CSV of search results
    :return: the updated log string
    """
    # Scores obtained during hyperparameter optimization
    columns_to_print = ["param_" + par for par in gs.param_grid.keys()]
    columns_to_print += [
        "mean_test_score",
        "std_test_score",
        "mean_train_score",
        "std_train_score",
    ]
    log += print_and_log("\nHyperparameter search results (head):")
    cv_results = pd.DataFrame(gs.cv_results_)[columns_to_print]
    with pd.option_context("display.max_rows", None, "display.max_columns", None):
        log += print_and_log(cv_results.head())
    cv_results_path = os.path.join(folder, "hyperparameter_optimization_results.csv")
    with open(cv_results_path, "w") as f:
        cv_results.to_csv(f, index=False)
    log += print_and_log("Wrote hyperparameter results to " + cv_results_path)
    # Best parameter set found during the search
    log += print_and_log("\nBest parameters set found on development set:")
    for par_name, par_value in gs.best_params_.items():
        log += print_and_log("\t{}: {}".format(par_name, par_value))
    # The estimator refit with those parameters
    log += print_and_log("\nBest estimator:")
    log += print_and_log(gs.best_estimator_)
    log += print_and_log(visual_separator)
    return log
def describe_model(model, X, Y, log):
    """Log the model object and the predictor variables it was given."""
    log += print_and_log("Model:")
    log += print_and_log(model)
    log += print_and_log("Predictor variables:")
    for name in X.columns.values.tolist():
        log += print_and_log("\t{}".format(name))
    return log
def print_custom_error(y_true, y_pred, X_mnk, log, scaled=True):
    """Log the relative performance loss (worst and mean over mnks) of the
    predictions with respect to the autotuned maximum."""
    template = (
        "\tRelative performance loss compared to autotuned max:\n"
        + "top-{}: worse: {:>6.3f} [%], mean: {:>6.3f} [%]"
    )
    for top_k in [1]:
        worst = worse_rel_perf_loss_of_k(y_true, y_pred, top_k, X_mnk, scaled)
        mean = mean_rel_perf_loss_of_k(y_true, y_pred, top_k, X_mnk, scaled)
        log += print_and_log(template.format(top_k, worst, mean))
    return log
def print_error(y_true, y_pred, log):
    """Log the overall mean-absolute and mean-squared prediction error."""
    from sklearn.metrics import mean_absolute_error, mean_squared_error
    result_line = "\tOverall error:\n" + "absolute: {:>6.3f}, mean squared {:>6.3f}"
    mae = mean_absolute_error(y_true, y_pred)
    mse = mean_squared_error(y_true, y_pred)
    log += print_and_log(result_line.format(mae, mse))
    return log
def scale_back(y_scaled, x_mnk, max_performances, mnk=None):
    """Undo the [0, 1] performance scaling.

    :param y_scaled: performances scaled by the per-mnk maximum
    :param x_mnk: DataFrame with an "mnk" column (ignored when mnk is given)
    :param max_performances: dict mapping mnk-string -> maximal performance
    :param mnk: optional single mnk-string; if given, scale by its maximum only
    :return: performances in original units
    """
    if mnk is None:
        # One maximum per row, looked up via the row's mnk string. The
        # comprehension variable is renamed so it no longer shadows the
        # `mnk` parameter (idiom fix; behavior unchanged).
        corresponding_maxperf = np.array(
            [max_performances[mnk_str] for mnk_str in x_mnk["mnk"].values.tolist()]
        )
    else:
        corresponding_maxperf = max_performances[mnk]
    return y_scaled * corresponding_maxperf
def plot_train_test_partition(test_idx, train_idx, X_mnk, folder):
    """Visualize how often each m*n*k product occurs in the train and test
    partitions and save the figure to <folder>/train-test_split.svg.

    :param test_idx: row indices of the test partition
    :param train_idx: row indices of the train partition
    :param X_mnk: DataFrame with an "mnk" column of mnk-strings
    :param folder: output directory for the plot
    """
    import matplotlib.pyplot as plt
    mnks_string_train = X_mnk["mnk"].iloc[train_idx].unique()
    mnks_train = to_tuple(*mnks_string_train)
    mnks_string_test = X_mnk["mnk"].iloc[test_idx].unique()
    mnks_test = to_tuple(*mnks_string_test)
    y_train_product = (
        dict()
    )  # keys: m*n*k, values: how many times this mnk-product appears in training-mnks
    for m, n, k in mnks_train:
        mxnxk = m * n * k
        if mxnxk in y_train_product.keys():
            y_train_product[mxnxk] += 1
        else:
            y_train_product[mxnxk] = 1
    # Expand the counts so each occurrence gets its own (x, y) point
    train_mnks = list()
    train_counts = list()
    for mnk, count in y_train_product.items():
        for c in range(count):
            train_mnks.append(mnk)
            train_counts.append(c + 1)
    y_test_product = dict()
    for m, n, k in mnks_test:
        mxnxk = m * n * k
        if mxnxk in y_test_product.keys():
            y_test_product[mxnxk] += 1
        else:
            y_test_product[mxnxk] = 1
    # Test counts stack on top of the train counts for the same product
    test_mnks = list()
    test_counts = list()
    for mnk, count in y_test_product.items():
        for c in range(count):
            test_mnks.append(mnk)
            if mnk in y_train_product.keys():
                test_counts.append(y_train_product[mnk] + c + 1)
            else:
                test_counts.append(c + 1)
    plt.figure(figsize=(30, 5))
    markersize = 12
    plt.plot(
        train_mnks,
        train_counts,
        "o",
        markersize=markersize,
        color="blue",
        label="training mnks (" + str(len(train_mnks)) + ")",
    )
    plt.plot(
        test_mnks,
        test_counts,
        "o",
        markersize=markersize,
        color="red",
        label="testing mnks (" + str(len(test_mnks)) + ")",
    )
    plot_file_path = os.path.join(folder, "train-test_split.svg")
    plt.xlabel("m * n * k triplets")
    # Bug fix: axis label previously misspelled "occurences"
    plt.ylabel("number of occurrences in data set")
    plt.title("Train/test split")
    maxcount = max(max(test_counts), max(train_counts)) + 1
    plt.ylim([0, maxcount])
    plt.legend()
    plt.savefig(plot_file_path)
def plot_feature_importance(importances, names, folder):
    """Save a horizontal bar chart of feature importances to
    <folder>/feature_importance.svg and print the output path."""
    plt.rcdefaults()
    fig, ax = plt.subplots()
    ax.set_title("Feature importances")
    ax.barh(range(len(names)), importances, color="g", align="center")
    ax.set_yticks(np.arange(len(importances)))
    ax.set_yticklabels(names)
    # Highest bar (first entry) on top
    ax.invert_yaxis()
    out_path = os.path.join(folder, "feature_importance.svg")
    plt.savefig(out_path)
    print(out_path)
def plot_loss_histogram(y_true, y_pred, X_mnk, folder):
    """Save a histogram of top-k=1 relative performance losses to
    <folder>/result_losses.svg and print the output path."""
    import matplotlib.pyplot as plt
    # Get the per-mnk losses for the top-1 choice
    top_k = 1
    losses = np.array(perf_loss(y_true, y_pred, top_k, X_mnk, False))
    # Losses-histogram
    plt.figure()
    plt.hist(losses, 100, facecolor="green", alpha=0.75)
    plt.xlabel("relative performance loss [%]")
    plt.ylabel("# occurrences")
    plt.title(
        "Performance losses for top-k="
        + str(top_k)
        + " ("
        + str(len(losses))
        + " test mnks)"
    )
    plot_file_path = os.path.join(folder, "result_losses.svg")
    plt.savefig(plot_file_path)
    print(plot_file_path)
def plot_prediction_accuracy(m, n, k, y_true, y_pred, train, pp):
    """Scatter-plot true vs. predicted scaled performance for one (m, n, k)
    kernel and append the figure to the open PdfPages object ``pp``.

    :param train: True for training-set points (blue), False for test (red)
    """
    plt.figure()
    if train:
        plt.plot(100 * y_true, 100 * y_pred, "b.", label="truth")
    else:
        plt.plot(100 * y_true, 100 * y_pred, "r.", label="truth")
    plt.xlabel("true scaled performance [%]")
    plt.ylabel("predicted scaled performance [%]")
    # Idiom fix: renamed from `type`, which shadowed the builtin.
    split_name = "train" if train else "test"
    plt.title("Prediction accuracy for kernel " + str((m, n, k)) + " (" + split_name + ")")
    pp.savefig()
def get_predive_model_performances(
    y_true, y_pred, x_mnk, max_performances_ref, max_performances_algo
):
    """For every mnk, take the parameter set the model ranks highest and report
    the performance that was actually measured for that choice.

    NOTE: the function name carries a historic typo ("predive"); it is kept
    unchanged for backward compatibility with existing callers.

    :return: (performance in original units, performance re-scaled by the
        per-algorithm maximum) as two dicts keyed by (m, n, k)
    """
    perf_scaled = dict()
    for mnk_string in x_mnk["mnk"].unique():
        idx_mnk = np.where(x_mnk == mnk_string)[0].tolist()
        assert len(idx_mnk) > 0, "idx_mnk is empty"
        m, n, k = to_tuple(mnk_string)
        # Index (within this mnk's rows) of the model's top-ranked parameter set
        best_idx = [np.argmax(y_pred[idx_mnk])]
        perf_effective = y_true.iloc[idx_mnk].iloc[best_idx].values.item()
        perf_scaled[(m, n, k)] = perf_effective  # 'scaled' between 0 and 1
    # Back to original units via the reference maximum
    predictive_model_perf = {
        mnk: p * max_performances_ref[to_string(mnk)]
        for mnk, p in perf_scaled.items()
    }
    # Re-scale performances by algorithm for a fair comparison
    predictive_model_perf_scaled = {
        mnk: p / max_performances_algo[mnk]
        for mnk, p in predictive_model_perf.items()
    }
    return predictive_model_perf, predictive_model_perf_scaled
# ===============================================================================
def evaluate_model(
    model,
    X_train,
    X_mnk_train,
    Y_train,
    X_test,
    X_mnk_test,
    Y_test,
    max_performances_ref,
    max_performances_algo,
    baseline_performances_algo,
    data_nrows,
    log,
    folder,
):
    """Main evaluation function

    Logs training/testing errors (scaled and scaled-back), plots per-mnk
    prediction accuracy and goodness-of-choice, and compares the predictive
    model against the per-algorithm baseline and autotuned maximum.
    Any of the (X_*, X_mnk_*, Y_*) triples may be None; the corresponding
    sections are skipped. Returns the accumulated log string.
    """
    if model is None:
        return log
    # Start evaluation
    log += print_and_log(visual_separator)
    log += print_and_log("Start model evaluation")
    if all([x is not None for x in [X_test, Y_test]]):
        log = describe_model(model, X_test, Y_test, log)
    # Training error
    if all([x is not None for x in [X_train, X_mnk_train, Y_train]]):
        y_train_pred = model.predict(X_train.values)
        log += print_and_log("\nTraining error: (train&val)")
        log = print_custom_error(Y_train, y_train_pred, X_mnk_train, log, True)
        log = print_error(Y_train, y_train_pred, log)
    # Test error
    if all([x is not None for x in [X_test, X_mnk_test, Y_test]]):
        y_test_pred = model.predict(X_test)
        log += print_and_log("\nTesting error:")
        log = print_custom_error(Y_test, y_test_pred, X_mnk_test, log, True)
        log = print_error(Y_test, y_test_pred, log)
    # Training error (scaled-back to original performance units)
    if all([x is not None for x in [X_train, X_mnk_train, Y_train]]):
        log += print_and_log("\nTraining error (scaled back): (train&val)")
        y_train_pred_scaled_back = scale_back(
            y_train_pred, X_mnk_train, max_performances_ref
        )
        y_train_scaled_back = pd.DataFrame(
            scale_back(Y_train.values.flatten(), X_mnk_train, max_performances_ref)
        )
        log = print_custom_error(
            y_train_scaled_back, y_train_pred_scaled_back, X_mnk_train, log, False
        )
        log = print_error(y_train_scaled_back, y_train_pred_scaled_back, log)
    if all([x is not None for x in [X_test, X_mnk_test, Y_test]]):
        # Test error (scaled-back)
        log += print_and_log("\nTesting error (scaled back): (test&val)")
        y_test_pred_scaled_back = scale_back(
            y_test_pred, X_mnk_test, max_performances_ref
        )
        y_test_scaled_back = pd.DataFrame(
            scale_back(Y_test.values.flatten(), X_mnk_test, max_performances_ref)
        )
        log = print_custom_error(
            y_test_scaled_back, y_test_pred_scaled_back, X_mnk_test, log, False
        )
        log = print_error(y_test_scaled_back, y_test_pred_scaled_back, log)
    # ===============================================================================
    # Print histogram for "best" estimator
    if all([x is not None for x in [X_test, X_mnk_test, Y_test]]):
        log += print_and_log("\nPlot result histogram:")
        plot_loss_histogram(Y_test, y_test_pred, X_mnk_test, folder)
    # ===============================================================================
    # Plot prediction accuracy and goodness of choice for a few mnks (training-set)
    if all([x is not None for x in [X_train, X_mnk_train, Y_train]]):
        # Plot fewer samples for very large data sets
        n_samples = 10 if data_nrows < 100000000 else 2
        mnks_to_plot = random.sample(X_mnk_train["mnk"].values.tolist(), n_samples)
        from matplotlib.backends.backend_pdf import PdfPages
        plot_file_path = os.path.join(folder, "evaluation_by_mnk_refit.pdf")
        if all([x is not None for x in [X_test, X_mnk_test, Y_test]]):
            plot_file_path = os.path.join(folder, "evaluation_by_mnk.pdf")
        pp = PdfPages(plot_file_path)
        for mnk_string in mnks_to_plot:
            # Get performances per mnk
            idx_mnk = np.where(X_mnk_train == mnk_string)[0].tolist()
            assert len(idx_mnk) > 0, "idx_mnk is empty"
            m_, n_, k_ = to_tuple(mnk_string)
            y_train_pred_mnk = y_train_pred[idx_mnk]
            Y_train_mnk = Y_train.iloc[idx_mnk]
            log += print_and_log("Prediction accuracy plot: " + str(mnk_string))
            plot_prediction_accuracy(
                m_, n_, k_, Y_train_mnk, y_train_pred_mnk, True, pp
            )
            log += print_and_log("Goodness plot: " + str(mnk_string))
            plot_choice_goodness(
                m_,
                n_,
                k_,
                baseline_performances_algo,
                max_performances_ref,
                Y_train["perf_scaled"].iloc[idx_mnk].values,
                y_train_pred_mnk,
                True,
                pp,
            )
    # ===============================================================================
    # Plot prediction accuracy for a few mnks (testing-set)
    # NOTE(review): this branch references n_samples and pp, which are only
    # defined when the training triple above is present — with test-only
    # inputs this raises NameError; confirm whether that combination can occur.
    if all([x is not None for x in [X_test, X_mnk_test, Y_test]]):
        mnks_to_plot = random.sample(X_mnk_test["mnk"].values.tolist(), n_samples)
        for mnk_string in mnks_to_plot:
            # Get performances per mnk
            idx_mnk = np.where(X_mnk_test == mnk_string)[0].tolist()
            assert len(idx_mnk) > 0, "idx_mnk is empty"
            m_, n_, k_ = to_tuple(mnk_string)
            log += print_and_log("Prediction accuracy plot: " + str(mnk_string))
            plot_prediction_accuracy(
                m_, n_, k_, Y_test.iloc[idx_mnk], y_test_pred[idx_mnk], False, pp
            )
            log += print_and_log("Goodness plot: " + str(mnk_string))
            plot_choice_goodness(
                m_,
                n_,
                k_,
                baseline_performances_algo,
                max_performances_ref,
                Y_test["perf_scaled"].iloc[idx_mnk].values,
                y_test_pred[idx_mnk],
                False,
                pp,
                True,
            )
    # pp was opened inside the training branch, so it is closed there too
    if all([x is not None for x in [X_train, X_mnk_train, Y_train]]):
        pp.close()
    # ===============================================================================
    # Scale baseline and max performances: re-key by (m, n, k) tuples
    max_performances_algo = dict(
        zip(
            [to_tuple(mnk_string) for mnk_string in max_performances_algo.keys()],
            max_performances_algo.values(),
        )
    )
    max_performances_algo_scaled = dict(
        zip(max_performances_algo.keys(), [1.0] * len(max_performances_algo))
    )
    baseline_performances_algo = dict(
        zip(
            [to_tuple(mnk_string) for mnk_string in baseline_performances_algo.keys()],
            baseline_performances_algo.values(),
        )
    )
    baseline_performances_algo_scaled = dict(
        zip(
            [(m, n, k) for m, n, k in baseline_performances_algo.keys()],
            [
                perf / max_performances_algo[(m, n, k)]
                for (m, n, k), perf in baseline_performances_algo.items()
            ],
        )
    )
    # ===============================================================================
    # Compare max performances and baseline
    from matplotlib.backends.backend_pdf import PdfPages
    plot_file_path = os.path.join(folder, "evaluation_by_overall_refit.pdf")
    if all([x is not None for x in [X_test, X_mnk_test, Y_test]]):
        plot_file_path = os.path.join(folder, "evaluation_overall.pdf")
    pp = PdfPages(plot_file_path)
    if all([x is not None for x in [X_test, X_mnk_test, Y_test]]):
        plot_performance_gains(
            max_performances_algo,
            baseline_performances_algo,
            "trained",
            "max. performance per algorithm",
            "baseline per algorithm",
            pp,
        )
        plot_scaled_performance_gains(
            max_performances_algo_scaled,
            baseline_performances_algo_scaled,
            "trained",
            "max. performance per algorithm",
            "baseline per algorithm",
            pp,
        )
    # ===============================================================================
    # 'Results' = y_true ( y_chosen )
    if all([x is not None for x in [X_train, X_mnk_train, Y_train]]):
        (
            predictive_model_perf_train,
            predictive_model_perf_train_scaled,
        ) = get_predive_model_performances(
            Y_train,
            y_train_pred,
            X_mnk_train,
            max_performances_ref,
            max_performances_algo,
        )
    if all([x is not None for x in [X_test, X_mnk_test, Y_test]]):
        (
            predictive_model_perf_test,
            predictive_model_perf_test_scaled,
        ) = get_predive_model_performances(
            Y_test,
            y_test_pred,
            X_mnk_test,
            max_performances_ref,
            max_performances_algo,
        )
    # ===============================================================================
    # Plot results (training set: predictive modelling VS naïve)
    log += print_and_log("\nPredictive model VS baseline: ")
    if all([x is not None for x in [X_train, X_mnk_train, Y_train]]):
        perf_gain_pred_train_over_baseline = performance_gain(
            baseline_performances_algo, predictive_model_perf_train
        )
        plot_absolute_performance_gain(
            perf_gain_pred_train_over_baseline,
            "trained",
            "baseline per algorithm",
            "predictive model",
            pp,
        )
        scaled_perf_gain_pred_train_over_baseline = performance_gain(
            baseline_performances_algo_scaled, predictive_model_perf_train_scaled
        )
        plot_relative_performance_gain(
            scaled_perf_gain_pred_train_over_baseline,
            "trained",
            "baseline per algorithm",
            "predictive model",
            pp,
        )
    if all([x is not None for x in [X_test, X_mnk_test, Y_test]]):
        perf_gain_pred_test_over_baseline = performance_gain(
            baseline_performances_algo, predictive_model_perf_test
        )
        plot_absolute_performance_gain(
            perf_gain_pred_test_over_baseline,
            "tested",
            "baseline per algorithm",
            "predictive model",
            pp,
        )
        scaled_perf_gain_pred_test_over_baseline = performance_gain(
            baseline_performances_algo_scaled, predictive_model_perf_test_scaled
        )
        plot_relative_performance_gain(
            scaled_perf_gain_pred_test_over_baseline,
            "tested",
            "baseline per algorithm",
            "predictive model",
            pp,
        )
    # NOTE(review): the next four calls use the train-set results but are not
    # guarded by the training triple — with test-only inputs they would raise
    # NameError (consistent with the earlier unguarded use of pp above).
    log += print_and_log("\nPredictive model VS autotuned: ")
    perf_gain_pred_train_over_max = performance_gain(
        max_performances_algo, predictive_model_perf_train
    )
    plot_absolute_performance_gain(
        perf_gain_pred_train_over_max,
        "trained",
        "max. performance per algorithm",
        "predictive model",
        pp,
    )
    scaled_perf_gain_pred_train_over_max = performance_gain(
        max_performances_algo_scaled, predictive_model_perf_train_scaled
    )
    plot_relative_performance_gain(
        scaled_perf_gain_pred_train_over_max,
        "trained",
        "max. performance per algorithm",
        "predictive model",
        pp,
    )
    if all([x is not None for x in [X_test, X_mnk_test, Y_test]]):
        perf_gain_pred_test_over_max = performance_gain(
            max_performances_algo, predictive_model_perf_test
        )
        plot_absolute_performance_gain(
            perf_gain_pred_test_over_max,
            "tested",
            "max. performance per algorithm",
            "predictive model",
            pp,
        )
        scaled_perf_gain_pred_test_over_max = performance_gain(
            max_performances_algo_scaled, predictive_model_perf_test_scaled
        )
        plot_relative_performance_gain(
            scaled_perf_gain_pred_test_over_max,
            "tested",
            "max. performance per algorithm",
            "predictive model",
            pp,
        )
    if all([x is not None for x in [X_test, X_mnk_test, Y_test]]):
        log += print_and_log("\nCompare performances: ")
        plot_performance_gains(
            baseline_performances_algo,
            predictive_model_perf_train,
            "trained",
            "baseline per algorithm",
            "predictive model",
            pp,
        )
        plot_performance_gains(
            max_performances_algo,
            predictive_model_perf_train,
            "trained",
            "max. performance per algorithm",
            "predictive model",
            pp,
        )
    if all([x is not None for x in [X_test, X_mnk_test, Y_test]]):
        plot_performance_gains(
            baseline_performances_algo,
            predictive_model_perf_test,
            "tested",
            "baseline per algorithm",
            "predictive model",
            pp,
        )
        plot_performance_gains(
            max_performances_algo,
            predictive_model_perf_test,
            "tested",
            "max. performance per algorithm",
            "predictive model",
            pp,
        )
    pp.close()
    return log
# ===============================================================================
if __name__ == "__main__":
    # Command-line entry point: parse options and delegate to main().
    parser = argparse.ArgumentParser(
        description="""
        Train a Machine Learning model on autotuning data to predict a kernel's performance given
        its template parameters
        This script is part of the workflow for predictive modelling of optimal libsmm_acc parameters.
        For more details, see README.md.
        """,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    parser.add_argument(
        "-d",
        "--destination_folder",
        metavar="FOLDER",
        type=str,
        default=".",
        help="Folder in which to write plots, models, etc.",
    )
    parser.add_argument(
        "-f",
        "--folder",
        metavar="FOLDER",
        type=str,
        default=".",
        help="Folder from which to read data",
    )
    parser.add_argument(
        "-a", "--algo", metavar="algoname", default="", help="Algorithm to train on"
    )
    parser.add_argument(
        "-m",
        "--model",
        default="DT",
        help="Model to train. Options: DT (Decision Trees), RF (Random Forests), xgb-DT, xgb-DT-dask (out-of-memory"
        + "xgboost), xgb-DT-GPU (with GPU support), xgb-RF",
    )
    # NOTE(review): default=False but no type=/action= — any value passed on
    # the command line (even "-o False") arrives as a non-empty string, which
    # is truthy; confirm whether a store_true action was intended.
    parser.add_argument(
        "-o",
        "--hyperparameter_optimization",
        default=False,
        help="Whether to do hyperparameter optimization. If False, the model will be trained with 'best guess' parameters",
    )
    parser.add_argument(
        "-s",
        "--splits",
        default=3,
        metavar="NUMBER",
        type=int,
        help="Number of cross-validation splits used in RFECV and GridSearchCV",
    )
    parser.add_argument(
        "-e",
        "--ntrees",
        default=3,
        metavar="NUMBER",
        type=int,
        help="Number of estimators in RF",
    )
    parser.add_argument(
        "-j",
        "--njobs",
        default=-1,
        metavar="NUMBER",
        type=int,
        help="Number of parallel jobs that Joblib will launch (used by GridSearchCV and XGBoost)",
    )
    parser.add_argument(
        "-r",
        "--nrows",
        default=None,
        metavar="NUMBER",
        type=int,
        help="Number of rows of data to load. Default: None (load all)",
    )
    parser.add_argument(
        "-g",
        "--prefitted_model",
        metavar="filename",
        default="",
        help="Path to pickled model object to load instead of re-training model",
    )
    # NOTE(review): same string-truthiness caveat as -o above (set_defaults
    # below only covers the case where the flag is absent).
    parser.add_argument(
        "-i",
        "--intermediate_evaluation",
        default=False,
        help="Whether to perform evaluation of the model trained on part of the model",
    )
    parser.set_defaults(intermediate_evaluation=False)
    args = parser.parse_args()
    # Bundle the model-related options for main()/train_model()
    model_args = {
        "model": args.model,
        "splits": args.splits,
        "ntrees": args.ntrees,
        "njobs": args.njobs,
        "hyperparameter_optimization": args.hyperparameter_optimization,
    }
    main(
        args.folder,
        args.destination_folder,
        args.algo,
        model_args,
        args.nrows,
        args.prefitted_model,
        args.intermediate_evaluation,
    )
| 60,672 | 34.986358 | 123 | py |
TREMBA | TREMBA-master/DataLoader.py | import torch
import torch.nn as nn
import torchvision.datasets as dset
import torch.utils.data
import torchvision.transforms as transforms
import os
import json
import numpy as np
from torch.utils.data import Dataset, DataLoader
def imagenet(state):
    """Build ImageNet train/test DataLoaders (resize 256, center-crop 224).

    Returns ``(train_loader, test_loader, nlabels, mean, std)``.  The mean/std
    pair depends on the ``'defense'`` flag in ``state``: denoising-defense
    models expect 0.5-centered inputs, everything else uses the standard
    torchvision ImageNet statistics.  Note the returned tensors are raw
    ``ToTensor`` output; normalization is applied later by the model wrapper.
    """
    if state.get('defense'):
        # Denoising-defense models were trained with 0.5-centered inputs.
        mean = np.array([0.5, 0.5, 0.5])
        std = np.array([0.5, 0.5, 0.5])
    else:
        # Standard torchvision ImageNet statistics.
        mean = np.array([0.485, 0.456, 0.406])
        std = np.array([0.229, 0.224, 0.225])

    preprocess = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
    ])

    def _make_loader(root):
        # Deterministic order (shuffle=False) keeps attack runs reproducible.
        return torch.utils.data.DataLoader(
            dset.ImageFolder(root, transform=preprocess),
            batch_size=state['batch_size'], shuffle=False,
            num_workers=8, pin_memory=True)

    train_loader = _make_loader(state['train_path'])
    test_loader = _make_loader(state['data_path'])
    nlabels = 1000
    return train_loader, test_loader, nlabels, mean, std
TREMBA | TREMBA-master/attack.py | import argparse
import torchvision.models as models
import os
import json
import DataLoader
from utils import *
from FCN import *
from Normalize import Normalize, Permute
from imagenet_model.Resnet import resnet152_denoise, resnet101_denoise
def EmbedBA(function, encoder, decoder, image, label, config, latent=None):
    """TREMBA black-box attack on a single image, searched in latent space.

    Runs NES-style gradient estimation over the generator's latent vector:
    each iteration draws antithetic Gaussian noise around the current latent,
    decodes every sample into an epsilon-bounded perturbation, queries
    ``function`` for the margin loss, and descends the estimated gradient
    with momentum.

    Args:
        function: query-counting model wrapper (``utils.Function``); returns
            (logits, margin_loss) and tracks ``current_counts``.
        encoder, decoder: trained generator halves; decoder output is scaled
            by ``config['epsilon']`` and clamped to the L-inf ball.
        image: single image tensor with values in [0, 1].
        label: true label (untargeted) or target class (targeted).
        config: attack hyper-parameters (lr, sigma, sample_size, momentum,
            plateau_* schedule, ...).
        latent: optional warm-start latent (e.g. from the OSP white-box step);
            defaults to encoding the clean image.

    Returns:
        (True, adversarial_image) on success, else (False, original_image).
    """
    device = image.device
    if latent is None:
        latent = encoder(image.unsqueeze(0)).squeeze().view(-1)
    momentum = torch.zeros_like(latent)
    dimension = len(latent)
    noise = torch.empty((dimension, config['sample_size']), device=device)
    origin_image = image.clone()
    last_loss = []
    lr = config['lr']
    for iter in range(config['num_iters']+1):
        # Decode the current latent into a bounded perturbation and query once.
        perturbation = torch.clamp(decoder(latent.unsqueeze(0)).squeeze(0)*config['epsilon'], -config['epsilon'], config['epsilon'])
        logit, loss = function(torch.clamp(image+perturbation, 0, 1), label)
        if config['target']:
            success = torch.argmax(logit, dim=1) == label
        else:
            success = torch.argmax(logit, dim=1) !=label
        last_loss.append(loss.item())
        # Hard query budget for the whole attack on this image.
        if function.current_counts > 50000:
            break
        if bool(success.item()):
            return True, torch.clamp(image+perturbation, 0, 1)
        # Antithetic sampling: the second half of the noise mirrors the first.
        nn.init.normal_(noise)
        noise[:, config['sample_size']//2:] = -noise[:, :config['sample_size']//2]
        latents = latent.repeat(config['sample_size'], 1) + noise.transpose(0, 1)*config['sigma']
        perturbations = torch.clamp(decoder(latents)*config['epsilon'], -config['epsilon'], config['epsilon'])
        _, losses = function(torch.clamp(image.expand_as(perturbations) + perturbations, 0, 1), label)
        # NES gradient estimate: losses weighted by their noise directions.
        grad = torch.mean(losses.expand_as(noise) * noise, dim=1)
        if iter % config['log_interval'] == 0 and config['print_log']:
            print("iteration: {} loss: {}, l2_deviation {}".format(iter, float(loss.item()), float(torch.norm(perturbation))))
        momentum = config['momentum'] * momentum + (1-config['momentum'])*grad
        latent = latent - lr * momentum
        # Decay the learning rate when the loss plateaus over a sliding window.
        last_loss = last_loss[-config['plateau_length']:]
        if (last_loss[-1] > last_loss[0]+config['plateau_overhead'] or last_loss[-1] > last_loss[0] and last_loss[-1]<0.6) and len(last_loss) == config['plateau_length']:
            if lr > config['lr_min']:
                lr = max(lr / config['lr_decay'], config['lr_min'])
            last_loss = []
    return False, origin_image
# ---- command-line arguments -------------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument('--config', default='config.json', help='config file')
parser.add_argument('--device', default='cuda:0', help='Device for Attack')
parser.add_argument('--save_prefix', default=None, help='override save_prefix in config file')
parser.add_argument('--model_name', default=None)
args = parser.parse_args()
# Load the attack configuration; CLI flags override individual entries.
with open(args.config) as config_file:
    state = json.load(config_file)
if args.save_prefix is not None:
    state['save_prefix'] = args.save_prefix
if args.model_name is not None:
    state['model_name'] = args.model_name
# The attack processes one image at a time, so force batch size 1.
new_state = state.copy()
new_state['batch_size'] = 1
new_state['test_bs'] = 1
device = torch.device(args.device if torch.cuda.is_available() else "cpu")
# The generator was saved as one nn.Sequential(encoder, decoder) state dict;
# split its keys ('0.*' -> encoder, '1.*' -> decoder) back into two dicts.
weight = torch.load(os.path.join("G_weight", state['generator_name']+".pytorch"), map_location=device)
encoder_weight = {}
decoder_weight = {}
for key, val in weight.items():
    if key.startswith('0.'):
        encoder_weight[key[2:]] = val
    elif key.startswith('1.'):
        decoder_weight[key[2:]] = val
_, dataloader, nlabels, mean, std = DataLoader.imagenet(new_state)
# Optional OSP warm start: build the white-box source model whose gradients
# initialize the latent before the black-box search below.
if 'OSP' in state:
    if state['source_model_name'] == 'Adv_Denoise_Resnet152':
        s_model = resnet152_denoise()
        loaded_state_dict = torch.load(os.path.join('weight', state['source_model_name']+".pytorch"))
        s_model.load_state_dict(loaded_state_dict)
    if 'defense' in state and state['defense']:
        # Defense models expect BGR channel order in addition to normalization.
        source_model = nn.Sequential(
            Normalize(mean, std),
            Permute([2,1,0]),
            s_model
        )
    else:
        source_model = nn.Sequential(
            Normalize(mean, std),
            s_model
        )
# ---- black-box target model -------------------------------------------------
if state['model_name'] == 'Resnet34':
    pretrained_model = models.resnet34(pretrained=True)
elif state['model_name'] == 'VGG19':
    pretrained_model = models.vgg19_bn(pretrained=True)
elif state['model_name'] == 'Densenet121':
    pretrained_model = models.densenet121(pretrained=True)
elif state['model_name'] == 'Mobilenet':
    pretrained_model = models.mobilenet_v2(pretrained=True)
elif state['model_name'] == 'Adv_Denoise_Resnext101':
    pretrained_model = resnet101_denoise()
    loaded_state_dict = torch.load(os.path.join('weight', state['model_name']+".pytorch"))
    pretrained_model.load_state_dict(loaded_state_dict, strict=True)
if 'defense' in state and state['defense']:
    model = nn.Sequential(
        Normalize(mean, std),
        Permute([2,1,0]),
        pretrained_model
    )
else:
    model = nn.Sequential(
        Normalize(mean, std),
        pretrained_model
    )
# ---- generator halves with pretrained weights -------------------------------
encoder = Imagenet_Encoder()
decoder = Imagenet_Decoder()
encoder.load_state_dict(encoder_weight)
decoder.load_state_dict(decoder_weight)
model.to(device)
model.eval()
encoder.to(device)
encoder.eval()
decoder.to(device)
decoder.eval()
if 'OSP' in state:
    source_model.to(device)
    source_model.eval()
# Query-counting wrapper around the target model (the attack objective).
F = Function(model, state['batch_size'], state['margin'], nlabels, state['target'])
count_success = 0
count_total = 0
if not os.path.exists(state['save_path']):
    os.mkdir(state['save_path'])
for i, (images, labels) in enumerate(dataloader):
    images = images.to(device)
    labels = int(labels)
    logits = model(images)
    correct = torch.argmax(logits, dim=1) == labels
    # Only attack images the target model classifies correctly to begin with.
    if correct:
        torch.cuda.empty_cache()
        if state['target']:
            labels = state['target_class']
        if 'OSP' in state:
            # White-box warm start: a few gradient steps on the latent using
            # the source model before handing off to the black-box search.
            hinge_loss = MarginLoss_Single(state['white_box_margin'], state['target'])
            images.requires_grad = True
            latents = encoder(images)
            for k in range(state['white_box_iters']):
                perturbations = decoder(latents)*state['epsilon']
                logits = source_model(torch.clamp(images+perturbations, 0, 1))
                loss = hinge_loss(logits, labels)
                grad = torch.autograd.grad(loss, latents)[0]
                latents = latents - state['white_box_lr'] * grad
            with torch.no_grad():
                success, adv = EmbedBA(F, encoder, decoder, images[0], labels, state, latents.view(-1))
        else:
            with torch.no_grad():
                success, adv = EmbedBA(F, encoder, decoder, images[0], labels, state)
        count_success += int(success)
        count_total += int(correct)
        print("image: {} eval_count: {} success: {} average_count: {} success_rate: {}".format(i, F.current_counts, success, F.get_average(), float(count_success) / float(count_total)))
        # Archive this image's query count and reset for the next one.
        F.new_counter()
# ---- summary ----------------------------------------------------------------
success_rate = float(count_success) / float(count_total)
if state['target']:
    np.save(os.path.join(state['save_path'], '{}_class_{}.npy'.format(state['save_prefix'], state['target_class'])), np.array(F.counts))
else:
    np.save(os.path.join(state['save_path'], '{}.npy'.format(state['save_prefix'])), np.array(F.counts))
print("success rate {}".format(success_rate))
print("average eval count {}".format(F.get_average()))
| 7,438 | 36.570707 | 185 | py |
TREMBA | TREMBA-master/utils.py | import torch.nn as nn
import torch
import numpy as np
class MarginLoss(nn.Module):
    """Hinge-style margin loss on classifier logits, used as attack objective.

    Untargeted mode (``target=False``, ``label`` is a batch tensor): pushes
    the true-class logit below the largest other logit.  Targeted mode
    (``label`` is a single int): pushes the target-class logit above all
    others.  The ``relu(diff + margin) - margin`` form clamps the loss from
    below at ``-margin`` so already-successful examples stop contributing.
    """

    def __init__(self, margin=1.0, target=False):
        super(MarginLoss, self).__init__()
        self.margin = margin
        self.target = target

    def forward(self, logits, label):
        """Return the mean margin loss over the batch."""
        if not self.target:
            # Bug fix: use a bool mask instead of uint8 — indexing with uint8
            # masks is deprecated and removed in modern PyTorch.
            one_hot = torch.zeros_like(logits, dtype=torch.bool)
            label = label.reshape(-1, 1)
            one_hot.scatter_(1, label, True)
            # true-class logit minus best other-class logit, per sample
            diff = logits[one_hot] - torch.max(logits[~one_hot].view(len(logits), -1), dim=1)[0]
            margin = torch.nn.functional.relu(diff + self.margin, True) - self.margin
        else:
            # best non-target logit minus target logit
            diff = torch.max(torch.cat((logits[:, :label], logits[:, (label+1):]), dim=1), dim=1)[0] - logits[:, label]
            margin = torch.nn.functional.relu(diff + self.margin, True) - self.margin
        return margin.mean()
class MarginLoss_Single(nn.Module):
    """Margin loss for a single integer label shared by the whole batch.

    Same contract as ``MarginLoss`` but ``label`` is always a plain int, so
    the non-label logits are selected by slicing rather than masking.
    """

    def __init__(self, margin=1.0, target=False):
        super(MarginLoss_Single, self).__init__()
        self.margin = margin
        self.target = target

    def forward(self, logits, label):
        # Gather every logit except the labelled column.
        last_class = logits.size()[1] - 1
        if label == 0:
            others = logits[:, 1:]
        elif label == last_class:
            others = logits[:, :label]
        else:
            others = torch.cat((logits[:, :label], logits[:, (label + 1):]), dim=1)
        best_other = torch.max(others, dim=1)[0]
        if not self.target:
            # untargeted: drive the labelled logit below the best other
            gap = logits[:, label] - best_other
        else:
            # targeted: drive the labelled (target) logit above the best other
            gap = best_other - logits[:, label]
        clipped = torch.nn.functional.relu(gap + self.margin, True) - self.margin
        return clipped.mean()
class Function(nn.Module):
    """Wraps a classifier as a query-counting black-box attack objective.

    ``forward`` evaluates the model in chunks of at most ``batch_size``
    images and adds the number of images to ``current_counts``.
    ``new_counter`` archives that count into ``counts`` and resets it, so
    ``counts`` holds the per-example query usage of the whole attack run.
    """

    def __init__(self, model, batch_size=256, margin=0, nlabels=10, target=False):
        super(Function, self).__init__()
        self.model = model
        self.margin = margin
        self.target = target
        self.batch_size = batch_size
        self.current_counts = 0   # queries spent on the current example
        self.counts = []          # archived per-example query counts
        self.nlabels = nlabels

    def _loss(self, logits, label):
        # Margin loss for a single integer label (same form as MarginLoss_Single).
        if not self.target:
            if label == 0:
                logits_cat = logits[:, (label+1):]
            elif label == logits.size()[1] - 1:
                logits_cat = logits[:, :label]
            else:
                logits_cat = torch.cat((logits[:, :label], logits[:, (label+1):]), dim=1)
            diff = logits[:, label] - torch.max(logits_cat, dim=1)[0]
            margin = torch.nn.functional.relu(diff + self.margin, True) - self.margin
        else:
            diff = torch.max(torch.cat((logits[:, :label], logits[:, (label+1):]), dim=1), dim=1)[0] - logits[:, label]
            margin = torch.nn.functional.relu(diff + self.margin, True) - self.margin
        return margin

    def forward(self, images, label):
        """Return (logits, per-image margin loss) for a batch of images."""
        if len(images.size()) == 3:
            images = images.unsqueeze(0)
        n = len(images)
        device = images.device
        k = 0
        loss = torch.zeros(n, dtype=torch.float32, device=device)
        logits = torch.zeros((n, self.nlabels), dtype=torch.float32, device=device)
        while k < n:
            start = k
            end = min(k + self.batch_size, n)
            logits[start:end] = self.model(images[start:end])
            # Bug fix: compute the loss on this chunk's logits only.  The
            # original passed the full ``logits`` buffer, which raises a
            # shape-mismatch error whenever n > batch_size.
            loss[start:end] = self._loss(logits[start:end], label)
            k = end
        self.current_counts += n
        return logits, loss

    def new_counter(self):
        """Archive the current example's query count and reset it."""
        self.counts.append(self.current_counts)
        self.current_counts = 0

    def get_average(self, iter=50000):
        """Mean query count over examples that stayed under ``iter`` queries."""
        counts = np.array(self.counts)
        return np.mean(counts[counts < iter])
| 3,786 | 35.413462 | 117 | py |
TREMBA | TREMBA-master/Normalize.py | import torch
import torch.nn as nn
class Normalize(nn.Module):
    """Per-channel input normalization: ``(x - mean) / std``.

    Works on batched tensors with channels along dim 1.  The input is cloned
    first, so the caller's tensor is never mutated.
    """

    def __init__(self, mean, std):
        super(Normalize, self).__init__()
        self.mean = mean
        self.std = std

    def forward(self, input):
        normalized = input.clone()
        num_channels = input.size()[1]
        for ch in range(num_channels):
            normalized[:, ch] = (normalized[:, ch] - self.mean[ch]) / self.std[ch]
        return normalized
class Permute(nn.Module):
    """Reorder the channel dimension (dim 1) of a batched tensor.

    The default ordering [2, 1, 0] swaps RGB <-> BGR, which the
    denoising-defense models in this project expect.
    """

    def __init__(self, permutation=[2, 1, 0]):
        # NOTE: mutable default is shared across instances, but it is never
        # mutated here, so behavior is unaffected.
        super().__init__()
        self.permutation = permutation

    def forward(self, input):
        # Fancy indexing along dim 1; all other dimensions pass through.
        reordered = input[:, self.permutation]
        return reordered
TREMBA | TREMBA-master/FCN.py | # -*- coding: utf-8 -*-
import torch
import torch.nn as nn
class Imagenet_Encoder(nn.Module):
    """Encoder half of the perturbation generator.

    Maps a 3x224x224 image to an 8x14x14 feature map (four stride-2
    max-pools: 224 -> 112 -> 56 -> 28 -> 14).  The attribute names
    (conv1_1 ... conv4_2) and the layer order inside each nn.Sequential are
    kept identical to the original hand-written version so pretrained
    state_dicts load unchanged.
    """

    def __init__(self):
        super().__init__()
        self.conv1_1 = self._block(3, 16, pool=False)
        self.conv1_2 = self._block(16, 32, pool=True)
        self.conv2_1 = self._block(32, 64, pool=False)
        self.conv2_2 = self._block(64, 64, pool=True)
        self.conv3_1 = self._block(64, 128, pool=False)
        self.conv3_2 = self._block(128, 128, pool=True)
        self.conv4_1 = self._block(128, 32, pool=False)
        self.conv4_2 = self._block(32, 8, pool=True)

    @staticmethod
    def _block(cin, cout, pool):
        # conv -> ReLU -> BatchNorm, optionally followed by 2x2 max-pooling.
        # Child order matches the original Sequentials, preserving
        # state_dict keys like 'conv1_1.0.weight'.
        layers = [
            nn.Conv2d(in_channels=cin, out_channels=cout, kernel_size=3, stride=1, padding=1),
            nn.ReLU(inplace=True),
            nn.BatchNorm2d(cout),
        ]
        if pool:
            layers.append(nn.MaxPool2d(kernel_size=2))
        return nn.Sequential(*layers)

    def forward(self, x):
        for stage in (self.conv1_1, self.conv1_2, self.conv2_1, self.conv2_2,
                      self.conv3_1, self.conv3_2, self.conv4_1, self.conv4_2):
            x = stage(x)
        return x
class Imagenet_Decoder(nn.Module):
    """Decoder half of the perturbation generator.

    Takes a flattened latent of length 8*14*14, reshapes it to (8, 14, 14),
    and upsamples through four stride-2 transposed convolutions
    (14 -> 28 -> 56 -> 112 -> 224) to a 3x224x224 tanh output in [-1, 1].
    Attribute names and the layer order inside each nn.Sequential match the
    original hand-written version so pretrained state_dicts load unchanged.
    """

    def __init__(self):
        super().__init__()
        self.deconv1_1 = self._conv(8, 32)
        self.deconv1_2 = self._up(32, 64)
        self.deconv2_1 = self._conv(64, 128)
        self.deconv2_2 = self._up(128, 128)
        self.deconv3_1 = self._conv(128, 128)
        self.deconv3_2 = self._up(128, 64)
        self.deconv4_1 = self._conv(64, 32)
        self.deconv4_2 = self._up(32, 16)
        self.fcn = nn.Conv2d(16, 3, kernel_size=1)

    @staticmethod
    def _conv(cin, cout):
        # Resolution-preserving 3x3 conv block.
        return nn.Sequential(
            nn.Conv2d(in_channels=cin, out_channels=cout, kernel_size=3, stride=1, padding=1),
            nn.ReLU(inplace=True),
            nn.BatchNorm2d(cout),
        )

    @staticmethod
    def _up(cin, cout):
        # Stride-2 transposed conv doubles the spatial resolution.
        return nn.Sequential(
            nn.ConvTranspose2d(cin, cout, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1),
            nn.ReLU(inplace=True),
            nn.BatchNorm2d(cout),
        )

    def forward(self, x):
        n = len(x)
        y = x.view(n, 8, 14, 14)
        for stage in (self.deconv1_1, self.deconv1_2, self.deconv2_1, self.deconv2_2,
                      self.deconv3_1, self.deconv3_2, self.deconv4_1, self.deconv4_2):
            y = stage(y)
        return torch.tanh(self.fcn(y))
TREMBA | TREMBA-master/train_generator.py | import argparse
import os
import json
import torch
import torch.nn as nn
import torch.nn.functional as F
from Normalize import Normalize, Permute
import DataLoader
import numpy as np
from FCN import *
from utils import *
import torchvision.models as models
import copy
from imagenet_model.Resnet import *
if __name__ == '__main__':
    # ---- configuration ------------------------------------------------------
    parser = argparse.ArgumentParser()
    parser.add_argument('--device', type=int, nargs="+", default=[0,1,2,3])
    parser.add_argument('--config', default='config_joint_train_Imagenet_GAN.json', help='config file')
    args = parser.parse_args()
    with open(args.config) as config_file:
        state = json.load(config_file)
    device = torch.device(args.device[0])
    train_loader, test_loader, nlabels, mean, std = DataLoader.imagenet(state)
    # ---- surrogate classifiers the generator is trained against -------------
    nets = []
    for model_name in state['model_name']:
        print(model_name)
        if model_name == "VGG16":
            pretrained_model = models.vgg16_bn(pretrained=True)
        elif model_name == 'Resnet18':
            pretrained_model = models.resnet18(pretrained=True)
        elif model_name == 'Squeezenet':
            pretrained_model = models.squeezenet1_1(pretrained=True)
        elif model_name == 'Googlenet':
            pretrained_model = models.googlenet(pretrained=True)
        elif model_name == 'Adv_Denoise_Resnet152':
            pretrained_model = resnet152_denoise()
            loaded_state_dict = torch.load(os.path.join('weight', model_name+".pytorch"))
            pretrained_model.load_state_dict(loaded_state_dict)
        # Prepend normalization (and BGR permutation for defense models) so
        # the classifiers consume raw [0, 1] images.
        if 'defense' in state and state['defense']:
            net = nn.Sequential(
                Normalize(mean, std),
                Permute([2,1,0]),
                pretrained_model
            )
        else:
            net = nn.Sequential(
                Normalize(mean, std),
                pretrained_model
            )
        nets.append(net)
    # The generator is saved as one Sequential(encoder, decoder); attack.py
    # later splits its state dict on the '0.' / '1.' key prefixes.
    model = nn.Sequential(
        Imagenet_Encoder(),
        Imagenet_Decoder()
    )
    for i in range(len(nets)):
        nets[i] = torch.nn.DataParallel(nets[i], args.device)
        nets[i].eval()
        nets[i].to(device)
    model = torch.nn.DataParallel(model, args.device)
    model.to(device)
    print(model)
    # ---- optimizer, LR schedule, and attack loss ----------------------------
    optimizer_G = torch.optim.SGD(model.parameters(), state['learning_rate_G'], momentum=state['momentum'],
                                  weight_decay=0, nesterov=True)
    scheduler_G = torch.optim.lr_scheduler.StepLR(optimizer_G, step_size=state['epochs'] // state['schedule'],
                                                  gamma=state['gamma'])
    hingeloss = MarginLoss(margin=state['margin'], target=state['target'])
    if state['target']:
        save_name = "Imagenet_{}{}_target_{}.pytorch".format("_".join(state['model_name']), state['save_suffix'], state['target_class'])
    else:
        save_name = "Imagenet_{}{}_untarget.pytorch".format("_".join(state['model_name']), state['save_suffix'])
def train():
model.train()
for batch_idx, (data, label) in enumerate(train_loader):
nat = data.to(device)
if state['target']:
label = state['target_class']
else:
label = label.to(device)
losses_g = []
optimizer_G.zero_grad()
for net in nets:
noise = model(nat)
adv = torch.clamp(noise * state['epsilon'] + nat, 0, 1)
logits_adv = net(adv)
loss_g = hingeloss(logits_adv, label)
losses_g.append("%4.2f" % loss_g.item())
loss_g.backward()
optimizer_G.step()
if (batch_idx + 1) % state['log_interval'] == 0:
print("batch {}, losses_g {}".format(batch_idx + 1, dict(zip(state['model_name'], losses_g))))
def test():
model.eval()
loss_avg = [0.0 for i in range(len(nets))]
success = [0 for i in range(len(nets))]
for batch_idx, (data, label) in enumerate(test_loader):
nat = data.to(device)
if state['target']:
label = state['target_class']
else:
label = label.to(device)
noise = model(nat)
adv = torch.clamp(noise * state['epsilon'] + nat, 0, 1)
for j in range(len(nets)):
logits = nets[j](adv)
loss = hingeloss(logits, label)
loss_avg[j] += loss.item()
if state['target']:
success[j] += int((torch.argmax(logits, dim=1) == label).sum())
else:
success[j] += int((torch.argmax(logits, dim=1) == label).sum())
state['test_loss'] = [loss_avg[i] / len(test_loader) for i in range(len(loss_avg))]
state['test_successes'] = [success[i] / len(test_loader.dataset) for i in range(len(success))]
state['test_success'] = 0.0
for i in range(len(state['test_successes'])):
state['test_success'] += state['test_successes'][i]/len(state['test_successes'])
best_success = 0.0
for epoch in range(state['epochs']):
scheduler_G.step()
state['epoch'] = epoch
train()
torch.cuda.empty_cache()
if epoch % 10 == 0:
with torch.no_grad():
test()
print(state)
torch.save(model.module.state_dict(), os.path.join("G_weight", save_name))
print("epoch {}, Current success: {}, Best success: {}".format(epoch, state['test_success'], best_success)) | 5,548 | 36.748299 | 136 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.