repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
InDuDoNet | InDuDoNet-main/train.py | # !/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Created on Tue Nov 5 11:56:06 2020
@author: hongwang (hongwang9209@hotmail.com)
MICCAI2021: ``InDuDoNet: An Interpretable Dual Domain Network for CT Metal Artifact Reduction''
paper link: https://arxiv.org/pdf/2109.05298.pdf
"""
from __future__ import print_function
import argparse
import os
import torch
import torch.nn.functional as F
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import time
import matplotlib.pyplot as plt
import numpy as np
from tensorboardX import SummaryWriter
from torch.utils.data import DataLoader
from math import ceil
from deeplesion.Dataset import MARTrainDataset
from network.indudonet import InDuDoNet
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
parser = argparse.ArgumentParser()
parser.add_argument("--data_path", type=str, default="./deep_lesion/", help='txt path to training spa-data')
parser.add_argument('--workers', type=int, help='number of data loading workers', default=0)
parser.add_argument('--batchSize', type=int, default=1, help='input batch size')
parser.add_argument('--patchSize', type=int, default=416, help='the height / width of the input image to network')
parser.add_argument('--niter', type=int, default=100, help='total number of training epochs')
parser.add_argument('--num_channel', type=int, default=32, help='the number of dual channels') # refer to https://github.com/hongwang01/RCDNet for the channel concatenation strategy
parser.add_argument('--T', type=int, default=4, help='the number of ResBlocks in every ProxNet')
parser.add_argument('--S', type=int, default=10, help='the number of total iterative stages')
parser.add_argument('--resume', type=int, default=0, help='continue to train')
parser.add_argument("--milestone", type=int, default=[40, 80], help="When to decay learning rate")
parser.add_argument('--lr', type=float, default=0.0002, help='initial learning rate')
parser.add_argument('--log_dir', default='./logs/', help='tensorboard logs')
parser.add_argument('--model_dir', default='./models/', help='saving model')
parser.add_argument('--eta1', type=float, default=1, help='initialization for stepsize eta1')
parser.add_argument('--eta2', type=float, default=5, help='initialization for stepsize eta2')
parser.add_argument('--alpha', type=float, default=0.5, help='initialization for weight factor')
parser.add_argument('--gamma', type=float, default=1e-1, help='hyper-parameter for balancing different loss items')
opt = parser.parse_args()
# create path
try:
os.makedirs(opt.log_dir)
except OSError:
pass
try:
os.makedirs(opt.model_dir)
except OSError:
pass
cudnn.benchmark = True
def train_model(net,optimizer, scheduler,datasets):
data_loader = DataLoader(datasets, batch_size=opt.batchSize, shuffle=True, num_workers=int(opt.workers),
pin_memory=True)
num_data = len(datasets)
num_iter_epoch = ceil(num_data / opt.batchSize)
writer = SummaryWriter(opt.log_dir)
step = 0
for epoch in range(opt.resume, opt.niter):
mse_per_epoch = 0
tic = time.time()
# train stage
lr = optimizer.param_groups[0]['lr']
phase = 'train'
for ii, data in enumerate(data_loader):
Xma, XLI, Xgt, mask, Sma, SLI, Sgt, Tr = [x.cuda() for x in data]
net.train()
optimizer.zero_grad()
ListX, ListS, ListYS= net(Xma, XLI, mask, Sma, SLI, Tr)
loss_l2YSmid = 0.1 * F.mse_loss(ListYS[opt.S -2], Sgt)
loss_l2Xmid = 0.1 * F.mse_loss(ListX[opt.S -2] * (1 - mask), Xgt * (1 - mask))
loss_l2YSf = F.mse_loss(ListYS[-1], Sgt)
loss_l2Xf = F.mse_loss(ListX[-1] * (1 - mask), Xgt * (1 - mask))
loss_l2YS = loss_l2YSf + loss_l2YSmid
loss_l2X = loss_l2Xf + loss_l2Xmid
loss = opt.gamma * loss_l2YS + loss_l2X
loss.backward()
optimizer.step()
mse_iter = loss.item()
mse_per_epoch += mse_iter
if ii % 400 == 0:
template = '[Epoch:{:>2d}/{:<2d}] {:0>5d}/{:0>5d}, Loss={:5.2e}, Lossl2YS={:5.2e}, Lossl2X={:5.2e}, lr={:.2e}'
print(template.format(epoch + 1, opt.niter, ii, num_iter_epoch, mse_iter, loss_l2YS, loss_l2X, lr))
writer.add_scalar('Loss', loss, step)
writer.add_scalar('Loss_YS', loss_l2YS, step)
writer.add_scalar('Loss_X', loss_l2X, step)
step += 1
mse_per_epoch /= (ii + 1)
print('Loss={:+.2e}'.format(mse_per_epoch))
print('-' * 100)
scheduler.step()
# save model
torch.save(net.state_dict(), os.path.join(opt.model_dir, 'InDuDoNet_latest.pt'))
if epoch % 10 == 0:
# save model
model_prefix = 'model_'
save_path_model = os.path.join(opt.model_dir, model_prefix + str(epoch + 1))
torch.save({
'epoch': epoch + 1,
'step': step + 1,
}, save_path_model)
torch.save(net.state_dict(), os.path.join(opt.model_dir, 'InDuDoNet_%d.pt' % (epoch + 1)))
toc = time.time()
print('This epoch take time {:.2f}'.format(toc - tic))
writer.close()
print('Reach the maximal epochs! Finish training')
if __name__ == '__main__':
def print_network(name, net):
num_params = 0
for param in net.parameters():
num_params += param.numel()
print('name={:s}, Total number={:d}'.format(name, num_params))
net = InDuDoNet(opt).cuda()
print_network("InDuDoNet:", net)
optimizer= optim.Adam(net.parameters(), betas=(0.5, 0.999), lr=opt.lr)
scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=opt.milestone,gamma=0.5) # learning rates
# from opt.resume continue to train
for _ in range(opt.resume):
scheduler.step()
if opt.resume:
net.load_state_dict(torch.load(os.path.join(opt.model_dir, 'InDuDoNet_%d.pt' % (opt.resume))))
print('loaded checkpoints, epoch{:d}'.format(opt.resume))
# load dataset
train_mask = np.load(os.path.join(opt.data_path, 'trainmask.npy'))
train_dataset = MARTrainDataset(opt.data_path, opt.patchSize, train_mask)
# train model
train_model(net, optimizer, scheduler,train_dataset)
| 6,328 | 44.532374 | 182 | py |
InDuDoNet | InDuDoNet-main/test_deeplesion.py | import os
import os.path
import argparse
import numpy as np
import torch
import time
import matplotlib.pyplot as plt
import h5py
import PIL
from PIL import Image
from network.indudonet import InDuDoNet
from deeplesion.build_gemotry import initialization, build_gemotry
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
parser = argparse.ArgumentParser(description="YU_Test")
parser.add_argument("--model_dir", type=str, default="models", help='path to model and log files')
parser.add_argument("--data_path", type=str, default="deeplesion/test/", help='path to training data')
parser.add_argument("--use_GPU", type=bool, default=True, help='use GPU or not')
parser.add_argument("--save_path", type=str, default="./test_results/", help='path to training data')
parser.add_argument('--num_channel', type=int, default=32, help='the number of dual channels')
parser.add_argument('--T', type=int, default=4, help='the number of ResBlocks in every ProxNet')
parser.add_argument('--S', type=int, default=10, help='the number of total iterative stages')
parser.add_argument('--eta1', type=float, default=1, help='initialization for stepsize eta1')
parser.add_argument('--eta2', type=float, default=5, help='initialization for stepsize eta2')
parser.add_argument('--alpha', type=float, default=0.5, help='initialization for weight factor')
opt = parser.parse_args()
def mkdir(path):
folder = os.path.exists(path)
if not folder:
os.makedirs(path)
print("--- new folder... ---")
print("--- " + path + " ---")
else:
print("--- There exsits folder " + path + " ! ---")
input_dir = opt.save_path + '/Xma/'
gt_dir = opt.save_path + '/Xgt/'
outX_dir = opt.save_path+'/X/'
outYS_dir = opt.save_path +'/YS/'
mkdir(input_dir)
mkdir(gt_dir)
mkdir(outX_dir)
mkdir(outYS_dir)
def image_get_minmax():
return 0.0, 1.0
def proj_get_minmax():
return 0.0, 4.0
def normalize(data, minmax):
data_min, data_max = minmax
data = np.clip(data, data_min, data_max)
data = (data - data_min) / (data_max - data_min)
data = data * 255.0
data = data.astype(np.float32)
data = np.expand_dims(np.transpose(np.expand_dims(data, 2), (2, 0, 1)),0)
return data
param = initialization()
ray_trafo = build_gemotry(param)
test_mask = np.load(os.path.join(opt.data_path, 'testmask.npy'))
def test_image(data_path, imag_idx, mask_idx):
txtdir = os.path.join(data_path, 'test_640geo_dir.txt')
mat_files = open(txtdir, 'r').readlines()
gt_dir = mat_files[imag_idx]
file_dir = gt_dir[:-6]
data_file = file_dir + str(mask_idx) + '.h5'
abs_dir = os.path.join(data_path, 'test_640geo/', data_file)
gt_absdir = os.path.join(data_path, 'test_640geo/', gt_dir[:-1])
gt_file = h5py.File(gt_absdir, 'r')
Xgt = gt_file['image'][()]
gt_file.close()
file = h5py.File(abs_dir, 'r')
Xma= file['ma_CT'][()]
Sma = file['ma_sinogram'][()]
XLI = file['LI_CT'][()]
SLI = file['LI_sinogram'][()]
Tr = file['metal_trace'][()]
Sgt = np.asarray(ray_trafo(Xgt))
file.close()
M512 = test_mask[:,:,mask_idx]
M = np.array(Image.fromarray(M512).resize((416, 416), PIL.Image.BILINEAR))
Xma = normalize(Xma, image_get_minmax()) # *255
Xgt = normalize(Xgt, image_get_minmax())
XLI = normalize(XLI, image_get_minmax())
Sma = normalize(Sma, proj_get_minmax())
Sgt = normalize(Sgt, proj_get_minmax())
SLI = normalize(SLI, proj_get_minmax())
Tr = 1 - Tr.astype(np.float32)
Tr = np.expand_dims(np.transpose(np.expand_dims(Tr, 2), (2, 0, 1)), 0) # 1*1*h*w
Mask = M.astype(np.float32)
Mask = np.expand_dims(np.transpose(np.expand_dims(Mask, 2), (2, 0, 1)),0)
return torch.Tensor(Xma).cuda(), torch.Tensor(XLI).cuda(), torch.Tensor(Xgt).cuda(), torch.Tensor(Mask).cuda(), \
torch.Tensor(Sma).cuda(), torch.Tensor(SLI).cuda(), torch.Tensor(Sgt).cuda(), torch.Tensor(Tr).cuda()
def print_network(name, net):
num_params = 0
for param in net.parameters():
num_params += param.numel()
print('name={:s}, Total number={:d}'.format(name, num_params))
def main():
print('Loading model ...\n')
net = InDuDoNet(opt).cuda()
print_network("InDuDoNet", net)
net.load_state_dict(torch.load(opt.model_dir))
net.eval()
time_test = 0
count = 0
for imag_idx in range(1): # for demo
print(imag_idx)
for mask_idx in range(10):
Xma, XLI, Xgt, M, Sma, SLI, Sgt, Tr = test_image(opt.data_path, imag_idx, mask_idx)
with torch.no_grad():
if opt.use_GPU:
torch.cuda.synchronize()
start_time = time.time()
ListX, ListS, ListYS= net(Xma, XLI, M, Sma, SLI, Tr)
end_time = time.time()
dur_time = end_time - start_time
time_test += dur_time
print('Times: ', dur_time)
Xoutclip = torch.clamp(ListX[-1] / 255.0, 0, 0.5)
Xgtclip = torch.clamp(Xgt / 255.0, 0, 0.5)
Xmaclip = torch.clamp(Xma /255.0, 0, 0.5)
Xoutnorm = Xoutclip / 0.5
Xmanorm = Xmaclip / 0.5
Xgtnorm = Xgtclip / 0.5
YS = torch.clamp(ListYS[-1]/255, 0, 1)
idx = imag_idx *10+ mask_idx + 1
plt.imsave(input_dir + str(idx) + '.png', Xmanorm.data.cpu().numpy().squeeze(), cmap="gray")
plt.imsave(gt_dir + str(idx) + '.png', Xgtnorm.data.cpu().numpy().squeeze(), cmap="gray")
plt.imsave(outX_dir + str(idx) + '.png', Xoutnorm.data.cpu().numpy().squeeze(), cmap="gray")
plt.imsave(outYS_dir + str(idx) + '.png', YS.data.cpu().numpy().squeeze(), cmap="gray")
count += 1
print('Avg.time={:.4f}'.format(time_test/count))
if __name__ == "__main__":
main()
| 5,780 | 39.145833 | 117 | py |
InDuDoNet | InDuDoNet-main/deeplesion/Dataset.py | import os
import os.path
import numpy as np
import random
import h5py
import torch
import torch.utils.data as udata
import PIL.Image as Image
from numpy.random import RandomState
import scipy.io as sio
import PIL
from PIL import Image
from .build_gemotry import initialization, build_gemotry
param = initialization()
ray_trafo = build_gemotry(param)
def image_get_minmax():
return 0.0, 1.0
def proj_get_minmax():
return 0.0, 4.0
def normalize(data, minmax):
data_min, data_max = minmax
data = np.clip(data, data_min, data_max)
data = (data - data_min) / (data_max - data_min)
data = data.astype(np.float32)
data = data*255.0
data = np.transpose(np.expand_dims(data, 2), (2, 0, 1))
return data
class MARTrainDataset(udata.Dataset):
def __init__(self, dir, patchSize, mask):
super().__init__()
self.dir = dir
self.train_mask = mask
self.patch_size = patchSize
self.txtdir = os.path.join(self.dir, 'train_640geo_dir.txt')
self.mat_files = open(self.txtdir, 'r').readlines()
self.file_num = len(self.mat_files)
self.rand_state = RandomState(66)
def __len__(self):
return self.file_num
def __getitem__(self, idx):
gt_dir = self.mat_files[idx]
#random_mask = random.randint(0, 89) # include 89
random_mask = random.randint(0, 9) # for demo
file_dir = gt_dir[:-6]
data_file = file_dir + str(random_mask) + '.h5'
abs_dir = os.path.join(self.dir, 'train_640geo/', data_file)
gt_absdir = os.path.join(self.dir,'train_640geo/', gt_dir[:-1])
gt_file = h5py.File(gt_absdir, 'r')
Xgt = gt_file['image'][()]
gt_file.close()
file = h5py.File(abs_dir, 'r')
Xma= file['ma_CT'][()]
Sma = file['ma_sinogram'][()]
XLI =file['LI_CT'][()]
SLI = file['LI_sinogram'][()]
Tr = file['metal_trace'][()]
file.close()
Sgt = np.asarray(ray_trafo(Xgt))
M512 = self.train_mask[:,:,random_mask]
M = np.array(Image.fromarray(M512).resize((416, 416), PIL.Image.BILINEAR))
Xma = normalize(Xma, image_get_minmax())
Xgt = normalize(Xgt, image_get_minmax())
XLI = normalize(XLI, image_get_minmax())
Sma = normalize(Sma, proj_get_minmax())
Sgt = normalize(Sgt, proj_get_minmax())
SLI = normalize(SLI, proj_get_minmax())
Tr = 1 -Tr.astype(np.float32)
Tr = np.transpose(np.expand_dims(Tr, 2), (2, 0, 1))
Mask = M.astype(np.float32)
Mask = np.transpose(np.expand_dims(Mask, 2), (2, 0, 1))
return torch.Tensor(Xma), torch.Tensor(XLI), torch.Tensor(Xgt), torch.Tensor(Mask), \
torch.Tensor(Sma), torch.Tensor(SLI), torch.Tensor(Sgt), torch.Tensor(Tr) | 2,795 | 34.846154 | 93 | py |
InDuDoNet | InDuDoNet-main/network/priornet.py | import torch
import torch.nn as nn
import torch.nn.functional as F
class UNet(nn.Module):
def __init__(self, n_channels=2, n_classes=1, n_filter=32):
super(UNet, self).__init__()
self.inc = inconv(n_channels, n_filter)
self.down1 = down(n_filter, n_filter*2)
self.down2 = down(n_filter*2, n_filter*4)
self.down3 = down(n_filter*4, n_filter*8)
self.down4 = down(n_filter*8, n_filter*8)
self.up1 = up(n_filter*16, n_filter*4)
self.up2 = up(n_filter*8, n_filter*2)
self.up3 = up(n_filter*4, n_filter)
self.up4 = up(n_filter*2, n_filter)
self.outc = outconv(n_filter, n_classes)
def forward(self, x):
x1 = self.inc(x)
x2 = self.down1(x1)
x3 = self.down2(x2)
x4 = self.down3(x3)
x5 = self.down4(x4)
x = self.up1(x5, x4)
x = self.up2(x, x3)
x = self.up3(x, x2)
x = self.up4(x, x1)
x = self.outc(x)
return x
class double_conv(nn.Module):
'''(conv => BN => ReLU) * 2'''
def __init__(self, in_ch, out_ch):
super(double_conv, self).__init__()
self.conv = nn.Sequential(
nn.Conv2d(in_ch, out_ch, 3, padding=1),
nn.BatchNorm2d(out_ch),
nn.ReLU(inplace=True),
nn.Conv2d(out_ch, out_ch, 3, padding=1),
nn.BatchNorm2d(out_ch),
nn.ReLU(inplace=True)
)
def forward(self, x):
x = self.conv(x)
return x
class inconv(nn.Module):
def __init__(self, in_ch, out_ch):
super(inconv, self).__init__()
self.conv = double_conv(in_ch, out_ch)
def forward(self, x):
x = self.conv(x)
return x
class down(nn.Module):
def __init__(self, in_ch, out_ch):
super(down, self).__init__()
self.mpconv = nn.Sequential(
nn.MaxPool2d(2),
double_conv(in_ch, out_ch)
)
def forward(self, x):
x = self.mpconv(x)
return x
class up(nn.Module):
def __init__(self, in_ch, out_ch, bilinear=True):
super(up, self).__init__()
# would be a nice idea if the upsampling could be learned too,
# but my machine do not have enough memory to handle all those weights
if bilinear:
self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
else:
self.up = nn.ConvTranspose2d(in_ch // 2, in_ch // 2, 2, stride=2)
self.conv = double_conv(in_ch, out_ch)
def forward(self, x1, x2):
x1 = self.up(x1)
# input is CHW
diffY = x2.size()[2] - x1.size()[2]
diffX = x2.size()[3] - x1.size()[3]
x1 = F.pad(x1, (diffX // 2, diffX - diffX // 2,
diffY // 2, diffY - diffY // 2))
# for padding issues, see
# https://github.com/HaiyongJiang/U-Net-Pytorch-Unstructured-Buggy/commit/0e854509c2cea854e247a9c615f175f76fbb2e3a
# https://github.com/xiaopeng-liao/Pytorch-UNet/commit/8ebac70e633bac59fc22bb5195e513d5832fb3bd
x = torch.cat([x2, x1], dim=1)
x = self.conv(x)
return x
class outconv(nn.Module):
def __init__(self, in_ch, out_ch):
super(outconv, self).__init__()
self.conv = nn.Conv2d(in_ch, out_ch, 1)
self.activ = nn.Tanh()
def forward(self, x):
# x = self.activ(self.conv(x))
x = self.conv(x)
return x | 3,438 | 28.646552 | 122 | py |
InDuDoNet | InDuDoNet-main/network/indudonet.py | """
MICCAI2021: ``InDuDoNet: An Interpretable Dual Domain Network for CT Metal Artifact Reduction''
paper link: https://arxiv.org/pdf/2109.05298.pdf
"""
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from odl.contrib import torch as odl_torch
from .priornet import UNet
import sys
#sys.path.append("deeplesion/")
from .build_gemotry import initialization, build_gemotry
para_ini = initialization()
fp = build_gemotry(para_ini)
op_modfp = odl_torch.OperatorModule(fp)
op_modpT = odl_torch.OperatorModule(fp.adjoint)
filter = torch.FloatTensor([[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]]) / 9 # for initialization
filter = filter.unsqueeze(dim=0).unsqueeze(dim=0)
class InDuDoNet (nn.Module):
def __init__(self, args):
super(InDuDoNet, self).__init__()
self.S = args.S # Stage number S includes the initialization process
self.iter = self.S - 1 # not include the initialization process
self.num_u = args.num_channel + 1 # concat extra 1 term
self.num_f = args.num_channel + 2 # concat extra 2 terms
self.T = args.T
# stepsize
self.eta1const = args.eta1
self.eta2const = args.eta2
self.eta1 = torch.Tensor([self.eta1const]) # initialization for eta1 at all stages
self.eta2 = torch.Tensor([self.eta2const]) # initialization for eta2 at all stages
self.eta1S = self.make_coeff(self.S, self.eta1) # learnable in iterative process
self.eta2S = self.make_coeff(self.S, self.eta2)
# weight factor
self.alphaconst = args.alpha
self.alpha = torch.Tensor([self.alphaconst])
self.alphaS = self.make_coeff(self.S, self.alpha) # learnable in iterative process
# priornet
self.priornet = UNet(n_channels=2, n_classes=1, n_filter=32)
# proxNet for initialization
self.proxNet_X0 = CTnet(args.num_channel + 1, self.T) # args.num_channel: the number of channel concatenation 1: gray CT image
self.proxNet_S0 = Projnet(args.num_channel + 1, self.T) # args.num_channel: the number of channel concatenation 1: gray normalized sinogram
# proxNet for iterative process
self.proxNet_Xall = self.make_Xnet(self.S, args.num_channel+1, self.T)
self.proxNet_Sall = self.make_Snet(self.S, args.num_channel+1, self.T)
# Initialization S-domain by convoluting on XLI and SLI, respectively
self.CX_const = filter.expand(args.num_channel, 1, -1, -1)
self.CX = nn.Parameter(self.CX_const, requires_grad=True)
self.CS_const = filter.expand(args.num_channel, 1, -1, -1)
self.CS = nn.Parameter(self.CS_const, requires_grad=True)
self.bn = nn.BatchNorm2d(1)
def make_coeff(self, iters,const):
const_dimadd = const.unsqueeze(dim=0)
const_f = const_dimadd.expand(iters,-1)
coeff = nn.Parameter(data=const_f, requires_grad = True)
return coeff
def make_Xnet(self, iters, channel, T): #
layers = []
for i in range(iters):
layers.append(CTnet(channel, T))
return nn.Sequential(*layers)
def make_Snet(self, iters, channel, T):
layers = []
for i in range(iters):
layers.append(Projnet(channel, T))
return nn.Sequential(*layers)
def forward(self, Xma, XLI, M, Sma, SLI, Tr):
# save mid-updating results
ListS = [] # saving the reconstructed normalized sinogram
ListX = [] # saving the reconstructed CT image
ListYS = [] # saving the reconstructed sinogram
# with the channel concatenation and detachment operator (refer to https://github.com/hongwang01/RCDNet) for initializing dual-domain
XZ00 = F.conv2d(XLI, self.CX, stride=1, padding=1)
input_Xini = torch.cat((XLI, XZ00), dim=1) #channel concatenation
XZ_ini = self.proxNet_X0(input_Xini)
X0 = XZ_ini[:, :1, :, :] #channel detachment
XZ = XZ_ini[:, 1:, :, :] #auxiliary variable in image domain
X = X0 # the initialized CT image
SZ00 = F.conv2d(SLI, self.CS, stride=1, padding=1)
input_Sini = torch.cat((SLI, SZ00), dim=1)
SZ_ini = self.proxNet_S0(input_Sini)
S0 = SZ_ini[:, :1, :, :]
SZ = SZ_ini[:, 1:, :, :] # auxiliary variable in sinogram domain
S = S0 # the initialized normalized sinogram
ListS.append(S)
# PriorNet
prior_input = torch.cat((Xma, XLI), dim=1)
Xs = XLI + self.priornet(prior_input)
Y = op_modfp(F.relu(self.bn(Xs)) / 255)
Y = Y / 4.0 * 255 #normalized coefficients
# 1st iteration: Updating X0, S0-->S1
PX= op_modfp(X/255)/ 4.0 * 255
GS = Y * (Y*S - PX) + self.alphaS[0]*Tr * Tr * Y * (Y * S - Sma)
S_next = S - self.eta1S[0]/10*GS
inputS = torch.cat((S_next, SZ), dim=1)
outS = self.proxNet_Sall[0](inputS)
S = outS[:,:1,:,:] # the updated normalized sinogram at the 1th stage
SZ = outS[:,1:,:,:]
ListS.append(S)
ListYS.append(Y*S)
# 1st iteration: Updating X0, S1-->X1
ESX = PX - Y*S
GX = op_modpT((ESX/255) * 4.0)
X_next = X - self.eta2S[0] / 10 * GX
inputX = torch.cat((X_next, XZ), dim=1)
outX = self.proxNet_Xall[0](inputX)
X = outX[:, :1, :, :] # the updated CT image at the 1th stage
XZ = outX[:, 1:, :, :]
ListX.append(X)
for i in range(self.iter):
# updating S
PX = op_modfp(X / 255) / 4.0 * 255
GS = Y * (Y * S - PX) + self.alphaS[i+1] * Tr * Tr * Y * (Y * S - Sma)
S_next = S - self.eta1S[i+1] / 10 * GS
inputS = torch.cat((S_next, SZ), dim=1)
outS = self.proxNet_Sall[i+1](inputS)
S = outS[:, :1, :, :]
SZ = outS[:, 1:, :, :]
ListS.append(S)
ListYS.append(Y * S)
# updating X
ESX = PX - Y * S
GX = op_modpT((ESX / 255) * 4.0)
X_next = X - self.eta2S[i+1] / 10 * GX
inputX = torch.cat((X_next, XZ), dim=1)
outX = self.proxNet_Xall[i+1](inputX)
X = outX[:, :1, :, :]
XZ = outX[:, 1:, :, :]
ListX.append(X)
return ListX, ListS, ListYS
# proxNet_S
class Projnet(nn.Module):
def __init__(self, channel, T):
super(Projnet, self).__init__()
self.channels = channel
self.T = T
self.layer = self.make_resblock(self.T)
def make_resblock(self, T):
layers = []
for i in range(T):
layers.append(
nn.Sequential(nn.Conv2d(self.channels, self.channels, kernel_size=3, stride=1, padding=1, dilation=1),
nn.BatchNorm2d(self.channels),
nn.ReLU(),
nn.Conv2d(self.channels, self.channels, kernel_size=3, stride=1, padding=1, dilation=1),
nn.BatchNorm2d(self.channels),
))
return nn.Sequential(*layers)
def forward(self, input):
S = input
for i in range(self.T):
S = F.relu(S + self.layer[i](S))
return S
# proxNet_X
class CTnet(nn.Module):
def __init__(self, channel, T):
super(CTnet, self).__init__()
self.channels = channel
self.T = T
self.layer = self.make_resblock(self.T)
def make_resblock(self, T):
layers = []
for i in range(T):
layers.append(nn.Sequential(
nn.Conv2d(self.channels, self.channels, kernel_size=3, stride=1, padding=1, dilation=1),
nn.BatchNorm2d(self.channels),
nn.ReLU(),
nn.Conv2d(self.channels, self.channels, kernel_size=3, stride=1, padding=1, dilation=1),
nn.BatchNorm2d(self.channels),
))
return nn.Sequential(*layers)
def forward(self, input):
X = input
for i in range(self.T):
X = F.relu(X + self.layer[i](X))
return X
| 8,679 | 41.54902 | 168 | py |
InDuDoNet | InDuDoNet-main/CLINIC_metal/preprocess_clinic/utils/torch.py | """Helper functions for torch
"""
__all__ = [
"get_device", "is_cuda", "copy_model", "find_layer", "to_npy", "get_last_checkpoint",
"print_model", "save_graph", "backprop_on", "backprop_off", "add_post", "flatten_model",
"FunctionModel"]
import os
import os.path as path
import numpy as np
import torch
import torch.nn as nn
from copy import copy
from .misc import read_dir
from collections import OrderedDict
def get_device(model):
return next(model.parameters()).device
def is_cuda(model):
return next(model.parameters()).is_cuda
def copy_model(model):
"""shallow copy a model
"""
if len(list(model.children())) == 0: return model
model_ = copy(model)
model_._modules = copy(model_._modules)
for k, m in model._modules.items():
model_._modules[k] = copy_model(m)
return model_
def find_layer(module, filter_fcn):
def find_layer_(module, found):
for k, m in module.named_children():
if filter_fcn(m): found.append((module, k))
else: find_layer_(m, found)
found = []
find_layer_(module, found)
return found
class FunctionModel(nn.Module):
def __init__(self, fcn):
super(FunctionModel, self).__init__()
self.fcn = fcn
def forward(self, *inputs):
return self.fcn(*inputs)
def to_npy(*tensors, squeeze=False):
if len(tensors) == 1:
if squeeze: return tensors[0].detach().cpu().numpy().squeeze()
else: return tensors[0].detach().cpu().numpy()
else:
if squeeze: return [t.detach().cpu().numpy().squeeze() for t in tensors]
else: return [t.detach().cpu().numpy() for t in tensors]
def set_requires_grad(*nets, requires_grad=False):
for net in nets:
if net is not None:
for param in net.parameters():
param.requires_grad = requires_grad
def backprop_on(*nets): set_requires_grad(*nets, requires_grad=True)
def backprop_off(*nets): set_requires_grad(*nets, requires_grad=False)
def get_last_checkpoint(checkpoint_dir, predicate=None, pattern=None):
if predicate is None:
predicate = lambda x: x.endswith('pth') or x.endswith('pt')
checkpoints = read_dir(checkpoint_dir, predicate)
if len(checkpoints) == 0:
return "", 0
checkpoints = sorted(checkpoints, key=lambda x: path.getmtime(x))
checkpoint = checkpoints[-1]
if pattern is None:
pattern = lambda x: int(path.basename(x).split('_')[-1].split('.')[0])
return checkpoint, pattern(checkpoint)
def print_model(model): print(get_graph(model))
def save_graph(model, graph_file):
with open(graph_file, 'w') as f: f.write(get_graph(model))
def get_graph(model):
def get_graph_(model, param_cnts):
model_str = ""
if hasattr(model, 'parameters'):
model_str += model.__repr__() + "\n"
parameters = [p for p in model.parameters() if p.requires_grad]
num_parameters = sum([np.prod(p.size()) for p in parameters])
param_cnts.append(num_parameters)
else:
for k in model.__dir__():
if not k.startswith("_"):
v = getattr(model, k)
if hasattr(v, 'parameters'):
model_str += k + ":\n"
model_str += get_graph_(v, param_cnts)
return model_str
model_str = ""
param_cnts = []
model_str += '============ Model Initialized ============\n'
model_str += get_graph_(model, param_cnts)
model_str += '===========================================\n'
model_str += "Number of parameters: {:.4e}\n".format(sum(param_cnts))
return model_str
def add_post(loader, post_fcn):
class LoaderWrapper(object):
def __init__(self, loader, post_fcn):
self.loader = loader
self.post_fcn = post_fcn
def __getattribute__(self, name):
if not name.startswith("__") and name not in object.__getattribute__(self, "__dict__") :
return getattr(object.__getattribute__(self, "loader"), name)
return object.__getattribute__(self, name)
def __len__(self): return len(self.loader)
def __iter__(self):
for data in self.loader:
yield self.post_fcn(data)
return LoaderWrapper(loader, post_fcn)
def flatten_model(model):
def flatten_model_(model, output):
model_list = list(model.children())
if len(model_list) == 1: model = model_list[0]
if type(model) is nn.Sequential:
for m in model.children():
flatten_model_(m, output)
else:
output.append(model)
output = []
flatten_model_(model, output)
return nn.Sequential(*output)
| 4,786 | 28.549383 | 100 | py |
InDuDoNet | InDuDoNet-main/CLINIC_metal/preprocess_clinic/utils/__init__.py | from .misc import *
from .torch import *
from .log import Logger | 64 | 20.666667 | 23 | py |
Input-Specific-Certification | Input-Specific-Certification-main/zipdata.py | import multiprocessing
import os.path as op
from threading import local
from zipfile import ZipFile, BadZipFile
from PIL import Image
from io import BytesIO
import torch.utils.data as data
_VALID_IMAGE_TYPES = ['.jpg', '.jpeg', '.tiff', '.bmp', '.png']
class ZipData(data.Dataset):
_IGNORE_ATTRS = {'_zip_file'}
def __init__(self, path, map_file,
transform=None, target_transform=None,
extensions=None):
self._path = path
if not extensions:
extensions = _VALID_IMAGE_TYPES
self._zip_file = ZipFile(path)
self.zip_dict = {}
self.samples = []
self.transform = transform
self.target_transform = target_transform
self.class_to_idx = {}
with open(map_file, 'r') as f:
for line in iter(f.readline, ""):
line = line.strip()
if not line:
continue
cls_idx = [l for l in line.split('\t') if l]
if not cls_idx:
continue
assert len(cls_idx) >= 2, "invalid line: {}".format(line)
idx = int(cls_idx[1])
cls = cls_idx[0]
del cls_idx
at_idx = cls.find('@')
assert at_idx >= 0, "invalid class: {}".format(cls)
cls = cls[at_idx + 1:]
if cls.startswith('/'):
# Python ZipFile expects no root
cls = cls[1:]
assert cls, "invalid class in line {}".format(line)
prev_idx = self.class_to_idx.get(cls)
assert prev_idx is None or prev_idx == idx, "class: {} idx: {} previously had idx: {}".format(
cls, idx, prev_idx
)
self.class_to_idx[cls] = idx
for fst in self._zip_file.infolist():
fname = fst.filename
target = self.class_to_idx.get(fname)
if target is None:
continue
if fname.endswith('/') or fname.startswith('.') or fst.file_size == 0:
continue
ext = op.splitext(fname)[1].lower()
if ext in extensions:
self.samples.append((fname, target))
assert len(self), "No images found in: {} with map: {}".format(self._path, map_file)
def __repr__(self):
return 'ZipData({}, size={})'.format(self._path, len(self))
def __getstate__(self):
return {
key: val if key not in self._IGNORE_ATTRS else None
for key, val in self.__dict__.iteritems()
}
def __getitem__(self, index):
proc = multiprocessing.current_process()
pid = proc.pid # get pid of this process.
if pid not in self.zip_dict:
self.zip_dict[pid] = ZipFile(self._path)
zip_file = self.zip_dict[pid]
if index >= len(self) or index < 0:
raise KeyError("{} is invalid".format(index))
path, target = self.samples[index]
try:
sample = Image.open(BytesIO(zip_file.read(path))).convert('RGB')
except BadZipFile:
print("bad zip file")
return None, None
if self.transform is not None:
sample = self.transform(sample)
if self.target_transform is not None:
target = self.target_transform(target)
return sample, target
def __len__(self):
return len(self.samples)
| 3,481 | 35.270833 | 110 | py |
Input-Specific-Certification | Input-Specific-Certification-main/certify_iss.py | # evaluate a smoothed classifier on a dataset
import argparse
from time import time
from model import resnet110
from datasets import get_dataset, DATASETS, get_num_classes
import numpy as np
from scipy.stats import norm
from statsmodels.stats.proportion import proportion_confint
import torch
from tqdm import tqdm
parser = argparse.ArgumentParser(description='Certify with FastCertify')
# prepare data, model, output file
parser.add_argument("dataset", default='cifar10', help="which dataset")
parser.add_argument("base_classifier", type=str, help="path to saved pytorch model of base classifier")
parser.add_argument("sigma", type=float, help="noise hyperparameter")
parser.add_argument("outfile", type=str, help="output file")
# hyperparameters for sample size planning
parser.add_argument("--loss_type", choices=['absolute', 'relative'], help="loss type")
parser.add_argument("--max_loss", type=float, default=0.01, help="the tolerable loss of certified radius")
parser.add_argument("--batch_size", type=int, default=200, help="batch size")
parser.add_argument("--max_size", type=int, default=200, help="the maximum sample size")
parser.add_argument('--n0', type=int, default=10, help='the sample size for FastCertify')
parser.add_argument("--alpha", type=float, default=0.001, help="failure probability")
# hyperparameters for the input iteration
parser.add_argument("--skip", type=int, default=1, help="how many examples to skip")
parser.add_argument("--max", type=int, default=-1, help="stop after this many examples")
parser.add_argument("--split", choices=["train", "test"], default="test", help="train or test set")
args = parser.parse_args()
print(args)
def _sample_noise(base_classifier, num_classes, x: torch.tensor, sigma: float, batchnum: int, batch_size) -> np.ndarray:
    """Tally the classifier's predictions on Gaussian-perturbed copies of x.

    Draws `batchnum` batches of `batch_size` noisy replicas each and returns a
    length-`num_classes` histogram of the predicted labels.
    """
    with torch.no_grad():
        tally = np.zeros(num_classes, dtype=int)
        for _ in range(batchnum):
            replicated = x.repeat((batch_size, 1, 1, 1))
            perturbation = torch.randn_like(replicated, device='cuda') * sigma
            labels = base_classifier(replicated + perturbation).argmax(1)
            tally += _count_arr(labels.cpu().numpy(), num_classes)
        return tally
def _count_arr(arr: np.ndarray, length: int) -> np.ndarray:
    """Histogram the class indices in `arr` into `length` bins.

    Replaces the original per-element Python loop with `np.bincount`, which
    performs the same counting in C. Assumes every entry of `arr` lies in
    [0, length), which holds here because entries come from argmax over
    `length` class logits.
    """
    return np.bincount(arr, minlength=length)
def _lower_confidence_bound(NA, N, alpha: float) -> float:
    """One-sided (1 - alpha) Clopper-Pearson lower bound on a binomial proportion.

    statsmodels' two-sided interval at level 2*alpha yields the desired
    one-sided lower bound; only the lower endpoint is returned.
    """
    lower, _upper = proportion_confint(NA, N, alpha=2 * alpha, method="beta")
    return lower
def generate_iss(loss, batch_size, upper, sigma, alpha, loss_type) -> dict:
    """Precompute the input-specific sample-size table.

    For each candidate top-class probability pa on a grid [0.5, 1.0] (step
    0.001), find the smallest number of batches whose certified radius is
    within the tolerated `loss` (absolute or relative) of the radius achieved
    with the maximum budget `upper` batches. Returns {pa: batch_count}.
    """
    lookup = {}
    full_size = upper * batch_size
    pa_grid = list(np.arange(500 + 1) * 0.001 + 0.5)

    if loss_type == 'absolute':
        search_start = 0  # the table is monotone, so resume from the last hit
        for pa in pa_grid:
            lookup[pa] = upper
            best_radius = sigma * norm.ppf(
                _lower_confidence_bound(full_size * pa, full_size, alpha))
            target = best_radius - loss
            if target <= 0:
                lookup[pa] = 0
                continue
            for batches in range(search_start, upper + 1):
                n = batches * batch_size
                if sigma * norm.ppf(_lower_confidence_bound(n * pa, n, alpha)) >= target:
                    lookup[pa] = batches
                    search_start = batches
                    break

    if loss_type == 'relative':
        for pa in pa_grid:
            lookup[pa] = upper
            best_radius = sigma * norm.ppf(
                _lower_confidence_bound(full_size * pa, full_size, alpha))
            target = best_radius * (1 - loss)
            if target <= 0:
                lookup[pa] = 0
                continue
            for batches in range(upper + 1):
                n = batches * batch_size
                if sigma * norm.ppf(_lower_confidence_bound(n * pa, n, alpha)) >= target:
                    lookup[pa] = batches
                    break

    return lookup
def find_opt_batchnum(iss, pa_lower, pa_upper):
    """Look up the batch budget for a confidence interval [pa_lower, pa_upper].

    `iss` maps grid probabilities (ascending insertion order) to batch counts.
    Both interval endpoints are clipped to [0, 1], then each is rounded to the
    surrounding grid points; the answer is the max batch count over those
    candidates (conservative for either endpoint).
    """
    grid_points = list(iss.keys())
    lo = np.clip(pa_lower, 0.0, 1.0)
    hi = np.clip(pa_upper, 0.0, 1.0)
    for idx, p in enumerate(grid_points):
        if lo <= p:
            best = max(iss[grid_points[max(0, idx - 1)]], iss[p])
            break
    for idx, p in enumerate(grid_points):
        if hi <= p:
            best = max(best, iss[grid_points[max(0, idx - 1)]], iss[p])
            break
    return best
if __name__ == "__main__":
    t_start = time()
    # ----- hyper-parameters -----
    batch_size = args.batch_size
    n0 = args.n0 // batch_size  # batches used for the cheap first guess
    alpha = args.alpha
    num_classes = 10
    sigma = args.sigma
    loss = args.max_loss
    upper = args.max_size // batch_size  # cap on batches per input
    loss_type = args.loss_type
    model = resnet110().cuda()
    # load the base classifier
    checkpoint = torch.load(args.base_classifier)
    if checkpoint is not None:
        print('==> Resuming from checkpoint..')
        model.load_state_dict(checkpoint['net'])
    model.eval()
    # prepare output file (kept open for the whole run; closed at the end)
    f = open(args.outfile, 'w')
    print("idx\tlabel\tpredict\tpA\tsamplesize\tradius\tcorrect\ttime_forward", file=f, flush=True)
    # iterate through the dataset
    dataset = get_dataset(args.dataset, args.split)
    certify_num_total = 0
    sample_size_total = 0
    radius_total = 0
    grid = [0.25, 0.50, 0.75, 1.0, 1.25, 1.5, 1.75, 2.0, 2.25]
    # fix: np.int was deprecated in NumPy 1.20 and removed in 1.24; the builtin
    # int is the documented drop-in replacement
    cnt_grid_hard = np.zeros((len(grid) + 1,), dtype=int)
    # precompute the input-specific sample-size table once
    t1 = time()
    iss = generate_iss(loss, batch_size, upper, sigma, alpha, loss_type)
    t2 = time()
    time_iss = t2 - t1
    for i in tqdm(range(len(dataset))):
        # only certify every args.skip examples, and stop after args.max examples
        if i % args.skip != 0:
            continue
        if i == args.max:
            break
        (x, label) = dataset[i]
        certify_num_total += 1
        t1 = time()
        # certify the prediction of g around x
        x = x.cuda()
        counts_prediction = _sample_noise(model, num_classes, x, sigma, n0, batch_size)
        # use these samples to take a guess at the top class
        prediction_uncertain = counts_prediction.argmax().item()
        pa_lower, pa_upper = proportion_confint(counts_prediction[prediction_uncertain].item(), n0 * batch_size, alpha,
                                                method="beta")
        # compute the optimal batchnum
        opt_batchnum = find_opt_batchnum(iss, pa_lower, pa_upper)
        sample_size = opt_batchnum * batch_size
        # forward
        if sample_size != 0:
            # draw more samples of f(x + epsilon)
            # NOTE: counts_certification aliases counts_prediction (+= mutates in
            # place); harmless here because counts_prediction is not reused later
            counts_certification = counts_prediction
            counts_certification += _sample_noise(model, num_classes, x, sigma, opt_batchnum - n0, batch_size)
            # use these samples to estimate a lower bound on pA
            nA = counts_certification[prediction_uncertain].item()
            pABar = _lower_confidence_bound(nA, sample_size, alpha)
            if pABar < 0.5:
                prediction = -1  # abstain
                radius = 0
            else:
                prediction = prediction_uncertain
                radius = sigma * norm.ppf(pABar)
        else:
            pABar = pa_lower
            prediction = -1
            radius = 0
        t2 = time()
        sample_size_total += sample_size
        correct = int(prediction == label)
        if correct == 1:
            # accumulate certified-accuracy-at-radius counters only for correct
            # predictions (incorrect/abstained inputs contribute radius 0 to ACR)
            cnt_grid_hard[0] += 1
            radius_total += radius
            for j in range(len(grid)):
                if radius >= grid[j]:
                    cnt_grid_hard[j + 1] += 1
        time_forward = t2 - t1
        print(f"{i}\t{label}\t{prediction}\t{pABar:.3f}\t{sample_size}\t{radius:.3f}\t{correct}\t{time_forward:.3f}",
              file=f, flush=True)
    t_end = time()
    print(f'===Certification Summary({loss_type})===', file=f, flush=True)
    print(
        f"image number={certify_num_total}, total time={t_end - t_start}, time_iss={time_iss}, total sample size={sample_size_total}, loss control={100 * loss:.2f}%, average radius={radius_total/certify_num_total:.3f}",
        file=f, flush=True)
    print('Radius: 0.0 Number: {} Acc: {}%'.format(
        cnt_grid_hard[0], cnt_grid_hard[0] / certify_num_total * 100), file=f, flush=True)
    for j in range(len(grid)):
        print('Radius: {} Number: {} Acc: {}%'.format(
            grid[j], cnt_grid_hard[j + 1], cnt_grid_hard[j + 1] / certify_num_total * 100), file=f, flush=True)
    print('ACR: {}'.format(radius_total / certify_num_total), file=f, flush=True)
    f.close()
| 8,532 | 38.50463 | 219 | py |
Input-Specific-Certification | Input-Specific-Certification-main/model.py | '''
ResNet110 for Cifar-10
References:
[1] K. He, X. Zhang, S. Ren, and J. Sun. Deep residual learning for image recognition. In CVPR, 2016.
[2] K. He, X. Zhang, S. Ren, and J. Sun. Identity mappings in deep residual networks. In ECCV, 2016.
'''
import torch.nn as nn
import torch.nn.functional as F
import math
def conv3x3(in_planes, out_planes, stride=1):
    """Build a bias-free 3x3 convolution with padding 1 (shape-preserving at stride 1)."""
    return nn.Conv2d(
        in_planes, out_planes,
        kernel_size=3, stride=stride, padding=1, bias=False,
    )
class BasicBlock(nn.Module):
    """Basic residual block: conv3x3-BN-ReLU-conv3x3-BN plus an identity (or
    downsampled) shortcut, followed by a final ReLU."""

    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # shortcut path: identity unless a downsample module was supplied
        shortcut = x if self.downsample is None else self.downsample(x)
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.bn2(self.conv2(y))
        y += shortcut
        return self.relu(y)
class ResNet_Cifar(nn.Module):
    """ResNet for 32x32 inputs: a single 3x3 stem conv followed by three
    stages (16w/32w/64w channels, strides 1/2/2) and an 8x8 average pool."""

    def __init__(self, block, layers, width=1, num_classes=10):
        super(ResNet_Cifar, self).__init__()
        self.inplanes = 16
        self.conv1 = nn.Conv2d(3, 16, kernel_size=3,
                               stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(16)
        self.relu = nn.ReLU(inplace=True)
        self.layer1 = self._make_layer(block, 16 * width, layers[0])
        self.layer2 = self._make_layer(block, 32 * width, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 64 * width, layers[2], stride=2)
        self.avgpool = nn.AvgPool2d(8, stride=1)
        self.fc = nn.Linear(64 * block.expansion * width, num_classes)
        self._init_weights()

    def _init_weights(self):
        # He-style init for convolutions; BatchNorm starts as the identity.
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                fan_out = module.kernel_size[0] * module.kernel_size[1] * module.out_channels
                module.weight.data.normal_(0, math.sqrt(2. / fan_out))
            elif isinstance(module, nn.BatchNorm2d):
                module.weight.data.fill_(1)
                module.bias.data.zero_()

    def _make_layer(self, block, planes, blocks, stride=1):
        # A 1x1-conv + BN shortcut is needed whenever spatial size or channel
        # count changes across the stage boundary.
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        stage = [block(self.inplanes, planes, stride, downsample)]
        self.inplanes = planes * block.expansion  # subsequent blocks see the widened input
        stage.extend(block(self.inplanes, planes) for _ in range(1, blocks))
        return nn.Sequential(*stage)

    def forward(self, x):
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.layer3(self.layer2(self.layer1(out)))
        out = self.avgpool(out)
        out = out.view(out.size(0), -1)
        return self.fc(out)
def resnet110(**kwargs):
    """Construct the 110-layer CIFAR ResNet (three stages of 18 BasicBlocks, width 1)."""
    return ResNet_Cifar(BasicBlock, [18, 18, 18], width=1, **kwargs)
| 3,129 | 27.198198 | 101 | py |
Input-Specific-Certification | Input-Specific-Certification-main/datasets.py | import bisect
import os
import pickle
from PIL import Image
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, datasets
from torchvision.datasets.utils import check_integrity
from typing import *
from zipdata import ZipData
# set this environment variable to the location of your imagenet directory if you want to read ImageNet data.
# make sure your val directory is preprocessed to look like the train directory, e.g. by running this script
# https://raw.githubusercontent.com/soumith/imagenetloader.torch/master/valprep.sh
# environment variable naming the local ImageNet root directory
IMAGENET_LOC_ENV = "IMAGENET_DIR"
# fixed ImageNet location on the Philly cluster's HDFS mount
IMAGENET_ON_PHILLY_DIR = "/hdfs/public/imagenet/2012/"
# list of all datasets
DATASETS = ["imagenet", "imagenet32", "cifar10"]
def get_dataset(dataset: str, split: str) -> Dataset:
    """Return the requested split of `dataset` as a PyTorch Dataset object.

    Raises:
        ValueError: for an unrecognized dataset name. (The original fell
        through and returned None, which only failed later at the use site.)
    """
    if dataset == "imagenet":
        if "PT_DATA_DIR" in os.environ:  # running on Philly
            return _imagenet_on_philly(split)
        return _imagenet(split)
    if dataset == "imagenet32":
        return _imagenet32(split)
    if dataset == "cifar10":
        return _cifar10(split)
    raise ValueError(f"unknown dataset: {dataset!r}")
def get_num_classes(dataset: str) -> int:
    """Return the number of classes in the dataset.

    Handles "imagenet32" as well (it was missing and silently yielded None):
    the downsampled-ImageNet loader (ImageNetDS) keeps the full 1000-class
    label set, just as full-resolution ImageNet does.

    Raises:
        ValueError: for an unrecognized dataset name.
    """
    if dataset in ("imagenet", "imagenet32"):
        return 1000
    if dataset == "cifar10":
        return 10
    raise ValueError(f"unknown dataset: {dataset!r}")
def get_normalize_layer(dataset: str) -> torch.nn.Module:
    """Return the channel-standardization layer for the given dataset."""
    if dataset == "imagenet":
        return NormalizeLayer(_IMAGENET_MEAN, _IMAGENET_STDDEV)
    # imagenet32 deliberately reuses the CIFAR-10 statistics
    if dataset in ("cifar10", "imagenet32"):
        return NormalizeLayer(_CIFAR10_MEAN, _CIFAR10_STDDEV)
def get_input_center_layer(dataset: str) -> torch.nn.Module:
    """Return the mean-subtraction (input centering) layer for the given dataset."""
    means_by_dataset = {"imagenet": _IMAGENET_MEAN, "cifar10": _CIFAR10_MEAN}
    if dataset in means_by_dataset:
        return InputCenterLayer(means_by_dataset[dataset])
# Per-channel (RGB) statistics used by the normalization/centering layers.
_IMAGENET_MEAN = [0.485, 0.456, 0.406]
_IMAGENET_STDDEV = [0.229, 0.224, 0.225]
_CIFAR10_MEAN = [0.4914, 0.4822, 0.4465]
_CIFAR10_STDDEV = [0.2023, 0.1994, 0.2010]
def _cifar10(split: str) -> Dataset:
    """CIFAR-10 splits: augmented train, plain test, or the hybrid mini splits."""
    cache_root = os.path.join('datasets', 'dataset_cache')
    if split == "train":
        augment = transforms.Compose([
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor()
        ])
        return datasets.CIFAR10(cache_root, train=True, download=True, transform=augment)
    if split == "test":
        return datasets.CIFAR10(cache_root, train=False, download=True, transform=transforms.ToTensor())
    if split in ["mini_labelled", "mini_unlabelled", "mini_test"]:
        return HybridCifarDataset(split)
    raise Exception("Unknown split name.")
def _imagenet_on_philly(split: str) -> Dataset:
    """ImageNet splits read from zip archives on the Philly cluster mount."""
    archives = {
        "train": (os.path.join(IMAGENET_ON_PHILLY_DIR, 'train.zip'),
                  os.path.join(IMAGENET_ON_PHILLY_DIR, 'train_map.txt')),
        "test": (os.path.join(IMAGENET_ON_PHILLY_DIR, 'val.zip'),
                 os.path.join(IMAGENET_ON_PHILLY_DIR, 'val_map.txt')),
    }
    if split == "train":
        archive, index_file = archives["train"]
        return ZipData(archive, index_file,
                       transforms.Compose([
                           transforms.RandomResizedCrop(224),
                           transforms.RandomHorizontalFlip(),
                           transforms.ToTensor(),
                       ]))
    elif split == "test":
        archive, index_file = archives["test"]
        return ZipData(archive, index_file,
                       transforms.Compose([
                           transforms.Resize(256),
                           transforms.CenterCrop(224),
                           transforms.ToTensor(),
                       ]))
def _imagenet(split: str) -> Dataset:
    """Full-resolution ImageNet read from $IMAGENET_DIR via ImageFolder.

    Raises:
        RuntimeError: if the IMAGENET_DIR environment variable is not set.
    """
    if IMAGENET_LOC_ENV not in os.environ:
        raise RuntimeError("environment variable for ImageNet directory not set")

    root = os.environ[IMAGENET_LOC_ENV]  # renamed from `dir`, which shadowed the builtin
    if split == "train":
        subdir = os.path.join(root, "train")
        transform = transforms.Compose([
            # fix: RandomSizedCrop was deprecated and later removed from
            # torchvision; RandomResizedCrop is its drop-in replacement
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor()
        ])
    elif split == "test":
        subdir = os.path.join(root, "val")
        transform = transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor()
        ])
    return datasets.ImageFolder(subdir, transform)
def _imagenet32(split: str) -> Dataset:
    """Downsampled (32x32) ImageNet splits backed by ImageNetDS."""
    root = os.path.join('datasets', 'Imagenet32')
    if split == "train":
        augment = transforms.Compose([
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor()])
        return ImageNetDS(root, 32, train=True, transform=augment)
    elif split == "test":
        return ImageNetDS(root, 32, train=False, transform=transforms.ToTensor())
class NormalizeLayer(torch.nn.Module):
    """Standardize the channels of an image batch with dataset mean/stddev.

    The Gaussian smoothing noise is added *before* standardization so that
    certified radii live in the original pixel coordinates; this is therefore
    the first layer of the classifier rather than a preprocessing transform.
    """

    def __init__(self, means: List[float], sds: List[float]):
        """
        :param means: the channel means
        :param sds: the channel standard deviations
        """
        super(NormalizeLayer, self).__init__()
        self.means = torch.tensor(means).cuda()
        self.sds = torch.tensor(sds).cuda()

    def forward(self, input: torch.tensor):
        batch_size, _num_channels, height, width = input.shape
        # expand the per-channel stats to the full NCHW image shape
        mean_map = self.means.repeat((batch_size, height, width, 1)).permute(0, 3, 1, 2)
        sd_map = self.sds.repeat((batch_size, height, width, 1)).permute(0, 3, 1, 2)
        return (input - mean_map) / sd_map
class InputCenterLayer(torch.nn.Module):
    """Center the channels of an image batch by subtracting the dataset mean.

    As with NormalizeLayer, the smoothing noise is added before centering so
    certified radii are measured in original pixel coordinates; hence this is
    the classifier's first layer rather than a preprocessing step.
    """

    def __init__(self, means: List[float]):
        """
        :param means: the channel means
        """
        super(InputCenterLayer, self).__init__()
        self.means = torch.tensor(means).cuda()

    def forward(self, input: torch.tensor):
        batch_size, _num_channels, height, width = input.shape
        mean_map = self.means.repeat((batch_size, height, width, 1)).permute(0, 3, 1, 2)
        return input - mean_map
# from https://github.com/hendrycks/pre-training
class ImageNetDS(Dataset):
    """`Downsampled ImageNet <https://patrykchrabaszcz.github.io/Imagenet32/>`_ Datasets.

    Args:
        root (string): Root directory of dataset where directory
            ``ImagenetXX_train`` exists.
        img_size (int): Dimensions of the images: 64,32,16,8
        train (bool, optional): If True, creates dataset from training set, otherwise
            creates from test set.
        transform (callable, optional): A function/transform that takes in an PIL image
            and returns a transformed version. E.g, ``transforms.RandomCrop``
        target_transform (callable, optional): A function/transform that takes in the
            target and transforms it.
    """
    base_folder = 'Imagenet{}_train'

    train_list = [
        ['train_data_batch_1', ''],
        ['train_data_batch_2', ''],
        ['train_data_batch_3', ''],
        ['train_data_batch_4', ''],
        ['train_data_batch_5', ''],
        ['train_data_batch_6', ''],
        ['train_data_batch_7', ''],
        ['train_data_batch_8', ''],
        ['train_data_batch_9', ''],
        ['train_data_batch_10', '']
    ]
    test_list = [
        ['val_data', ''],
    ]

    def __init__(self, root, img_size, train=True, transform=None, target_transform=None):
        self.root = os.path.expanduser(root)
        self.transform = transform
        self.target_transform = target_transform
        self.train = train  # training set or test set
        self.img_size = img_size
        # instance attribute intentionally shadows the class-level template
        self.base_folder = self.base_folder.format(img_size)

        # integrity check intentionally disabled (md5 sums are not shipped):
        # if not self._check_integrity():
        #     raise RuntimeError('Dataset not found or corrupted.')  # TODO

        # now load the pickled numpy arrays; labels are shifted from 1-based
        # to 0-based with `label - 1`
        if self.train:
            self.train_data = []
            self.train_labels = []
            for fentry in self.train_list:
                f = fentry[0]
                file = os.path.join(self.root, self.base_folder, f)
                with open(file, 'rb') as fo:
                    entry = pickle.load(fo)
                    self.train_data.append(entry['data'])
                    self.train_labels += [label - 1 for label in entry['labels']]
                    self.mean = entry['mean']  # last batch's mean wins, as in the original

            self.train_data = np.concatenate(self.train_data)
            # fix: reshape used a hard-coded 32; use img_size so the advertised
            # 64/16/8 variants work too (identical behavior for img_size == 32)
            self.train_data = self.train_data.reshape(
                (self.train_data.shape[0], 3, self.img_size, self.img_size))
            self.train_data = self.train_data.transpose((0, 2, 3, 1))  # convert to HWC
        else:
            f = self.test_list[0][0]
            file = os.path.join(self.root, f)
            # fix: use a context manager so the handle is closed even if
            # pickle.load raises (the train branch already did this)
            with open(file, 'rb') as fo:
                entry = pickle.load(fo)
            self.test_data = entry['data']
            self.test_labels = [label - 1 for label in entry['labels']]
            self.test_data = self.test_data.reshape(
                (self.test_data.shape[0], 3, self.img_size, self.img_size))
            self.test_data = self.test_data.transpose((0, 2, 3, 1))  # convert to HWC

    def __getitem__(self, index):
        """
        Args:
            index (int): Index

        Returns:
            tuple: (image, target) where target is index of the target class.
        """
        if self.train:
            img, target = self.train_data[index], self.train_labels[index]
        else:
            img, target = self.test_data[index], self.test_labels[index]

        # return a PIL Image for consistency with all other datasets
        img = Image.fromarray(img)

        if self.transform is not None:
            img = self.transform(img)
        if self.target_transform is not None:
            target = self.target_transform(target)

        return img, target

    def __len__(self):
        if self.train:
            return len(self.train_data)
        return len(self.test_data)

    def _check_integrity(self):
        # verify md5 of every batch file (currently unused; see __init__)
        root = self.root
        for fentry in (self.train_list + self.test_list):
            filename, md5 = fentry[0], fentry[1]
            fpath = os.path.join(root, self.base_folder, filename)
            if not check_integrity(fpath, md5):
                return False
        return True
## To use this dataset, please contact the authors of https://arxiv.org/pdf/1905.13736.pdf
# to get access to this pickle file (ti_top_50000_pred_v3.1.pickle) containing the dataset.
class TiTop50KDataset(Dataset):
    """500K images closest to the CIFAR-10 dataset from
    the 80 Million Tiny Images dataset (see __init__ for the pickle schema)."""

    def __init__(self):
        super(TiTop50KDataset, self).__init__()
        dataset_path = os.path.join('datasets', 'ti_top_50000_pred_v3.1.pickle')
        # fix: open via a context manager instead of pickle.load(open(...)),
        # which leaked the file handle
        with open(dataset_path, 'rb') as f:
            self.dataset_dict = pickle.load(f)
        # pickle keys: {'data', 'extrapolated_targets', 'ti_index',
        #               'prediction_model', 'prediction_model_epoch'}
        self.length = len(self.dataset_dict['data'])
        self.transforms = transforms.Compose([
            transforms.Resize((32,32)),
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor()
        ])

    def __getitem__(self, index):
        img = self.dataset_dict['data'][index]
        target = self.dataset_dict['extrapolated_targets'][index]
        img = Image.fromarray(img)
        img = self.transforms(img)
        return img, target

    def __len__(self):
        return self.length
class MultiDatasetsDataLoader(object):
    """Iterate over several dataloaders, yielding (batch, loader_id) pairs in a
    shuffled task order."""

    def __init__(self, task_data_loaders, equal_num_batch=True, start_iteration=0):
        if equal_num_batch:
            # every task contributes as many batches as the first loader has
            per_task_lengths = [len(task_data_loaders[0])] * len(task_data_loaders)
        else:
            per_task_lengths = [len(loader) for loader in task_data_loaders]

        self.task_data_loaders = task_data_loaders
        self.start_iteration = start_iteration
        self.length = sum(per_task_lengths)
        # flat schedule: entry i names the loader to draw from at step i
        self.dataloader_indices = np.hstack([
            np.full(task_len, loader_id)
            for loader_id, task_len in enumerate(per_task_lengths)
        ])

    def __iter__(self):
        self.task_data_iters = [iter(loader) for loader in self.task_data_loaders]
        self.cur_idx = self.start_iteration
        # Shuffle the task schedule. (A fixed seed here would synchronize the
        # task sequence across distributed workers while the data itself still
        # differs; the original left those seed calls commented out.)
        np.random.shuffle(self.dataloader_indices)
        return self

    def __next__(self):
        if self.cur_idx == len(self.dataloader_indices):
            raise StopIteration
        loader_id = self.dataloader_indices[self.cur_idx]
        self.cur_idx += 1
        return next(self.task_data_iters[loader_id]), loader_id

    next = __next__  # Python 2 compatibility

    def __len__(self):
        return self.length

    @property
    def num_tasks(self):
        return len(self.task_data_iters)
| 14,566 | 36.836364 | 109 | py |
ENCAS | ENCAS-main/evaluate.py | import time
from collections import defaultdict
import json
import torch
import numpy as np
from ofa.imagenet_classification.elastic_nn.utils import set_running_statistics
from networks.attentive_nas_dynamic_model import AttentiveNasDynamicModel
from networks.ofa_mbv3_my import OFAMobileNetV3My
from networks.proxyless_my import OFAProxylessNASNetsMy
from run_manager import get_run_config
from ofa.imagenet_classification.elastic_nn.modules.dynamic_op import DynamicSeparableConv2d
DynamicSeparableConv2d.KERNEL_TRANSFORM_MODE = 1
from run_manager.run_manager_my import RunManagerMy
from utils import set_seed, get_net_info, SupernetworkWrapper
def evaluate_many_configs(supernet_folder_path, configs, if_test=False, config_msunas=None, **kwargs):
accs = []
args = {k: v[0] for k, v in default_kwargs.items()}
if config_msunas is not None:
for key in ['data', 'dataset', 'n_classes', 'trn_batch_size', 'vld_batch_size',
'vld_size', 'n_workers', 'sec_obj']:
args[key] = config_msunas[key]
args['pass_subnet_config_directly'] = True
args['test'] = if_test
args['cutout_size'] = config_msunas.get('cutout_size', 32)
args['reset_running_statistics'] = True
args.update(kwargs)
if 'thresholds' not in args:
args['thresholds'] = None
info_keys_to_return = []
if 'info_keys_to_return' in kwargs:
info_keys_to_return = kwargs['info_keys_to_return']
info_keys_to_return_2_values = defaultdict(list)
args['info_keys_to_return'] = info_keys_to_return
args['supernet_path'] = supernet_folder_path
args['search_space_name'] = kwargs['search_space_name']
args['ensemble_ss_names'] = kwargs['ensemble_ss_names']
# a hack for speed: reuse run_config for all the subnetworks evaluated
# it should change nothing because the subnet architecture is the only
# thing changing in the _for_ loop
run_config = kwargs.get('run_config', None)
for config in configs:
args['subnet'] = config
args['run_config'] = run_config
info = _evaluate_one_config(args)
top1_error = info['top1']
run_config = info['run_config']
accs.append(top1_error)
for key in info_keys_to_return:
info_keys_to_return_2_values[key].append(info[key])
if len(info_keys_to_return) > 0:
return accs, info_keys_to_return_2_values
return accs
def _evaluate_one_config(args):
set_seed(args['random_seed'])
preproc_alphanet = False
if args['pass_subnet_config_directly']:
config = args['subnet']
else:
config = json.load(open(args['subnet']))
if args['search_space_name'] == 'reproduce_nat':
if config['w'] == 1.0:
evaluator = SupernetworkWrapper(n_classes=args['n_classes'], model_path=args['supernet_path'][0],
engine_class_to_use=OFAMobileNetV3My, dataset=args['dataset'],
search_space_name='ofa')
else:
evaluator = SupernetworkWrapper(n_classes=args['n_classes'], model_path=args['supernet_path'][1],
engine_class_to_use=OFAMobileNetV3My, dataset=args['dataset'],
search_space_name='ofa')
subnet, _ = evaluator.sample(config)
subnet = subnet.cuda()
resolution = config['r']
elif args['search_space_name'] == 'ensemble':
ensemble_ss_names = args['ensemble_ss_names']
supernet_paths = args['supernet_path']
ss_name_to_class = {'alphanet': AttentiveNasDynamicModel, 'ofa': OFAMobileNetV3My,
'proxyless': OFAProxylessNASNetsMy}
# some ensembles have missing members which are represented by config that is None
# for ENCAS, also need to remove thresholds
if args['thresholds'] is None:
filtered = filter(lambda conf_p_e: conf_p_e[0] is not None, zip(config, supernet_paths, ensemble_ss_names))
config, supernet_paths, ensemble_ss_names = list(zip(*filtered))
else:
filtered = filter(lambda conf_p_e_t: conf_p_e_t[0] is not None, zip(config, supernet_paths, ensemble_ss_names, args['thresholds']))
config, supernet_paths, ensemble_ss_names, thresholds = list(zip(*filtered))
args['thresholds'] = thresholds
print(f'{supernet_paths=}')
classes_to_use = [ss_name_to_class[ss_name] for ss_name in ensemble_ss_names]
evaluators = [SupernetworkWrapper(n_classes=args['n_classes'], model_path=supernet_path,
engine_class_to_use=encoder_class, dataset=args['dataset'],
search_space_name=ss_name)
for supernet_path, ss_name, encoder_class in zip(supernet_paths, ensemble_ss_names, classes_to_use)]
subnet = [e.sample(c)[0] for e, c in zip(evaluators, config)]
resolution = [conf['r'] for conf in config]
# If normal ENCAS, thresholds are already provided. Otherwise:
if args['thresholds'] is None:
if 'threshold' in config[0]: # ENCAS-joint
# (but the condition is also satisfied if ENCAS was run on subnets extracted from ENCAS-joint)
# (that was a bug, now ENCAS won't execute this code no matter which subnets it uses)
thresholds = [c['threshold'] for c in config]
positions = [c['position'] for c in config]
idx = np.argsort(positions)[::-1]
# don' need to sort positions_list itself?
thresholds = np.array(thresholds)[idx].tolist()
resolution = np.array(resolution)[idx].tolist()
subnet = np.array(subnet)[idx].tolist()
args['thresholds'] = thresholds
else: # not a cascade => can rearrange order
idx = np.argsort(resolution)[::-1]
resolution = np.array(resolution)[idx].tolist()
subnet = np.array(subnet)[idx].tolist()
preproc_alphanet ='alphanet' in ensemble_ss_names
return _evaluate_one_model(
subnet, data_path=args['data'], dataset=args['dataset'], resolution=resolution,
trn_batch_size=args['trn_batch_size'], vld_batch_size=args['vld_batch_size'], num_workers=args['n_workers'],
valid_size=args['vld_size'], is_test=args['test'], measure_latency=args['latency'],
no_logs=(not args['verbose']), reset_running_statistics=args['reset_running_statistics'],
run_config=args.get('run_config', None), sec_obj=args['sec_obj'],
info_keys_to_return=args['info_keys_to_return'], cutout_size=args['cutout_size'], thresholds=args['thresholds'],
if_use_logit_gaps=args['if_use_logit_gaps'], preproc_alphanet=preproc_alphanet)
def _evaluate_one_model(subnet, data_path, dataset='imagenet', resolution=224, trn_batch_size=128,
vld_batch_size=250, num_workers=4, valid_size=None, is_test=True,
measure_latency=None, no_logs=False, reset_running_statistics=True,
run_config=None, sec_obj='flops', info_keys_to_return=(), cutout_size=None, thresholds=None, if_use_logit_gaps=False,
preproc_alphanet=False):
info = get_net_info(subnet, (3, resolution, resolution), measure_latency=measure_latency,
print_info=False, clean=True, if_dont_sum=thresholds is not None)
print(f"{info['flops']=}")
if_return_logit_gaps = 'logit_gaps' in info_keys_to_return
validation_kwargs = {'if_return_outputs': 'output_distr' in info_keys_to_return,
'if_return_logit_gaps': if_return_logit_gaps}
resolution_is_list = type(resolution) is list
if resolution_is_list:
validation_kwargs['resolutions_list'] = resolution_list = resolution
resolution = max(resolution) # collators need max resolution; will downsample in the val loop
# Actually, collators need the first resolution, which in a cascade won't be the largest one
validation_kwargs['thresholds'] = thresholds
if thresholds is not None:
resolution = resolution_list[0]
validation_kwargs['if_use_logit_gaps'] = if_use_logit_gaps
if run_config is None:
run_config = get_run_config(dataset=dataset, data_path=data_path, image_size=resolution, n_epochs=0,
train_batch_size=trn_batch_size, test_batch_size=vld_batch_size, n_worker=num_workers,
valid_size=valid_size, total_epochs=0, dataset_name=dataset, cutout_size=cutout_size,
preproc_alphanet=preproc_alphanet)
data_provider = run_config.data_provider
data_provider.collator_train.set_resolutions([resolution])
data_provider.collator_subtrain.set_resolutions([resolution])
run_config.valid_loader.collate_fn.set_resolutions([resolution])
run_config.test_loader.collate_fn.set_resolutions([resolution])
data_provider.assign_active_img_size(resolution)
run_manager = RunManagerMy(subnet, run_config, no_gpu=False, sec_obj=sec_obj)
if reset_running_statistics:
# same subset size & batch size as during evaluation in training
if not run_manager.is_ensemble:
data_provider.collator_subtrain.set_resolutions([resolution])
data_loader = run_config.random_sub_train_loader(2304 * 6, vld_batch_size, resolution)
set_running_statistics(subnet, data_loader)
else:
for i_net, net_cur in enumerate(subnet):
print(f'Resetting BNs for network {i_net}')
st = time.time()
data_provider.collator_subtrain.set_resolutions([resolution_list[i_net]])
mul_due_to_logit_gaps = 6 # logit gaps differ a lot when comparing 3 and 1
data_loader = run_config.random_sub_train_loader(2304 * mul_due_to_logit_gaps, vld_batch_size,
resolution_list[i_net])
net_cur.cuda()
if hasattr(net_cur, 'reset_running_stats_for_calibration'): # alphanet & attentiveNAS
with torch.no_grad(), torch.cuda.amp.autocast():
net_cur.set_bn_param(0.1, 1e-5)
net_cur.eval()
net_cur.reset_running_stats_for_calibration()
for images, _ in data_loader:
images = images.cuda(non_blocking=True)
out = net_cur(images)
images.cpu(), out.cpu()
del images, out
else:
set_running_statistics(net_cur, data_loader)
ed = time.time()
print(f'BN resetting time for {i_net}: {ed - st}')
print('BNs reset')
loss, dict_of_metrics = run_manager.validate(net=subnet, is_test=is_test, no_logs=no_logs, **validation_kwargs)
top1 = dict_of_metrics['top1']
info['loss'], info['top1'] = loss, top1
if thresholds is not None:
n_not_predicted_per_stage = dict_of_metrics['n_not_predicted_per_stage']
flops_per_stage = info['flops']
if is_test:
data_loader = run_config.test_loader
else:
data_loader = run_config.valid_loader
n_images_total = len(data_loader.dataset)
print(f'{n_images_total=}, {n_not_predicted_per_stage=}')
true_flops = flops_per_stage[0] + sum([n_not_predicted / n_images_total * flops for (n_not_predicted, flops) in
zip(n_not_predicted_per_stage, flops_per_stage[1:])])
info['flops'] = true_flops
print(f'{thresholds=}')
print(info)
info['run_config'] = run_config # a hack
for k in info_keys_to_return:
if k not in info:
info[k] = dict_of_metrics[k]
return info
# these are mostly irrelevant, will be overwritten. TODO: remove
default_kwargs = {
'n_gpus': [1, 'total number of available gpus'],
'gpu': [1, 'number of gpus per evaluation job'],
'data': ['/export/scratch3/aleksand/data/CIFAR/', 'location of the data corpus'],
'dataset': ['cifar10', 'name of the dataset [imagenet, cifar10, cifar100, ...]'],
'n_classes': [10, 'number of classes of the given dataset'],
'n_workers': [8, 'number of workers for dataloaders'],
'vld_size': [5000, 'validation set size, randomly sampled from training set'],
'trn_batch_size': [96, 'train batch size for training'],
'vld_batch_size': [96, 'validation batch size'],
'n_epochs': [0, 'n epochs to train'],
'drop_rate': [0.2, 'dropout rate'],
'drop_connect_rate': [0.0, ''],
'resolution': [224, 'resolution'],
'supernet_path': ['/export/scratch3/aleksand/nsganetv2/data/ofa_mbv3_d234_e346_k357_w1.0',
'path to supernet'],
'subnet': ['', 'location of a json file of ks, e, d, and e'],
'pass_subnet_config_directly': [False, 'Pass config as object instead of file path'],
'config': ['', 'location of a json file of specific model declaration; not relevant for me'],
'init': [None, 'location of initial weight to load'],
'test': [False, 'if evaluate on test set'],
'verbose': [True, ''],
'save': [None, ''],
'reset_running_statistics': [False, 'reset_running_statistics for BN'],
'latency': [None, 'latency measurement settings (gpu64#cpu)'],
'random_seed': [42, 'random seed'],
'teacher_model': [None, ''],
} | 13,651 | 49.940299 | 143 | py |
ENCAS | ENCAS-main/nat.py | import itertools
import os
import time
from concurrent.futures.process import ProcessPoolExecutor
from pathlib import Path
import torch
import torch.nn.functional as F
import torchvision.transforms.functional
from torch.cuda.amp import GradScaler
from ofa.utils import AverageMeter, accuracy
from tqdm import tqdm
from matplotlib import pyplot as plt
import utils
from networks.ofa_mbv3_my import OFAMobileNetV3My
from run_manager import get_run_config
from ofa.imagenet_classification.elastic_nn.utils import set_running_statistics
from networks.attentive_nas_dynamic_model import AttentiveNasDynamicModel
from networks.proxyless_my import OFAProxylessNASNetsMy
from utils import validate_config, get_net_info
from searcher_wrappers.mo_gomea_wrapper import MoGomeaWrapper
from searcher_wrappers.nsga3_wrapper import Nsga3Wrapper
from searcher_wrappers.random_search_wrapper import RandomSearchWrapper
import subset_selectors
import gc
from filelock import FileLock
import dill
from utils_train import CutMixCrossEntropyLoss, LabelSmoothing
os.environ['MKL_THREADING_LAYER'] = 'GNU'
import json
import shutil
import numpy as np
from utils import get_correlation, alphabet_dict, get_metric_complement, setup_logging
from search_space import OFASearchSpace
from search_space.ensemble_ss import EnsembleSearchSpace
from acc_predictor.factory import get_acc_predictor
from pymoo.visualization.scatter import Scatter
plt.rcParams.update({'font.size': 16})
from collections import defaultdict
from utils import set_seed
import re
import yaml
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.enabled = True
class NAT:
    def __init__(self, kwargs):
        """Configure the whole NAT run from a kwargs dict.

        Every option is popped from `kwargs`, falling back to `default_kwargs[...][0]`;
        whatever remains unpopped at the end is dumped to 'config_msunas.yml' via
        `kwargs_copy`. Sections (numbered below): search space & alphabets, logging,
        supernet checkpoints, data/run config, search algorithm, engine-creation
        lambdas, loss functions, misc state, config dump.
        """
        kwargs_copy = dict(kwargs)
        plt.rcParams['axes.grid'] = True
        def get_from_kwargs_or_default_kwargs(key_name):
            # pop() mutates kwargs, so each option is consumed exactly once
            return kwargs.pop(key_name, default_kwargs[key_name][0])
        self.random_seed = kwargs.pop('random_seed', default_kwargs['random_seed'][0])
        set_seed(self.random_seed)
        # 1. search space & alphabets
        self.search_space_name = kwargs.pop('search_space', default_kwargs['search_space'][0])
        search_goal = kwargs.pop('search_goal', default_kwargs['search_goal'][0])
        self.if_cascade = search_goal == 'cascade'
        self.ensemble_ss_names = kwargs.pop('ensemble_ss_names', default_kwargs['ensemble_ss_names'][0])
        alphabet_names = kwargs.pop('alphabet', default_kwargs['alphabet'][0])
        alphabet_paths = [alphabet_dict[alphabet_name] for alphabet_name in alphabet_names]
        if self.search_space_name == 'ensemble':
            self.search_space = EnsembleSearchSpace(self.ensemble_ss_names,
                                                    [{'alphabet': alphabet_name, 'ensemble_size': len(alphabet_names)}
                                                     for alphabet_name in alphabet_names])
            self.alphabets = []
            self.alphabets_lb = []
            for alphabet_path in alphabet_paths:
                # each alphabet file is a single space-separated line of per-gene cardinalities
                with open(alphabet_path, 'r') as f:
                    self.alphabets.append(list(map(int, f.readline().split(' '))))
                with open(alphabet_path.replace('.txt', '_lb.txt'), 'r') as f:
                    self.alphabets_lb.append(list(map(int, f.readline().split(' ')))) #lower bound
            # combined alphabets
            self.alphabet = list(itertools.chain(*self.alphabets))
            self.alphabet_lb = list(itertools.chain(*self.alphabets_lb))
        elif self.search_space_name == 'reproduce_nat':
            assert len(alphabet_names) == 2
            assert alphabet_names[0] == alphabet_names[1]
            alphabet_path = alphabet_paths[0]
            alphabet_name = alphabet_names[0]
            self.search_space = OFASearchSpace(alphabet=alphabet_name)
            with open(alphabet_path, 'r') as f:
                self.alphabet = list(map(int, f.readline().split(' ')))
            with open(alphabet_path.replace('.txt', '_lb.txt'), 'r') as f:
                self.alphabet_lb = list(map(int, f.readline().split(' '))) # lower bound
        # 2. save & log
        self.path_logs = kwargs.pop('path_logs', default_kwargs['path_logs'][0])
        self.resume = kwargs.pop('resume', default_kwargs['resume'][0])
        if self.resume is not None:
            self.resume = os.path.join(self.path_logs, self.resume)
        save_name = kwargs.pop('experiment_name', default_kwargs['experiment_name'][0])
        self.path_logs = os.path.join(self.path_logs, save_name)
        Path(self.path_logs).mkdir(exist_ok=True)
        self.log_file_path = os.path.join(self.path_logs, '_log.txt')
        setup_logging(self.log_file_path)
        print(f'{self.path_logs=}')
        # 3. copy pre-trained supernets
        # copies go into the run folder because training overwrites them in place
        supernet_paths = kwargs.pop('supernet_path', default_kwargs['supernet_path'][0])
        print(f'{supernet_paths=}')
        supernet_paths_true = []
        for supernet_path in supernet_paths:
            # try:
            shutil.copy(supernet_path, self.path_logs)
            # except:
            #     pass
            supernet_paths_true.append(os.path.join(self.path_logs, os.path.basename(supernet_path)))
        self.supernet_paths = supernet_paths_true
        # 4. data
        trn_batch_size = get_from_kwargs_or_default_kwargs('trn_batch_size')
        vld_batch_size = get_from_kwargs_or_default_kwargs('vld_batch_size')
        n_workers = get_from_kwargs_or_default_kwargs('n_workers')
        vld_size = get_from_kwargs_or_default_kwargs('vld_size')
        total_size = get_from_kwargs_or_default_kwargs('total_size')
        data_path = get_from_kwargs_or_default_kwargs('data')
        init_lr = get_from_kwargs_or_default_kwargs('init_lr')
        lr_schedule_type = kwargs.pop('lr_schedule_type', default_kwargs['lr_schedule_type'][0])
        cutout_size = kwargs.pop('cutout_size', default_kwargs['cutout_size'][0])
        weight_decay = kwargs.pop('weight_decay', default_kwargs['weight_decay'][0])
        if_center_crop = kwargs.pop('if_center_crop', default_kwargs['if_center_crop'][0])
        auto_augment = kwargs.pop('auto_augment', default_kwargs['auto_augment'][0])
        resize_scale = kwargs.pop('resize_scale', default_kwargs['resize_scale'][0])
        if_cutmix = kwargs.pop('if_cutmix', default_kwargs['if_cutmix'][0])
        self.iterations = kwargs.pop('iterations', default_kwargs['iterations'][0])
        self.dataset = kwargs.pop('dataset', default_kwargs['dataset'][0])
        self.n_epochs = kwargs.pop('n_epochs', default_kwargs['n_epochs'][0])
        # in order not to pickle "self", create variables without it:
        dataset, n_epochs, iterations, ensemble_ss_names = self.dataset, self.n_epochs, self.iterations, self.ensemble_ss_names
        self.run_config_lambda = lambda: get_run_config(
            dataset=dataset, data_path=data_path, image_size=256,
            n_epochs=n_epochs, train_batch_size=trn_batch_size, test_batch_size=vld_batch_size,
            n_worker=n_workers, valid_size=vld_size, total_size=total_size, dataset_name=dataset,
            total_epochs=(iterations + 1) * n_epochs, lr_schedule_type=lr_schedule_type,
            weight_decay=weight_decay, init_lr=init_lr, cutout_size=cutout_size, if_center_crop=if_center_crop,
            auto_augment=auto_augment, resize_scale=resize_scale, if_cutmix=if_cutmix,
            preproc_alphanet='alphanet' in ensemble_ss_names # needed only for imagenet
        )
        # 5. search algorithm
        run_config = self.run_config_lambda() # need to create run_config here just to get the number of classes
        self.n_classes = run_config.data_provider.n_classes
        gomea_exe_path = get_from_kwargs_or_default_kwargs('gomea_exe')
        search_algo = kwargs.pop('search_algo', default_kwargs['search_algo'][0])
        assert search_algo in ['nsga3', 'mo-gomea', 'random']
        search_algo_class = {'nsga3': Nsga3Wrapper, 'mo-gomea': MoGomeaWrapper,
                             'random': RandomSearchWrapper}[search_algo]
        init_with_nd_front_size = kwargs.pop('init_with_nd_front_size', default_kwargs['init_with_nd_front_size'][0])
        n_surrogate_evals = kwargs.pop('n_surrogate_evals', default_kwargs['n_surrogate_evals'][0])
        self.sec_obj = kwargs.pop('sec_obj', default_kwargs['sec_obj'][0])
        self.if_add_archive_to_candidates = get_from_kwargs_or_default_kwargs('add_archive_to_candidates')
        self.search_wrapper = search_algo_class(self.search_space, self.sec_obj, self.path_logs,
                                                self.n_classes, self.supernet_paths,
                                                n_surrogate_evals, self.if_add_archive_to_candidates,
                                                alphabet=self.alphabet, alphabet_path=alphabet_paths, alphabet_name=alphabet_names,
                                                init_with_nd_front_size=init_with_nd_front_size, gomea_exe_path=gomea_exe_path,
                                                n_image_channels=3,
                                                dataset=self.dataset, search_space_name=self.search_space_name,
                                                alphabet_lb=self.alphabet_lb, ensemble_ss_names=self.ensemble_ss_names)
        subset_selector_name = kwargs.pop('subset_selector', default_kwargs['subset_selector'][0])
        archive_size = kwargs.pop('n_iter', default_kwargs['n_iter'][0])
        self.subset_selector = subset_selectors.create_subset_selector(subset_selector_name, archive_size)
        # 6. create lambdas for creating supernets (engines)
        # Why lambdas? Because they can be used multiple times and in subprocesses to create engines
        # with the same setup (but loaded weights will be different because I'll be overwriting save files)
        self.create_engine_lambdas = []
        ss_name_to_class = {'alphanet': AttentiveNasDynamicModel, 'ofa': OFAMobileNetV3My,
                            'proxyless': OFAProxylessNASNetsMy}
        use_gradient_checkpointing = get_from_kwargs_or_default_kwargs('use_gradient_checkpointing')
        for ss_name in self.ensemble_ss_names:
            class_to_use = ss_name_to_class[ss_name]
            self.create_engine_lambdas.append(NAT.make_lambda_for_engine_creation(class_to_use, self.n_classes,
                                                                                  use_gradient_checkpointing,
                                                                                  self.dataset, ss_name))
        # 7. loss functions
        label_smoothing = kwargs.pop('label_smoothing', default_kwargs['label_smoothing'][0])
        if label_smoothing == 0.0:
            if if_cutmix:
                self.train_criterion = CutMixCrossEntropyLoss()
            else:
                self.train_criterion = torch.nn.CrossEntropyLoss()
            self.val_criterion = torch.nn.CrossEntropyLoss()
        else:
            assert not if_cutmix
            print(f'Using label smoothing with coefficient == {label_smoothing}')
            self.train_criterion = LabelSmoothing(label_smoothing)
            self.val_criterion = LabelSmoothing(label_smoothing)
        # 8. used later
        self.initial_sample_size = kwargs.pop('n_doe', default_kwargs['n_doe'][0])
        self.predictor = kwargs.pop('predictor', default_kwargs['predictor'][0])
        self.n_warmup_epochs = kwargs.pop('n_warmup_epochs', default_kwargs['n_warmup_epochs'][0])
        self.if_amp = get_from_kwargs_or_default_kwargs('if_amp')
        self.rbf_ensemble_size = kwargs.pop('rbf_ensemble_size', default_kwargs['rbf_ensemble_size'][0])
        self.if_check_duplicates = not kwargs.pop('dont_check_duplicates', default_kwargs['dont_check_duplicates'][0])
        self.if_sample_configs_to_train = get_from_kwargs_or_default_kwargs('sample_configs_to_train')
        self.store_checkpoint_freq = kwargs.pop('store_checkpoint_freq', default_kwargs['store_checkpoint_freq'][0])
        self.get_scalar_from_accuracy = lambda acc: acc[0].item()
        # one file lock per GPU set, shared by all runs two directories up — serializes GPU usage
        self.lock = FileLock(os.path.join(str(Path(self.path_logs).parents[1]),
                                          f'gpu_{os.environ["CUDA_VISIBLE_DEVICES"].replace(",", "_")}.lock'))
        # 9. save config
        with open(os.path.join(self.path_logs, 'config_msunas.yml'), 'w') as f:
            yaml.dump(kwargs_copy, f)
def search(self):
worst_top1_err, worst_flops = 40, 4000
ref_pt = np.array([worst_top1_err, worst_flops])
archive, first_iteration = self.create_or_restore_archive(ref_pt)
for it in range(first_iteration, self.iterations + 1):
archive, *_ = self.search_step(archive, it, ref_pt)
    def search_step(self, archive, it, ref_pt):
        """One NAT iteration: fit surrogate, search on it, train+evaluate candidates, update archive.

        :param archive: list of (config, top1_err, complexity, ...) tuples from previous iterations
        :param it: iteration index (used for seeding, logging, and stats file naming)
        :param ref_pt: hypervolume reference point
        :return: (updated archive, {'acc_val_max': best validation accuracy in the archive})
        """
        acc_predictor, pred_for_archive = self.fit_surrogate(archive, self.alphabet, self.alphabet_lb)
        candidates, pred_for_candidates = self.surrogate_search(archive, acc_predictor, it=it)
        objs_evaluated = self.train_and_evaluate(candidates, it)
        candidates_top1_err, candidates_complexity = objs_evaluated[0], objs_evaluated[1]
        # correlation for accuracy: surrogate predictions vs true evaluations over archive + candidates
        rmse, rho, tau = get_correlation(np.hstack((pred_for_archive[:, 0], pred_for_candidates[:, 0])),
                                         np.array([x[1] for x in archive] + candidates_top1_err))
        # correlation for flops
        if self.if_cascade:
            _, rho_flops, _ = get_correlation(np.hstack((pred_for_archive[:, 1], pred_for_candidates[:, 1])),
                                              np.array([x[2] for x in archive] + candidates_complexity))
            print(f'{rho_flops=}')
        candidates_with_objs = []
        for member in zip(candidates, *objs_evaluated):
            candidates_with_objs.append(member)
        if self.if_add_archive_to_candidates:
            archive = candidates_with_objs # because archive was added to candidates in self.surrogate_search
        else:
            archive += candidates_with_objs # because candidates don't include archive
        # hypervolume over (top1_err, complexity) columns of the archive tuples
        hv = utils.compute_hypervolume(ref_pt, np.column_stack(list(zip(*archive))[1:3]))
        hv_candidates = utils.compute_hypervolume(ref_pt, np.column_stack(list(zip(*candidates_with_objs))[1:3]))
        print(f'\nIter {it}: hv = {hv:.2f}')
        print(f"fitting {self.predictor}: RMSE = {rmse:.4f}, Spearman's Rho = {rho:.4f}, Kendall’s Tau = {tau:.4f}")
        with open(os.path.join(self.path_logs, 'iter_{}.stats'.format(it)), 'w') as handle:
            json.dump({'archive': archive, 'candidates': candidates_with_objs, 'hv': hv, 'hv_candidates': hv_candidates,
                       'surrogate': {'model': self.predictor, 'name': acc_predictor.name, 'winner': acc_predictor.name,
                                     'rmse': rmse, 'rho': rho, 'tau': tau}}, handle)
        self.plot_archive(archive, candidates_top1_err, candidates, candidates_complexity, it, pred_for_candidates)
        return archive, {'acc_val_max': get_metric_complement(np.min([x[1] for x in archive]))}
def create_or_restore_archive(self, ref_pt):
if self.resume:
# loads the full archive, not just the candidates of the latest iteration
data = json.load(open(self.resume))
iter = re.search('(\d+)(?!.*\d)', self.resume)[0] # last number in the name
archive, first_iteration = data['archive'], int(iter)
if first_iteration == 0:
# MO-GOMEA needs the archive of previous iteration => copy it into the folder of the current run
try:
shutil.copy(self.resume, self.path_logs)
except shutil.SameFileError:
pass
first_iteration += 1
else:
archive = []
arch_doe = self.search_space.initialize(self.initial_sample_size)
if self.n_warmup_epochs > 0:
print(f'Warmup: train for {self.n_warmup_epochs} epochs')
self.lock.acquire()
st = time.time()
self._train(arch_doe, -1, n_epochs=self.n_warmup_epochs, if_warmup=True)
ed = time.time()
print(f'Train time = {ed - st}')
self.lock.release()
objs_evaluated = self.train_and_evaluate(arch_doe, 0)
for member in zip(arch_doe, *objs_evaluated):
archive.append(member)
hv = utils.compute_hypervolume(ref_pt, np.column_stack(list(zip(*archive))[1:3]))
with open(os.path.join(self.path_logs, 'iter_0.stats'), 'w') as handle:
json.dump({'archive': archive, 'candidates': [], 'hv': hv, 'hv_candidates': hv,
'surrogate': {}}, handle)
first_iteration = 1
return archive, first_iteration
def fit_surrogate(self, archive, alphabet, alphabet_lb):
if 'rbf_ensemble_per_ensemble_member' not in self.predictor:
inputs = np.array([self.search_space.encode(x[0]) for x in archive])
targets = np.array([x[1] for x in archive])
print(len(inputs), len(inputs[0]))
assert len(inputs) > len(inputs[0]), '# of training samples have to be > # of dimensions'
inputs_additional = {}
else:
inputs = list(zip(*[self.search_space.encode(x[0], if_return_separate=True) for x in archive]))
inputs = [np.array(i) for i in inputs]
targets = {}
metric_per_member = list(zip(*[x[-1][0] for x in archive]))
targets['metrics_sep'] = [np.array(x) for x in metric_per_member]
targets['flops_cascade'] = np.array([x[2] for x in archive])
inputs_additional = {}
flops_per_member = list(zip(*[x[-1][1] for x in archive]))
flops_per_member = [np.array(x) for x in flops_per_member]
flops_per_member = np.array(flops_per_member, dtype=np.int).T
inputs_for_flops = [i[:, -2:] for i in inputs]
inputs_for_flops = np.concatenate(inputs_for_flops, axis=1)
inputs_for_flops = np.hstack((inputs_for_flops, flops_per_member)) # n_samples, (ensemble_size*3) // because positions, thresholds, flops for each member
inputs_additional['inputs_for_flops'] = inputs_for_flops
inputs_for_flops_alphabet = np.concatenate([a[-2:] for a in self.alphabets] + [[2000] * len(self.alphabets)]) # for flops: they shouldn't be bigger than 2000
inputs_for_flops_alphabet_lb = np.concatenate([a[-2:] for a in self.alphabets_lb] + [[0] * len(self.alphabets)])
inputs_additional['inputs_for_flops_alphabet'] = inputs_for_flops_alphabet
inputs_additional['inputs_for_flops_alphabet_lb'] = inputs_for_flops_alphabet_lb
print(len(inputs), len(inputs[0]), len(inputs[0][0]))
assert len(inputs[0]) > max([len(x) for x in inputs[0]]), '# of training samples have to be > # of dimensions'
if 'combo' in self.predictor:
targets['metrics_ens'] = np.array([x[1] for x in archive])
if self.search_space_name == 'reproduce_nat':
# NAT uses only 100 out of 300 archs to fit the predictor
# we can use the same subset selector, but need to change number of archs to select, and then change it back
normal_n_select = self.subset_selector.n_select
self.subset_selector.n_select = 100
errs = 100 - targets # reference selection assumes minimization
flops = np.array([x[2] for x in archive])
objs = np.vstack((errs, flops)).T
# ReferenceBasedSelector doesn't actually use archive
indices = self.subset_selector.select([], objs)
self.subset_selector.n_select = normal_n_select
actual_inputs_for_fit = inputs[indices]
targets = targets[indices]
print(f'{actual_inputs_for_fit.shape=}, {targets.shape=}')
else:
actual_inputs_for_fit = inputs
acc_predictor = get_acc_predictor(self.predictor, actual_inputs_for_fit, targets, np.array(alphabet),
np.array(alphabet_lb), inputs_additional=inputs_additional,
ensemble_size=self.rbf_ensemble_size)
if 'rbf_ensemble_per_ensemble_member' in self.predictor:
inputs = np.concatenate(inputs, axis=1) # for creating predictor need them separately, but for prediction need a single vector
inputs = {'for_acc': inputs, 'for_flops': inputs_for_flops}
# to calculate predictor correlation:
predictions = acc_predictor.predict(inputs)
return acc_predictor, predictions
    def surrogate_search(self, archive, predictor, it=0):
        """Search on the surrogate, drop archive duplicates, and select a diverse subset.

        :param archive: list of evaluated (config, objectives...) tuples
        :param predictor: fitted surrogate used as the search objective
        :param it: iteration index; offsets the random seed so each iteration differs
        :return: (candidate configs (decoded, deduplicated), their predicted objectives)
        """
        seed_cur = self.random_seed + it
        set_seed(seed_cur)
        st = time.time()
        genomes, objs = self.search_wrapper.search(archive, predictor, it, seed=seed_cur)
        ed = time.time()
        print(f'Search time = {ed - st}')
        if self.if_check_duplicates:
            # compare in decoded space because the archive stores decoded configs
            archive_genomes = [x[0] for x in archive]
            new_genomes_decoded = [self.search_space.decode(x) for x in genomes]
            not_duplicate = np.logical_not([x in archive_genomes for x in new_genomes_decoded])
        else:
            not_duplicate = np.full(genomes.shape[0], True, dtype=bool)
        st = time.time()
        # subset selection operates only on the non-duplicate candidates
        indices = self.subset_selector.select(archive, objs[not_duplicate])
        genomes_selected = genomes[not_duplicate][indices]
        objs_selected = objs[not_duplicate][indices]
        ed = time.time()
        print(f'Select time = {ed - st}')
        # np.unique also deduplicates within the selected set; keep objs aligned via unique_idx
        genomes_selected, unique_idx = np.unique(genomes_selected, axis=0, return_index=True)
        objs_selected = objs_selected[unique_idx]
        candidates = [self.search_space.decode(x) for x in genomes_selected]
        return candidates, objs_selected
    def train_and_evaluate(self, archs, it, n_epochs=None, if_warmup=False):
        """Train the supernets on `archs` (under the GPU file lock), then evaluate each arch.

        :param archs: list of (ensemble) configs to train on and evaluate
        :param it: iteration index (checkpoint naming / seeding)
        :param n_epochs: override for the number of training epochs (None => run_config default)
        :param if_warmup: if True, training uses the warmup LR schedule
        :return: tuple of per-arch objective lists (see _evaluate_model_list)
        """
        # NOTE(review): the lock is released manually, so an exception during _train would
        # leave it held — presumably intentional so a crashed run keeps the GPU reserved; verify.
        self.lock.acquire()
        st = time.time()
        self._train(archs, it, n_epochs=n_epochs, if_warmup=if_warmup)
        ed = time.time()
        print(f'Train time = {ed - st}')
        self.lock.release()
        st = time.time()
        eval_res = self._evaluate_model_list(archs)
        ed = time.time()
        print(f'Eval time = {ed - st}')
        # free GPU memory left over by the evaluation subprocess machinery
        gc.collect()
        torch.cuda.empty_cache()
        # self.lock.release()
        return eval_res
@staticmethod
def _init_subprocess(log_file_path, fraction):
setup_logging(log_file_path)
torch.cuda.set_per_process_memory_fraction(fraction, 0)
    def _train(self, archs, it, number_to_add_to_i=0, n_epochs=None, if_warmup=False):
        """Train every supernet sequentially, each in a fresh worker process.

        Arguments for the worker are dill-dumped to a file (lambdas and closures are not
        picklable by the default ProcessPoolExecutor machinery); the worker reloads them in
        _train_one_supernetwork_stateless. The dump tuple order MUST match the unpacking
        order in that method.
        """
        # max_workers=1: supernets are trained one at a time; a new process gives a clean CUDA context
        thread_pool = ProcessPoolExecutor(max_workers=1,
                                          initializer=NAT._init_subprocess, initargs=(self.log_file_path, 0.44,))
                                          # initializer=setup_logging, initargs=(self.log_file_path,))
        n_engines_to_train = len(self.create_engine_lambdas)
        if self.search_space_name == 'ensemble':
            # every supernet gets an equal share of the training steps, and sees all archs
            percent_train_per_engine = [1 / n_engines_to_train] * len(self.ensemble_ss_names)
            lambda_select_archs_per_engine = [lambda _: True] * len(self.ensemble_ss_names)
        elif self.search_space_name == 'reproduce_nat':
            # split steps between the w=1.0 and w=1.2 supernets proportionally to their arch counts
            n_archs_w1_0 = np.sum([config['w'] == 1.0 for config in archs])
            percent_w1_0 = n_archs_w1_0 / len(archs)
            print(f'{percent_w1_0=}')
            percent_train_per_engine = [percent_w1_0, 1 - percent_w1_0]
            lambda_select_archs_per_engine = [lambda arch: arch['w'] == 1.0, lambda arch: arch['w'] == 1.2]
        for i, (ss_name, create_engine_lambda) in enumerate(zip(self.ensemble_ss_names, self.create_engine_lambdas)):
            dump_path_train1 = os.path.join(self.path_logs, 'dump_train1.pkl')
            if self.search_space_name == 'ensemble':
                archs_cur = [arch[i] for arch in archs] # archs is a list of lists, each of which contains configs for an ensemble
                search_space = self.search_space.search_spaces[i]
            elif self.search_space_name == 'reproduce_nat':
                archs_cur = archs
                search_space = self.search_space
            actual_logs_path = self.path_logs
            with open(dump_path_train1, 'wb') as f:
                dill.dump((archs_cur, it, number_to_add_to_i, n_epochs, if_warmup, create_engine_lambda,
                           self.random_seed + i, self.run_config_lambda, self.if_sample_configs_to_train,
                           search_space, self.dataset, torch.device('cuda' if torch.cuda.is_available() else 'cpu'),
                           self.train_criterion, self.get_scalar_from_accuracy, actual_logs_path,
                           self.supernet_paths[i], lambda_select_archs_per_engine[i], percent_train_per_engine[i],
                           self.store_checkpoint_freq, self.sec_obj, self.if_amp), f)
            future = thread_pool.submit(NAT._train_one_supernetwork_stateless, dump_path_train1)
            future.result()
        del thread_pool
        gc.collect()
        torch.cuda.empty_cache()
    @staticmethod
    def _train_one_supernetwork_stateless(args_dump_path):
        """Worker-side training of a single supernet; all state arrives via a dill dump file.

        Runs (n_epochs * n_batches * percent_steps_to_take) SGD steps, activating a
        different subnet config per batch, then overwrites the supernet checkpoint so the
        evaluation stage picks up the fresh weights. The unpacking order below must match
        the dump order in _train.
        """
        with open(args_dump_path, 'rb') as f:
            archs, it, number_to_add_to_i, n_epochs, if_warmup, create_engine_lambda, random_seed, run_config_lambda, \
            if_sample_configs_to_train, search_space, dataset_name, device, train_criterion, \
            get_scalar_from_accuracy, path_logs, supernet_path, lambda_filter_archs, percent_steps_to_take, \
            store_checkpoint_freq, sec_obj, if_amp \
                = dill.load(f)
        set_seed(random_seed + it) # need to keep changing the seed, otherwise all the epochs use the same random values
        run_config = run_config_lambda()
        engine, optimizer = create_engine_lambda(supernet_path, run_config, device=device)
        n_batches = len(run_config.train_loader)
        if n_epochs is None:
            n_epochs = run_config.n_epochs
        if if_sample_configs_to_train:
            # sample per-step configs from the marginal (per-gene) distribution of the archive
            configs_encoded = np.array([search_space.encode(c) for c in archs])
            unique_with_counts = [np.unique(i, return_counts=True) for i in configs_encoded.T]
            unique_with_probs = [(u, c / configs_encoded.shape[0]) for (u, c) in unique_with_counts]
            sample = np.array([np.random.choice(u, n_epochs * n_batches, p=p)
                               for (u, p) in unique_with_probs])
            sample_decoded = [search_space.decode(c) for c in sample.T]
        else:
            archs = [arch for arch in archs if lambda_filter_archs(arch)]
        all_resolutions = [arch['r'] for arch in archs]
        run_config.data_provider.collator_train.set_resolutions(all_resolutions)
        n_steps_to_take = int(n_epochs * n_batches * percent_steps_to_take)
        n_epochs_to_take = n_steps_to_take // n_batches
        if if_amp:
            scaler = GradScaler()
        step = 0
        epoch = 0 # for saving not to fail when n_epochs == 0
        for epoch in range(0, n_epochs):
            if step == n_steps_to_take: #don't waste time initializing dataloader threads for the epochs that won't run
                break
            engine.train()
            losses = AverageMeter()
            metric_dict = defaultdict(lambda: AverageMeter())
            data_time = AverageMeter()
            with tqdm(total=n_batches,
                      desc='{} Train #{}'.format(run_config.dataset, epoch + number_to_add_to_i), ncols=175) as t:
                end = time.time()
                for i, (images, labels, config_idx) in enumerate(run_config.train_loader):
                    time_diff = time.time() - end
                    data_time.update(time_diff)
                    if step == n_steps_to_take:
                        break
                    step += 1
                    if if_sample_configs_to_train:
                        config = sample_decoded[epoch * n_batches + i] # all the variables other than resolution have already been sampled in advance
                    else:
                        config = archs[config_idx]
                    if search_space.name in ['ofa', 'proxyless']:
                        config = validate_config(config)
                    engine.set_active_subnet(ks=config['ks'], e=config['e'], d=config['d'], w=config['w'])
                    if if_warmup:
                        # new_lr = run_config.init_lr
                        # previously warmup had constant lr, switch to linear warmup
                        new_lr = (step / n_steps_to_take) * run_config.init_lr
                    else:
                        new_lr = run_config.adjust_learning_rate(optimizer, epoch, i, n_batches,
                                                                 it * n_epochs + epoch, n_epochs_to_take, n_epochs)
                    images, labels = images.to(device), labels.to(device)
                    if not if_amp:
                        output = engine(images)
                        loss = train_criterion(output, labels)
                    else:
                        with torch.cuda.amp.autocast():
                            output = engine(images)
                            loss = train_criterion(output, labels)
                    optimizer.zero_grad()
                    if not if_amp:
                        loss.backward()
                        optimizer.step()
                    else:
                        # AMP path: unscale-step-update via GradScaler
                        scaler.scale(loss).backward()
                        scaler.step(optimizer)
                        scaler.update()
                    losses.update(loss.item(), images.size(0))
                    labels_for_acc = labels
                    if len(labels.shape) > 1:
                        # soft labels (e.g. cutmix) => recover hard labels for the accuracy metric
                        labels_for_acc = torch.argmax(labels, dim=-1)
                    acc1 = accuracy(output, labels_for_acc, topk=(1,))
                    acc1 = get_scalar_from_accuracy(acc1)
                    metric_dict['top1'].update(acc1, output.size(0))
                    t.set_postfix({'loss': losses.avg,
                                   **{key: metric_dict[key].avg for key in metric_dict},
                                   'img_size': images.size(2),
                                   'lr': new_lr,
                                   'data_time': data_time.avg})
                    t.update(1)
                    end = time.time()
        width_mult = engine.width_mult[0]
        # save the new supernet weights
        save_path_iter = os.path.join(path_logs, f'iter_{it}')
        Path(save_path_iter).mkdir(exist_ok=True)
        def save_engine_weights(save_path):
            dict_to_save = {'epoch': epoch,
                            'model_state_dict': engine.state_dict(),
                            'optimizer_state_dict': optimizer.state_dict(),
                            'width_mult': width_mult}
            # NOTE(review): wrapping torch.save in engine.state_dict(...) looks like a no-op
            # (torch.save returns None); presumably only the save matters — verify before simplifying.
            engine.state_dict(torch.save(dict_to_save, save_path))
        if (it + 1) % store_checkpoint_freq == 0:
            save_engine_weights(os.path.join(save_path_iter, os.path.basename(supernet_path)))
        # but additionally always save in the main log folder: needed for the whole thing to keep on working
        # ("train" updates & overwrites these weights, "eval" uses the latest version of the weights)
        save_engine_weights(supernet_path)
def _evaluate_model_list(self, archs, number_to_add_to_i=0):
engines = []
for i, (create_engine_lambda, supernet_path) in enumerate(zip(self.create_engine_lambdas, self.supernet_paths)):
run_config = self.run_config_lambda() # only used within create_engine_lambda
engine, opt = create_engine_lambda(supernet_path, run_config, to_cuda=False)
engines.append(engine)
def capture_variable_in_lambda(t):
return lambda _: t
get_engines = [capture_variable_in_lambda(engine) for engine in engines]
thread_pool = ProcessPoolExecutor(max_workers=1,
# initializer=setup_logging, initargs=(self.log_file_path,))
initializer=NAT._init_subprocess, initargs=(self.log_file_path,0.44))
dump1_path = os.path.join(self.path_logs, 'dump1.pkl')
with open(dump1_path, 'wb') as f:
dill.dump({'archs': archs, 'device': torch.device('cuda' if torch.cuda.is_available() else 'cpu'),
'val_criterion': self.val_criterion, 'get_scalar_from_accuracy': self.get_scalar_from_accuracy,
'sec_obj': self.sec_obj, 'search_space_ensemble': self.search_space,
'get_engines': get_engines,
'run_config_lambda': self.run_config_lambda, 'number_to_add_to_i': number_to_add_to_i,
'if_ensemble_perf_per_member': 'rbf_ensemble_per_ensemble_member' in self.predictor,
'if_cascade': self.if_cascade}, f)
future = thread_pool.submit(NAT._evaluate_model_list_stateless, dump1_path)
res = future.result()
del thread_pool
try:
os.remove(dump1_path)
except:
pass
return tuple(res)
@staticmethod
def _evaluate_model_list_stateless(args_dump_path): # must be called by _evaluate_model_list
with open(args_dump_path, 'rb') as f:
kwargs_loaded = dill.load(f)
top1_errs = []
complexities = []
if_ensemble_perf_per_member = kwargs_loaded['if_ensemble_perf_per_member']
if if_ensemble_perf_per_member:
perf_and_flops_per_subnet_all = []
run_config = kwargs_loaded['run_config_lambda']()
kwargs_loaded['run_config'] = run_config
archs = kwargs_loaded['archs']
for i_config in range(len(archs)):
kwargs_loaded['config_ensemble'] = archs[i_config]
kwargs_loaded['i_config'] = i_config
top1_err, complexity, perf_and_flops_per_subnet = NAT._evaluate_model(**kwargs_loaded)
top1_errs.append(top1_err)
complexities.append(complexity)
if if_ensemble_perf_per_member:
perf_and_flops_per_subnet_all.append(perf_and_flops_per_subnet)
to_return = top1_errs, complexities
if if_ensemble_perf_per_member:
to_return += (perf_and_flops_per_subnet_all,)
return to_return
    @staticmethod
    def _evaluate_model(device, val_criterion,
                        get_scalar_from_accuracy, sec_obj, search_space_ensemble, get_engines,
                        run_config, config_ensemble, i_config, number_to_add_to_i, if_ensemble_perf_per_member,
                        if_cascade, **kwargs): #don't need kwargs, have them to ignore irrelevant parameters passed here
        """Evaluate one ensemble/cascade config on the validation set.

        Extracts each member subnet from its supernet, then runs validation. In cascade
        mode, members are sorted by their 'position' gene; a later member only contributes
        to samples whose current max-softmax confidence is below the preceding threshold,
        and effective FLOPs are weighted by how many samples actually reached each stage.

        :return: (top1_err, complexity[, (per-member top1 errs, per-member flops) or None])
        """
        print('started _evaluate_model')
        subnets = []
        resolution_max = -1
        resolutions_list = []
        thresholds = None
        if if_cascade:
            positions_list = []
            thresholds = []
        if type(search_space_ensemble) is OFASearchSpace: # reproduce_nat
            search_spaces = [search_space_ensemble]
            config_ensemble = [config_ensemble]
            # I had a bug caused by the fact that the zero-th engine is used every time
            if config_ensemble[0]['w'] == 1.0:
                get_engines = [get_engines[0]]
            else:
                get_engines = [get_engines[1]]
        else:
            search_spaces = search_space_ensemble.search_spaces
        vld_batch_size = run_config.valid_loader.batch_size
        for i, search_space in enumerate(search_spaces):
            if search_space.name in ['ofa', 'proxyless']:
                config_ensemble[i].update(validate_config(config_ensemble[i])) # tuple doesn't support item assignment
            resolution, subnet = NAT._extract_subnet_from_supernet(config_ensemble[i], get_engines[i], run_config, vld_batch_size, device)
            subnets.append(subnet)
            resolution_max = max(resolution_max, resolution)
            resolutions_list.append(resolution)
            if if_cascade:
                positions_list.append(config_ensemble[i]['position'])
                thresholds.append(config_ensemble[i]['threshold'])
        if if_cascade:
            # sort members by descending desired position; keep reverse_idx to restore supernet order later
            idx = np.argsort(positions_list)[::-1]
            thresholds = np.array(thresholds)[idx].tolist()
            resolutions_list = np.array(resolutions_list)[idx].tolist()
            subnets = np.array(subnets)[idx].tolist()
            reverse_idx = np.argsort(idx) #https://stackoverflow.com/questions/2483696/undo-or-reverse-argsort-python
        resolution = resolution_max
        run_config.valid_loader.collate_fn.set_resolutions([resolution]) # at this point all resolutions should be the same
        metric_dict_val = defaultdict(lambda: AverageMeter())
        losses_val = AverageMeter()
        n_input_channels = -1
        if if_cascade:
            n_not_predicted_per_stage = [0 for _ in range(len(subnets) - 1)]
        with torch.no_grad(), torch.cuda.amp.autocast():
            with tqdm(total=len(run_config.valid_loader),
                      desc='{} Val #{}'.format(run_config.dataset, i_config + number_to_add_to_i),
                      ncols=200) as t:
                # print(i_cuda, 'before dataloader_val loop')
                for i, (images, labels, *other_stuff) in enumerate(run_config.valid_loader):
                    images, labels = images.to(device), labels.to(device)
                    images_orig = None # don't make a backup unless I need to
                    output = None
                    if if_cascade:
                        idx_more_predictions_needed = torch.ones(images.shape[0], dtype=torch.bool)
                    for i_subnet, subnet in enumerate(subnets):
                        if i_subnet > 0:
                            # samples whose confidence already exceeds the threshold are final
                            cur_threshold = thresholds[i_subnet - 1]
                            idx_more_predictions_needed[torch.max(output, dim=1).values >= cur_threshold] = False
                            output_tmp = output[idx_more_predictions_needed]
                            if len(output_tmp) == 0:
                                n_not_predicted = 0
                            else:
                                not_predicted_idx = torch.max(output_tmp, dim=1).values < cur_threshold
                                n_not_predicted = torch.sum(not_predicted_idx).item()
                            n_not_predicted_per_stage[i_subnet - 1] += n_not_predicted
                            '''
                            wanna know accuracies of all the subnets even if their predictions aren't used
                            => no breaking
                            '''
                            # if n_not_predicted == 0:
                            #     break
                            if resolutions_list[i_subnet] != resolutions_list[i_subnet - 1]:
                                if images_orig is None:
                                    images_orig = torch.clone(images)
                                r = resolutions_list[i_subnet]
                                images = torchvision.transforms.functional.resize(images_orig, (r, r))
                        if i_subnet == 0:
                            out_logits = subnet(images)
                            output_cur_softmaxed = torch.nn.functional.softmax(out_logits, dim=1)
                        else:
                            out_logits = subnet(images)
                            if len(out_logits.shape) < 2: # a single image is left in the batch, need to fix dim # wait, because I want per-subnet accuracies I pass the whole batch through the net, so this isn't necessary?
                                out_logits = out_logits[None, ...]
                            output_cur_softmaxed = torch.nn.functional.softmax(out_logits, dim=1)
                        if i_subnet == 0:
                            output = output_cur_softmaxed
                        else:
                            if n_not_predicted > 0: # if 0, actual predictions are not modified
                                # running average over the nets used so far for the still-undecided samples
                                n_nets_used_in_cascade = i_subnet + 1
                                coeff1 = ((n_nets_used_in_cascade - 1) / n_nets_used_in_cascade)
                                coeff2 = (1 / n_nets_used_in_cascade)
                                output_tmp[not_predicted_idx] = coeff1 * output_tmp[not_predicted_idx] \
                                                                + coeff2 * output_cur_softmaxed[idx_more_predictions_needed][not_predicted_idx]
                                # need "output_tmp" because in pytorch "a[x][y] = z" doesn't modify "a".
                                output[idx_more_predictions_needed] = output_tmp
                        if if_ensemble_perf_per_member:
                            acc1 = accuracy(output_cur_softmaxed.detach(), labels, topk=(1,))
                            acc1 = get_scalar_from_accuracy(acc1)
                            # the line below caused a bug because I sorted the subnets by their desired position
                            # the fix is done at the very end because I want the numbering to be consistent,
                            # i.e. within the loop the subnets are sorted by their desired position.
                            metric_dict_val[f'top1_s{i_subnet}'].update(acc1, output.size(0))
                    loss = val_criterion(output, labels)
                    acc1 = accuracy(output, labels, topk=(1,))
                    acc1 = get_scalar_from_accuracy(acc1)
                    metric_dict_val['top1'].update(acc1, output.size(0))
                    losses_val.update(loss.item(), images.size(0))
                    n_input_channels = images.size(1)
                    tqdm_postfix = {'l': losses_val.avg,
                                    **{key: metric_dict_val[key].avg for key in metric_dict_val},
                                    'i': images.size(2)}
                    if thresholds is not None:
                        tqdm_postfix['not_pr'] = n_not_predicted_per_stage
                        tqdm_postfix['thr'] = thresholds
                    t.set_postfix(tqdm_postfix)
                    t.update(1)
        metric = metric_dict_val['top1'].avg
        top1_err = utils.get_metric_complement(metric)
        resolution_for_flops = resolutions_list
        info = get_net_info(subnets, (n_input_channels, resolution_for_flops, resolution_for_flops),
                            measure_latency=None, print_info=False, clean=True, lut=None, if_dont_sum=if_cascade)
        if not if_cascade:
            complexity = info[sec_obj]
        else:
            # effective cascade FLOPs: later stages are weighted by the fraction of samples that reached them
            flops_per_stage = info[sec_obj]
            n_images_total = len(run_config.valid_loader.dataset)
            true_flops = flops_per_stage[0] + sum(
                [n_not_predicted / n_images_total * flops for (n_not_predicted, flops) in
                 zip(n_not_predicted_per_stage, flops_per_stage[1:])])
            complexity = true_flops
        del subnet
        to_return = top1_err, complexity
        if if_ensemble_perf_per_member:
            top1_err_per_member = []
            for i_subnet in range(len(subnets)):
                metric_cur = metric_dict_val[f'top1_s{i_subnet}'].avg
                top1_err_cur = utils.get_metric_complement(metric_cur)
                top1_err_per_member.append(top1_err_cur)
            # fixing the bug that arose because subnets were sorted by resolution but the code that gets
            # the output of this assumes sorting by supernet
            top1_err_per_member = np.array(top1_err_per_member)[reverse_idx].tolist()
            flops_per_member = np.array(flops_per_stage)[reverse_idx].tolist()
            to_return = (*to_return, (tuple(top1_err_per_member), tuple(flops_per_member)))
        else:
            to_return = (*to_return, None)
        return to_return
@staticmethod
def _extract_subnet_from_supernet(config_padded, get_engine, run_config, vld_batch_size, device):
engine = get_engine(config_padded['w'])
engine.set_active_subnet(ks=config_padded['ks'], e=config_padded['e'], d=config_padded['d'],
w=config_padded['w'])
resolution = config_padded['r']
run_config.data_provider.collator_subtrain.set_resolutions([resolution])# for sub_train_loader
run_config.data_provider.assign_active_img_size(resolution) # if no training is done, active image size is not set
st = time.time()
data_loader_set_bn = run_config.random_sub_train_loader(2000, vld_batch_size, resolution)
end = time.time()
print(f'sub_train_loader time = {end-st}')
subnet = engine.get_active_subnet(True)
subnet.eval().to(device)
# set BatchNorm for proper values for this subnet
st = time.time()
set_running_statistics(subnet, data_loader_set_bn)
end = time.time()
print(f'Setting BN time = {end-st}')
return resolution, subnet
@staticmethod
def make_lambda_for_engine_creation(class_to_use, n_classes, use_gradient_checkpointing,
dataset_name, search_space_name):
def inner(supernet_path, run_config, to_cuda=True, device=None, if_create_optimizer=True):
loaded_checkpoint = torch.load(supernet_path, map_location='cpu')
n_in_channels = 3
if search_space_name == 'ofa':
if 'width_mult' in loaded_checkpoint:
width_mult = loaded_checkpoint['width_mult']
else:
width_mult = 1.0 if 'w1.0' in supernet_path else 1.2 if 'w1.2' in supernet_path else None
assert width_mult is not None
kernel_size = [3, 5, 7]
exp_ratio = [3, 4, 6]
depth = [2, 3, 4]
engine = class_to_use(n_classes=n_classes, dropout_rate=0, width_mult=width_mult, ks_list=kernel_size,
expand_ratio_list=exp_ratio, depth_list=depth, if_use_gradient_checkpointing=use_gradient_checkpointing,
n_image_channels=n_in_channels)
elif search_space_name == 'alphanet':
engine = class_to_use(n_classes=n_classes, if_use_gradient_checkpointing=use_gradient_checkpointing,
n_image_channels=n_in_channels)
elif search_space_name == 'proxyless':
width_mult = 1.3
kernel_size = [3, 5, 7]
exp_ratio = [3, 4, 6]
depth = [2, 3, 4]
engine = class_to_use(n_classes=n_classes, dropout_rate=0, width_mult=width_mult, ks_list=kernel_size,
expand_ratio_list=exp_ratio, depth_list=depth, if_use_gradient_checkpointing=use_gradient_checkpointing,
n_image_channels=n_in_channels)
else:
raise NotImplementedError
if 'state_dict' in loaded_checkpoint: # for the pretrained model
init = loaded_checkpoint['state_dict']
elif 'model_state_dict' in loaded_checkpoint:
init = loaded_checkpoint['model_state_dict']
else:
raise ValueError
if search_space_name == 'alphanet': #each key in the pretrained model starts with "module."
init = {k.replace('module.', ''):v for k, v in init.items()}
classifier_linear_name = 'classifier.linear'
if classifier_linear_name + '.weight' not in init:
classifier_linear_name += '.linear'
loaded_classifier_weight_shape = init[classifier_linear_name + '.weight'].shape
if (loaded_classifier_weight_shape[0] != n_classes):
init[classifier_linear_name + '.weight'] = torch.rand((n_classes, loaded_classifier_weight_shape[1]))
init[classifier_linear_name + '.bias'] = torch.rand((n_classes))
engine.load_state_dict(init)
if to_cuda:
assert device is not None
print(f'{device=}')
engine.to(device)
if if_create_optimizer:
try:
net_params = engine.weight_parameters()
except:
net_params = [param for param in engine.parameters() if param.requires_grad]
optimizer = run_config.build_optimizer(net_params)
if 'optimizer_state_dict' in loaded_checkpoint:
optimizer.load_state_dict(loaded_checkpoint['optimizer_state_dict'])
print(optimizer)
else:
optimizer = None
return engine, optimizer
return inner
    def plot_archive(self, archive, c_top1_err, candidates, complexity, it, pred_for_candidates):
        """Save a scatter plot of search state at iteration ``it``.

        Three layers are drawn: the archive (blue circles), the candidates with
        their measured accuracy (red dots), and the same candidates with the
        accuracy (and, for cascades, complexity) predicted by the surrogate
        (green circles). The plot is written to ``<path_logs>/iter_<it>.png``.
        """
        plot = Scatter(legend=(True, {'loc': 'lower right'}), figsize=(12, 9))
        # Layer 1: the archive. ``F`` is reused as a scratch (n, 2) objective matrix.
        F = np.full((len(archive), 2), np.nan)
        F[:, 0] = np.array([x[2] for x in archive]) # second obj. (complexity)
        F[:, 1] = get_metric_complement(np.array([x[1] for x in archive])) # top-1 accuracy
        plot.add(F, s=15, facecolors='none', edgecolors='b', label='archive')
        # Layer 2: evaluated candidates (true accuracy, true complexity).
        F = np.full((len(candidates), 2), np.nan)
        proper_second_obj = np.array(complexity)
        F[:, 0] = proper_second_obj
        F[:, 1] = get_metric_complement(np.array(c_top1_err))
        plot.add(F, s=30, color='r', label='candidates evaluated')
        # Layer 3: the same candidates as predicted by the surrogate.
        # For cascades the complexity is also a prediction (column 1).
        F = np.full((len(candidates), 2), np.nan)
        if not self.if_cascade:
            F[:, 0] = proper_second_obj
        else:
            F[:, 0] = pred_for_candidates[:, 1]
        F[:, 1] = get_metric_complement(pred_for_candidates[:, 0])
        plot.add(F, s=20, facecolors='none', edgecolors='g', label='candidates predicted')
        plot.plot_if_not_done_yet()
        plt.xlim(left=30)
        # Per-dataset axis limits; for CIFAR they kick in once the predicted
        # accuracies (last layer drawn, still in F) are good enough.
        if self.dataset == 'cifar10':
            if np.median(F[:, 1]) > 85:
                plt.xlim(left=0, right=3000)
                plt.ylim(85, 100)
        elif self.dataset == 'cifar100':
            if np.median(F[:, 1]) > 70:
                plt.xlim(left=0, right=3000)
                plt.ylim(70, 90)
        elif self.dataset == 'imagenet':
            plt.xlim(left=0, right=2100)
            plt.ylim(64, 78)
        plot.save(os.path.join(self.path_logs, 'iter_{}.png'.format(it)))
def main(args):
    """Run one NAT search described by the ``args`` dict and clean up afterwards.

    After the search, the C-API dump of the last iteration is removed
    (best effort), and CPU/GPU memory held by the engine is released.
    """
    engine = NAT(args)
    engine.search()
    try:
        save_for_c_api_last_path = os.path.join(engine.path_logs, f'iter_{args["iterations"]}', 'save_for_c_api')
        os.remove(save_for_c_api_last_path)
    except (OSError, KeyError):
        # Best-effort cleanup: the file (or the 'iterations' key) may not exist.
        # Was a bare ``except`` that also swallowed KeyboardInterrupt.
        pass
    del engine
    gc.collect()
    torch.cuda.empty_cache()
# Default hyper-parameters for a NAT search run.
# Each entry maps an argument name to a pair [default_value, help_string];
# the runner strips the help string and overrides values from the yml config.
default_kwargs = {
    'experiment_name': ['debug_run', 'location of dir to save'],
    'resume': [None, 'resume search from a checkpoint'],
    'sec_obj': ['flops', 'second objective to optimize simultaneously'],
    'iterations': [30, 'number of search iterations'],
    'n_doe': [100, 'number of architectures to sample initially '
                   '(I kept the old name which is a bit weird; "doe"=="design of experiment")'],
    'n_iter': [8, 'number of architectures to evaluate in each iteration'],
    'predictor': ['rbf', 'which accuracy predictor model to fit'],
    'data': ['/export/scratch3/aleksand/data/CIFAR/', 'location of the data corpus'],
    'dataset': ['cifar10', 'name of the dataset [imagenet, cifar10, cifar100, ...]'],
    'n_workers': [8, 'number of workers for dataloaders'],
    'vld_size': [10000, 'validation size'],
    'total_size': [None, 'train+validation size'],
    'trn_batch_size': [96, 'train batch size'],
    'vld_batch_size': [96, 'validation batch size '],
    'n_epochs': [5, 'test batch size for inference'],
    'supernet_path': [['/export/scratch3/aleksand/nsganetv2/data/ofa_mbv3_d234_e346_k357_w1.0'], 'list of paths to supernets'],
    'search_algo': ['nsga3', 'which search algo to use [NSGA-III, MO-GOMEA, random]'],
    'subset_selector': ['reference', 'which subset selector algo to use'],
    'init_with_nd_front_size': [0, 'initialize the search algorithm with subset of non-dominated front of this size'],
    'dont_check_duplicates': [False, 'if disable check for duplicates in search results'],
    'add_archive_to_candidates': [False, 'if a searcher should append archive to the candidates'],
    'sample_configs_to_train': [False, 'if instead of training selected candidates, a probability distribution '
                                       'should be constructed from archive, and sampled from (like in NAT)'],
    'random_seed': [42, 'random seed'],
    'n_warmup_epochs': [0, 'number of epochs for warmup'],
    'path_logs': ['/export/scratch3/aleksand/nsganetv2/logs/', 'Path to the logs folder'],
    'n_surrogate_evals': [800, 'Number of evaluations of the surrogate per meta-iteration'],
    'config_msunas_path': [None, 'Path to the yml file with all the parameters'],
    'gomea_exe': [None, 'Path to the mo-gomea executable file'],
    'alphabet': [['2'], 'Paths to text files (one per supernetwork) with alphabet size per variable'],
    'search_space': [['ensemble'], 'Supernetwork search space to use'],
    'store_checkpoint_freq': [1, 'Checkpoints will be stored for every x-th iteration'],
    'init_lr': [None, 'initial learning rate'],
    'ensemble_ss_names': [[], 'names of search spaces used in the ensemble'],
    'rbf_ensemble_size': [500, 'number of the predictors in the rbf_ensemble surrogate'],
    'cutout_size': [32, 'Cutout size. 0 == disabled'],
    'label_smoothing': [0.0, 'label smoothing coeff when doing classification'],
    'if_amp': [False, 'if train in mixed precision'],
    'use_gradient_checkpointing': [False, 'if use gradient checkpointing'],
    'lr_schedule_type': ['cosine', 'learning rate schedule; "cosine" is cyclic'],
    'if_cutmix': [False, 'if to use cutmix'],
    'weight_decay': [4e-5, ''],
    'if_center_crop': [True, 'if do center crop, or just resize to target size'],
    'auto_augment': ['rand-m9-mstd0.5', 'randaugment policy to use, or None to not use randaugment'],
    'resize_scale': [0.08, 'minimum resize scale in RandomResizedCrop, or None to not use RandomResizedCrop'],
    'search_goal': ['ensemble', 'Either "reproduce_nat" for reproducing NAT, or "ensemble" for everything else'],
} | 54,638 | 52.672888 | 223 | py |
ENCAS | ENCAS-main/utils.py | import atexit
import gzip
import logging
import math
import os
import random
import sys
import yaml
from ofa.utils import count_parameters, measure_net_latency
from pathlib import Path
from ptflops import get_model_complexity_info
from pymoo.factory import get_performance_indicator
from pymoo.util.nds.non_dominated_sorting import NonDominatedSorting
from typing import List
import numpy as np
from collections import defaultdict
from PIL import Image, ImageDraw
import torch
import torch.nn.functional
from matplotlib import pyplot as plt
import io
import selectors
import subprocess
from networks.ofa_mbv3_my import OFAMobileNetV3My
# Project root paths; the commented-out line is the path on another cluster.
# NAT_PATH = '/export/scratch3/aleksand/nsganetv2'
NAT_PATH = '/projects/0/einf2071/nsganetv2'
NAT_LOGS_PATH = os.path.join(NAT_PATH, 'logs')
NAT_DATA_PATH = os.path.join(NAT_PATH, 'data')
# Names of the alphabet files (per-variable cardinalities of the search space).
_alphabets = ['full_nat', 'full_nat_w12', 'full_nat_w10', 'full_alphanet', 'full_nat_proxyless',
              'full_alphanet_cascade2', 'full_nat_w12_cascade2',
              'full_nat_w12_cascade5', 'full_nat_w10_cascade5', 'full_alphanet_cascade5', 'full_nat_proxyless_cascade5']
# Alphabet name -> absolute path of its txt file.
alphabet_dict = {a: os.path.join(NAT_PATH, 'alphabets', f'{a}.txt') for a in _alphabets}
# Search-space short name -> file name of its pretrained supernet checkpoint.
ss_name_to_supernet_path = {'ofa12': 'supernet_w1.2', 'ofa10': 'supernet_w1.0',
                            'alphanet': 'alphanet_pretrained.pth.tar',
                            'alphanet1': 'alphanet_pretrained.pth.tar',
                            'alphanet2': 'alphanet_pretrained.pth.tar',
                            'alphanet3': 'alphanet_pretrained.pth.tar',
                            'alphanet4': 'alphanet_pretrained.pth.tar',
                            'attn': 'attentive_nas_pretrained.pth.tar',
                            'proxyless': 'ofa_proxyless_d234_e346_k357_w1.3'}
# Decode an integer cascade-threshold gene into the actual threshold value.
threshold_gene_to_value = {i: 0.1*(i + 1) for i in range(10)}
threshold_gene_to_value_moregranular = {i: 0.02 * i for i in range(51)}
def get_correlation(prediction, target):
    """Return (RMSE, Spearman rho, Kendall tau) between predictions and targets."""
    import scipy.stats as stats
    diff = prediction - target
    rmse = np.sqrt((diff ** 2).mean())
    rho, _ = stats.spearmanr(prediction, target)
    tau, _ = stats.kendalltau(prediction, target)
    return rmse, rho, tau
def look_up_latency(net, lut, resolution=224):
    """Predict the latency of a MobileNetV3-style ``net`` at ``resolution`` using a LUT.

    Sums the per-layer predictions of ``lut`` (a LatencyEstimator) for the stem,
    the final expand / feature-mix / classifier layers and every MBConv block.
    Assumes ``net`` follows the OFA MobileNetV3 layout (first_conv, blocks,
    final_expand_layer, feature_mix_layer, classifier).
    """
    def _half(x, times=1):
        # Halve (with ceil) ``times`` times — the spatial downsampling of stride-2 layers.
        for _ in range(times):
            x = np.ceil(x / 2)
        return int(x)
    predicted_latency = 0
    # first_conv (stride 2: halves the resolution)
    predicted_latency += lut.predict(
        'first_conv', [resolution, resolution, 3],
        [resolution // 2, resolution // 2, net.first_conv.out_channels])
    # final_expand_layer (only for MobileNet V3 models); input is downsampled 5 times by then
    input_resolution = _half(resolution, times=5)
    predicted_latency += lut.predict(
        'final_expand_layer',
        [input_resolution, input_resolution, net.final_expand_layer.in_channels],
        [input_resolution, input_resolution, net.final_expand_layer.out_channels]
    )
    # feature_mix_layer (operates after global average pooling: 1x1 spatial)
    predicted_latency += lut.predict(
        'feature_mix_layer',
        [1, 1, net.feature_mix_layer.in_channels],
        [1, 1, net.feature_mix_layer.out_channels]
    )
    # classifier
    predicted_latency += lut.predict(
        'classifier',
        [net.classifier.in_features],
        [net.classifier.out_features]
    )
    # blocks: track the feature-map size through the strided blocks
    fsize = _half(resolution)
    for block in net.blocks:
        idskip = 0 if block.config['shortcut'] is None else 1
        se = 1 if block.config['mobile_inverted_conv']['use_se'] else 0
        stride = block.config['mobile_inverted_conv']['stride']
        out_fz = _half(fsize) if stride > 1 else fsize
        block_latency = lut.predict(
            'MBConv',
            [fsize, fsize, block.config['mobile_inverted_conv']['in_channels']],
            [out_fz, out_fz, block.config['mobile_inverted_conv']['out_channels']],
            expand=block.config['mobile_inverted_conv']['expand_ratio'],
            kernel=block.config['mobile_inverted_conv']['kernel_size'],
            stride=stride, idskip=idskip, se=se
        )
        predicted_latency += block_latency
        fsize = out_fz
    return predicted_latency
def get_metric_complement(metric, if_segmentation=False):
    """Convert accuracy to error (or back): returns ``max_value - metric``.

    ``max_value`` is 100 for classification metrics in percent, and 1 for
    segmentation metrics expressed as fractions.
    """
    max_value = 1 if if_segmentation else 100
    return max_value - metric
def fix_folder_names_imagenetv2():
    """One-off fix: zero-pad ImageNetV2 class-folder names to 4 digits (e.g. '7' -> '0007')."""
    import os, glob
    for path in glob.glob('/export/scratch3/aleksand/data/imagenet/imagenetv2_all'):
        if not os.path.isdir(path):
            continue
        for subpath in glob.glob(f'{path}/*'):
            parent, _, dirname = subpath.rpartition('/')
            os.rename(subpath, parent + '/' + dirname.zfill(4))
def compute_hypervolume(ref_pt, F, normalized=True, if_increase_ref_pt=True, if_input_already_pareto=False):
    """Hypervolume of the non-dominated subset of ``F`` w.r.t. ``ref_pt``.

    Optionally inflates the reference point by 1% and normalizes the result
    by the volume spanned by the reference point.
    """
    if if_input_already_pareto:
        nd_F = F
    else:
        # Restrict to the non-dominated front of F.
        front_idx = NonDominatedSorting().do(F, only_non_dominated_front=True)
        nd_F = F[front_idx, :]
    if if_increase_ref_pt:
        ref_pt = 1.01 * ref_pt
    hv = get_performance_indicator('hv', ref_point=ref_pt).calc(nd_F)
    return hv / np.prod(ref_pt) if normalized else hv
class LoggerWriter:
    """File-like adapter that redirects stream writes to a logging function.

    Intended as a replacement for ``sys.stdout``/``sys.stderr``. Writes are
    buffered until a newline-terminated chunk (or a tqdm progress line,
    recognized by the '%|' marker) arrives, so each log call gets one
    complete line.
    """
    def __init__(self, log_fun):
        # log_fun is typically logging.info / logging.error
        self.log_fun = log_fun
        self.buf = []
        self.is_tqdm_msg_fun = lambda msg: '%|' in msg
    def write(self, msg):
        is_tqdm = self.is_tqdm_msg_fun(msg)
        has_newline = msg.endswith('\n')
        if has_newline or is_tqdm:
            self.buf.append(msg)
            self.log_fun(''.join(self.buf))
            self.buf = []
        else:
            # Partial line: keep accumulating until it is completed.
            self.buf.append(msg)
    def flush(self):
        # Deliberately a no-op: flushing partial lines here would split log
        # records (tqdm calls flush() after writing incomplete lines).
        pass
    def close(self):
        # Emit whatever is still buffered before shutting down.
        if self.buf:
            self.log_fun(''.join(self.buf))
            self.buf = []
        # Bug fix: ``log_fun`` is usually a plain function (logging.info) with no
        # ``close``; the original unconditional ``self.log_fun.close()`` raised
        # AttributeError. Delegate only when a close() actually exists.
        close_fn = getattr(self.log_fun, 'close', None)
        if close_fn is not None:
            close_fn()
def setup_logging(log_path):
    """Route all stdout/stderr through the logging module (console + file ``log_path``).

    ``logging`` is reload()-ed first to discard any previously configured
    handlers; afterwards ``sys.stdout``/``sys.stderr`` are replaced by
    LoggerWriter instances so that ``print`` output also lands in the log.
    """
    from importlib import reload
    reload(logging)
    logging.StreamHandler.terminator = '' # don't add new line, I'll do it myself; this line affects both handlers
    # Console goes to the *original* stdout (sys.stdout itself gets replaced below).
    stream_handler = logging.StreamHandler(sys.__stdout__)
    file_handler = logging.FileHandler(log_path, mode='a')
    # don't want a bazillion tqdm lines in the log:
    # file_handler.filter = lambda record: '%|' not in record.msg or '100%|' in record.msg
    # Keep only finished (100%) tqdm lines and drop cursor-movement ('[A') lines.
    file_handler.filter = lambda record: '[A' not in record.msg and ('%|' not in record.msg or '100%|' in record.msg)
    handlers = [
        file_handler,
        stream_handler]
    logging.basicConfig(level=logging.INFO,
                        # format='%(asctime)s %(message)s',
                        format='%(message)s',
                        handlers=handlers,
                        datefmt='%H:%M')
    sys.stdout = LoggerWriter(logging.info)
    sys.stderr = LoggerWriter(logging.error)
# https://dev.to/taqkarim/extending-simplenamespace-for-nested-dictionaries-58e8
from types import SimpleNamespace
class RecursiveNamespace(SimpleNamespace):
    """SimpleNamespace that recursively converts nested dicts (including dicts
    inside arbitrarily nested lists) into namespaces, enabling attribute access
    such as ``cfg.a.b[0].c``."""
    @staticmethod
    def map_entry(entry):
        # Convert dict entries to namespaces. Also recurse into lists so that
        # list-of-list structures are handled (the original only converted
        # dicts at the first list level).
        if isinstance(entry, dict):
            return RecursiveNamespace(**entry)
        if isinstance(entry, list):
            return list(map(RecursiveNamespace.map_entry, entry))
        return entry
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        for key, val in kwargs.items():
            # isinstance (not ``type(val) == dict``) so dict/list subclasses convert too.
            if isinstance(val, dict):
                setattr(self, key, RecursiveNamespace(**val))
            elif isinstance(val, list):
                setattr(self, key, list(map(self.map_entry, val)))
alphanet_config_str = '''
use_v3_head: True
resolutions: [192, 224, 256, 288]
first_conv:
c: [16, 24]
act_func: 'swish'
s: 2
mb1:
c: [16, 24]
d: [1, 2]
k: [3, 5]
t: [1]
s: 1
act_func: 'swish'
se: False
mb2:
c: [24, 32]
d: [3, 4, 5]
k: [3, 5]
t: [4, 5, 6]
s: 2
act_func: 'swish'
se: False
mb3:
c: [32, 40]
d: [3, 4, 5, 6]
k: [3, 5]
t: [4, 5, 6]
s: 2
act_func: 'swish'
se: True
mb4:
c: [64, 72]
d: [3, 4, 5, 6]
k: [3, 5]
t: [4, 5, 6]
s: 2
act_func: 'swish'
se: False
mb5:
c: [112, 120, 128]
d: [3, 4, 5, 6, 7, 8]
k: [3, 5]
t: [4, 5, 6]
s: 1
act_func: 'swish'
se: True
mb6:
c: [192, 200, 208, 216]
d: [3, 4, 5, 6, 7, 8]
k: [3, 5]
t: [6]
s: 2
act_func: 'swish'
se: True
mb7:
c: [216, 224]
d: [1, 2]
k: [3, 5]
t: [6]
s: 1
act_func: 'swish'
se: True
last_conv:
c: [1792, 1984]
act_func: 'swish'
'''
def images_list_to_grid_image(ims, if_rgba=False, if_draw_middle_line=False, if_draw_grid=False,
                              n_rows=None, n_cols=None):
    """Arrange a list of equally-sized PIL images into one grid image.

    Rows/cols default to an approximately square layout. Optionally draws a
    horizontal middle line and/or black separator lines between grid cells.
    """
    n_ims = len(ims)
    width, height = ims[0].size
    if n_rows is None:
        n_rows = math.floor(math.sqrt(n_ims))
    if n_cols is None:
        n_cols = int(math.ceil(n_ims / n_rows))
    mode = 'RGBA' if if_rgba else 'RGB'
    grid_im = Image.new(mode, (n_cols * width, n_rows * height))
    for idx, im in enumerate(ims):
        row, col = divmod(idx, n_cols)
        grid_im.paste(im, (col * width, row * height))
    if if_draw_middle_line or if_draw_grid:
        draw = ImageDraw.Draw(grid_im)
        if if_draw_middle_line:
            y_mid = height // 2 * n_rows - 1
            draw.line((0, y_mid, width * n_cols, y_mid), fill=(200, 100, 100, 255), width=1)
        if if_draw_grid:
            for row in range(1, n_rows):
                draw.line((0, height * row - 1, width * n_cols, height * row - 1), fill=(0, 0, 0, 255), width=5)
            for col in range(1, n_cols):
                draw.line((width * col - 1, 0, width * col - 1, height * n_rows), fill=(0, 0, 0, 255), width=5)
    return grid_im
class CsvLogger:
    """Append-only, space-separated logger for evaluation records.

    Writes a fixed header row on creation; the file is closed automatically
    at interpreter exit via atexit.
    """
    def __init__(self, path, name):
        Path(path).mkdir(exist_ok=True)
        self.full_path = os.path.join(path, name)
        self.columns = ['Evaluation', 'Time', 'Solution', 'Fitness']
        self.data = []
        self.f = open(self.full_path, 'w', buffering=100)
        self.f.write(' '.join(self.columns) + '\n')
        atexit.register(self.close_f)
    def log(self, values: List):
        """Append one space-separated row built from ``values``."""
        self.f.write(' '.join(map(str, values)) + '\n')
    def close_f(self):
        self.f.close()
def capture_subprocess_output(subprocess_args):
    """Run a subprocess, mirroring its output to stdout while also capturing it.

    Based on https://gist.github.com/nawatts/e2cdca610463200c12eac2a14efc0bfb
    Stdout and stderr are merged. Returns the full captured text.
    """
    # bufsize=1 means output is line buffered;
    # universal_newlines=True is required for line buffering.
    process = subprocess.Popen(subprocess_args,
                               bufsize=1,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.STDOUT,
                               universal_newlines=True,
                               # env=dict(os.environ, OMP_NUM_THREADS='9')
                               )
    buf = io.StringIO()
    def handle_output(stream, mask):
        # Because the process' output is line buffered, there's only ever one
        # line to read when this callback fires.
        line = stream.readline()
        buf.write(line)
        sys.stdout.write(line)
    # React to "available for read" events on the subprocess' stdout.
    selector = selectors.DefaultSelector()
    selector.register(process.stdout, selectors.EVENT_READ, handle_output)
    # Pump events until the subprocess terminates.
    while process.poll() is None:
        events = selector.select()
        for key, mask in events:
            callback = key.data
            callback(key.fileobj, mask)
    # Bug fix: the process can exit between select() calls with data still
    # sitting in the pipe; drain the remainder so trailing lines are not lost.
    for line in process.stdout:
        buf.write(line)
        sys.stdout.write(line)
    process.wait()
    selector.close()
    output = buf.getvalue()
    buf.close()
    return output
def set_seed(seed):
    """Seed the Python, NumPy and PyTorch (CPU + CUDA) RNGs for reproducibility."""
    print(f'Setting random seed to {seed}')
    for seeder in (random.seed, np.random.seed, torch.manual_seed, torch.cuda.manual_seed):
        seeder(seed)
def execute_func_for_all_runs_and_combine(experiment_name, func, func_combine=None, **kwargs):
    """Apply ``func`` to every <algo>/<run_idx> folder of an experiment.

    Results are collected as {algo_name: {run_idx: func_result}}. If
    ``func_combine`` is given, it aggregates the collected results and its
    return value is returned instead. ``target_algos``/``target_runs`` in
    kwargs restrict the scan (useful for debugging).
    """
    experiment_path = os.path.join(NAT_LOGS_PATH, experiment_name)
    algo_names = []
    algo_name_to_seed_to_result = defaultdict(dict)
    target_algos = kwargs.get('target_algos', None) # useful for debugging
    target_runs = kwargs.get('target_runs', None) # useful for debugging
    # print(f'{target_algos=}, {target_runs=}')
    # Algorithm folders are visited in reverse alphabetical order.
    for f in reversed(sorted(os.scandir(experiment_path), key=lambda e: e.name)):
        if not f.is_dir():
            continue
        name_cur = f.name
        if target_algos is not None and name_cur not in target_algos:
            continue
        algo_names.append(name_cur)
        for run_folder in os.scandir(f.path):
            if not run_folder.is_dir():
                continue
            # Run folders are named by their integer run index (seed offset).
            run_idx = int(run_folder.name)
            if target_runs is not None and run_idx not in target_runs:
                continue
            run_path = os.path.join(experiment_path, name_cur, str(run_idx))
            out = func(run_path, run_idx=run_idx, **kwargs)
            algo_name_to_seed_to_result[name_cur][run_idx] = out
    if func_combine:
        return func_combine(experiment_path, algo_name_to_seed_to_result, experiment_name=experiment_name, **kwargs)
    return algo_name_to_seed_to_result
def save_gz(path, data):
    """Save a numpy array to ``path`` as a gzip-compressed .npy stream."""
    with gzip.GzipFile(path, "w") as f:
        np.save(file=f, arr=data)
    print(f'{path} saved')
def _pil_interp(method):
    """Map an interpolation-method name to the PIL resampling constant.

    Unknown names fall back to bilinear (do we want to allow nearest?).
    """
    known = {
        'bicubic': Image.BICUBIC,
        'lanczos': Image.LANCZOS,
        'hamming': Image.HAMMING,
    }
    return known.get(method, Image.BILINEAR)
def show_im_from_torch_tensor(t):
    """Display a CHW torch tensor as an image, undoing CIFAR-style channel normalization."""
    std = np.array([0.24703233, 0.24348505, 0.26158768])
    mean = np.array([0.49139968, 0.48215827, 0.44653124])
    im = t.permute(1, 2, 0).numpy()
    plt.imshow(im * std + mean)
    plt.show()
def onehot(size, target):
    """Return a float32 one-hot vector of length ``size`` with a 1 at index ``target``."""
    encoded = torch.zeros(size, dtype=torch.float32)
    encoded[target] = 1.
    return encoded
def rand_bbox(W, H, lam):
    """Sample a random CutMix bounding box inside a W x H image.

    The box covers roughly a (1 - lam) fraction of the image area, is centered
    at a uniformly random point, and is clipped to the image borders.
    Returns (x1, y1, x2, y2).
    """
    cut_rat = np.sqrt(1. - lam)
    # Bug fix: np.int was removed in NumPy 1.24; builtin int truncates identically here.
    cut_w = int(W * cut_rat)
    cut_h = int(H * cut_rat)
    # uniform random center
    cx = np.random.randint(W)
    cy = np.random.randint(H)
    bbx1 = np.clip(cx - cut_w // 2, 0, W)
    bby1 = np.clip(cy - cut_h // 2, 0, H)
    bbx2 = np.clip(cx + cut_w // 2, 0, W)
    bby2 = np.clip(cy + cut_h // 2, 0, H)
    return bbx1, bby1, bbx2, bby2
def transform_supernet_name_swa(supernet_name_in, swa):
    """Return the checkpoint name of the SWA-averaged variant of a supernet.

    Unrecognized supernet names map to the sentinel 'noop'.
    """
    if supernet_name_in == 'alphanet_pretrained.pth.tar':
        return f'alphanet_pretrained_swa{swa}.pth.tar'
    if supernet_name_in == 'attentive_nas_pretrained.pth.tar':
        return f'attentive_nas_pretrained_swa{swa}.pth.tar'
    if 'supernet_w1' in supernet_name_in or 'ofa_proxyless' in supernet_name_in:
        return supernet_name_in + f'_swa{swa}'
    return 'noop'
class LatencyEstimator(object):
    """
    Look-up-table based per-layer latency predictor.

    Modified from https://github.com/mit-han-lab/proxylessnas/blob/
    f273683a77c4df082dd11cc963b07fc3613079a0/search/utils/latency_estimator.py#L29
    """
    def __init__(self, fname):
        # fname = download_url(url, overwrite=True)
        with open(fname, 'r') as fp:
            # Bug fix: yaml.safe_load accepts only the stream; passing a Loader
            # as a second positional argument raised TypeError.
            self.lut = yaml.safe_load(fp)
    @staticmethod
    def repr_shape(shape):
        """Render a shape as a LUT key fragment, e.g. [224, 224, 3] -> '224x224x3'."""
        if isinstance(shape, (list, tuple)):
            return 'x'.join(str(_) for _ in shape)
        elif isinstance(shape, str):
            return shape
        else:
            # Bug fix: the original *returned* the TypeError class instead of raising.
            raise TypeError(f'Unsupported shape representation: {shape!r}')
    def predict(self, ltype: str, _input, output, expand=None,
                kernel=None, stride=None, idskip=None, se=None):
        """
        :param ltype:
            Layer type must be one of the followings
                1. `first_conv`: The initial stem 3x3 conv with stride 2
                2. `final_expand_layer`: (Only for MobileNet-V3)
                    The upsample 1x1 conv that increases num_filters by 6 times + GAP.
                3. 'feature_mix_layer':
                    The upsample 1x1 conv that increase num_filters to num_features + torch.squeeze
                3. `classifier`: fully connected linear layer (num_features to num_classes)
                4. `MBConv`: MobileInvertedResidual
        :param _input: input shape (h, w, #channels)
        :param output: output shape (h, w, #channels)
        :param expand: expansion ratio
        :param kernel: kernel size
        :param stride:
        :param idskip: indicate whether has the residual connection
        :param se: indicate whether has squeeze-and-excitation
        """
        infos = [ltype, 'input:%s' % self.repr_shape(_input),
                 'output:%s' % self.repr_shape(output), ]
        if ltype in ('MBConv',):
            assert None not in (expand, kernel, stride, idskip, se)
            infos += ['expand:%d' % expand, 'kernel:%d' % kernel,
                      'stride:%d' % stride, 'idskip:%d' % idskip, 'se:%d' % se]
        # The LUT stores the mean measured latency under the assembled key.
        key = '-'.join(infos)
        return self.lut[key]['mean']
def parse_string_list(string):
    """Parse a bracketed, space-separated string like '[5 5 7]' into [5, 5, 7].

    Non-string inputs are passed through unchanged.
    """
    if not isinstance(string, str):
        return string
    return [int(tok) for tok in string[1:-1].split()]
def pad_none(x, depth, max_depth):
    """Expand per-block values ``x`` into fixed-size stage groups.

    For each stage depth ``d`` in ``depth``, takes the next ``d`` items of
    ``x`` and pads the group with None up to ``max_depth`` entries.
    """
    padded, pos = [], 0
    for d in depth:
        padded.extend(x[pos:pos + d])
        pos += d
        padded.extend([None] * (max_depth - d))
    return padded
def validate_config(config, max_depth=4):
    """Normalize an OFA architecture config.

    Parses stringified lists for 'ks'/'e'/'d' and pads the per-block kernel
    sizes and expansion ratios with None up to ``max_depth`` per stage.
    Optional keys ('r', 'position', 'threshold') are carried over; width 'w'
    defaults to 1.0 when absent.
    """
    kernel_size, exp_ratio, depth = config['ks'], config['e'], config['d']
    kernel_size = parse_string_list(kernel_size) if isinstance(kernel_size, str) else kernel_size
    exp_ratio = parse_string_list(exp_ratio) if isinstance(exp_ratio, str) else exp_ratio
    depth = parse_string_list(depth) if isinstance(depth, str) else depth
    assert isinstance(kernel_size, (list, int))
    assert isinstance(exp_ratio, (list, int))
    assert isinstance(depth, list)
    if len(kernel_size) < len(depth) * max_depth:
        kernel_size = pad_none(kernel_size, depth, max_depth)
    if len(exp_ratio) < len(depth) * max_depth:
        exp_ratio = pad_none(exp_ratio, depth, max_depth)
    res = {'ks': kernel_size, 'e': exp_ratio, 'd': depth}
    if 'r' in config:
        res['r'] = config['r']
    res['w'] = config.get('w', 1.0)
    for optional_key in ('position', 'threshold'):
        if optional_key in config:
            res[optional_key] = config[optional_key]
    return res
# NOTE: this guard sits mid-file: running utils.py as a script only performs
# the one-off ImageNetV2 folder fix and exits; the definitions below are
# unaffected when the module is imported.
if __name__ == '__main__':
    fix_folder_names_imagenetv2()
    sys.exit()
def get_net_info(net, data_shape, measure_latency=None, print_info=True, clean=False, lut=None,
                 if_dont_sum=False):
    """Compute params (M), FLOPs (M) and optionally latency for a net or an ensemble.

    ``net`` may be a single model or a list of models (ensemble/cascade). For a
    list, ``data_shape`` is (channels, [heights...], [widths...]) with one
    resolution per member. ``measure_latency`` is a '#'-separated list such as
    'gpu#cpu'; when ``lut`` has an entry for a latency type, the look-up-table
    predictor is used instead of real measurement. With ``if_dont_sum`` the
    per-member metrics are returned as lists instead of being summed.
    """
    def inner(net_cur, data_shape):
        # Stats for one model at one input shape.
        net_info = {}
        if isinstance(net_cur, torch.nn.DataParallel):
            net_cur = net_cur.module
        net_info['params'] = count_parameters(net_cur)
        net_info['flops'] = get_model_complexity_info(net_cur, (data_shape[0], data_shape[1], data_shape[2]),
                                                      print_per_layer_stat=False, as_strings=False, verbose=False)[0]
        latency_types = [] if measure_latency is None else measure_latency.split('#')
        for l_type in latency_types:
            if l_type == 'flops':
                continue # already calculated above
            if lut is not None and l_type in lut:
                # Predict from the look-up table instead of measuring.
                latency_estimator = LatencyEstimator(lut[l_type])
                latency = look_up_latency(net_cur, latency_estimator, data_shape[2])
                measured_latency = None
            else:
                latency, measured_latency = measure_net_latency(
                    net_cur, l_type, fast=False, input_shape=data_shape, clean=clean)
            net_info['%s latency' % l_type] = {'val': latency, 'hist': measured_latency}
        if print_info:
            print('Total training params: %.2fM' % (net_info['params'] / 1e6))
            print('Total FLOPs: %.2fM' % (net_info['flops'] / 1e6))
            for l_type in latency_types:
                print('Estimated %s latency: %.3fms' % (l_type, net_info['%s latency' % l_type]['val']))
        gpu_latency, cpu_latency = None, None
        for k in net_info.keys():
            if 'gpu' in k:
                gpu_latency = np.round(net_info[k]['val'], 2)
            if 'cpu' in k:
                cpu_latency = np.round(net_info[k]['val'], 2)
        # Params/FLOPs are reported in millions, rounded to 2 decimals.
        return {'params': np.round(net_info['params'] / 1e6, 2),
                'flops': np.round(net_info['flops'] / 1e6, 2),
                'gpu': gpu_latency, 'cpu': cpu_latency}
    if not isinstance(net, list): # if not an ensemble, just calculate it
        return inner(net, data_shape)
    # if an ensemble, need to sum properly
    data_shapes = [(data_shape[0], s1, s2) for s1, s2 in zip(data_shape[1], data_shape[2])]
    results = [inner(net_cur, d_s) for net_cur, d_s in zip(net, data_shapes)]
    res_final = {} # sum everything, keep None as None
    for k, v in results[0].items():
        if not if_dont_sum:
            # Sum the metric over members; if the first member's value is None
            # (e.g. latency not measured), the result stays None.
            res_final[k] = v
            for res_i in results[1:]:
                if v is None:
                    continue
                res_final[k] += res_i[k]
        else:
            # Keep per-member values as a list instead of summing.
            res_final[k] = [v]
            for res_i in results[1:]:
                if v is None:
                    continue
                res_final[k] += [res_i[k]]
    return res_final
class SupernetworkWrapper:
    """Convenience wrapper around a pretrained supernet.

    Loads the engine once (on CPU, without an optimizer) and extracts
    weight-sharing subnetworks for given architecture configs via ``sample``.
    Requires 'dataset' and 'search_space_name' in ``kwargs``.
    """
    def __init__(self,
                 n_classes=1000,
                 model_path='./data/ofa_mbv3_d234_e346_k357_w1.0',
                 engine_class_to_use=OFAMobileNetV3My, **kwargs):
        # Imported lazily to avoid a circular import (nat imports this module).
        from nat import NAT
        self.dataset_name = kwargs['dataset']
        self.search_space_name = kwargs['search_space_name']
        engine_lambda = NAT.make_lambda_for_engine_creation(engine_class_to_use, n_classes, False,
                                                            self.dataset_name, self.search_space_name)
        self.engine, _ = engine_lambda(model_path, None, to_cuda=False, if_create_optimizer=False)
    def sample(self, config):
        """Return (subnet, normalized_config) for the architecture in ``config``.

        The subnet keeps (shares) the supernet's trained weights.
        """
        if self.search_space_name == 'ofa':
            config = validate_config(config)
        self.engine.set_active_subnet(ks=config['ks'], e=config['e'], d=config['d'], w=config['w'])
        subnet = self.engine.get_active_subnet(preserve_weight=True)
        return subnet, config
| 23,267 | 34.577982 | 120 | py |
ENCAS | ENCAS-main/nat_run_many.py | import argparse
import glob
import os
from concurrent.futures.process import ProcessPoolExecutor
from pathlib import Path
import datetime
import torch
from matplotlib import pyplot as plt
import utils
from nat import default_kwargs, main
import yaml
from shutil import copy
import traceback
from concurrent.futures import ThreadPoolExecutor
import cv2
from plot_results.plotting_functions import compare_val_and_test
from after_search.average_weights import swa_for_whole_experiment
from after_search.evaluate_stored_outputs import evaluate_stored_whole_experiment
from after_search.store_outputs import store_cumulative_pareto_front_outputs
from after_search.symlink_imagenet import create_symlinks
# Disable OpenCV's internal threading: the worker processes / data loaders
# handle parallelism themselves, and nested threading causes oversubscription.
cv2.setNumThreads(0)
def create_all_run_kwargs(config_path):
    """Expand an experiment yml config into kwargs dicts, one per (algorithm, run).

    Merges the yml on top of ``default_kwargs``, creates the experiment
    directory tree (one subdir per algorithm variant, one per run), copies the
    config there, and assigns every run a unique consecutive random seed.
    Returns (experiment_kwargs, list_of_per_run_kwargs).
    """
    config_loaded = yaml.safe_load(open(config_path))
    experiment_name = config_loaded['experiment_name']
    print(experiment_name)
    experiment_kwargs = {k: v[0] for k, v in default_kwargs.items()} # don't need help string
    experiment_kwargs = dict(experiment_kwargs, **config_loaded)
    path_logs = experiment_kwargs['path_logs'] # '/export/scratch3/aleksand/nsganetv2/'
    Path(path_logs).mkdir(exist_ok=True)
    path = os.path.join(path_logs, experiment_name)
    experiment_kwargs['experiment_name'] = experiment_name
    Path(path).mkdir(exist_ok=True)
    copy(config_path, path)
    # Each entry of algo_mods_all overrides a subset of the experiment kwargs;
    # its folder name is built from the overridden key/value pairs.
    algo_mods_all = config_loaded['algo_mods_all']
    transform_str = lambda x: x if type(x) != str else x.replace("/", "_").replace(".", "_")
    algo_mods_names = [
        '!'.join(f'{k}:{transform_str(v)}' for k, v in algo_mods.items())
        for algo_mods in algo_mods_all
    ]
    algo_kwargs_all = [dict(experiment_kwargs, **algo_mods) for algo_mods in algo_mods_all]
    seed_offset = experiment_kwargs.get('seed_offset', 0) # wanna run the 10 runs on different machines
    cur_seed = experiment_kwargs['random_seed'] + seed_offset
    algo_run_kwargs_all = []
    for i_algo, algo_kwargs in enumerate(algo_kwargs_all):
        path_algo = os.path.join(path, algo_mods_names[i_algo])
        Path(path_algo).mkdir(exist_ok=True)
        n_runs = algo_kwargs['n_runs']
        for run in range(n_runs):
            algo_run_kwargs = algo_kwargs.copy() # because NAT pops the values, which breaks all the runs after the first
            path_algo_run = os.path.join(path_algo, f'{run + seed_offset}')
            algo_run_kwargs['experiment_name'] = path_algo_run
            algo_run_kwargs['random_seed'] = cur_seed
            algo_run_kwargs_all.append(algo_run_kwargs)
            cur_seed += 1
    return experiment_kwargs, algo_run_kwargs_all
def create_config_for_continuation(run_path, target_max_iter):
    """Prepare a config that resumes an interrupted NAT run.

    Returns None when the run already reached ``target_max_iter`` (nothing to
    do); returns the original config path when no iteration finished yet
    (nothing to resume from, restart from scratch); otherwise writes and
    returns a '_cont' config pointing at the last completed iteration's stats
    and supernet checkpoints.
    """
    config_path = os.path.join(run_path, 'config_msunas.yml')
    with open(config_path, 'r') as f:
        config = yaml.safe_load(f)
    exp_name = config['experiment_name']
    n_iters = len(glob.glob(os.path.join(run_path, "iter_*.stats")))
    if n_iters == 0:
        # Bug fix: the original fell through with ``last_iter`` undefined
        # (NameError). With no finished iteration, restart from the original config.
        return config_path
    last_iter = n_iters - 1
    if last_iter == target_max_iter:
        return None  # the run is already complete
    config['resume'] = os.path.join(exp_name, f'iter_{last_iter}.stats')
    # Point the supernet checkpoints at the ones saved in the last iteration.
    config['supernet_path'] = [os.path.join(exp_name, f'iter_{last_iter}', os.path.basename(p))
                               for p in config['supernet_path']]
    config_path_new = os.path.join(run_path, 'config_msunas_cont.yml')
    with open(config_path_new, 'w') as f:
        yaml.dump(config, f)
    return config_path_new
def create_all_run_continue_kwargs(config_path):
    """Like create_all_run_kwargs, but for resuming an existing experiment.

    Scans the experiment directory tree (<experiment>/<algo>/<run>) and builds
    per-run kwargs only for runs that have not reached the target iteration
    count yet (see create_config_for_continuation).
    Returns (experiment_kwargs, list_of_per_run_kwargs).
    """
    config_loaded = yaml.safe_load(open(config_path))
    experiment_name = config_loaded['experiment_name']
    print(experiment_name)
    experiment_kwargs = {k: v[0] for k, v in default_kwargs.items()} # don't need help string
    experiment_kwargs = dict(experiment_kwargs, **config_loaded)
    path_logs = experiment_kwargs['path_logs'] # '/export/scratch3/aleksand/nsganetv2/'
    experiment_path = os.path.join(path_logs, experiment_name)
    experiment_kwargs['experiment_name'] = experiment_name
    algo_run_kwargs_all = []
    for f in sorted(os.scandir(experiment_path), key=lambda e: e.name):
        if not f.is_dir():
            continue
        name_cur = f.name
        for run_folder in sorted(os.scandir(f.path), key=lambda e: e.name):
            if not run_folder.is_dir():
                continue
            run_idx = run_folder.name  # NOTE(review): unused below — presumably kept for symmetry
            run_path = os.path.join(experiment_path, name_cur, run_folder.name)
            config_path_cur = create_config_for_continuation(run_path, experiment_kwargs['iterations'])
            # None means the run already finished and needs no continuation.
            if config_path_cur is not None:
                algo_run_kwargs_all.append(yaml.safe_load(open(config_path_cur, 'r')))
    return experiment_kwargs, algo_run_kwargs_all
def execute_run(algo_run_kwargs):
    """Run a single NAT search, logging (but not propagating) any exception
    so one failed run does not kill the whole worker pool."""
    try:
        main(algo_run_kwargs)
    except Exception as exc:
        print(traceback.format_exc())
        print(exc)
def init_worker(zeroeth_gpu):
    """Pool initializer: make only the chosen GPU visible inside this worker process."""
    gpu_str = f'{zeroeth_gpu}'
    os.environ["CUDA_VISIBLE_DEVICES"] = gpu_str
    print('cuda = ', gpu_str)
def run_kwargs_many(experiment_kwargs, algo_run_kwargs_all):
    """Execute all runs of an experiment in a worker pool (one worker per GPU)."""
    zeroeth_gpu = experiment_kwargs['zeroeth_gpu']
    executor_class = ProcessPoolExecutor
    if experiment_kwargs['if_debug_run']:
        executor_class = ThreadPoolExecutor  # it's easier to debug with threads
    # Each worker pins itself to a GPU via init_worker(zeroeth_gpu).
    with executor_class(max_workers=experiment_kwargs['n_gpus'], initializer=init_worker,
                        initargs=(zeroeth_gpu,)) as executor:
        print(algo_run_kwargs_all)
        # execute_run swallows exceptions, so one failing run cannot kill the pool.
        futures = [executor.submit(execute_run, kwargs) for kwargs in algo_run_kwargs_all]
        for f in futures:
            f.result()  # wait on everything
    print(datetime.datetime.now())
def store(store_kwargs):
    """Store cumulative Pareto-front subnetwork outputs for one dataset split.

    Thin picklable wrapper so the call can be submitted to a process pool.
    """
    print(f'{store_kwargs=}')
    exp_name = store_kwargs['exp_name']
    dataset_type = store_kwargs['dataset_type']
    store_cumulative_pareto_front_outputs(
        exp_name,
        dataset_type,
        max_iter=store_kwargs['max_iter'],
        swa=store_kwargs['swa'],
        target_runs=store_kwargs['target_runs'],
    )
if __name__ == '__main__':
    # 'spawn' is required because CUDA state cannot be forked into workers.
    torch.multiprocessing.set_start_method('spawn')
    p = argparse.ArgumentParser()
    p.add_argument(f'--config', default='configs_nat/cifar100_q0_ofa10_sep_DEBUG.yml', type=str)
    p.add_argument(f'--continue', default=False, action='store_true')
    cfgs = vars(p.parse_args())
    config_path = cfgs['config']
    # 1. run NAT
    if not cfgs['continue']:
        experiment_kwargs, algo_run_kwargs_all = create_all_run_kwargs(config_path)
    else:
        experiment_kwargs, algo_run_kwargs_all = create_all_run_continue_kwargs(config_path)
    run_kwargs_many(experiment_kwargs, algo_run_kwargs_all)
    # 2 (optional). do SWA, store subnetwork outputs, compare validation & test
    plt.rcParams.update({'font.size': 14})
    plt.rcParams['axes.grid'] = True
    exp_name = experiment_kwargs['experiment_name']
    max_iter = experiment_kwargs['iterations']
    if_store = experiment_kwargs['if_store']
    dataset = experiment_kwargs['dataset']
    os.environ["CUDA_VISIBLE_DEVICES"] = f'{experiment_kwargs["zeroeth_gpu"]}'
    swa = experiment_kwargs.get('post_swa', None)
    if len(algo_run_kwargs_all) > 0:  # == 0 can occur with 'continue'
        # Run index is encoded as the last character of each run's experiment name.
        target_runs = [int(x['experiment_name'][-1]) for x in algo_run_kwargs_all]
    else:
        target_runs = list(range(experiment_kwargs['seed_offset'],
                                 experiment_kwargs['seed_offset'] + experiment_kwargs['n_runs']))
    if dataset == 'imagenet':
        # for imagenet weights are not trained => stored only once, but my code needs a supernet-per-metaiteration
        # => symlink
        utils.execute_func_for_all_runs_and_combine(exp_name, create_symlinks, target_runs=target_runs)
    if swa is not None:
        # Average the weights of the last `swa` meta-iterations per supernet.
        for supernet in experiment_kwargs['supernet_path']:
            swa_for_whole_experiment(exp_name, range(max_iter + 1 - swa, max_iter + 1),
                                     os.path.basename(supernet), target_runs=target_runs)
        if not if_store:
            compare_val_and_test(exp_name, f'test_swa{swa}', swa=swa, max_iter=max_iter, target_runs=target_runs)
    if if_store:
        zeroeth_gpu = experiment_kwargs['zeroeth_gpu']
        kwargs_for_store = [
            dict(exp_name=exp_name, dataset_type='val', max_iter=max_iter, swa=swa, target_runs=target_runs),
            dict(exp_name=exp_name, dataset_type='test', max_iter=max_iter, swa=swa, target_runs=target_runs)
        ]
        # NOTE(review): looks like a single worker is used deliberately here
        # (both store jobs target the same GPU) — confirm before raising it.
        n_workers = 1
        with ProcessPoolExecutor(max_workers=n_workers, initializer=init_worker,
                                 initargs=(zeroeth_gpu,)) as executor:
            futures = [executor.submit(store, kwargs) for kwargs in kwargs_for_store]
            for f in futures:
                f.result()  # wait on everything
        test_name = 'test' if swa is None else f'test_swa{swa}'
        dataset_to_label_path = {'cifar100': 'labels_cifar100_test.npy', 'cifar10': 'labels_cifar10_test.npy',
                                 'imagenet': 'labels_imagenet_test.npy'}
        evaluate_stored_whole_experiment(exp_name, test_name, dataset_to_label_path[dataset],
                                         max_iter=max_iter, target_runs=target_runs)
        compare_val_and_test(exp_name, test_name, max_iter=max_iter, target_runs=target_runs)
    else:
        if swa is None:
            compare_val_and_test(exp_name, f'test', max_iter=max_iter, target_runs=target_runs)
| 9,599 | 41.105263 | 122 | py |
ENCAS | ENCAS-main/dynamic_resolution_collator.py | import random
import copy
import ctypes
import torch
import multiprocessing as mp
import numpy as np
from torchvision import transforms
from utils import onehot, rand_bbox, show_im_from_torch_tensor
class DynamicResolutionCollator:
    """Batch collator that resizes every batch to a resolution sampled from a
    shared, mutable list of resolutions, optionally applying CutMix where the
    mixed-in images come from the previous batch.

    The resolutions live in a ``multiprocessing.Array`` so they can be changed
    by the parent process after dataloader workers have been spawned.
    """

    def __init__(self, n_resolutions_max, if_return_target_idx=True, if_cutmix=False, cutmix_kwargs=None):
        # Shared across processes; only the first n_resolutions_to_use slots are valid.
        self.resolutions = mp.Array(ctypes.c_int, n_resolutions_max)
        self.n_resolutions_to_use = n_resolutions_max
        self.n_resolutions_max = n_resolutions_max
        self.resize_dict = {}  # cache: resolution -> instantiated resize transform
        self.if_return_target_idx = if_return_target_idx
        self.if_cutmix = if_cutmix
        self.prev_batch_for_cutmix = None  # un-mixed copy of the previous batch
        self.cutmix_kwargs = cutmix_kwargs  # expects keys: n_classes, prob, beta

    def set_info_for_transforms(self, resize_class_lambda, transforms_after_resize, transforms_pre_resize=[]):
        # this MUST be called before the dataloaders are actually used!
        # I would've put it in __init__, but I need to create collators before creating the dataprovider,
        # and these values are created only during creation of the dataprovider
        self.resize_class_lambda = resize_class_lambda
        self.transforms_after_resize = transforms_after_resize
        self.transforms_pre_resize = transforms_pre_resize

    def set_resolutions(self, resolutions):
        """Overwrite the set of resolutions future batches are sampled from."""
        self.n_resolutions_to_use = len(resolutions)
        if self.n_resolutions_to_use > self.n_resolutions_max:
            raise ValueError('self.n_resolutions_to_use > self.n_resolutions_max')
        for i in range(self.n_resolutions_to_use):
            cur_res = resolutions[i]
            self.resolutions[i] = cur_res

    def __call__(self, batch):
        # don't need sync 'cause don't need to change the array of resolutions
        target_idx = np.random.choice(self.n_resolutions_to_use)
        target_res = self.resolutions[target_idx]
        # Lazily instantiate & cache one resize transform per resolution.
        if target_res not in self.resize_dict:
            self.resize_dict[target_res] = self.resize_class_lambda(target_res)
        cur_resize_op = self.resize_dict[target_res]
        transforms_composed = transforms.Compose(self.transforms_pre_resize + [cur_resize_op] + self.transforms_after_resize)
        imgs = [transforms_composed(img_n_label[0]) for img_n_label in batch]
        label = [img_n_label[1] for img_n_label in batch]
        if self.if_cutmix:
            # Keep an un-mixed copy: the *next* batch mixes images from this one.
            cur_batch_before_cutmix = list(zip(copy.deepcopy(imgs), copy.deepcopy(label)))
            if self.prev_batch_for_cutmix is None:  # this is the first batch
                self.prev_batch_for_cutmix = cur_batch_before_cutmix

            def cutmix(img, lbl):
                args = self.cutmix_kwargs
                lbl_onehot = onehot(args['n_classes'], lbl)
                # With probability (1 - prob) leave the image untouched.
                if np.random.rand(1) > args['prob']:
                    return img, lbl_onehot
                rand_index = random.choice(range(len(self.prev_batch_for_cutmix)))
                img2, lbl2 = self.prev_batch_for_cutmix[rand_index]
                lbl2_onehot = onehot(args['n_classes'], lbl2)
                lam = np.random.beta(args['beta'], args['beta'])
                W, H = img.shape[-2:]
                W2, H2 = img2.shape[-2:]
                # my batches have different spatial sizes - that's the whole point of this collator!
                W, H = min(W, W2), min(H, H2)
                bbx1, bby1, bbx2, bby2 = rand_bbox(W, H, lam)
                img[:, bbx1:bbx2, bby1:bby2] = img2[:, bbx1:bbx2, bby1:bby2]
                # adjust lambda to exactly match pixel ratio
                lam = 1 - ((bbx2 - bbx1) * (bby2 - bby1) / (W * H))
                lbl_onehot = lbl_onehot * lam + lbl2_onehot * (1. - lam)
                return img, lbl_onehot

            img_n_label_cutmix = [cutmix(im, lbl) for im, lbl in zip(imgs, label)]
            imgs = [img_n_label[0] for img_n_label in img_n_label_cutmix]
            label = [img_n_label[1] for img_n_label in img_n_label_cutmix]
            self.prev_batch_for_cutmix = cur_batch_before_cutmix
        imgs = torch.stack(imgs)
        # Hard int labels -> LongTensor; soft (one-hot/mixed) labels -> stacked floats.
        if type(label[0]) is int:
            label = torch.LongTensor(label)
        else:
            label = torch.stack(label)
        to_return = (imgs, label)
        if self.if_return_target_idx:
            to_return += (target_idx,)
        return to_return
ENCAS | ENCAS-main/utils_train.py | import random
import numpy as np
import torch
from torch.nn.modules.module import Module
# implementation of CutMixCrossEntropyLoss taken from https://github.com/ildoonet/cutmix
class CutMixCrossEntropyLoss(Module):
    """Cross entropy that accepts both hard integer labels and soft
    (one-hot / CutMix-mixed) label distributions."""

    def __init__(self, size_average=True):
        super().__init__()
        self.size_average = size_average

    def forward(self, input, target):
        # A 1-D target holds class indices; expand it to one-hot so that the
        # soft-target cross entropy below covers both cases.
        if target.dim() == 1:
            target = torch.nn.functional.one_hot(target, num_classes=input.size(-1))
        return cross_entropy(input, target.float().cuda(), self.size_average)
def cross_entropy(input, target, size_average=True):
    """Cross entropy that accepts soft targets.

    Args:
        input: raw (unnormalized) logits of shape ``(batch, n_classes)``.
        target: target distribution of the same shape; may be soft
            (e.g. produced by label smoothing or CutMix mixing).
        size_average: if False, the per-sample losses are summed instead of
            averaged.

    Examples::

        input = torch.tensor([[1.1, 2.8, 1.3], [1.1, 2.1, 4.8]], requires_grad=True)
        target = torch.tensor([[0.05, 0.9, 0.05], [0.05, 0.05, 0.9]])
        loss = cross_entropy(input, target)
        loss.backward()
    """
    # Use the functional form instead of building a LogSoftmax module per call.
    logprobs = torch.nn.functional.log_softmax(input, dim=1)
    per_sample = torch.sum(-target * logprobs, dim=1)  # -sum(q * log p) per row
    if size_average:
        return torch.mean(per_sample)
    return torch.sum(per_sample)
def init_dataloader_worker_state(worker_id):
    """Seed torch, CUDA, ``random`` and numpy in a dataloader worker with a
    seed derived from the dataloader base seed and the worker id."""
    derived_seed = (torch.initial_seed() + worker_id) % 2 ** 32
    print(f'Init dataloader seed: {derived_seed}')
    torch.manual_seed(derived_seed)
    torch.cuda.manual_seed(derived_seed)
    random.seed(derived_seed)
    return np.random.seed(derived_seed)
def create_resize_class_lambda_train(resize_transform_class, image_size, **kwargs):
    """Instantiate a resize transform with a square target size.

    Defined at module level because pickling (needed for multiprocessing
    dataloaders) can't handle lambdas.
    """
    target_size = (image_size, image_size)
    return resize_transform_class(target_size, **kwargs)
class Cutout(object):
    """Randomly zero out one square patch of a (C, H, W) image tensor.

    The patch has side ``length`` and is centred at a uniformly random pixel;
    parts falling outside the image are clipped away. With ``length == 0`` the
    input is returned untouched. The tensor is modified in place.
    """

    def __init__(self, length):
        self.length = length

    def __call__(self, img):
        if self.length == 0:
            return img
        height, width = img.size(1), img.size(2)
        mask = np.ones((height, width), np.float32)
        # Same RNG call order as the canonical implementation: row, then column.
        center_y = np.random.randint(height)
        center_x = np.random.randint(width)
        half = self.length // 2
        top = np.clip(center_y - half, 0, height)
        bottom = np.clip(center_y + half, 0, height)
        left = np.clip(center_x - half, 0, width)
        right = np.clip(center_x + half, 0, width)
        mask[top: bottom, left: right] = 0.
        patch_mask = torch.from_numpy(mask).expand_as(img)
        img *= patch_mask
        return img
class LabelSmoothing(torch.nn.Module):
    """NLL loss with label smoothing (uniform prior over the classes)."""

    def __init__(self, smoothing=0.0):
        """Constructor for the LabelSmoothing module.

        :param smoothing: label smoothing factor
        """
        super().__init__()
        self.smoothing = smoothing
        self.confidence = 1.0 - smoothing

    def forward(self, x, target):
        log_probs = torch.nn.functional.log_softmax(x, dim=-1)
        # Negative log-likelihood of the true class for each sample.
        true_logprob = log_probs.gather(dim=-1, index=target.unsqueeze(1)).squeeze(1)
        nll_term = -true_logprob
        # Uniform-prior term: mean negative log-prob over all classes.
        uniform_term = -log_probs.mean(dim=-1)
        combined = self.confidence * nll_term + self.smoothing * uniform_term
        return combined.mean()
def albumentation_to_torch_transform(albumentation_f, image):
    """Apply an albumentations-style transform (HWC numpy in/out, dict API)
    to a CHW torch tensor and return a CHW tensor."""
    hwc_numpy = image.permute(1, 2, 0).numpy()
    transformed = albumentation_f(image=hwc_numpy)['image']
    return torch.tensor(transformed).permute(2, 0, 1)
ENCAS | ENCAS-main/networks/attentive_nas_dynamic_model.py | # taken from https://github.com/facebookresearch/AttentiveNAS
# Difference: images not resized in forward, but beforehand, in the collator (which is faster)
import copy
import random
import collections
import math
import torch
import torch.nn as nn
from torch.utils.checkpoint import checkpoint_sequential, checkpoint
from .modules_alphanet.dynamic_layers import DynamicMBConvLayer, DynamicConvBnActLayer, DynamicLinearLayer, DynamicShortcutLayer
from .modules_alphanet.static_layers import MobileInvertedResidualBlock
from .modules_alphanet.nn_utils import make_divisible, int2list
from .modules_alphanet.nn_base import MyNetwork
from .attentive_nas_static_model import AttentiveNasStaticModel
from utils import alphanet_config_str, RecursiveNamespace
import yaml
class AttentiveNasDynamicModel(MyNetwork):
    """AttentiveNAS/AlphaNet supernet: a dynamic MobileNet-style network from
    which subnetworks (varying width / depth / kernel size / expand ratio) can
    be activated in place, sampled, mutated, crossed over, and extracted as
    standalone static models."""

    def __init__(self, n_classes=1000, bn_param=(0., 1e-5), if_use_gradient_checkpointing=False, n_image_channels=3, **kwargs):
        super(AttentiveNasDynamicModel, self).__init__()

        self.supernet = RecursiveNamespace(**yaml.safe_load(alphanet_config_str))  # a config of the supernet space
        self.n_classes = n_classes
        self.use_v3_head = getattr(self.supernet, 'use_v3_head', False)
        self.stage_names = ['first_conv', 'mb1', 'mb2', 'mb3', 'mb4', 'mb5', 'mb6', 'mb7', 'last_conv']
        self.n_image_channels = n_image_channels

        # Collect per-stage search-space candidates from the config.
        self.width_list, self.depth_list, self.ks_list, self.expand_ratio_list = [], [], [], []
        for name in self.stage_names:
            block_cfg = getattr(self.supernet, name)
            self.width_list.append(block_cfg.c)
            if name.startswith('mb'):
                self.depth_list.append(block_cfg.d)
                self.ks_list.append(block_cfg.k)
                self.expand_ratio_list.append(block_cfg.t)
        self.resolution_list = self.supernet.resolutions

        self.cfg_candidates = {
            'resolution': self.resolution_list,
            'width': self.width_list,
            'depth': self.depth_list,
            'kernel_size': self.ks_list,
            'expand_ratio': self.expand_ratio_list
        }

        # first conv layer, including conv, bn, act
        out_channel_list, act_func, stride = \
            self.supernet.first_conv.c, self.supernet.first_conv.act_func, self.supernet.first_conv.s
        self.first_conv = DynamicConvBnActLayer(
            in_channel_list=int2list(self.n_image_channels), out_channel_list=out_channel_list,
            kernel_size=3, stride=stride, act_func=act_func,
        )

        # inverted residual blocks
        self.block_group_info = []
        blocks = []
        _block_index = 0
        feature_dim = out_channel_list
        for stage_id, key in enumerate(self.stage_names[1:-1]):
            block_cfg = getattr(self.supernet, key)
            width = block_cfg.c
            n_block = max(block_cfg.d)  # build the deepest variant; runtime_depth trims it
            act_func = block_cfg.act_func
            ks = block_cfg.k
            expand_ratio_list = block_cfg.t
            use_se = block_cfg.se

            self.block_group_info.append([_block_index + i for i in range(n_block)])
            _block_index += n_block

            output_channel = width
            for i in range(n_block):
                stride = block_cfg.s if i == 0 else 1
                # For the (strided) first block of a stage keep only expand ratios >= 4.
                if min(expand_ratio_list) >= 4:
                    expand_ratio_list = [_s for _s in expand_ratio_list if _s >= 4] if i == 0 else expand_ratio_list
                mobile_inverted_conv = DynamicMBConvLayer(
                    in_channel_list=feature_dim,
                    out_channel_list=output_channel,
                    kernel_size_list=ks,
                    expand_ratio_list=expand_ratio_list,
                    stride=stride,
                    act_func=act_func,
                    use_se=use_se,
                    channels_per_group=getattr(self.supernet, 'channels_per_group', 1)
                )
                shortcut = DynamicShortcutLayer(feature_dim, output_channel, reduction=stride)
                blocks.append(MobileInvertedResidualBlock(mobile_inverted_conv, shortcut))
                feature_dim = output_channel
        self.blocks = nn.ModuleList(blocks)

        last_channel, act_func = self.supernet.last_conv.c, self.supernet.last_conv.act_func
        if not self.use_v3_head:
            self.last_conv = DynamicConvBnActLayer(
                in_channel_list=feature_dim, out_channel_list=last_channel,
                kernel_size=1, act_func=act_func,
            )
        else:
            expand_feature_dim = [f_dim * 6 for f_dim in feature_dim]
            self.last_conv = nn.Sequential(collections.OrderedDict([
                ('final_expand_layer', DynamicConvBnActLayer(
                    feature_dim, expand_feature_dim, kernel_size=1, use_bn=True, act_func=act_func)
                ),
                ('pool', nn.AdaptiveAvgPool2d((1, 1))),
                ('feature_mix_layer', DynamicConvBnActLayer(
                    in_channel_list=expand_feature_dim, out_channel_list=last_channel,
                    kernel_size=1, act_func=act_func, use_bn=False,)
                ),
            ]))

        # final conv layer
        self.classifier = DynamicLinearLayer(
            in_features_list=last_channel, out_features=n_classes, bias=True
        )

        # set bn param
        self.set_bn_param(momentum=bn_param[0], eps=bn_param[1])

        # runtime_depth
        self.runtime_depth = [len(block_idx) for block_idx in self.block_group_info]
        self.zero_residual_block_bn_weights()

        self.active_dropout_rate = 0
        self.active_drop_connect_rate = 0
        # self.active_resolution = 224
        self.width_mult = [None]  # for compatibility with Ofa supernet
        self.if_use_gradient_checkpointing = if_use_gradient_checkpointing  # True# if_use_gradient_checkpointing

    """ set, sample and get active sub-networks """ #

    def set_active_subnet(self, ks=None, e=None, d=None, w=None, **kwargs):
        """Activate a subnetwork in place (resolution is handled outside the model).

        NOTE(review): sampling/mutation/crossover methods below still call this
        with 5 positional args including resolution — confirm those call sites;
        as written they would raise TypeError.
        """
        width, depth, kernel_size, expand_ratio = w, d, ks, e
        assert len(depth) == len(kernel_size) == len(expand_ratio) == len(width) - 2
        # set resolution
        # self.active_resolution = resolution
        # first conv
        self.first_conv.active_out_channel = width[0]
        for stage_id, (c, k, e, d) in enumerate(zip(width[1:-1], kernel_size, expand_ratio, depth)):
            start_idx, end_idx = min(self.block_group_info[stage_id]), max(self.block_group_info[stage_id])
            for block_id in range(start_idx, start_idx + d):
                block = self.blocks[block_id]
                # block output channels
                block.mobile_inverted_conv.active_out_channel = c
                if block.shortcut is not None:
                    block.shortcut.active_out_channel = c
                # dw kernel size
                block.mobile_inverted_conv.active_kernel_size = k
                # dw expansion ration
                block.mobile_inverted_conv.active_expand_ratio = e
        # IRBlocks repated times
        for i, d in enumerate(depth):
            self.runtime_depth[i] = min(len(self.block_group_info[i]), d)
        # last conv
        if not self.use_v3_head:
            self.last_conv.active_out_channel = width[-1]
        else:
            # default expansion ratio: 6
            self.last_conv.final_expand_layer.active_out_channel = width[-2] * 6
            self.last_conv.feature_mix_layer.active_out_channel = width[-1]

    def get_active_subnet(self, preserve_weight=True):
        """Extract the currently-active subnetwork as a static model
        (optionally copying the supernet weights)."""
        with torch.no_grad():
            first_conv = self.first_conv.get_active_subnet(3, preserve_weight)
            blocks = []
            input_channel = first_conv.out_channels
            # blocks
            for stage_id, block_idx in enumerate(self.block_group_info):
                depth = self.runtime_depth[stage_id]
                active_idx = block_idx[:depth]
                stage_blocks = []
                for idx in active_idx:
                    stage_blocks.append(MobileInvertedResidualBlock(
                        self.blocks[idx].mobile_inverted_conv.get_active_subnet(input_channel, preserve_weight),
                        self.blocks[idx].shortcut.get_active_subnet(input_channel, preserve_weight) if self.blocks[
                            idx].shortcut is not None else None
                    ))
                    input_channel = stage_blocks[-1].mobile_inverted_conv.out_channels
                blocks += stage_blocks

            if not self.use_v3_head:
                last_conv = self.last_conv.get_active_subnet(input_channel, preserve_weight)
                in_features = last_conv.out_channels
            else:
                final_expand_layer = self.last_conv.final_expand_layer.get_active_subnet(input_channel, preserve_weight)
                feature_mix_layer = self.last_conv.feature_mix_layer.get_active_subnet(input_channel * 6,
                                                                                      preserve_weight)
                in_features = feature_mix_layer.out_channels
                last_conv = nn.Sequential(
                    final_expand_layer,
                    nn.AdaptiveAvgPool2d((1, 1)),
                    feature_mix_layer
                )

            classifier = self.classifier.get_active_subnet(in_features, preserve_weight)

            _subnet = AttentiveNasStaticModel(
                first_conv, blocks, last_conv, classifier, use_v3_head=self.use_v3_head
            )
            _subnet.set_bn_param(**self.get_bn_param())
            return _subnet

    def zero_residual_block_bn_weights(self):
        # Zero-init the last BN gamma of each residual block so every block
        # starts as (near) identity.
        with torch.no_grad():
            for m in self.modules():
                if isinstance(m, MobileInvertedResidualBlock):
                    if isinstance(m.mobile_inverted_conv, DynamicMBConvLayer) and m.shortcut is not None:
                        m.mobile_inverted_conv.point_linear.bn.bn.weight.zero_()

    @staticmethod
    def name():
        return 'AttentiveNasModel'

    def forward(self, x):
        # Gradient checkpointing trades compute for memory, training only.
        if not (self.if_use_gradient_checkpointing and self.training):
            # first conv
            x = self.first_conv(x)
            # blocks
            for stage_id, block_idx in enumerate(self.block_group_info):
                depth = self.runtime_depth[stage_id]
                active_idx = block_idx[:depth]
                for idx in active_idx:
                    x = self.blocks[idx](x)
        else:
            x = self.first_conv(x)
            blocks_to_run = []
            for stage_id, block_idx in enumerate(self.block_group_info):
                depth = self.runtime_depth[stage_id]
                active_idx = block_idx[:depth]
                for idx in active_idx:
                    blocks_to_run.append(self.blocks[idx])
            x = checkpoint_sequential(blocks_to_run, 7, x)  # before clean-up it used to be 6
        x = self.last_conv(x)
        x = x.mean(3, keepdim=True).mean(2, keepdim=True)  # global average pooling
        # NOTE(review): squeeze() drops the batch dim too when batch size is 1 — confirm callers never pass a single-sample batch.
        x = torch.squeeze(x)
        if self.active_dropout_rate > 0 and self.training:
            x = torch.nn.functional.dropout(x, p=self.active_dropout_rate)
        x = self.classifier(x)
        return x

    @property
    def module_str(self):
        """Human-readable description of the currently-active subnetwork."""
        _str = self.first_conv.module_str + '\n'
        _str += self.blocks[0].module_str + '\n'
        for stage_id, block_idx in enumerate(self.block_group_info):
            depth = self.runtime_depth[stage_id]
            active_idx = block_idx[:depth]
            for idx in active_idx:
                _str += self.blocks[idx].module_str + '\n'
        if not self.use_v3_head:
            _str += self.last_conv.module_str + '\n'
        else:
            _str += self.last_conv.final_expand_layer.module_str + '\n'
            _str += self.last_conv.feature_mix_layer.module_str + '\n'
        _str += self.classifier.module_str + '\n'
        return _str

    @property
    def config(self):
        return {
            'name': AttentiveNasDynamicModel.__name__,
            'bn': self.get_bn_param(),
            'first_conv': self.first_conv.config,
            'blocks': [
                block.config for block in self.blocks
            ],
            'last_conv': self.last_conv.config if not self.use_v3_head else None,
            'final_expand_layer': self.last_conv.final_expand_layer if self.use_v3_head else None,
            'feature_mix_layer': self.last_conv.feature_mix_layer if self.use_v3_head else None,
            'classifier': self.classifier.config,
            # 'resolution': self.active_resolution
        }

    @staticmethod
    def build_from_config(config):
        raise ValueError('do not support this function')

    def get_active_subnet_settings(self):
        """Return the architecture descriptor of the currently-active subnetwork."""
        # r = self.active_resolution
        width, depth, kernel_size, expand_ratio = [], [], [], []

        # first conv
        width.append(self.first_conv.active_out_channel)
        for stage_id in range(len(self.block_group_info)):
            start_idx = min(self.block_group_info[stage_id])
            block = self.blocks[start_idx]  # first block
            width.append(block.mobile_inverted_conv.active_out_channel)
            kernel_size.append(block.mobile_inverted_conv.active_kernel_size)
            expand_ratio.append(block.mobile_inverted_conv.active_expand_ratio)
            depth.append(self.runtime_depth[stage_id])

        if not self.use_v3_head:
            width.append(self.last_conv.active_out_channel)
        else:
            width.append(self.last_conv.feature_mix_layer.active_out_channel)
        return {
            # 'resolution': r,
            'width': width,
            'kernel_size': kernel_size,
            'expand_ratio': expand_ratio,
            'depth': depth,
        }

    def set_dropout_rate(self, dropout=0, drop_connect=0, drop_connect_only_last_two_stages=True):
        self.active_dropout_rate = dropout
        for idx, block in enumerate(self.blocks):
            if drop_connect_only_last_two_stages:
                if idx not in self.block_group_info[-1] + self.block_group_info[-2]:
                    continue
            # Drop-connect rate scales linearly with block depth.
            this_drop_connect_rate = drop_connect * float(idx) / len(self.blocks)
            block.drop_connect_rate = this_drop_connect_rate

    def sample_min_subnet(self):
        return self._sample_active_subnet(min_net=True)

    def sample_max_subnet(self):
        return self._sample_active_subnet(max_net=True)

    def sample_active_subnet(self, compute_flops=False):
        cfg = self._sample_active_subnet(
            False, False
        )
        if compute_flops:
            cfg['flops'] = self.compute_active_subnet_flops()
        return cfg

    def sample_active_subnet_within_range(self, targeted_min_flops, targeted_max_flops):
        # Rejection-sample until the FLOPs budget is met.
        while True:
            cfg = self._sample_active_subnet()
            cfg['flops'] = self.compute_active_subnet_flops()
            if cfg['flops'] >= targeted_min_flops and cfg['flops'] <= targeted_max_flops:
                return cfg

    def _sample_active_subnet(self, min_net=False, max_net=False):
        """Sample (or take the min/max of) each search dimension and activate it."""
        sample_cfg = lambda candidates, sample_min, sample_max: \
            min(candidates) if sample_min else (max(candidates) if sample_max else random.choice(candidates))

        cfg = {}
        # sample a resolution
        cfg['resolution'] = sample_cfg(self.cfg_candidates['resolution'], min_net, max_net)
        for k in ['width', 'depth', 'kernel_size', 'expand_ratio']:
            cfg[k] = []
            for vv in self.cfg_candidates[k]:
                cfg[k].append(sample_cfg(int2list(vv), min_net, max_net))

        # NOTE(review): set_active_subnet takes (ks, e, d, w) — this 5-positional-arg
        # call (with resolution first) does not match it; confirm intended usage.
        self.set_active_subnet(
            cfg['resolution'], cfg['width'], cfg['depth'], cfg['kernel_size'], cfg['expand_ratio']
        )
        return cfg

    def mutate_and_reset(self, cfg, prob=0.1, keep_resolution=False):
        """Mutate each dimension of `cfg` with probability `prob` and activate the result."""
        cfg = copy.deepcopy(cfg)
        pick_another = lambda x, candidates: x if len(candidates) == 1 else random.choice([v for v in candidates if v != x])
        # sample a resolution
        r = random.random()
        if r < prob and not keep_resolution:
            cfg['resolution'] = pick_another(cfg['resolution'], self.cfg_candidates['resolution'])
        # sample channels, depth, kernel_size, expand_ratio
        for k in ['width', 'depth', 'kernel_size', 'expand_ratio']:
            for _i, _v in enumerate(cfg[k]):
                r = random.random()
                if r < prob:
                    cfg[k][_i] = pick_another(cfg[k][_i], int2list(self.cfg_candidates[k][_i]))
        self.set_active_subnet(
            cfg['resolution'], cfg['width'], cfg['depth'], cfg['kernel_size'], cfg['expand_ratio']
        )
        return cfg

    def crossover_and_reset(self, cfg1, cfg2, p=0.5):
        """Uniform crossover of two architecture configs; activates the child."""
        def _cross_helper(g1, g2, prob):
            assert type(g1) == type(g2)
            if isinstance(g1, int):
                return g1 if random.random() < prob else g2
            elif isinstance(g1, list):
                return [v1 if random.random() < prob else v2 for v1, v2 in zip(g1, g2)]
            else:
                raise NotImplementedError

        cfg = {}
        cfg['resolution'] = cfg1['resolution'] if random.random() < p else cfg2['resolution']
        for k in ['width', 'depth', 'kernel_size', 'expand_ratio']:
            cfg[k] = _cross_helper(cfg1[k], cfg2[k], p)
        self.set_active_subnet(
            cfg['resolution'], cfg['width'], cfg['depth'], cfg['kernel_size'], cfg['expand_ratio']
        )
        return cfg

    def get_active_net_config(self):
        raise NotImplementedError

    def compute_active_subnet_flops(self):
        """Analytically estimate MFLOPs of the active subnetwork.

        NOTE(review): `size_out` is read below before ever being assigned (its
        initialization is commented out along with active_resolution), so this
        method would raise if called — confirm whether it is still used.
        """
        def count_conv(c_in, c_out, size_out, groups, k):
            kernel_ops = k**2
            output_elements = c_out * size_out**2
            ops = c_in * output_elements * kernel_ops / groups
            return ops

        def count_linear(c_in, c_out):
            return c_in * c_out

        total_ops = 0
        c_in = 3
        # size_out = self.active_resolution // self.first_conv.stride
        c_out = self.first_conv.active_out_channel
        total_ops += count_conv(c_in, c_out, size_out, 1, 3)
        c_in = c_out
        # mb blocks
        for stage_id, block_idx in enumerate(self.block_group_info):
            depth = self.runtime_depth[stage_id]
            active_idx = block_idx[:depth]
            for idx in active_idx:
                block = self.blocks[idx]
                c_middle = make_divisible(round(c_in * block.mobile_inverted_conv.active_expand_ratio), 8)
                # 1*1 conv
                if block.mobile_inverted_conv.inverted_bottleneck is not None:
                    total_ops += count_conv(c_in, c_middle, size_out, 1, 1)
                # dw conv
                stride = 1 if idx > active_idx[0] else block.mobile_inverted_conv.stride
                if size_out % stride == 0:
                    size_out = size_out // stride
                else:
                    size_out = (size_out + 1) // stride
                total_ops += count_conv(c_middle, c_middle, size_out, c_middle, block.mobile_inverted_conv.active_kernel_size)
                # 1*1 conv
                c_out = block.mobile_inverted_conv.active_out_channel
                total_ops += count_conv(c_middle, c_out, size_out, 1, 1)
                # se
                if block.mobile_inverted_conv.use_se:
                    num_mid = make_divisible(c_middle // block.mobile_inverted_conv.depth_conv.se.reduction, divisor=8)
                    total_ops += count_conv(c_middle, num_mid, 1, 1, 1) * 2
                if block.shortcut and c_in != c_out:
                    total_ops += count_conv(c_in, c_out, size_out, 1, 1)
                c_in = c_out

        if not self.use_v3_head:
            c_out = self.last_conv.active_out_channel
            total_ops += count_conv(c_in, c_out, size_out, 1, 1)
        else:
            c_expand = self.last_conv.final_expand_layer.active_out_channel
            c_out = self.last_conv.feature_mix_layer.active_out_channel
            total_ops += count_conv(c_in, c_expand, size_out, 1, 1)
            total_ops += count_conv(c_expand, c_out, 1, 1, 1)
        # n_classes
        total_ops += count_linear(c_out, self.n_classes)
        return total_ops / 1e6

    def load_weights_from_pretrained_models(self, checkpoint_path):
        """Copy weights from a (possibly DataParallel-prefixed) checkpoint."""
        with open(checkpoint_path, 'rb') as f:
            checkpoint = torch.load(f, map_location='cpu')
        assert isinstance(checkpoint, dict)
        pretrained_state_dicts = checkpoint['state_dict']
        for k, v in self.state_dict().items():
            # Checkpoints saved with DataParallel carry a 'module.' prefix.
            name = 'module.' + k if not k.startswith('module') else k
            v.copy_(pretrained_state_dicts[name])
| 20,925 | 41.189516 | 142 | py |
ENCAS | ENCAS-main/networks/ofa_mbv3_my.py | import copy
import torch
from ofa.imagenet_classification.elastic_nn.modules import DynamicMBConvLayer, DynamicConvLayer, DynamicLinearLayer
from ofa.imagenet_classification.elastic_nn.networks import OFAMobileNetV3
from ofa.imagenet_classification.networks import MobileNetV3
from ofa.utils import val2list, make_divisible, MyNetwork
from ofa.utils.layers import ConvLayer, MBConvLayer, ResidualBlock, IdentityLayer, LinearLayer, My2DLayer
from torch.utils.checkpoint import checkpoint_sequential
class OFAMobileNetV3My(OFAMobileNetV3):
def __init__(self, n_classes=1000, bn_param=(0.1, 1e-5), dropout_rate=0.1, base_stage_width=None, width_mult=1.0,
ks_list=3, expand_ratio_list=6, depth_list=4, if_use_gradient_checkpointing=False,
class_for_subnet=MobileNetV3, n_image_channels=3):
'''
differences to init of super:
1) several widths in each block instead of one => unneccessary, since NAT turned out to use 2 separate supernets
2) arbitrary n_image_channels => not used on cifars or imagenet
3) specify class_for_subnet => not used in classification
'''
self.width_mult = val2list(width_mult)
# self.width_mults = [1.0, 1.2]
self.ks_list = val2list(ks_list, 1)
self.expand_ratio_list = val2list(expand_ratio_list, 1)
self.depth_list = val2list(depth_list, 1)
self.n_image_channels = n_image_channels
self.ks_list.sort()
self.expand_ratio_list.sort()
self.depth_list.sort()
base_stage_width = [16, 16, 24, 40, 80, 112, 160, 960, 1280]
final_expand_width = [make_divisible(base_stage_width[-2] * w, MyNetwork.CHANNEL_DIVISIBLE) for w in
self.width_mult]
last_channel = [make_divisible(base_stage_width[-1] * w, MyNetwork.CHANNEL_DIVISIBLE) for w in self.width_mult]
stride_stages = [1, 2, 2, 2, 1, 2]
act_stages = ['relu', 'relu', 'relu', 'h_swish', 'h_swish', 'h_swish']
se_stages = [False, False, True, False, True, True]
n_block_list = [1] + [max(self.depth_list)] * 5
width_lists = []
for base_width in base_stage_width[:-2]:
width_list_cur = []
for w in self.width_mult:
width = make_divisible(base_width * w, MyNetwork.CHANNEL_DIVISIBLE)
width_list_cur.append(width)
width_lists.append(width_list_cur)
input_channel, first_block_dim = width_lists[0], width_lists[1]
# first conv layer
first_conv = DynamicConvLayer([self.n_image_channels], input_channel, kernel_size=3, stride=2, act_func='h_swish')
first_block_conv = DynamicMBConvLayer(
in_channel_list=input_channel, out_channel_list=first_block_dim, kernel_size_list=3,
stride=stride_stages[0],
expand_ratio_list=1, act_func=act_stages[0], use_se=se_stages[0],
)
first_block = ResidualBlock(
first_block_conv,
IdentityLayer(max(first_block_dim), max(first_block_dim)) if input_channel == first_block_dim else None,
)
# inverted residual blocks
self.block_group_info = []
blocks = [first_block]
_block_index = 1
feature_dim = first_block_dim
for width_list_cur, n_block, s, act_func, use_se in zip(width_lists[2:], n_block_list[1:],
stride_stages[1:], act_stages[1:], se_stages[1:]):
self.block_group_info.append([_block_index + i for i in range(n_block)])
_block_index += n_block
output_channel = width_list_cur
for i in range(n_block):
stride = s if i == 0 else 1
mobile_inverted_conv = DynamicMBConvLayer(
in_channel_list=val2list(feature_dim), out_channel_list=val2list(width_list_cur),
kernel_size_list=ks_list, expand_ratio_list=expand_ratio_list,
stride=stride, act_func=act_func, use_se=use_se,
)
if stride == 1 and feature_dim == output_channel:
shortcut = IdentityLayer(max(feature_dim), max(feature_dim))
else:
shortcut = None
blocks.append(ResidualBlock(mobile_inverted_conv, shortcut))
feature_dim = output_channel
# final expand layer, feature mix layer & classifier
final_expand_layer = DynamicConvLayer(feature_dim, final_expand_width, kernel_size=1, act_func='h_swish')
feature_mix_layer = DynamicConvLayer(
final_expand_width, last_channel, kernel_size=1, use_bn=False, act_func='h_swish',
)
classifier = DynamicLinearLayer(last_channel, n_classes, dropout_rate=dropout_rate)
super(OFAMobileNetV3, self).__init__(first_conv, blocks, final_expand_layer, feature_mix_layer, classifier)
# set bn param
self.set_bn_param(momentum=bn_param[0], eps=bn_param[1])
# runtime_depth
self.runtime_depth = [len(block_idx) for block_idx in self.block_group_info]
self.if_use_gradient_checkpointing = if_use_gradient_checkpointing
self.class_for_subnet = class_for_subnet
def set_active_subnet(self, ks=None, e=None, d=None, w=None, **kwargs):
ks = val2list(ks, len(self.blocks) - 1)
expand_ratio = val2list(e, len(self.blocks) - 1)
depth = val2list(d, len(self.block_group_info))
width_mult = 0# since it turned out that different widths <=> different supernets, there's always just one width_mult, so we just take that.
self.first_conv.active_out_channel = self.first_conv.out_channel_list[width_mult]
self.blocks[0].conv.active_out_channel = self.blocks[0].conv.out_channel_list[width_mult]
self.final_expand_layer.active_out_channel = self.final_expand_layer.out_channel_list[width_mult]
self.feature_mix_layer.active_out_channel = self.feature_mix_layer.out_channel_list[width_mult]
for block, k, e in zip(self.blocks[1:], ks, expand_ratio):
if k is not None:
block.conv.active_kernel_size = k
if e is not None:
block.conv.active_expand_ratio = e
block.conv.active_out_channel = block.conv.out_channel_list[width_mult]
for i, d in enumerate(depth):
if d is not None:
self.runtime_depth[i] = min(len(self.block_group_info[i]), d)
    def get_active_subnet(self, preserve_weight=True):
        """Extract the currently active sub-network as a standalone static model.

        Builds static copies of every active layer, threading the running
        ``input_channel`` through the block sequence, and returns an instance of
        ``self.class_for_subnet`` configured with this supernet's BN parameters.

        :param preserve_weight: if True, copy the supernet weights into the
            extracted static layers; otherwise leave them freshly initialized.
        """
        first_conv = self.first_conv.get_active_subnet(self.n_image_channels, preserve_weight)
        # stem block; its shortcut is reused verbatim via deepcopy
        blocks = [
            ResidualBlock(self.blocks[0].conv.get_active_subnet(self.first_conv.active_out_channel),
                          copy.deepcopy(self.blocks[0].shortcut))
        ]
        final_expand_layer = self.final_expand_layer.get_active_subnet(self.blocks[-1].conv.active_out_channel)
        feature_mix_layer = self.feature_mix_layer.get_active_subnet(self.final_expand_layer.active_out_channel)
        classifier = self.classifier.get_active_subnet(self.feature_mix_layer.active_out_channel)
        input_channel = blocks[0].conv.out_channels
        # blocks: only the first runtime_depth[stage] blocks of each stage are active
        for stage_id, block_idx in enumerate(self.block_group_info):
            depth = self.runtime_depth[stage_id]
            active_idx = block_idx[:depth]
            stage_blocks = []
            for idx in active_idx:
                stage_blocks.append(ResidualBlock(
                    self.blocks[idx].conv.get_active_subnet(input_channel, preserve_weight),
                    copy.deepcopy(self.blocks[idx].shortcut)
                ))
                # next block's input width is this block's active output width
                input_channel = self.blocks[idx].conv.active_out_channel
            blocks += stage_blocks
        _subnet = self.class_for_subnet(first_conv, blocks, final_expand_layer, feature_mix_layer, classifier)
        _subnet.set_bn_param(**self.get_bn_param())
        return _subnet
def forward(self, x):
if not (self.if_use_gradient_checkpointing and self.training):
# first conv
x = self.first_conv(x)
# first block
x = self.blocks[0](x)
# blocks
for stage_id, block_idx in enumerate(self.block_group_info):
depth = self.runtime_depth[stage_id]
active_idx = block_idx[:depth]
for idx in active_idx:
x = self.blocks[idx](x)
x = self.final_expand_layer(x)
else:
x = self.first_conv(x)
blocks_to_run = [self.blocks[0]]
for stage_id, block_idx in enumerate(self.block_group_info):
depth = self.runtime_depth[stage_id]
active_idx = block_idx[:depth]
for idx in active_idx:
blocks_to_run.append(self.blocks[idx])
blocks_to_run.append(self.final_expand_layer)
x = checkpoint_sequential(blocks_to_run, 2, x)
x = x.mean(3, keepdim=True).mean(2, keepdim=True) # global average pooling
x = self.feature_mix_layer(x)
x = x.view(x.size(0), -1)
x = self.classifier(x)
return x
| 9,234 | 48.12234 | 148 | py |
ENCAS | ENCAS-main/networks/attentive_nas_static_model.py | # taken from https://github.com/facebookresearch/AttentiveNAS
# Difference: images not resized in forward, but beforehand, in the collator (which is faster)
import torch
import torch.nn as nn
from .modules_alphanet.nn_base import MyNetwork
class AttentiveNasStaticModel(MyNetwork):
    """A fixed (static) subnet extracted from an AttentiveNAS supernet.

    Plain sequential stack: ``first_conv -> blocks -> last_conv -> head``.
    When ``use_v3_head`` is False, a global-average-pool + flatten head is
    applied here before the classifier; otherwise ``last_conv`` is expected
    to produce classifier-ready features.
    """

    def __init__(self, first_conv, blocks, last_conv, classifier, use_v3_head=True):
        super(AttentiveNasStaticModel, self).__init__()

        self.first_conv = first_conv
        self.blocks = nn.ModuleList(blocks)
        self.last_conv = last_conv
        self.classifier = classifier
        self.use_v3_head = use_v3_head

    def forward(self, x):
        # Images are not resized here but beforehand (see module header note).
        x = self.first_conv(x)
        for block in self.blocks:
            x = block(x)
        x = self.last_conv(x)
        if not self.use_v3_head:
            x = x.mean(3, keepdim=True).mean(2, keepdim=True)  # global average pooling -> (N, C, 1, 1)
            # BUGFIX: was `torch.squeeze(x)`, which also removed the batch
            # dimension for batches of size 1 and fed a 1-D tensor to the
            # classifier; flattening from dim 1 keeps the batch axis intact
            # and is identical to squeeze for N > 1.
            x = torch.flatten(x, 1)
        x = self.classifier(x)
        return x

    @property
    def module_str(self):
        """Human-readable summary of the layer stack (last_conv omitted, as upstream)."""
        _str = self.first_conv.module_str + '\n'
        for block in self.blocks:
            _str += block.module_str + '\n'
        _str += self.classifier.module_str
        return _str

    @property
    def config(self):
        """Serializable architecture description (last_conv omitted, as upstream)."""
        return {
            'name': AttentiveNasStaticModel.__name__,
            'bn': self.get_bn_param(),
            'first_conv': self.first_conv.config,
            'blocks': [
                block.config for block in self.blocks
            ],
            'classifier': self.classifier.config,
        }

    def weight_initialization(self):
        """He init for convs, unit BN/GN, small-normal linear layers."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out')
                if m.bias is not None:
                    nn.init.zeros_(m.bias)
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.ones_(m.weight)
                nn.init.zeros_(m.bias)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.zeros_(m.bias)

    @staticmethod
    def build_from_config(config):
        raise NotImplementedError

    def reset_running_stats_for_calibration(self):
        """Prepare all BN layers for recalibration: momentum=None makes them
        accumulate a cumulative moving average over the calibration passes."""
        for m in self.modules():
            if isinstance(m, (nn.BatchNorm2d, nn.BatchNorm1d, nn.SyncBatchNorm)):
                m.training = True
                m.momentum = None  # cumulative moving average
                m.reset_running_stats()
| 2,694 | 31.083333 | 113 | py |
ENCAS | ENCAS-main/networks/modules_alphanet/dynamic_layers.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# adapted from OFA: https://github.com/mit-han-lab/once-for-all
from collections import OrderedDict
import copy
import torch
import torch.nn as nn
import torch.nn.functional as F
from .static_layers import MBInvertedConvLayer, ConvBnActLayer, LinearLayer, SELayer, ShortcutLayer
from .dynamic_ops import DynamicSeparableConv2d, DynamicPointConv2d, DynamicBatchNorm2d, DynamicLinear, DynamicSE
from .nn_utils import int2list, get_net_device, copy_bn, build_activation, make_divisible
from .nn_base import MyModule, MyNetwork
class DynamicMBConvLayer(MyModule):
    """Elastic MobileNet inverted-bottleneck (MBConv) block.

    Holds weights for the *largest* configuration (max channels, max kernel
    size, max expand ratio) and serves any smaller configuration by slicing,
    controlled at runtime via ``active_kernel_size`` / ``active_expand_ratio``
    / ``active_out_channel``. ``get_active_subnet`` extracts a static
    ``MBInvertedConvLayer`` for the current configuration.
    """

    def __init__(self, in_channel_list, out_channel_list,
                 kernel_size_list=3, expand_ratio_list=6, stride=1, act_func='relu6', use_se=False, channels_per_group=1):
        super(DynamicMBConvLayer, self).__init__()

        self.in_channel_list = int2list(in_channel_list)
        self.out_channel_list = int2list(out_channel_list)

        self.kernel_size_list = int2list(kernel_size_list, 1)
        self.expand_ratio_list = int2list(expand_ratio_list, 1)

        self.stride = stride
        self.act_func = act_func
        self.use_se = use_se
        self.channels_per_group = channels_per_group

        # build sub-modules, sized for the largest configuration
        max_middle_channel = round(max(self.in_channel_list) * max(self.expand_ratio_list))
        if max(self.expand_ratio_list) == 1:
            # no expansion -> no 1x1 expand conv
            self.inverted_bottleneck = None
        else:
            self.inverted_bottleneck = nn.Sequential(OrderedDict([
                ('conv', DynamicPointConv2d(max(self.in_channel_list), max_middle_channel)),
                ('bn', DynamicBatchNorm2d(max_middle_channel)),
                ('act', build_activation(self.act_func, inplace=True)),
            ]))

        self.depth_conv = nn.Sequential(OrderedDict([
            ('conv', DynamicSeparableConv2d(max_middle_channel, self.kernel_size_list, stride=self.stride, channels_per_group=self.channels_per_group)),
            ('bn', DynamicBatchNorm2d(max_middle_channel)),
            ('act', build_activation(self.act_func, inplace=True))
        ]))
        if self.use_se:
            self.depth_conv.add_module('se', DynamicSE(max_middle_channel))

        self.point_linear = nn.Sequential(OrderedDict([
            ('conv', DynamicPointConv2d(max_middle_channel, max(self.out_channel_list))),
            ('bn', DynamicBatchNorm2d(max(self.out_channel_list))),
        ]))

        # default to the largest configuration
        self.active_kernel_size = max(self.kernel_size_list)
        self.active_expand_ratio = max(self.expand_ratio_list)
        self.active_out_channel = max(self.out_channel_list)

    def forward(self, x):
        """Run the block in its currently active configuration."""
        in_channel = x.size(1)

        if self.inverted_bottleneck is not None:
            # expansion width derives from the actual input width, rounded to a multiple of 8
            self.inverted_bottleneck.conv.active_out_channel = \
                make_divisible(round(in_channel * self.active_expand_ratio), 8)

        self.depth_conv.conv.active_kernel_size = self.active_kernel_size
        self.point_linear.conv.active_out_channel = self.active_out_channel

        if self.inverted_bottleneck is not None:
            x = self.inverted_bottleneck(x)
        x = self.depth_conv(x)
        x = self.point_linear(x)
        return x

    @property
    def module_str(self):
        # short description of the *active* configuration
        if self.use_se:
            return 'SE(O%d, E%.1f, K%d)' % (self.active_out_channel, self.active_expand_ratio, self.active_kernel_size)
        else:
            return '(O%d, E%.1f, K%d)' % (self.active_out_channel, self.active_expand_ratio, self.active_kernel_size)

    @property
    def config(self):
        """Serializable description; round-trips through build_from_config."""
        return {
            'name': DynamicMBConvLayer.__name__,
            'in_channel_list': self.in_channel_list,
            'out_channel_list': self.out_channel_list,
            'kernel_size_list': self.kernel_size_list,
            'expand_ratio_list': self.expand_ratio_list,
            'stride': self.stride,
            'act_func': self.act_func,
            'use_se': self.use_se,
            'channels_per_group': self.channels_per_group,
        }

    @staticmethod
    def build_from_config(config):
        return DynamicMBConvLayer(**config)

    ############################################################################################

    def get_active_subnet(self, in_channel, preserve_weight=True):
        """Extract a static MBInvertedConvLayer for the active configuration,
        optionally copying the (sliced) supernet weights into it."""
        middle_channel = make_divisible(round(in_channel * self.active_expand_ratio), 8)
        channels_per_group = self.depth_conv.conv.channels_per_group

        # build the new layer
        sub_layer = MBInvertedConvLayer(
            in_channel, self.active_out_channel, self.active_kernel_size, self.stride, self.active_expand_ratio,
            act_func=self.act_func, mid_channels=middle_channel, use_se=self.use_se, channels_per_group=channels_per_group
        )
        sub_layer = sub_layer.to(get_net_device(self))

        if not preserve_weight:
            return sub_layer

        # copy weight from current layer (slicing the leading channels/kernel window)
        if sub_layer.inverted_bottleneck is not None:
            sub_layer.inverted_bottleneck.conv.weight.data.copy_(
                self.inverted_bottleneck.conv.conv.weight.data[:middle_channel, :in_channel, :, :]
            )
            copy_bn(sub_layer.inverted_bottleneck.bn, self.inverted_bottleneck.bn.bn)

        sub_layer.depth_conv.conv.weight.data.copy_(
            self.depth_conv.conv.get_active_filter(middle_channel, self.active_kernel_size).data
        )
        copy_bn(sub_layer.depth_conv.bn, self.depth_conv.bn.bn)

        if self.use_se:
            # SE reduce/expand convs are sliced to the active middle width
            se_mid = make_divisible(middle_channel // SELayer.REDUCTION, divisor=8)
            sub_layer.depth_conv.se.fc.reduce.weight.data.copy_(
                self.depth_conv.se.fc.reduce.weight.data[:se_mid, :middle_channel, :, :]
            )
            sub_layer.depth_conv.se.fc.reduce.bias.data.copy_(self.depth_conv.se.fc.reduce.bias.data[:se_mid])

            sub_layer.depth_conv.se.fc.expand.weight.data.copy_(
                self.depth_conv.se.fc.expand.weight.data[:middle_channel, :se_mid, :, :]
            )
            sub_layer.depth_conv.se.fc.expand.bias.data.copy_(self.depth_conv.se.fc.expand.bias.data[:middle_channel])

        sub_layer.point_linear.conv.weight.data.copy_(
            self.point_linear.conv.conv.weight.data[:self.active_out_channel, :middle_channel, :, :]
        )
        copy_bn(sub_layer.point_linear.bn, self.point_linear.bn.bn)

        return sub_layer

    def re_organize_middle_weights(self, expand_ratio_stage=0):
        # channel importance re-sorting (from OFA) is intentionally disabled here
        raise NotImplementedError
        #importance = torch.sum(torch.abs(self.point_linear.conv.conv.weight.data), dim=(0, 2, 3))
        #if expand_ratio_stage > 0:
        #    sorted_expand_list = copy.deepcopy(self.expand_ratio_list)
        #    sorted_expand_list.sort(reverse=True)
        #    target_width = sorted_expand_list[expand_ratio_stage]
        #    target_width = round(max(self.in_channel_list) * target_width)
        #    importance[target_width:] = torch.arange(0, target_width - importance.size(0), -1)
        #
        #sorted_importance, sorted_idx = torch.sort(importance, dim=0, descending=True)
        #self.point_linear.conv.conv.weight.data = torch.index_select(
        #    self.point_linear.conv.conv.weight.data, 1, sorted_idx
        #)
        #
        #adjust_bn_according_to_idx(self.depth_conv.bn.bn, sorted_idx)
        #self.depth_conv.conv.conv.weight.data = torch.index_select(
        #    self.depth_conv.conv.conv.weight.data, 0, sorted_idx
        #)
        #if self.use_se:
        #    # se expand: output dim 0 reorganize
        #    se_expand = self.depth_conv.se.fc.expand
        #    se_expand.weight.data = torch.index_select(se_expand.weight.data, 0, sorted_idx)
        #    se_expand.bias.data = torch.index_select(se_expand.bias.data, 0, sorted_idx)
        #    # se reduce: input dim 1 reorganize
        #    se_reduce = self.depth_conv.se.fc.reduce
        #    se_reduce.weight.data = torch.index_select(se_reduce.weight.data, 1, sorted_idx)
        #    # middle weight reorganize
        #    se_importance = torch.sum(torch.abs(se_expand.weight.data), dim=(0, 2, 3))
        #    se_importance, se_idx = torch.sort(se_importance, dim=0, descending=True)
        #    se_expand.weight.data = torch.index_select(se_expand.weight.data, 1, se_idx)
        #    se_reduce.weight.data = torch.index_select(se_reduce.weight.data, 0, se_idx)
        #    se_reduce.bias.data = torch.index_select(se_reduce.bias.data, 0, se_idx)
        #
        ## if inverted_bottleneck is None, the previous layer should be reorganized accordingly
        #if self.inverted_bottleneck is not None:
        #    adjust_bn_according_to_idx(self.inverted_bottleneck.bn.bn, sorted_idx)
        #    self.inverted_bottleneck.conv.conv.weight.data = torch.index_select(
        #        self.inverted_bottleneck.conv.conv.weight.data, 0, sorted_idx
        #    )
        #    return None
        #else:
        #    return sorted_idx
class DynamicConvBnActLayer(MyModule):
    """Elastic Conv -> (BN) -> activation layer.

    Output width is selected at runtime via ``active_out_channel``;
    ``get_active_subnet`` slices out a static ``ConvBnActLayer``.
    """

    def __init__(self, in_channel_list, out_channel_list, kernel_size=3, stride=1, dilation=1,
                 use_bn=True, act_func='relu6'):
        super(DynamicConvBnActLayer, self).__init__()

        self.in_channel_list = int2list(in_channel_list)
        self.out_channel_list = int2list(out_channel_list)
        self.kernel_size = kernel_size
        self.stride = stride
        self.dilation = dilation
        self.use_bn = use_bn
        self.act_func = act_func

        self.conv = DynamicPointConv2d(
            max_in_channels=max(self.in_channel_list), max_out_channels=max(self.out_channel_list),
            kernel_size=self.kernel_size, stride=self.stride, dilation=self.dilation,
        )

        if self.use_bn:
            self.bn = DynamicBatchNorm2d(max(self.out_channel_list))
        if self.act_func is not None:
            self.act = build_activation(self.act_func, inplace=True)

        # defaults to the widest configuration
        self.active_out_channel = max(self.out_channel_list)

    def forward(self, x):
        self.conv.active_out_channel = self.active_out_channel

        x = self.conv(x)
        if self.use_bn:
            x = self.bn(x)
        if self.act_func is not None:
            x = self.act(x)
        return x

    @property
    def module_str(self):
        return 'DyConv(O%d, K%d, S%d)' % (self.active_out_channel, self.kernel_size, self.stride)

    @property
    def config(self):
        """Serializable description; round-trips through build_from_config."""
        return {
            'name': DynamicConvBnActLayer.__name__,
            'in_channel_list': self.in_channel_list,
            'out_channel_list': self.out_channel_list,
            'kernel_size': self.kernel_size,
            'stride': self.stride,
            'dilation': self.dilation,
            'use_bn': self.use_bn,
            'act_func': self.act_func,
        }

    @staticmethod
    def build_from_config(config):
        return DynamicConvBnActLayer(**config)

    def get_active_subnet(self, in_channel, preserve_weight=True):
        """Slice out a static ConvBnActLayer for the given input width,
        optionally copying the (sliced) supernet weights into it."""
        sub_layer = ConvBnActLayer(
            in_channel, self.active_out_channel, self.kernel_size, self.stride, self.dilation,
            use_bn=self.use_bn, act_func=self.act_func
        )
        sub_layer = sub_layer.to(get_net_device(self))

        if not preserve_weight:
            return sub_layer

        sub_layer.conv.weight.data.copy_(self.conv.conv.weight.data[:self.active_out_channel, :in_channel, :, :])
        if self.use_bn:
            copy_bn(sub_layer.bn, self.bn.bn)

        return sub_layer
class DynamicLinearLayer(MyModule):
    """Classifier head wrapping a DynamicLinear, elastic in its input width."""

    def __init__(self, in_features_list, out_features, bias=True):
        super(DynamicLinearLayer, self).__init__()

        self.in_features_list = int2list(in_features_list)
        self.out_features = out_features
        self.bias = bias
        self.linear = DynamicLinear(
            max_in_features=max(self.in_features_list), max_out_features=self.out_features, bias=self.bias
        )

    def forward(self, x):
        return self.linear(x)

    @property
    def module_str(self):
        return 'DyLinear(%d)' % self.out_features

    @property
    def config(self):
        # NOTE(review): 'name' reports DynamicLinear rather than
        # DynamicLinearLayer, unlike the other dynamic layers in this file;
        # kept byte-identical for config/checkpoint compatibility, but it
        # looks like an upstream inconsistency worth confirming.
        return {
            'name': DynamicLinear.__name__,
            'in_features_list': self.in_features_list,
            'out_features': self.out_features,
            'bias': self.bias
        }

    @staticmethod
    def build_from_config(config):
        return DynamicLinearLayer(**config)

    def get_active_subnet(self, in_features, preserve_weight=True):
        """Slice out a static LinearLayer for the given input width."""
        static_layer = LinearLayer(in_features, self.out_features, self.bias)
        static_layer = static_layer.to(get_net_device(self))
        if not preserve_weight:
            return static_layer
        static_layer.linear.weight.data.copy_(self.linear.linear.weight.data[:self.out_features, :in_features])
        if self.bias:
            static_layer.linear.bias.data.copy_(self.linear.linear.bias.data[:self.out_features])
        return static_layer
class DynamicShortcutLayer(MyModule):
    """Residual shortcut that adapts to the active channel count and an
    optional fixed spatial reduction."""

    def __init__(self, in_channel_list, out_channel_list, reduction=1):
        super(DynamicShortcutLayer, self).__init__()

        self.in_channel_list = int2list(in_channel_list)
        self.out_channel_list = int2list(out_channel_list)
        self.reduction = reduction

        self.conv = DynamicPointConv2d(
            max_in_channels=max(self.in_channel_list), max_out_channels=max(self.out_channel_list),
            kernel_size=1, stride=1,
        )

        self.active_out_channel = max(self.out_channel_list)

    def forward(self, x):
        in_channel = x.size(1)

        # fast path: true identity when neither channels nor resolution change
        if in_channel == self.active_out_channel and self.reduction == 1:
            return x
        # spatial mismatch -> average pool (odd inputs get one pixel of padding)
        if self.reduction > 1:
            pad = 1 if x.size(-1) % 2 else 0
            x = F.avg_pool2d(x, self.reduction, padding=pad)
        # channel mismatch -> 1x1 conv sliced to the active width
        if in_channel != self.active_out_channel:
            self.conv.active_out_channel = self.active_out_channel
            x = self.conv(x)
        return x

    @property
    def module_str(self):
        return 'DyShortcut(O%d, R%d)' % (self.active_out_channel, self.reduction)

    @property
    def config(self):
        return {
            'name': DynamicShortcutLayer.__name__,
            'in_channel_list': self.in_channel_list,
            'out_channel_list': self.out_channel_list,
            'reduction': self.reduction,
        }

    @staticmethod
    def build_from_config(config):
        return DynamicShortcutLayer(**config)

    def get_active_subnet(self, in_channel, preserve_weight=True):
        """Slice out a static ShortcutLayer for the given input width."""
        static_layer = ShortcutLayer(in_channel, self.active_out_channel, self.reduction)
        static_layer = static_layer.to(get_net_device(self))
        if not preserve_weight:
            return static_layer
        static_layer.conv.weight.data.copy_(self.conv.conv.weight.data[:self.active_out_channel, :in_channel, :, :])
        return static_layer
| 15,686 | 38.916031 | 152 | py |
ENCAS | ENCAS-main/networks/modules_alphanet/static_layers.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# adapted from OFA: https://github.com/mit-han-lab/once-for-all
from collections import OrderedDict
import torch.nn as nn
from .nn_utils import get_same_padding, build_activation, make_divisible, drop_connect
from .nn_base import MyModule
from .activations import *
def set_layer_from_config(layer_config):
    """Instantiate a static layer from its serialized config dict.

    Returns None for a None config. NOTE: the 'name' key is popped, i.e. the
    caller's dict is mutated; the remaining keys are constructor kwargs.
    """
    if layer_config is None:
        return None

    registry = {
        cls.__name__: cls
        for cls in (ConvBnActLayer, IdentityLayer, LinearLayer, MBInvertedConvLayer)
    }
    layer_cls = registry[layer_config.pop('name')]
    return layer_cls.build_from_config(layer_config)
class SELayer(nn.Module):
    """Squeeze-and-Excitation module with a hard-sigmoid gate and a fixed
    channel reduction factor of 4 (hidden width rounded to a multiple of 8)."""

    REDUCTION = 4

    def __init__(self, channel):
        super(SELayer, self).__init__()

        self.channel = channel
        self.reduction = SELayer.REDUCTION

        mid_channels = make_divisible(self.channel // self.reduction, divisor=8)
        # sub-module names must stay in sync with DynamicSE for weight copying
        self.fc = nn.Sequential(OrderedDict([
            ('reduce', nn.Conv2d(self.channel, mid_channels, 1, 1, 0, bias=True)),
            ('relu', nn.ReLU(inplace=True)),
            ('expand', nn.Conv2d(mid_channels, self.channel, 1, 1, 0, bias=True)),
            ('h_sigmoid', Hsigmoid(inplace=True)),
        ]))

    def forward(self, x):
        # squeeze: global average pool to (N, C, 1, 1); excite; rescale input
        scale = x.mean(3, keepdim=True).mean(2, keepdim=True)
        scale = self.fc(scale)
        return x * scale
class ConvBnActLayer(MyModule):
def __init__(self, in_channels, out_channels,
kernel_size=3, stride=1, dilation=1, groups=1, bias=False,
use_bn=True, act_func='relu'):
super(ConvBnActLayer, self).__init__()
# default normal 3x3_Conv with bn and relu
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.stride = stride
self.dilation = dilation
self.groups = groups
self.bias = bias
self.use_bn = use_bn
self.act_func = act_func
pad = get_same_padding(self.kernel_size)
self.conv = nn.Conv2d(in_channels, out_channels, self.kernel_size,
stride, pad, dilation=dilation, groups=groups, bias=bias
)
if self.use_bn:
self.bn = nn.BatchNorm2d(out_channels)
self.act = build_activation(self.act_func, inplace=True)
def forward(self, x):
x = self.conv(x)
if self.use_bn:
x = self.bn(x)
if self.act:
x = self.act(x)
return x
@property
def module_str(self):
if isinstance(self.kernel_size, int):
kernel_size = (self.kernel_size, self.kernel_size)
else:
kernel_size = self.kernel_size
if self.groups == 1:
if self.dilation > 1:
conv_str = '%dx%d_DilatedConv' % (kernel_size[0], kernel_size[1])
else:
conv_str = '%dx%d_Conv' % (kernel_size[0], kernel_size[1])
else:
if self.dilation > 1:
conv_str = '%dx%d_DilatedGroupConv' % (kernel_size[0], kernel_size[1])
else:
conv_str = '%dx%d_GroupConv' % (kernel_size[0], kernel_size[1])
conv_str += '_O%d' % self.out_channels
return conv_str
@property
def config(self):
return {
'name': ConvBnActLayer.__name__,
'in_channels': self.in_channels,
'out_channels': self.out_channels,
'kernel_size': self.kernel_size,
'stride': self.stride,
'dilation': self.dilation,
'groups': self.groups,
'bias': self.bias,
'use_bn': self.use_bn,
'act_func': self.act_func,
}
@staticmethod
def build_from_config(config):
return ConvBnActLayer(**config)
class IdentityLayer(MyModule):
    """Pass-through (no-op) layer, used e.g. as an identity shortcut."""

    def __init__(self, ):
        super(IdentityLayer, self).__init__()

    def forward(self, x):
        return x

    @property
    def module_str(self):
        return 'Identity'

    @property
    def config(self):
        return {'name': IdentityLayer.__name__}

    @staticmethod
    def build_from_config(config):
        return IdentityLayer(**config)
class LinearLayer(MyModule):
    """Static fully-connected layer (thin wrapper around nn.Linear)."""

    def __init__(self, in_features, out_features, bias=True):
        super(LinearLayer, self).__init__()

        self.in_features = in_features
        self.out_features = out_features
        self.bias = bias
        self.linear = nn.Linear(in_features, out_features, bias)

    def forward(self, x):
        return self.linear(x)

    @property
    def module_str(self):
        return '%dx%d_Linear' % (self.in_features, self.out_features)

    @property
    def config(self):
        return {
            'name': LinearLayer.__name__,
            'in_features': self.in_features,
            'out_features': self.out_features,
            'bias': self.bias,
        }

    @staticmethod
    def build_from_config(config):
        return LinearLayer(**config)
class ShortcutLayer(MyModule):
    """Static residual shortcut: optional avg-pool downsample plus an optional
    1x1 conv that is applied only when channel counts differ."""

    def __init__(self, in_channels, out_channels, reduction=1):
        super(ShortcutLayer, self).__init__()

        self.in_channels = in_channels
        self.out_channels = out_channels
        self.reduction = reduction

        self.conv = nn.Conv2d(in_channels, out_channels, 1, 1, 0, bias=False)

    def forward(self, x):
        if self.reduction > 1:
            # odd spatial sizes get one pixel of padding before pooling
            pad = 1 if x.size(-1) % 2 else 0
            x = F.avg_pool2d(x, self.reduction, padding=pad)
        if self.in_channels != self.out_channels:
            x = self.conv(x)
        return x

    @property
    def module_str(self):
        if self.in_channels == self.out_channels and self.reduction == 1:
            return 'IdentityShortcut'
        if self.reduction == 1:
            return '%d-%d_Shortcut' % (self.in_channels, self.out_channels)
        return '%d-%d_R%d_Shortcut' % (self.in_channels, self.out_channels, self.reduction)

    @property
    def config(self):
        return {
            'name': ShortcutLayer.__name__,
            'in_channels': self.in_channels,
            'out_channels': self.out_channels,
            'reduction': self.reduction,
        }

    @staticmethod
    def build_from_config(config):
        return ShortcutLayer(**config)
class MBInvertedConvLayer(MyModule):
    """Static MobileNet inverted-bottleneck (MBConv) block:
    optional 1x1 expand -> depthwise/grouped conv (+ optional SE) -> 1x1 project.

    Sub-module names ('conv'/'bn'/'act'/'se') mirror DynamicMBConvLayer so that
    state_dicts line up when weights are copied from the supernet.
    """

    def __init__(self, in_channels, out_channels,
                 kernel_size=3, stride=1, expand_ratio=6, mid_channels=None, act_func='relu6', use_se=False, channels_per_group=1):
        super(MBInvertedConvLayer, self).__init__()

        self.in_channels = in_channels
        self.out_channels = out_channels

        self.kernel_size = kernel_size
        self.stride = stride
        self.expand_ratio = expand_ratio
        self.mid_channels = mid_channels
        self.act_func = act_func
        self.use_se = use_se
        self.channels_per_group = channels_per_group

        # an explicit mid_channels overrides the expand_ratio-derived width
        if self.mid_channels is None:
            feature_dim = round(self.in_channels * self.expand_ratio)
        else:
            feature_dim = self.mid_channels

        if self.expand_ratio == 1:
            # no expansion -> no 1x1 expand conv
            self.inverted_bottleneck = None
        else:
            self.inverted_bottleneck = nn.Sequential(OrderedDict([
                ('conv', nn.Conv2d(self.in_channels, feature_dim, 1, 1, 0, bias=False)),
                ('bn', nn.BatchNorm2d(feature_dim)),
                ('act', build_activation(self.act_func, inplace=True)),
            ]))

        assert feature_dim % self.channels_per_group == 0
        active_groups = feature_dim // self.channels_per_group
        pad = get_same_padding(self.kernel_size)
        depth_conv_modules = [
            ('conv', nn.Conv2d(feature_dim, feature_dim, kernel_size, stride, pad, groups=active_groups, bias=False)),
            ('bn', nn.BatchNorm2d(feature_dim)),
            ('act', build_activation(self.act_func, inplace=True))
        ]
        if self.use_se:
            depth_conv_modules.append(('se', SELayer(feature_dim)))
        self.depth_conv = nn.Sequential(OrderedDict(depth_conv_modules))

        self.point_linear = nn.Sequential(OrderedDict([
            ('conv', nn.Conv2d(feature_dim, out_channels, 1, 1, 0, bias=False)),
            ('bn', nn.BatchNorm2d(out_channels)),
        ]))

    def forward(self, x):
        if self.inverted_bottleneck:
            x = self.inverted_bottleneck(x)
        x = self.depth_conv(x)
        x = self.point_linear(x)
        return x

    @property
    def module_str(self):
        # e.g. 'SE_3x3_MBConv6_RELU6_O64'
        if self.mid_channels is None:
            expand_ratio = self.expand_ratio
        else:
            expand_ratio = self.mid_channels // self.in_channels
        layer_str = '%dx%d_MBConv%d_%s' % (self.kernel_size, self.kernel_size, expand_ratio, self.act_func.upper())
        if self.use_se:
            layer_str = 'SE_' + layer_str
        layer_str += '_O%d' % self.out_channels
        return layer_str

    @property
    def config(self):
        """Serializable description; round-trips through build_from_config."""
        return {
            'name': MBInvertedConvLayer.__name__,
            'in_channels': self.in_channels,
            'out_channels': self.out_channels,
            'kernel_size': self.kernel_size,
            'stride': self.stride,
            'expand_ratio': self.expand_ratio,
            'mid_channels': self.mid_channels,
            'act_func': self.act_func,
            'use_se': self.use_se,
            'channels_per_group': self.channels_per_group,
        }

    @staticmethod
    def build_from_config(config):
        return MBInvertedConvLayer(**config)
class MobileInvertedResidualBlock(MyModule):
    """Residual wrapper: MBConv body plus optional shortcut, with drop-connect
    (stochastic depth) applied only on true identity-shortcut paths."""

    def __init__(self, mobile_inverted_conv, shortcut, drop_connect_rate=0):
        super(MobileInvertedResidualBlock, self).__init__()

        self.mobile_inverted_conv = mobile_inverted_conv
        self.shortcut = shortcut
        self.drop_connect_rate = drop_connect_rate

    def forward(self, x):
        in_channel = x.size(1)
        if self.mobile_inverted_conv is None:
            return x
        if self.shortcut is None:
            return self.mobile_inverted_conv(x)
        residual = self.shortcut(x)
        out = self.mobile_inverted_conv(x)
        # drop-connect only when the shortcut neither resizes nor reprojects
        if self.drop_connect_rate > 0 and in_channel == residual.size(1) and self.shortcut.reduction == 1:
            out = drop_connect(out, p=self.drop_connect_rate, training=self.training)
        return out + residual

    @property
    def module_str(self):
        return '(%s, %s)' % (
            self.mobile_inverted_conv.module_str if self.mobile_inverted_conv is not None else None,
            self.shortcut.module_str if self.shortcut is not None else None
        )

    @property
    def config(self):
        return {
            'name': MobileInvertedResidualBlock.__name__,
            'mobile_inverted_conv': self.mobile_inverted_conv.config if self.mobile_inverted_conv is not None else None,
            'shortcut': self.shortcut.config if self.shortcut is not None else None,
        }

    @staticmethod
    def build_from_config(config):
        body = set_layer_from_config(config['mobile_inverted_conv'])
        skip = set_layer_from_config(config['shortcut'])
        return MobileInvertedResidualBlock(body, skip)
| 12,039 | 30.76781 | 131 | py |
ENCAS | ENCAS-main/networks/modules_alphanet/activations.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# adapted from OFA: https://github.com/mit-han-lab/once-for-all
import torch
import torch.nn as nn
import torch.nn.functional as F
# A memory-efficient implementation of Swish function
class SwishImplementation(torch.autograd.Function):
    """Memory-efficient Swish: saves only the input and recomputes the sigmoid
    in backward instead of caching intermediate activations."""

    @staticmethod
    def forward(ctx, i):
        ctx.save_for_backward(i)
        return i * torch.sigmoid(i)

    @staticmethod
    def backward(ctx, grad_output):
        (i,) = ctx.saved_tensors
        sig = torch.sigmoid(i)
        # d/dx [x * sigmoid(x)] = sigmoid(x) * (1 + x * (1 - sigmoid(x)))
        return grad_output * (sig * (1 + i * (1 - sig)))
class MemoryEfficientSwish(nn.Module):
    """nn.Module wrapper around the memory-efficient Swish autograd function."""

    def forward(self, x):
        # delegate so backward recomputes sigmoid rather than caching it
        return SwishImplementation.apply(x)
class Hswish(nn.Module):
    """Hard swish activation: ``x * relu6(x + 3) / 6`` (MobileNetV3)."""

    def __init__(self, inplace=True):
        super().__init__()
        self.inplace = inplace

    def forward(self, x):
        # inplace applies only to the (x + 3) temporary, never to x itself
        return x * F.relu6(x + 3.0, inplace=self.inplace) / 6.0
#class Swish(nn.Module):
# def __init__(self, inplace=True):
# super(Swish, self).__init__()
# self.inplace = inplace
#
# def forward(self, x):
# return x * torch.sigmoid(x)
class Hsigmoid(nn.Module):
    """Hard sigmoid activation: ``relu6(x + 3) / 6`` — piecewise-linear
    sigmoid approximation used by the SE gate."""

    def __init__(self, inplace=True):
        super().__init__()
        self.inplace = inplace

    def forward(self, x):
        # inplace applies only to the (x + 3) temporary, never to x itself
        return F.relu6(x + 3.0, inplace=self.inplace) / 6.0
| 1,405 | 24.107143 | 70 | py |
ENCAS | ENCAS-main/networks/modules_alphanet/dynamic_ops.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# adapted from OFA: https://github.com/mit-han-lab/once-for-all
from torch.autograd.function import Function
import torch.nn.functional as F
from torch.nn.parameter import Parameter
import torch.nn as nn
import torch
from torch.nn.modules._functions import SyncBatchNorm as sync_batch_norm
import torch.distributed as dist
from .nn_utils import get_same_padding, make_divisible, sub_filter_start_end
from .static_layers import SELayer
class DynamicSeparableConv2d(nn.Module):
    """Depthwise/grouped conv that is elastic in kernel size (and width).

    Stores the largest kernel; smaller active kernels are taken as the
    centered sub-filter. If KERNEL_TRANSFORM_MODE is enabled, each step down
    the kernel-size ladder additionally applies a learned (k^2 x k^2) linear
    transform (OFA's kernel transformation matrices, initialized to identity).
    """
    # None -> plain center-crop of the filter; 1 -> learned kernel transform
    KERNEL_TRANSFORM_MODE = None  # None or 1

    def __init__(self, max_in_channels, kernel_size_list, stride=1, dilation=1, channels_per_group=1):
        super(DynamicSeparableConv2d, self).__init__()

        self.max_in_channels = max_in_channels
        self.channels_per_group = channels_per_group
        assert self.max_in_channels % self.channels_per_group == 0
        self.kernel_size_list = kernel_size_list
        self.stride = stride
        self.dilation = dilation

        self.conv = nn.Conv2d(
            self.max_in_channels, self.max_in_channels, max(self.kernel_size_list), self.stride,
            groups=self.max_in_channels // self.channels_per_group, bias=False,
        )

        self._ks_set = list(set(self.kernel_size_list))
        self._ks_set.sort()  # e.g., [3, 5, 7]
        if self.KERNEL_TRANSFORM_MODE is not None:
            # register scaling parameters
            # 7to5_matrix, 5to3_matrix
            scale_params = {}
            for i in range(len(self._ks_set) - 1):
                ks_small = self._ks_set[i]
                ks_larger = self._ks_set[i + 1]
                param_name = '%dto%d' % (ks_larger, ks_small)
                scale_params['%s_matrix' % param_name] = Parameter(torch.eye(ks_small ** 2))
            for name, param in scale_params.items():
                self.register_parameter(name, param)

        self.active_kernel_size = max(self.kernel_size_list)

    def get_active_filter(self, in_channel, kernel_size):
        """Return the filter bank sliced to ``in_channel`` width and
        ``kernel_size`` (center crop, optionally transformed — see class doc)."""
        out_channel = in_channel
        max_kernel_size = max(self.kernel_size_list)

        start, end = sub_filter_start_end(max_kernel_size, kernel_size)
        filters = self.conv.weight[:out_channel, :in_channel, start:end, start:end]
        if self.KERNEL_TRANSFORM_MODE is not None and kernel_size < max_kernel_size:
            # progressively shrink from the max kernel, applying the learned
            # transform at every step down the kernel-size ladder
            start_filter = self.conv.weight[:out_channel, :in_channel, :, :]  # start with max kernel
            for i in range(len(self._ks_set) - 1, 0, -1):
                src_ks = self._ks_set[i]
                if src_ks <= kernel_size:
                    break
                target_ks = self._ks_set[i - 1]
                start, end = sub_filter_start_end(src_ks, target_ks)
                _input_filter = start_filter[:, :, start:end, start:end]
                _input_filter = _input_filter.contiguous()
                # flatten to (out*in, k^2), apply the (k^2 x k^2) transform, reshape back
                _input_filter = _input_filter.view(_input_filter.size(0), _input_filter.size(1), -1)
                _input_filter = _input_filter.view(-1, _input_filter.size(2))
                _input_filter = F.linear(
                    _input_filter, self.__getattr__('%dto%d_matrix' % (src_ks, target_ks)),
                )
                _input_filter = _input_filter.view(filters.size(0), filters.size(1), target_ks ** 2)
                _input_filter = _input_filter.view(filters.size(0), filters.size(1), target_ks, target_ks)
                start_filter = _input_filter
            filters = start_filter
        return filters

    def forward(self, x, kernel_size=None):
        """Grouped conv with the active (or explicitly given) kernel size;
        group count follows the actual input width."""
        if kernel_size is None:
            kernel_size = self.active_kernel_size
        in_channel = x.size(1)
        assert in_channel % self.channels_per_group == 0

        filters = self.get_active_filter(in_channel, kernel_size).contiguous()
        padding = get_same_padding(kernel_size)
        y = F.conv2d(
            x, filters, None, self.stride, padding, self.dilation, in_channel // self.channels_per_group
        )
        return y
class DynamicPointConv2d(nn.Module):
    """Pointwise (default 1x1) conv that can serve any (in, out) channel slice
    of its maximal weight tensor, selected via ``active_out_channel`` and the
    input's actual channel count."""

    def __init__(self, max_in_channels, max_out_channels, kernel_size=1, stride=1, dilation=1):
        super().__init__()

        self.max_in_channels = max_in_channels
        self.max_out_channels = max_out_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.dilation = dilation

        self.conv = nn.Conv2d(
            self.max_in_channels, self.max_out_channels, self.kernel_size, stride=self.stride, bias=False,
        )

        self.active_out_channel = self.max_out_channels

    def forward(self, x, out_channel=None):
        active_out = self.active_out_channel if out_channel is None else out_channel
        active_in = x.size(1)
        # slice the maximal kernel down to the active (out, in) shape
        weight = self.conv.weight[:active_out, :active_in, :, :].contiguous()
        pad = get_same_padding(self.kernel_size)
        return F.conv2d(x, weight, None, self.stride, pad, self.dilation, 1)
class DynamicLinear(nn.Module):
    """Linear layer supporting variable in/out feature counts via weight slicing.

    Holds one max-size `nn.Linear`; sub-layers are top-left slices of its
    weight (and leading slice of its bias), so all widths share parameters.
    """

    def __init__(self, max_in_features, max_out_features, bias=True):
        super(DynamicLinear, self).__init__()
        self.max_in_features = max_in_features
        self.max_out_features = max_out_features
        self.bias = bias
        # shared full-size linear layer
        self.linear = nn.Linear(self.max_in_features, self.max_out_features, self.bias)
        self.active_out_features = self.max_out_features

    def forward(self, x, out_features=None):
        out_features = self.active_out_features if out_features is None else out_features
        active_weight = self.linear.weight[:out_features, :x.size(1)].contiguous()
        active_bias = self.linear.bias[:out_features] if self.bias else None
        return F.linear(x, active_weight, active_bias)
class AllReduce(Function):
    """Differentiable sum over all distributed workers.

    Forward sums the tensor across the process group; backward all-reduces
    the incoming gradient so every worker ends up with the summed grad.
    """

    @staticmethod
    def forward(ctx, input):
        gathered = [torch.zeros_like(input) for _ in range(dist.get_world_size())]
        # Use allgather instead of allreduce since I don't trust in-place operations ..
        dist.all_gather(gathered, input, async_op=False)
        stacked = torch.stack(gathered, dim=0)
        return torch.sum(stacked, dim=0)

    @staticmethod
    def backward(ctx, grad_output):
        dist.all_reduce(grad_output, async_op=False)
        return grad_output
class DynamicBatchNorm2d(nn.Module):
    '''
    BatchNorm that normalizes only the leading `feature_dim` channels of a
    shared max-width BN (slimmable-style weight sharing).

    1. doesn't accumulate bn statistics (momentum=0.)
    2. BN statistics of the subnets are calculated after training
    3. bn weights are shared
    https://arxiv.org/abs/1903.05134
    https://detectron2.readthedocs.io/_modules/detectron2/layers/batch_norm.html
    '''
    #SET_RUNNING_STATISTICS = False

    def __init__(self, max_feature_dim):
        super(DynamicBatchNorm2d, self).__init__()
        self.max_feature_dim = max_feature_dim
        # shared full-width BN; subnets use leading slices of its parameters
        self.bn = nn.BatchNorm2d(self.max_feature_dim)
        #self.exponential_average_factor = 0 #doesn't accumulate bn stats
        # when True, batch statistics are synchronized across distributed workers
        self.need_sync = False
        # reserved to tracking the performance of the largest and smallest network
        self.bn_tracking = nn.ModuleList(
            [
                nn.BatchNorm2d(self.max_feature_dim, affine=False),
                nn.BatchNorm2d(self.max_feature_dim, affine=False)
            ]
        )

    def forward(self, x):
        # Normalize using only the first x.size(1) channels of the shared BN.
        feature_dim = x.size(1)
        if not self.training:
            raise ValueError('DynamicBN only supports training')
        bn = self.bn
        # need_sync
        if not self.need_sync:
            # plain (per-worker) batch norm over the sliced stats/affine params
            return F.batch_norm(
                x, bn.running_mean[:feature_dim], bn.running_var[:feature_dim], bn.weight[:feature_dim],
                bn.bias[:feature_dim], bn.training or not bn.track_running_stats,
                bn.momentum, bn.eps,
            )
        else:
            # sync path: average per-channel mean and mean-of-squares over all
            # workers, then apply BN as a single per-channel affine transform
            # (same scheme as detectron2's batch_norm, linked above)
            assert dist.get_world_size() > 1, 'SyncBatchNorm requires >1 world size'
            B, C = x.shape[0], x.shape[1]
            mean = torch.mean(x, dim=[0, 2, 3])
            meansqr = torch.mean(x * x, dim=[0, 2, 3])
            assert B > 0, 'does not support zero batch size'
            # pack both vectors so only one collective call is needed
            vec = torch.cat([mean, meansqr], dim=0)
            vec = AllReduce.apply(vec) * (1.0 / dist.get_world_size())
            mean, meansqr = torch.split(vec, C)
            var = meansqr - mean * mean
            invstd = torch.rsqrt(var + bn.eps)
            scale = bn.weight[:feature_dim] * invstd
            bias = bn.bias[:feature_dim] - mean * scale
            scale = scale.reshape(1, -1, 1, 1)
            bias = bias.reshape(1, -1, 1, 1)
            return x * scale + bias
        # upstream alternative kept for reference (uses running statistics):
        #if bn.num_features == feature_dim or DynamicBatchNorm2d.SET_RUNNING_STATISTICS:
        #    return bn(x)
        #else:
        #    exponential_average_factor = 0.0
        #    if bn.training and bn.track_running_stats:
        #        # if statement only here to tell the jit to skip emitting this when it is None
        #        if bn.num_batches_tracked is not None:
        #            bn.num_batches_tracked += 1
        #            if bn.momentum is None:  # use cumulative moving average
        #                exponential_average_factor = 1.0 / float(bn.num_batches_tracked)
        #            else:  # use exponential moving average
        #                exponential_average_factor = bn.momentum
        #    return F.batch_norm(
        #        x, bn.running_mean[:feature_dim], bn.running_var[:feature_dim], bn.weight[:feature_dim],
        #        bn.bias[:feature_dim], bn.training or not bn.track_running_stats,
        #        exponential_average_factor, bn.eps,
        #    )
class DynamicSE(SELayer):
    """Squeeze-and-Excitation block that adapts to the runtime channel count.

    Reuses the parent SELayer's 1x1 reduce/expand convs, slicing their
    weights to the active channel width on every forward pass.
    """

    def __init__(self, max_channel):
        super(DynamicSE, self).__init__(max_channel)

    def forward(self, x):
        in_channel = x.size(1)
        num_mid = make_divisible(in_channel // self.reduction, divisor=8)

        # squeeze: global average pooling over H and W
        y = x.mean(3, keepdim=True).mean(2, keepdim=True)

        # reduce: slice the shared 1x1 conv to [num_mid, in_channel]
        reduce_conv = self.fc.reduce
        w = reduce_conv.weight[:num_mid, :in_channel, :, :].contiguous()
        b = reduce_conv.bias[:num_mid] if reduce_conv.bias is not None else None
        y = F.conv2d(y, w, b, 1, 0, 1, 1)

        y = self.fc.relu(y)

        # expand: slice the shared 1x1 conv back to [in_channel, num_mid]
        expand_conv = self.fc.expand
        w = expand_conv.weight[:in_channel, :num_mid, :, :].contiguous()
        b = expand_conv.bias[:in_channel] if expand_conv.bias is not None else None
        y = F.conv2d(y, w, b, 1, 0, 1, 1)

        # gate: hard sigmoid, then rescale the input channel-wise
        y = self.fc.h_sigmoid(y)
        return x * y
| 10,816 | 39.211896 | 106 | py |
ENCAS | ENCAS-main/networks/modules_alphanet/nn_base.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# adapted from OFA: https://github.com/mit-han-lab/once-for-all
import math
import torch
import torch.nn as nn
# fvcore is optional: PathManager enables reading checkpoints from non-local
# storage backends; when it is missing, checkpoint loading falls back to a
# plain open() (see load_weights_from_pretrained_models).
try:
    from fvcore.common.file_io import PathManager
except Exception:  # fixed: bare `except:` also swallowed SystemExit/KeyboardInterrupt
    pass
class MyModule(nn.Module):
    """Abstract base for modules that expose a string summary and a config dict.

    Subclasses must implement forward plus the (de)serialization interface
    below; every method here deliberately raises NotImplementedError.
    """

    def forward(self, x):
        raise NotImplementedError

    @property
    def module_str(self):
        """Human-readable one-line description of the module."""
        raise NotImplementedError

    @property
    def config(self):
        """Serializable dict from which the module can be rebuilt."""
        raise NotImplementedError

    @staticmethod
    def build_from_config(config):
        """Inverse of `config`: construct a module from its config dict."""
        raise NotImplementedError
class MyNetwork(MyModule):
    """Base class for full networks: adds BN-parameter helpers, weight init,
    parameter filtering and checkpoint loading on top of MyModule."""

    def forward(self, x):
        raise NotImplementedError

    @property
    def module_str(self):
        raise NotImplementedError

    @property
    def config(self):
        raise NotImplementedError

    @staticmethod
    def build_from_config(config):
        raise NotImplementedError

    def zero_last_gamma(self):
        """Zero-init the last BN gamma of each residual block (subclass hook)."""
        raise NotImplementedError

    # ----- implemented methods -----

    def set_bn_param(self, momentum, eps):
        """Set momentum/eps on every batch-norm layer in the network.

        A momentum of None selects cumulative moving averages
        (nn.BatchNorm2d semantics).
        """
        for m in self.modules():
            if isinstance(m, (nn.BatchNorm2d, nn.BatchNorm1d, nn.SyncBatchNorm)):
                m.momentum = float(momentum) if momentum is not None else None
                m.eps = float(eps)
        return

    def get_bn_param(self):
        """Return {'momentum', 'eps'} of the first batch-norm layer found, else None."""
        for m in self.modules():
            if isinstance(m, (nn.BatchNorm2d, nn.BatchNorm1d, nn.SyncBatchNorm)):
                return {
                    'momentum': m.momentum,
                    'eps': m.eps,
                }
        return None

    def init_model(self, model_init):
        """Initialize Conv2d ('he_fout'/'he_fin'), BatchNorm and Linear weights."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                if model_init == 'he_fout':
                    n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                    m.weight.data.normal_(0, math.sqrt(2. / n))
                elif model_init == 'he_fin':
                    n = m.kernel_size[0] * m.kernel_size[1] * m.in_channels
                    m.weight.data.normal_(0, math.sqrt(2. / n))
                else:
                    raise NotImplementedError
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, (nn.BatchNorm2d, nn.BatchNorm1d)):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                stdv = 1. / math.sqrt(m.weight.size(1))
                m.weight.data.uniform_(-stdv, stdv)
                if m.bias is not None:
                    m.bias.data.zero_()

    def get_parameters(self, keys=None, mode='include', exclude_set=None):
        """Yield parameters, optionally filtered by name substrings.

        mode='include': yield params whose name contains any of `keys`.
        mode='exclude': yield params whose name contains none of `keys`.
        Names listed in `exclude_set` are always skipped.
        """
        if exclude_set is None:
            exclude_set = {}
        if keys is None:
            for name, param in self.named_parameters():
                if name not in exclude_set:
                    yield param
        elif mode == 'include':
            for name, param in self.named_parameters():
                # idiom: any() replaces the original manual flag loop
                if any(key in name for key in keys) and name not in exclude_set:
                    yield param
        elif mode == 'exclude':
            for name, param in self.named_parameters():
                if not any(key in name for key in keys) and name not in exclude_set:
                    yield param
        else:
            raise ValueError('do not support: %s' % mode)

    def weight_parameters(self, exclude_set=None):
        """All parameters except those named in `exclude_set`."""
        return self.get_parameters(exclude_set=exclude_set)

    def load_weights_from_pretrained_models(self, checkpoint_path, load_from_ema=False):
        """Copy weights from a checkpoint (optionally its EMA copy) into this model."""
        try:
            with PathManager.open(checkpoint_path, 'rb') as f:
                checkpoint = torch.load(f, map_location='cpu')
        except Exception:  # fixed: bare `except:` also swallowed KeyboardInterrupt/SystemExit
            with open(checkpoint_path, 'rb') as f:
                checkpoint = torch.load(f, map_location='cpu')
        assert isinstance(checkpoint, dict)
        pretrained_state_dicts = checkpoint['state_dict']
        if load_from_ema and 'state_dict_ema' in checkpoint:
            pretrained_state_dicts = checkpoint['state_dict_ema']
        for k, v in self.state_dict().items():
            name = k
            if not load_from_ema:
                # non-EMA checkpoints were saved from a DataParallel wrapper -> 'module.' prefix
                name = 'module.' + k if not k.startswith('module') else k
            v.copy_(pretrained_state_dicts[name])
| 4,767 | 30.576159 | 113 | py |
ENCAS | ENCAS-main/networks/modules_alphanet/nn_utils.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# adapted from OFA: https://github.com/mit-han-lab/once-for-all
import torch.nn as nn
from .activations import *
def make_divisible(v, divisor=8, min_value=1):
    """Round `v` to the nearest multiple of `divisor`, clamped below by
    `min_value` (defaults to `divisor` when None) and never dropping more
    than 10% below the original value.

    forked from slim:
    https://github.com/tensorflow/models/blob/\
    0344c5503ee55e24f0de7f37336a6e08f10976fd/\
    research/slim/nets/mobilenet/mobilenet.py#L62-L69
    """
    if min_value is None:
        min_value = divisor
    rounded = max(min_value, int(v + divisor / 2) // divisor * divisor)
    # never let rounding down remove more than 10% of the original value
    if rounded < 0.9 * v:
        rounded += divisor
    return rounded
def sub_filter_start_end(kernel_size, sub_kernel_size):
    """Start/end indices that center a sub-kernel inside a larger kernel.

    Assumes odd sub-kernel sizes: the assert fires when the centered span
    cannot match `sub_kernel_size` exactly.
    """
    center, dev = kernel_size // 2, sub_kernel_size // 2
    start = center - dev
    end = center + dev + 1
    assert end - start == sub_kernel_size
    return start, end
def get_net_device(net):
    """Device of the network's first parameter (assumes all live on one device)."""
    return next(net.parameters()).device
def int2list(val, repeat_time=1):
    """Return `val` as a list; a scalar is repeated `repeat_time` times.

    An existing list is returned as-is (not copied); a tuple is converted.
    """
    if isinstance(val, list):
        return val
    if isinstance(val, tuple):
        return list(val)
    return [val] * repeat_time
def get_same_padding(kernel_size):
    """'Same' padding for an odd kernel size; 2-tuples are handled per dimension."""
    if isinstance(kernel_size, tuple):
        assert len(kernel_size) == 2, 'invalid kernel size: %s' % kernel_size
        return get_same_padding(kernel_size[0]), get_same_padding(kernel_size[1])
    assert isinstance(kernel_size, int), 'kernel size should be either `int` or `tuple`'
    assert kernel_size % 2 > 0, 'kernel size should be odd number'
    return kernel_size // 2
def copy_bn(target_bn, src_bn):
    """Copy the leading slice of src_bn's affine params and running stats into target_bn."""
    dim = target_bn.num_features
    for attr in ('weight', 'bias', 'running_mean', 'running_var'):
        getattr(target_bn, attr).data.copy_(getattr(src_bn, attr).data[:dim])
def build_activation(act_func, inplace=True):
    """Map an activation name to a freshly constructed module (None -> None).

    `inplace` is honored only by the activations that support it.
    Raises ValueError for unknown names.
    """
    if act_func is None:
        return None
    if act_func == 'relu':
        return nn.ReLU(inplace=inplace)
    if act_func == 'relu6':
        return nn.ReLU6(inplace=inplace)
    if act_func == 'tanh':
        return nn.Tanh()
    if act_func == 'sigmoid':
        return nn.Sigmoid()
    if act_func == 'h_swish':
        return Hswish(inplace=inplace)
    if act_func == 'h_sigmoid':
        return Hsigmoid(inplace=inplace)
    if act_func == 'swish':
        return MemoryEfficientSwish()
    raise ValueError('do not support: %s' % act_func)
def drop_connect(inputs, p, training):
    """Drop connect (stochastic depth): randomly zero whole samples.

    Args:
        inputs (tensor: BCWH): input batch.
        p (float: 0.0~1.0): probability of dropping a sample.
        training (bool): when False the input is returned unchanged.

    Returns:
        Tensor with dropped samples zeroed; survivors rescaled by 1/(1-p).
    """
    assert 0 <= p <= 1, 'p must be in range of [0,1]'
    if not training:
        return inputs
    keep_prob = 1.0 - p
    # one Bernoulli(keep_prob) draw per sample, broadcast over C/H/W:
    # floor(keep_prob + U[0,1)) is 1 with probability keep_prob, else 0
    noise = keep_prob + torch.rand(
        [inputs.shape[0], 1, 1, 1], dtype=inputs.dtype, device=inputs.device)
    binary_mask = torch.floor(noise)
    return inputs / keep_prob * binary_mask
| 3,431 | 29.918919 | 96 | py |
ENCAS | ENCAS-main/run_manager/run_manager_my.py | from collections import defaultdict
import time
import torch.nn as nn
import torch.nn.parallel
import torch.optim
from sklearn.metrics import balanced_accuracy_score
from tqdm import tqdm
import torchvision
from ofa.utils import AverageMeter, accuracy
class RunManagerMy:
    """Evaluation manager for single networks, ensembles and confidence cascades.

    `net` may be a single module or a list of modules; a list is treated as an
    ensemble (outputs averaged) or, when `thresholds` are given to validate(),
    as a cascade where later networks only see low-confidence samples.
    """

    def __init__(self, net, run_config, no_gpu=False, sec_obj='flops'):
        self.device = torch.device('cuda:0' if torch.cuda.is_available() and (not no_gpu) else 'cpu')
        # a list of networks means "ensemble/cascade" everywhere below
        self.is_ensemble = isinstance(net, list)
        if self.is_ensemble:
            for net_i in net:
                net_i.to(self.device)
        else:
            net.to(self.device)
        self.accuracy = accuracy
        self.get_scalar_from_accuracy = lambda acc: acc[0].item()
        self.if_enough_vram = False
        self.sec_obj = sec_obj
        self.run_config = run_config
        self.test_criterion = nn.CrossEntropyLoss()

    def update_metric(self, metric_dict, output, labels):
        """Accumulate top-1 accuracy of `output` vs `labels` into metric_dict['top1']."""
        acc1 = self.accuracy(output, labels, topk=(1,))
        acc1 = self.get_scalar_from_accuracy(acc1)
        metric_dict['top1'].update(acc1, output.size(0))

    def validate(self, is_test=False, net=None, data_loader=None, no_logs=False,
                 if_return_outputs=False, resolutions_list=None, thresholds=None, if_return_logit_gaps=False,
                 if_use_logit_gaps=False):
        """Evaluate `net` (module or list of modules) on a data loader.

        thresholds: per-stage confidence thresholds; if given, the list of nets
            is run as a cascade — a later net only predicts samples whose
            confidence (max softmax, or logit gap when `if_use_logit_gaps`)
            fell below the previous stage's threshold.
        resolutions_list: per-net input resolutions; images are resized when
            consecutive cascade stages expect different sizes.
        Returns (avg_loss, dict) where dict holds metric averages and,
        depending on flags, 'output_distr', 'logit_gaps' and
        'n_not_predicted_per_stage'.
        """
        assert not(if_use_logit_gaps and if_return_logit_gaps) # they aren't really mutually exclusive, but it's simpler this way
        if if_return_outputs:
            outputs_to_return = []
        if if_return_logit_gaps:
            logit_gaps_to_return = []
        if data_loader is None:
            if is_test:
                data_loader = self.run_config.test_loader
            else:
                data_loader = self.run_config.valid_loader
        if not self.is_ensemble:
            net.eval()
        else:
            for net_cur in net:
                net_cur.eval()
        losses = AverageMeter()
        metric_dict = defaultdict(lambda: AverageMeter())
        # cascading is enabled purely by the presence of thresholds
        if_cascade = thresholds is not None
        if if_cascade:
            n_not_predicted_per_stage = [0 for _ in range(len(net) - 1)]
        with torch.no_grad(), torch.cuda.amp.autocast():
            with tqdm(total=len(data_loader), desc='Evaluate ', disable=no_logs) as t:
                st = time.time()
                for i, (images, labels, *_) in enumerate(data_loader):
                    images, labels = images.to(self.device), labels.to(self.device)
                    images_orig = None # don't make a backup unless I need to
                    if not self.is_ensemble:
                        output = net(images)
                    else:
                        # stage 0 always predicts everything
                        out_logits = net[0](images)
                        output = torch.nn.functional.softmax(out_logits, dim=1)
                        if if_return_logit_gaps or if_use_logit_gaps:
                            # it only makes sense to store the logit gaps if this is a separate network, not an ensemble/cascade
                            two_max_values = out_logits.topk(k=2, dim=-1).values
                            logit_gap = two_max_values[:, 0] - two_max_values[:, 1]
                        if if_cascade:
                            idx_more_predictions_needed = torch.ones(images.shape[0], dtype=torch.bool)
                        i_net = 1
                        for net_cur in net[1:]:
                            if if_cascade:
                                cur_threshold = thresholds[i_net - 1]
                                # mark confidently-predicted samples as done
                                if if_use_logit_gaps:
                                    idx_more_predictions_needed[logit_gap >= cur_threshold] = False
                                else:
                                    idx_more_predictions_needed[torch.max(output, dim=1).values >= cur_threshold] = False
                                output_tmp = output[idx_more_predictions_needed]
                                if len(output_tmp) == 0:
                                    n_not_predicted = 0
                                else:
                                    if if_use_logit_gaps:
                                        logit_gap_tmp = logit_gap[idx_more_predictions_needed]
                                        not_predicted_idx = logit_gap_tmp < cur_threshold
                                    else:
                                        not_predicted_idx = torch.max(output_tmp, dim=1).values < cur_threshold
                                    n_not_predicted = torch.sum(not_predicted_idx).item()
                                n_not_predicted_per_stage[i_net - 1] += n_not_predicted
                                if n_not_predicted == 0:
                                    break
                            if resolutions_list is not None:
                                # resize lazily, and only when consecutive stages differ
                                if resolutions_list[i_net] != resolutions_list[i_net - 1]:
                                    if images_orig is None:
                                        images_orig = torch.clone(images)
                                    r = resolutions_list[i_net]
                                    images = torchvision.transforms.functional.resize(images_orig, (r, r))
                            if not if_cascade:
                                output_cur = torch.nn.functional.softmax(net_cur(images), dim=1)
                            else:
                                out_logits = net_cur(images[idx_more_predictions_needed][not_predicted_idx])
                                if len(out_logits.shape) < 2: #a single image is left in the batch, need to fix dim
                                    out_logits = out_logits[None,...]
                                output_cur = torch.nn.functional.softmax(out_logits, dim=1)
                            if not if_cascade:
                                # plain ensemble: sum now, divide by len(net) after the loop
                                output += output_cur
                            else:
                                if if_use_logit_gaps:
                                    # firstly, need to overwrite previous predictions (because they didn't really happen if the gap was too small)
                                    output_tmp[not_predicted_idx] = output_cur
                                    output[idx_more_predictions_needed] = output_tmp
                                    # secondly, need to update the logit gap
                                    two_max_values = out_logits.topk(k=2, dim=-1).values
                                    logit_gap_tmp[not_predicted_idx] = two_max_values[:, 0] - two_max_values[:, 1]
                                    # note that the gap for the previously predicted values will be wrong, but it doesn't matter
                                    # because the idx for them has already been set to False
                                    logit_gap[idx_more_predictions_needed] = logit_gap_tmp
                                else:
                                    # running average of the softmax outputs over the stages used so far
                                    n_nets_used_in_cascade = i_net + 1
                                    coeff1 = ((n_nets_used_in_cascade - 1) / n_nets_used_in_cascade)
                                    coeff2 = (1 / n_nets_used_in_cascade)
                                    output_tmp[not_predicted_idx] = coeff1 * output_tmp[not_predicted_idx] \
                                                                    + coeff2 * output_cur #don't need idx here because had it for images passed to the net_cur
                                    output[idx_more_predictions_needed] = output_tmp
                            i_net += 1
                        if not if_cascade:
                            output /= len(net)
                    if if_return_outputs:
                        outputs_to_return.append(output.detach().cpu())
                    if if_return_logit_gaps:
                        logit_gaps_to_return.append(logit_gap.detach().cpu())
                    loss = self.test_criterion(output, labels)
                    self.update_metric(metric_dict, output, labels)
                    losses.update(loss.item(), images.size(0))
                    t.set_postfix({'loss': losses.avg,
                                   **self.get_metric_vals(metric_dict),
                                   'img_size': images.size(2)})
                    t.update(1)
                ed = time.time()
                print(f'Forward time {ed - st}')
        dict_to_return = self.get_metric_vals(metric_dict)
        if if_return_outputs:
            outputs_to_return = torch.cat(outputs_to_return, dim=0).numpy()
            dict_to_return['output_distr'] = outputs_to_return
        if if_return_logit_gaps:
            logit_gaps_to_return = torch.cat(logit_gaps_to_return, dim=0).numpy()
            dict_to_return['logit_gaps'] = logit_gaps_to_return
        if if_cascade:
            dict_to_return['n_not_predicted_per_stage'] = n_not_predicted_per_stage
        return losses.avg, dict_to_return

    def get_metric_vals(self, metric_dict):
        """Collapse each AverageMeter in metric_dict to its running average."""
        return {key: metric_dict[key].avg for key in metric_dict}
ENCAS | ENCAS-main/after_search/store_outputs_timm.py | import copy
import utils
from collections import defaultdict
import pandas as pd
import timm
import json
import os
from concurrent.futures import ProcessPoolExecutor
from pathlib import Path
from os.path import join
import numpy as np
from ofa.utils import AverageMeter, accuracy
from timm.data import create_dataset, create_loader, resolve_data_config
from tqdm import tqdm
import torch
from fvcore.nn import FlopCountAnalysis
from ptflops import get_model_complexity_info
# Reference table of timm ImageNet results shipped with NAT; read below to
# look up each model's evaluation resolution ('img_size' column).
path_timm_csv = os.path.join(utils.NAT_DATA_PATH, 'timm-results-imagenet.csv')
df_timm = pd.read_csv(path_timm_csv)
def compute_outputs_single_network(net_name, data_provider_kwargs, dataset_type, **kwargs):
    """Evaluate one pretrained timm model and collect its logits.

    Args:
        net_name: timm model identifier (must appear in `df_timm`).
        data_provider_kwargs: dict with 'data' (dataset root), 'vld_batch_size'
            and 'n_workers'.
        dataset_type: string containing 'val' (-> ImageNetV2 split) or 'test'
            (-> the ImageNet validation split, used here as the test set).

    Returns:
        (logits, metric_dict): logits is a concatenated CPU tensor of raw model
        outputs; metric_dict holds a running 'acc' AverageMeter and 'flops'
        (in MFLOPs, measured at the model's reference image size).
    """
    model = timm.create_model(net_name, pretrained=True)
    model.cuda()
    model.eval()
    # look up the evaluation resolution reported for this model
    df_row = df_timm[df_timm['model'] == net_name]
    image_size = int(df_row['img_size'].values[0])
    # here I use the timm loader to get exactly the results reported in the repo
    if 'val' in dataset_type:
        split_name = 'imagenetv2_all'
    elif 'test' in dataset_type:
        split_name = 'val'  # "validation" of ImageNet is used for test
    else:
        # fixed: an unrecognized dataset_type used to leave `split_name` unbound,
        # surfacing later as a confusing NameError
        raise ValueError(f'unsupported dataset_type: {dataset_type!r}')
    dataset = create_dataset(
        root=data_provider_kwargs['data'], name='', split=split_name,
        download=False, load_bytes=False, class_map='')
    # minimal stand-in for timm's validate.py argparse namespace
    args_for_data_config = {'model': 'beit_base_patch16_224', 'img_size': None,
                            'input_size': None, 'crop_pct': None, 'mean': None, 'std': None, 'interpolation': '',
                            'num_classes': 1000, 'class_map': '', 'gp': None, 'pretrained': True,
                            'test_pool': False, 'no_prefetcher': False, 'pin_mem': False, 'channels_last': False,
                            'tf_preprocessing': False, 'use_ema': False, 'torchscript': False, 'legacy_jit': False,
                            'prefetcher': True}
    data_config = resolve_data_config(args_for_data_config, model=model, use_test_size=True, verbose=True)
    crop_pct = data_config['crop_pct']
    loader = create_loader(dataset,
                           input_size=data_config['input_size'],
                           batch_size=data_provider_kwargs['vld_batch_size'],
                           use_prefetcher=True,
                           interpolation=data_config['interpolation'],
                           mean=data_config['mean'],
                           std=data_config['std'],
                           num_workers=data_provider_kwargs['n_workers'],
                           crop_pct=crop_pct,
                           pin_memory=True,
                           tf_preprocessing=False)
    n_batches = len(loader)
    # model = torch.nn.DataParallel(model)
    metric_dict = defaultdict(lambda: AverageMeter())
    outputs_to_return = []
    with tqdm(total=n_batches, desc=dataset_type, ncols=130) as t, torch.no_grad():
        for i, (images, labels, *_) in enumerate(loader):
            images, labels = images.cuda(), labels.cuda()
            with torch.cuda.amp.autocast():
                output = model(images)
            outputs_to_return.append(output.detach().cpu())
            acc1 = accuracy(output, labels, topk=(1,))[0].item()
            metric_dict['acc'].update(acc1, output.size(0))
            t.set_postfix({**{key: metric_dict[key].avg for key in metric_dict},
                           'img_size': images.size(2)})
            t.update(1)
    outputs_to_return = torch.cat(outputs_to_return, dim=0)
    if True:
        flops = FlopCountAnalysis(model.cuda(), torch.randn(1, 3, image_size, image_size).cuda())
        metric_dict['flops'] = flops.total() / 10 ** 6
    else:
        # used this to double-check the results - they are consistent between the libraries
        flops = get_model_complexity_info(model.cuda(), (3, image_size, image_size),
                                          print_per_layer_stat=False, as_strings=False, verbose=False)[0]
        metric_dict['flops'] = flops / 10 ** 6
    return outputs_to_return, metric_dict
def store_outputs_many_networks(net_names, data_provider_kwargs, dataset_type, dir_name, **kwargs):
    """Evaluate many timm models, storing softmax outputs and logit gaps on disk.

    For each model, writes `<i>.npy.gz` (float16 softmax distributions) under
    `logs/<dir_name>/pretrained/0/output_distrs_<dataset_type><postfix>/` and the
    top-2 logit gaps under the matching `logit_gaps_...` folder, plus an
    `info.json` with accuracies, FLOPs and model names. `kwargs['postfix']`
    optionally suffixes the folder names. Saving is offloaded to a worker
    process so evaluation and compression overlap.
    """
    save_path_base = join(utils.NAT_LOGS_PATH, dir_name)
    postfix = kwargs.get('postfix', '')
    # fixed: one mkdir with parents=True replaces the level-by-level chain
    out_folder_path = join(save_path_base, 'pretrained', '0', f'output_distrs_{dataset_type}{postfix}')
    Path(out_folder_path).mkdir(parents=True, exist_ok=True)
    info_dict_path = os.path.join(out_folder_path, 'info.json')
    out_folder_logits_path = join(save_path_base, 'pretrained', '0', f'logit_gaps_{dataset_type}{postfix}')
    Path(out_folder_logits_path).mkdir(parents=True, exist_ok=True)
    accs_all = []
    flops_all = []
    with ProcessPoolExecutor(max_workers=1) as process_pool:  # fixed: pool is now shut down
        futures = []
        for i, net_name in enumerate(net_names):
            print(f'{net_name=}')
            if 'efficientnet' not in net_name:  # large effnets use a lot of VRAM => use smaller batch
                logits, metric_dict = compute_outputs_single_network(net_name, data_provider_kwargs, dataset_type)
            else:
                data_provider_kwargs_smaller_batch = copy.deepcopy(data_provider_kwargs)
                data_provider_kwargs_smaller_batch['vld_batch_size'] = 20
                logits, metric_dict = compute_outputs_single_network(net_name, data_provider_kwargs_smaller_batch, dataset_type)
            # fixed: `logits` is already a tensor; torch.tensor(...) copy-constructs
            # from it (and warns) — cast in place instead
            logits_float32 = logits.to(torch.float32)
            acc, flops = metric_dict['acc'].avg, metric_dict['flops']
            accs_all.append(acc)
            flops_all.append(flops)
            # gap between the two largest logits = a cheap confidence measure
            two_max_values = logits_float32.topk(k=2, dim=-1).values
            logit_gap = two_max_values[:, 0] - two_max_values[:, 1]
            future = process_pool.submit(utils.save_gz, path=os.path.join(out_folder_logits_path, f'{i}.npy.gz'),
                                         data=logit_gap.numpy().astype(np.float16))
            futures.append(future)
            outputs = torch.softmax(logits_float32, dim=-1)
            future = process_pool.submit(utils.save_gz, path=os.path.join(out_folder_path, f'{i}.npy.gz'),
                                         data=outputs.numpy().astype(np.float16))
            futures.append(future)
        info_dict = {dataset_type: accs_all, 'flops': flops_all, 'net_names': net_names}
        # fixed: close the json file instead of leaking the handle
        with open(info_dict_path, 'w') as f_info:
            json.dump(info_dict, f_info)
        for f in futures:
            f.result()  # wait on everything
if __name__ == '__main__':
IMAGENET_PATH = '/projects/0/einf2071/data/imagenet/' #'/export/scratch2/aleksand/data/imagenet/'
# You can set the download location of the model checkpoints like this:
# torch.hub.set_dir('/export/scratch2/aleksand/torch_hub/')
# torch.hub.set_dir('/projects/0/einf2071/torch_hub/')
all_timm_models_without_regnetz = ['beit_large_patch16_512', 'beit_large_patch16_384', 'tf_efficientnet_l2_ns', 'tf_efficientnet_l2_ns_475', 'beit_large_patch16_224', 'swin_large_patch4_window12_384', 'vit_large_patch16_384', 'tf_efficientnet_b7_ns', 'beit_base_patch16_384', 'cait_m48_448', 'tf_efficientnet_b6_ns', 'swin_base_patch4_window12_384', 'tf_efficientnetv2_xl_in21ft1k', 'swin_large_patch4_window7_224', 'tf_efficientnetv2_l_in21ft1k', 'vit_large_r50_s32_384', 'dm_nfnet_f6', 'tf_efficientnet_b5_ns', 'cait_m36_384', 'vit_base_patch16_384', 'xcit_large_24_p8_384_dist', 'vit_large_patch16_224', 'xcit_medium_24_p8_384_dist', 'dm_nfnet_f5', 'xcit_large_24_p16_384_dist', 'dm_nfnet_f4', 'tf_efficientnetv2_m_in21ft1k', 'xcit_small_24_p8_384_dist', 'dm_nfnet_f3', 'tf_efficientnetv2_l', 'cait_s36_384', 'ig_resnext101_32x48d', 'xcit_medium_24_p16_384_dist', 'deit_base_distilled_patch16_384', 'xcit_large_24_p8_224_dist', 'tf_efficientnet_b8_ap', 'tf_efficientnet_b8', 'swin_base_patch4_window7_224', 'beit_base_patch16_224', 'tf_efficientnet_b4_ns', 'tf_efficientnet_b7_ap', 'xcit_small_24_p16_384_dist', 'ig_resnext101_32x32d', 'xcit_small_12_p8_384_dist', 'xcit_medium_24_p8_224_dist', 'dm_nfnet_f2', 'tf_efficientnetv2_m', 'cait_s24_384', 'resnetrs420', 'ecaresnet269d', 'vit_base_r50_s16_384', 'resnetv2_152x4_bitm', 'tf_efficientnet_b7', 'xcit_large_24_p16_224_dist', 'xcit_small_24_p8_224_dist', 'efficientnetv2_rw_m', 'tf_efficientnet_b6_ap', 'eca_nfnet_l2', 'xcit_small_12_p16_384_dist', 'resnetrs350', 'dm_nfnet_f1', 'vit_base_patch16_224', 'resnest269e', 'resnetv2_152x2_bitm', 'vit_large_r50_s32_224', 'resnetrs270', 'resnetv2_101x3_bitm', 'resmlp_big_24_224_in22ft1k', 'xcit_large_24_p8_224', 'seresnet152d', 'tf_efficientnetv2_s_in21ft1k', 'xcit_medium_24_p16_224_dist', 'vit_base_patch16_224_miil', 'swsl_resnext101_32x8d', 'tf_efficientnet_b5_ap', 'xcit_small_12_p8_224_dist', 'crossvit_18_dagger_408', 'ig_resnext101_32x16d', 'pit_b_distilled_224', 'tf_efficientnet_b6', 
'resnetrs200', 'cait_xs24_384', 'vit_small_r26_s32_384', 'tf_efficientnet_b3_ns', 'eca_nfnet_l1', 'resnetv2_50x3_bitm', 'resnet200d', 'tf_efficientnetv2_s', 'xcit_small_24_p16_224_dist', 'resnest200e', 'xcit_small_24_p8_224', 'resnetv2_152x2_bit_teacher_384', 'efficientnetv2_rw_s', 'crossvit_15_dagger_408', 'tf_efficientnet_b5', 'vit_small_patch16_384', 'xcit_tiny_24_p8_384_dist', 'xcit_medium_24_p8_224', 'resnetrs152', 'regnety_160', 'twins_svt_large', 'resnet152d', 'resmlp_big_24_distilled_224', 'jx_nest_base', 'cait_s24_224', 'efficientnet_b4', 'deit_base_distilled_patch16_224', 'dm_nfnet_f0', 'swsl_resnext101_32x16d', 'xcit_small_12_p16_224_dist', 'vit_base_patch32_384', 'xcit_small_12_p8_224', 'tf_efficientnet_b4_ap', 'swsl_resnext101_32x4d', 'swin_small_patch4_window7_224', 'twins_pcpvt_large', 'twins_svt_base', 'jx_nest_small', 'deit_base_patch16_384', 'tresnet_m', 'tresnet_xl_448', 'tf_efficientnet_b4', 'resnet101d', 'resnetv2_152x2_bit_teacher', 'xcit_large_24_p16_224', 'resnest101e', 'resnetv2_50x1_bit_distilled', 'pnasnet5large', 'nfnet_l0', 'regnety_032', 'twins_pcpvt_base', 'ig_resnext101_32x8d', 'nasnetalarge', 'xcit_medium_24_p16_224', 'eca_nfnet_l0', 'levit_384', 'xcit_small_24_p16_224', 'xcit_tiny_24_p8_224_dist', 'xcit_tiny_24_p16_384_dist', 'resnet61q', 'crossvit_18_dagger_240', 'gc_efficientnetv2_rw_t', 'pit_b_224', 'crossvit_18_240', 'xcit_tiny_12_p8_384_dist', 'tf_efficientnet_b2_ns', 'resnet51q', 'ecaresnet50t', 'efficientnetv2_rw_t', 'resnetv2_101x1_bitm', 'crossvit_15_dagger_240', 'coat_lite_small', 'mixer_b16_224_miil', 'resnetrs101', 'convit_base', 'tresnet_l_448', 'efficientnet_b3', 'crossvit_base_240', 'cait_xxs36_384', 'ecaresnet101d', 'swsl_resnext50_32x4d', 'visformer_small', 'tresnet_xl', 'resnetv2_101', 'pit_s_distilled_224', 'deit_base_patch16_224', 'xcit_small_12_p16_224', 'tf_efficientnetv2_b3', 'xcit_tiny_24_p8_224', 'ssl_resnext101_32x16d', 'vit_small_r26_s32_224', 'tf_efficientnet_b3_ap', 'tresnet_m_448', 'twins_svt_small', 
'tf_efficientnet_b3', 'rexnet_200', 'ssl_resnext101_32x8d', 'halonet50ts', 'tf_efficientnet_lite4', 'crossvit_15_240', 'halo2botnet50ts_256', 'tnt_s_patch16_224', 'vit_large_patch32_384', 'levit_256', 'tresnet_l', 'wide_resnet50_2', 'jx_nest_tiny', 'lamhalobotnet50ts_256', 'convit_small', 'swin_tiny_patch4_window7_224', 'vit_small_patch16_224', 'tf_efficientnet_b1_ns', 'convmixer_1536_20', 'gernet_l', 'legacy_senet154', 'efficientnet_el', 'coat_mini', 'seresnext50_32x4d', 'gluon_senet154', 'xcit_tiny_12_p8_224_dist', 'deit_small_distilled_patch16_224', 'lambda_resnet50ts', 'resmlp_36_distilled_224', 'swsl_resnet50', 'resnest50d_4s2x40d', 'twins_pcpvt_small', 'pit_s_224', 'haloregnetz_b', 'resmlp_big_24_224', 'crossvit_small_240', 'gluon_resnet152_v1s', 'resnest50d_1s4x24d', 'sehalonet33ts', 'resnest50d', 'cait_xxs24_384', 'xcit_tiny_12_p16_384_dist', 'gcresnet50t', 'ssl_resnext101_32x4d', 'gluon_seresnext101_32x4d', 'gluon_seresnext101_64x4d', 'efficientnet_b3_pruned', 'ecaresnet101d_pruned', 'regnety_320', 'resmlp_24_distilled_224', 'vit_base_patch32_224', 'gernet_m', 'nf_resnet50', 'gluon_resnext101_64x4d', 'ecaresnet50d', 'efficientnet_b2', 'gcresnext50ts', 'resnet50d', 'repvgg_b3', 'vit_small_patch32_384', 'gluon_resnet152_v1d', 'mixnet_xl', 'xcit_tiny_24_p16_224_dist', 'ecaresnetlight', 'inception_resnet_v2', 'resnetv2_50', 'gluon_resnet101_v1d', 'regnety_120', 'resnet50', 'seresnet33ts', 'resnetv2_50x1_bitm', 'gluon_resnext101_32x4d', 'rexnet_150', 'tf_efficientnet_b2_ap', 'ssl_resnext50_32x4d', 'efficientnet_el_pruned', 'gluon_resnet101_v1s', 'regnetx_320', 'tf_efficientnet_el', 'seresnet50', 'vit_base_patch16_sam_224', 'legacy_seresnext101_32x4d', 'repvgg_b3g4', 'tf_efficientnetv2_b2', 'dpn107', 'convmixer_768_32', 'inception_v4', 'skresnext50_32x4d', 'eca_resnet33ts', 'gcresnet33ts', 'tf_efficientnet_b2', 'cspresnext50', 'cspdarknet53', 'dpn92', 'ens_adv_inception_resnet_v2', 'gluon_seresnext50_32x4d', 'gluon_resnet152_v1c', 'efficientnet_b2_pruned', 
'xception71', 'regnety_080', 'resnetrs50', 'deit_small_patch16_224', 'levit_192', 'ecaresnet26t', 'regnetx_160', 'dpn131', 'tf_efficientnet_lite3', 'resnext50_32x4d', 'resmlp_36_224', 'cait_xxs36_224', 'regnety_064', 'xcit_tiny_12_p8_224', 'ecaresnet50d_pruned', 'gluon_xception65', 'gluon_resnet152_v1b', 'resnext50d_32x4d', 'dpn98', 'gmlp_s16_224', 'regnetx_120', 'cspresnet50', 'xception65', 'gluon_resnet101_v1c', 'rexnet_130', 'tf_efficientnetv2_b1', 'hrnet_w64', 'xcit_tiny_24_p16_224', 'dla102x2', 'resmlp_24_224', 'repvgg_b2g4', 'gluon_resnext50_32x4d', 'tf_efficientnet_cc_b1_8e', 'hrnet_w48', 'resnext101_32x8d', 'ese_vovnet39b', 'gluon_resnet101_v1b', 'resnetblur50', 'nf_regnet_b1', 'pit_xs_distilled_224', 'tf_efficientnet_b1_ap', 'eca_botnext26ts_256', 'botnet26t_256', 'efficientnet_em', 'ssl_resnet50', 'regnety_040', 'regnetx_080', 'dpn68b', 'resnet33ts', 'res2net101_26w_4s', 'halonet26t', 'lambda_resnet26t', 'coat_lite_mini', 'legacy_seresnext50_32x4d', 'gluon_resnet50_v1d', 'regnetx_064', 'xception', 'resnet32ts', 'res2net50_26w_8s', 'mixnet_l', 'lambda_resnet26rpt_256', 'hrnet_w40', 'hrnet_w44', 'wide_resnet101_2', 'eca_halonext26ts', 'tf_efficientnet_b1', 'efficientnet_b1', 'gluon_inception_v3', 'repvgg_b2', 'tf_mixnet_l', 'dla169', 'gluon_resnet50_v1s', 'legacy_seresnet152', 'tf_efficientnet_b0_ns', 'xcit_tiny_12_p16_224_dist', 'res2net50_26w_6s', 'xception41', 'dla102x', 'regnetx_040', 'resnest26d', 'levit_128', 'dla60_res2net', 'vit_tiny_patch16_384', 'hrnet_w32', 'dla60_res2next', 'coat_tiny', 'selecsls60b', 'legacy_seresnet101', 'repvgg_b1', 'cait_xxs24_224', 'tf_efficientnetv2_b0', 'tv_resnet152', 'bat_resnext26ts', 'efficientnet_b1_pruned', 'dla60x', 'res2next50', 'hrnet_w30', 'pit_xs_224', 'regnetx_032', 'tf_efficientnet_em', 'res2net50_14w_8s', 'hardcorenas_f', 'efficientnet_es', 'gmixer_24_224', 'dla102', 'gluon_resnet50_v1c', 'res2net50_26w_4s', 'selecsls60', 'seresnext26t_32x4d', 'resmlp_12_distilled_224', 'mobilenetv3_large_100_miil', 
'tf_efficientnet_cc_b0_8e', 'resnet26t', 'regnety_016', 'tf_inception_v3', 'rexnet_100', 'seresnext26ts', 'gcresnext26ts', 'xcit_nano_12_p8_384_dist', 'hardcorenas_e', 'efficientnet_b0', 'legacy_seresnet50', 'tv_resnext50_32x4d', 'repvgg_b1g4', 'seresnext26d_32x4d', 'adv_inception_v3', 'gluon_resnet50_v1b', 'res2net50_48w_2s', 'coat_lite_tiny', 'tf_efficientnet_lite2', 'inception_v3', 'eca_resnext26ts', 'hardcorenas_d', 'tv_resnet101', 'densenet161', 'tf_efficientnet_cc_b0_4e', 'densenet201', 'mobilenetv2_120d', 'mixnet_m', 'selecsls42b', 'xcit_tiny_12_p16_224', 'resnet34d', 'tf_efficientnet_b0_ap', 'legacy_seresnext26_32x4d', 'hardcorenas_c', 'dla60', 'crossvit_9_dagger_240', 'tf_mixnet_m', 'regnetx_016', 'convmixer_1024_20_ks9_p14', 'skresnet34', 'gernet_s', 'tf_efficientnet_b0', 'ese_vovnet19b_dw', 'resnext26ts', 'hrnet_w18', 'resnet26d', 'tf_efficientnet_lite1', 'resmlp_12_224', 'mixer_b16_224', 'tf_efficientnet_es', 'densenetblur121d', 'levit_128s', 'hardcorenas_b', 'mobilenetv2_140', 'repvgg_a2', 'xcit_nano_12_p8_224_dist', 'regnety_008', 'dpn68', 'tv_resnet50', 'vit_small_patch32_224', 'mixnet_s', 'vit_tiny_r_s16_p8_384', 'hardcorenas_a', 'densenet169', 'mobilenetv3_large_100', 'tf_mixnet_s', 'mobilenetv3_rw', 'densenet121', 'tf_mobilenetv3_large_100', 'resnest14d', 'efficientnet_lite0', 'xcit_nano_12_p16_384_dist', 'vit_tiny_patch16_224', 'semnasnet_100', 'resnet26', 'regnety_006', 'repvgg_b0', 'fbnetc_100', 'resnet34', 'hrnet_w18_small_v2', 'regnetx_008', 'mobilenetv2_110d', 'efficientnet_es_pruned', 'tf_efficientnet_lite0', 'legacy_seresnet34', 'tv_densenet121', 'mnasnet_100', 'dla34', 'gluon_resnet34_v1b', 'pit_ti_distilled_224', 'deit_tiny_distilled_patch16_224', 'vgg19_bn', 'spnasnet_100', 'regnety_004', 'ghostnet_100', 'crossvit_9_240', 'xcit_nano_12_p8_224', 'regnetx_006', 'vit_base_patch32_sam_224', 'tf_mobilenetv3_large_075', 'vgg16_bn', 'crossvit_tiny_240', 'tv_resnet34', 'swsl_resnet18', 'convit_tiny', 'skresnet18', 'mobilenetv2_100', 
'pit_ti_224', 'ssl_resnet18', 'regnetx_004', 'vgg19', 'hrnet_w18_small', 'xcit_nano_12_p16_224_dist', 'resnet18d', 'tf_mobilenetv3_large_minimal_100', 'deit_tiny_patch16_224', 'mixer_l16_224', 'vit_tiny_r_s16_p8_224', 'legacy_seresnet18', 'vgg16', 'vgg13_bn', 'gluon_resnet18_v1b', 'vgg11_bn', 'regnety_002', 'xcit_nano_12_p16_224', 'vgg13', 'resnet18', 'vgg11', 'regnetx_002', 'tf_mobilenetv3_small_100', 'dla60x_c', 'dla46x_c', 'tf_mobilenetv3_small_075', 'dla46_c', 'tf_mobilenetv3_small_minimal_100']
# it's convenient to download all the models in advance:
for net_name in reversed(all_timm_models_without_regnetz):
try:
model = timm.create_model(net_name, pretrained=True, num_classes=1000)
del model
except:
print(f'Failed {net_name}')
    data_provider_kwargs = {'data': IMAGENET_PATH, 'dataset': 'imagenet', 'n_workers': 8, 'vld_batch_size': 128}
    # Snellius:
    # 'val' -> ImageNetV2 outputs, 'test' -> ImageNet-val outputs; both are
    # written under <NAT_LOGS_PATH>/timm_all/pretrained/0/ by store_outputs_many_networks.
    store_outputs_many_networks(all_timm_models_without_regnetz, data_provider_kwargs, 'val', 'timm_all')
    store_outputs_many_networks(all_timm_models_without_regnetz, data_provider_kwargs, 'test', 'timm_all')
ENCAS | ENCAS-main/after_search/evaluate_stored_outputs.py | import copy
import json
import numpy as np
import os
import torch
import glob
import gzip
import yaml
from matplotlib import pyplot as plt
import utils
from utils import execute_func_for_all_runs_and_combine
labels_path_prefix = utils.NAT_DATA_PATH  # directory containing the stored label .npy files
def evaluate_stored_one_run(run_path, dataset_type, path_labels, **kwargs):
    """Compute the accuracy of every stored subnet output of one run.

    Reads `output_distrs_{dataset_type}{dataset_postfix}/<i>.npy.gz` for each
    subnet, scores the argmax predictions against the labels stored at
    `path_labels`, and writes the accuracies into the run's
    `iter_{max_iter}/best_pareto_val_and_*.json`.

    Fix vs. the original: the json files are opened via context managers so
    the handles are closed deterministically instead of `json.load(open(...))`.
    """
    labels = torch.tensor(np.load(os.path.join(labels_path_prefix, path_labels))).cuda()
    dataset_postfix = kwargs.get('dataset_postfix', '')
    path_stored_outputs = os.path.join(run_path, f'output_distrs_{dataset_type}{dataset_postfix}')
    max_iter = kwargs.get('max_iter', 15)
    max_iter_path = os.path.join(run_path, f'iter_{max_iter}')
    n_nets = len(glob.glob(os.path.join(path_stored_outputs, '*.npy.gz')))
    info_path = glob.glob(os.path.join(path_stored_outputs, 'info.json'))[0]
    info_path_new = os.path.join(max_iter_path, f'best_pareto_val_and_{dataset_type}{dataset_postfix}.json')
    accs = []
    for i_net in range(n_nets):
        path = os.path.join(path_stored_outputs, f'{i_net}.npy.gz')
        with gzip.GzipFile(path, 'r') as f:
            outs = np.asarray(np.load(f))
        outs = torch.tensor(outs).cuda()
        preds = torch.argmax(outs, axis=-1)
        acc = torch.sum(preds == labels) / len(labels) * 100
        accs.append(acc.item())
    print(accs)
    with open(info_path) as f:
        info = json.load(f)
    info[dataset_type + dataset_postfix] = accs
    with open(info_path_new, 'w') as f:
        json.dump(info, f)
    return accs
def evaluate_stored_whole_experiment(experiment_name, dataset_type, path_labels, **kwargs):
    """Apply `evaluate_stored_one_run` to every run of the experiment."""
    execute_func_for_all_runs_and_combine(
        experiment_name, evaluate_stored_one_run,
        dataset_type=dataset_type, path_labels=path_labels, **kwargs)
def evaluate_stored_one_run_cascade(run_path, dataset_type, path_labels, **kwargs):
    """Re-evaluate every cascade of one run from stored per-subnet outputs.

    Reads the cascade description yaml (`cascade_info_name`), replays each
    cascade on the stored output distributions of its member subnets, and
    writes the accuracies plus the recomputed (data-dependent) FLOPs to
    `cascade_info_name_new`.
    """
    labels = torch.tensor(np.load(os.path.join(labels_path_prefix, path_labels)))
    cascade_info_name = kwargs.get('cascade_info_name', 'posthoc_ensemble.yml')
    cascade_info_name_new = kwargs.get('cascade_info_name_new', 'posthoc_ensemble_from_stored.yml')
    cascade_info_path = glob.glob(os.path.join(run_path, cascade_info_name))[0]
    info = yaml.safe_load(open(cascade_info_path))
    cfgs, thresholds, original_indices, weight_paths, flops_all, ensemble_ss_names, search_space_names = info['cfgs'], info.get('thresholds', None), info['original_indices'], info['weight_paths'], info['flops_all'], info['ensemble_ss_names'], info['search_space_names']
    if thresholds is None:
        # for ensembles (from ENCAS-ensemble) there are no thresholds, which is equivalent to all thresholds being 1
        # could have a separate method for ENCAS-ensemble, but this fix seems simpler and should lead to the same result
        thresholds = [[1.0] * len(cfgs[0])] * len(cfgs)
    postfix = info.get('dataset_postfix', '')
    # greedy cascades threshold on the logit gap instead of max softmax confidence
    if_use_logit_gaps = info['algo'] == 'greedy'
    cascade_info_path_new = cascade_info_path.replace(cascade_info_name, cascade_info_name_new)
    dataset_type_for_path = dataset_type
    labels = labels.cuda()
    outs_cache = {}  # path -> cached cuda tensor, so a subnet shared by several cascades is loaded once
    accs = []
    flops_new = []
    for i_cascade, (cfgs_cascade, thresholds_cascade, orig_indices_cascade, weight_paths_cascade, ss_names_cascade) in enumerate(zip(cfgs, thresholds, original_indices, weight_paths, search_space_names)):
        outs = None
        flops_cur = 0
        n_nets_used_in_cascade = 0
        idx_more_predictions_needed = None  # bool mask of samples the cascade hasn't committed to yet
        # threshold_idx = 0
        for i_net, (cfg, orig_idx, weight_path, ss_name) in enumerate(zip(cfgs_cascade, orig_indices_cascade, weight_paths_cascade, ss_names_cascade)):
            if cfg is None: #noop
                continue
            # 1. load this subnet's stored outputs (and logit gaps for greedy cascades)
            base_path = weight_path[:weight_path.find('/iter')] #need run path
            path = os.path.join(base_path, f'output_distrs_{dataset_type_for_path}{postfix}', f'{orig_idx}.npy.gz')
            if path not in outs_cache:
                with gzip.GzipFile(path, 'r') as f:
                    outs_cur = np.asarray(np.load(f))
                outs_cur = torch.tensor(outs_cur).cuda()
                outs_cache[path] = outs_cur
            outs_cur = torch.clone(outs_cache[path])
            if if_use_logit_gaps:
                path = os.path.join(base_path, f'logit_gaps_{dataset_type_for_path}{postfix}', f'{orig_idx}.npy.gz')
                with gzip.GzipFile(path, 'r') as f:
                    logit_gaps_cur = np.asarray(np.load(f))
                logit_gaps_cur = torch.tensor(logit_gaps_cur)
            # 2. predict
            if idx_more_predictions_needed is None:
                # first non-noop net of the cascade predicts on all samples
                idx_more_predictions_needed = torch.ones(outs_cur.shape[0], dtype=torch.bool)
                outs = outs_cur
                n_nets_used_in_cascade = 1
                flops_cur += flops_all[ensemble_ss_names.index(ss_name) + 1][orig_idx] # "+1" because the first one is noop
                if if_use_logit_gaps:
                    logit_gaps = logit_gaps_cur
            else:
                threshold = thresholds_cascade[i_net - 1]
                # samples whose confidence already exceeds the threshold are final
                if not if_use_logit_gaps:
                    idx_more_predictions_needed[torch.max(outs, dim=1).values > threshold] = False
                else:
                    idx_more_predictions_needed[logit_gaps > threshold] = False
                outs_tmp = outs[idx_more_predictions_needed] # outs_tmp is needed because I wanna do (in the end) x[idx1][idx2] = smth, and that doesn't modify the original x
                if not if_use_logit_gaps:
                    not_predicted_idx = torch.max(outs_tmp, dim=1).values <= threshold
                else:
                    logit_gap_tmp = logit_gaps[idx_more_predictions_needed]
                    not_predicted_idx = logit_gap_tmp <= threshold
                n_not_predicted = torch.sum(not_predicted_idx).item()
                if n_not_predicted == 0:
                    break
                if not if_use_logit_gaps:
                    # running average of the outputs of all nets used so far
                    n_nets_used_in_cascade += 1
                    coeff1 = (n_nets_used_in_cascade - 1) / n_nets_used_in_cascade # for the current predictions that may already be an average
                    coeff2 = 1 / n_nets_used_in_cascade # for the predictions of the new model
                    outs_tmp[not_predicted_idx] = coeff1 * outs_tmp[not_predicted_idx] \
                                                  + coeff2 * outs_cur[idx_more_predictions_needed][not_predicted_idx]
                    outs[idx_more_predictions_needed] = outs_tmp
                else:
                    # firstly, need to overwrite previous predictions (because they didn't really happen if the gap was too small)
                    outs_tmp[not_predicted_idx] = outs_cur[idx_more_predictions_needed][not_predicted_idx]
                    outs[idx_more_predictions_needed] = outs_tmp
                    # secondly, need to update the logit gap
                    logit_gap_tmp[not_predicted_idx] = logit_gaps_cur[idx_more_predictions_needed][not_predicted_idx]
                    # note that the gap for the previously predicted values will be wrong, but it doesn't matter
                    # because the idx for them has already been set to False
                    logit_gaps[idx_more_predictions_needed] = logit_gap_tmp
                # only samples that actually reached this net contribute its flops
                flops_cur += flops_all[ensemble_ss_names.index(ss_name) + 1][orig_idx] * (n_not_predicted / len(labels))
        assert outs is not None
        preds = torch.argmax(outs, axis=-1)
        acc = torch.sum(preds == labels) / len(labels) * 100
        accs.append(acc.item())
        print(f'{i_cascade}: {accs[-1]}')
        flops_new.append(flops_cur)
    print(accs)
    info['val'] = info['true_errs'] # this whole thing is a mess; in plot_results, 'val' is read, but assumed to be true_errs
    info['flops_old'] = info['flops']
    info['flops'] = flops_new
    info[dataset_type] = accs
    yaml.safe_dump(info, open(cascade_info_path_new, 'w'), default_flow_style=None)
    return accs
def evaluate_stored_whole_experiment_cascade(experiment_name, dataset_type, path_labels, **kwargs):
    """Apply `evaluate_stored_one_run_cascade` to every run of the experiment."""
    execute_func_for_all_runs_and_combine(
        experiment_name, evaluate_stored_one_run_cascade,
        dataset_type=dataset_type, path_labels=path_labels, **kwargs)
def filter_one_run_cascade(run_path, cascade_info_name, cascade_info_name_new, **kwargs):
    """Keep only the cascades forming a (rounded) Pareto front on validation.

    Saves the filtered info yaml to `cascade_info_name_new` and a before/after
    flops-vs-test plot for a quick visual sanity check.
    """
    # filter indices based on val
    # assume that in 'info' the pareto front for 'val' is stored
    # note that even though test was computed for all the cascades for convenience, no selection is done on test.
    cascade_info_path = glob.glob(os.path.join(run_path, cascade_info_name))[0]
    info = yaml.safe_load(open(cascade_info_path))
    cfgs = info['cfgs']
    def create_idx(key_for_filt):
        # Boolean mask keeping cascades whose rounded metric strictly improves
        # on all previous ones (presumably the list is ordered by increasing
        # flops — TODO confirm against the search output format).
        val_for_filt = info[key_for_filt]
        if_round = True
        if if_round:
            val_for_filt_new = []
            for v in val_for_filt:
                if kwargs.get('subtract_from_100', True):
                    v = 100 - v  # stored value is an error => convert to accuracy
                v = round(v * 10) / 10 # wanna have unique digit after comma
                val_for_filt_new.append(v)
            val_for_filt = val_for_filt_new
        cur_best = 0.
        idx_new_pareto = np.zeros(len(cfgs), dtype=bool)
        for i in range(len(cfgs)):
            if val_for_filt[i] > cur_best:
                idx_new_pareto[i] = True
                cur_best = val_for_filt[i]
        return idx_new_pareto
    def filter_by_idx(l, idx):
        # apply a boolean mask to a plain python list
        return np.array(l)[idx].tolist()
    def filter_info(key_list, idx):
        # apply the mask to every per-cascade list present in `info`
        info_new = copy.deepcopy(info)
        for k in key_list:
            if k in info_new:
                info_new[k] = filter_by_idx(info[k], idx)
        return info_new
    val_key = kwargs.get('val_key', 'val')
    idx_new_pareto = create_idx(val_key)
    info_new = filter_info(['cfgs', 'flops', 'flops_old', 'test', 'thresholds', 'true_errs', 'val', 'weight_paths',
                            'search_space_names', 'original_indices'], idx_new_pareto)
    plt.plot(info['flops'], info['test'], '-o')
    plt.plot(info_new['flops'], info_new['test'], '-o')
    plt.savefig(os.path.join(run_path, 'filtered.png'))
    plt.show()
    plt.close()
    cascade_info_path_new = cascade_info_path.replace(cascade_info_name, cascade_info_name_new)
    yaml.safe_dump(info_new, open(cascade_info_path_new, 'w'), default_flow_style=None)
def filter_whole_experiment_cascade(experiment_name, cascade_info_name, cascade_info_name_new, **kwargs):
    """Apply `filter_one_run_cascade` to every run of the experiment."""
    execute_func_for_all_runs_and_combine(experiment_name, filter_one_run_cascade, cascade_info_name=cascade_info_name,
                                          cascade_info_name_new=cascade_info_name_new, **kwargs) | 10,795 | 50.409524 | 269 | py |
ENCAS | ENCAS-main/after_search/average_weights.py | import os
import torch
import utils
def swa(run_path, iters, supernet_name_in, supernet_name_out):
    """Average supernet weights over checkpoints (Stochastic Weight Averaging).

    Loads `run_path/iter_<i>/<supernet_name_in>` for every i in `iters`,
    averages all floating-point parameters, keeps the last checkpoint's value
    for integer buffers (e.g. num_batches_tracked, which can't be averaged),
    and saves the result next to the last checkpoint as `supernet_name_out`.

    Fix vs. the original: the accumulator is a clone, so the in-place `+=`
    no longer mutates the first loaded checkpoint's tensors via aliasing.
    """
    checkpoint_paths = [os.path.join(run_path, f'iter_{i}', supernet_name_in) for i in iters]
    # read checkpoints
    checkpoints = [torch.load(p, map_location='cpu') for p in checkpoint_paths]
    state_dicts = [c['model_state_dict'] for c in checkpoints]
    # for all keys, average
    out_state_dict = {}
    for k, v in state_dicts[0].items():
        if v.data.dtype in [torch.int, torch.long]:
            # num batches tracked => makes sense to take the last value
            out_state_dict[k] = state_dicts[-1][k]
            continue
        acc = state_dicts[0][k].clone()  # clone: don't mutate the loaded checkpoint in place
        for state_dict in state_dicts[1:]:
            acc += state_dict[k]
        out_state_dict[k] = acc / len(state_dicts)
    # save the result, reusing the last checkpoint's metadata
    out_checkpoint = checkpoints[-1]
    out_checkpoint['model_state_dict'] = out_state_dict
    torch.save(out_checkpoint, os.path.join(run_path, f'iter_{iters[-1]}', supernet_name_out))
def swa_for_whole_experiment(experiment_name, iters, supernet_name_in, target_runs=None):
    """Run `swa` for every (algo, run) directory of the experiment.

    `target_runs` optionally restricts the work to a subset of run indices.
    """
    nsga_logs_path = utils.NAT_LOGS_PATH
    experiment_path = os.path.join(nsga_logs_path, experiment_name)
    algo_names = []
    image_paths = []
    # iterate algo subdirectories in reverse alphabetical order
    for f in reversed(sorted(os.scandir(experiment_path), key=lambda e: e.name)):
        if not f.is_dir():
            continue
        name_cur = f.name
        algo_names.append(name_cur)
        for run_folder in os.scandir(f.path):
            if not run_folder.is_dir():
                continue
            run_idx = int(run_folder.name)
            if target_runs is not None and run_idx not in target_runs:
                continue
            run_path = os.path.join(experiment_path, name_cur, str(run_idx))
            # NOTE(review): `swa` returns None, so `image_paths` collects Nones;
            # looks like leftover from a version that produced plots — confirm
            im_path = swa(run_path, iters, supernet_name_in, utils.transform_supernet_name_swa(supernet_name_in, len(iters)))
            image_paths.append(im_path) | 2,015 | 39.32 | 125 | py |
ENCAS | ENCAS-main/encas/encas_api.py | import glob
import pickle
import gzip
import numpy as np
import os
import torch
from utils import threshold_gene_to_value_moregranular as threshold_gene_to_value
class EncasAPI:
    def __init__(self, filename):
        """Load the pickled evaluation data the ENCAS search scores genomes on.

        `filename` is a pickle with: noop flag, per-subnet flops, search goal
        ('ensemble' or 'cascade'), path to the label .npy, and directories of
        gzipped per-subnet output distributions.
        """
        self.use_cache = True
        kwargs = pickle.load(open(filename, 'rb'))
        self.if_allow_noop = kwargs['if_allow_noop']
        self.subnet_to_flops = kwargs['subnet_to_flops']
        self.search_goal = kwargs['search_goal'] # ensemble or cascade
        labels_path = kwargs['labels_path']
        self.labels = torch.tensor(np.load(labels_path)).cuda()
        output_distr_paths = kwargs['output_distr_paths']
        outputs_all = []
        for p in output_distr_paths:
            output_distr = []
            n_files = len(glob.glob(os.path.join(p, "*.npy.gz")))
            for j in range(n_files):
                print(os.path.join(p, f'{j}.npy.gz'))
                with gzip.GzipFile(os.path.join(p, f'{j}.npy.gz'), 'r') as f:
                    # float16 + a leading singleton axis for later concatenation
                    output_distr.append(np.asarray(np.load(f), dtype=np.float16)[None, ...])
            outputs_all += output_distr
        if self.if_allow_noop:
            # gene 0 is a no-op "network" whose outputs are all zeros
            output_distr_onenet_noop = np.zeros_like(output_distr[0]) # copy arbitrary one to get the shape
            if len(output_distr_onenet_noop.shape) < 3:
                output_distr_onenet_noop = output_distr_onenet_noop[None, ...]
            outputs_all = [output_distr_onenet_noop] + outputs_all
        if True: #pre-allocation & concatenation on CPU is helpful when there's not enough VRAM
            preallocated_array = np.zeros((len(outputs_all), outputs_all[-1].shape[1], outputs_all[-1].shape[2]), dtype=np.float16)
            np.concatenate((outputs_all), axis=0, out=preallocated_array)
            self.subnet_to_output_distrs = torch.tensor(preallocated_array).cuda()
        else:
            outputs_all = [torch.tensor(o).cuda() for o in outputs_all]
            self.subnet_to_output_distrs = torch.cat(outputs_all, dim=0)
        print(f'{self.subnet_to_output_distrs.shape=}')
        print(f'{len(self.subnet_to_flops)=}')
def _fitness_ensemble(self, solution):
solution_size = len(solution)
nets_used = solution_size
if self.if_allow_noop:
n_noops = sum([1 if g == 0 else 0 for g in solution])
if n_noops == nets_used:
return (-100, -1e5)
nets_used -= n_noops
preds = torch.clone(self.subnet_to_output_distrs[solution[0]])
for j in range(1, solution_size):
preds_cur = self.subnet_to_output_distrs[solution[j]]
preds += preds_cur
preds /= nets_used
output = torch.argmax(preds, 1)
err = (torch.sum(self.labels != output).item() / len(self.labels)) * 100
flops = sum([self.subnet_to_flops[solution[j]] for j in range(solution_size)])
obj0_proper_form, obj1_proper_form = -err, -flops
return (obj0_proper_form, obj1_proper_form)
    def _fitness_cascade(self, solution):
        """Score a cascade genome [net_1..net_k, thr_1..thr_{k-1}].

        Samples whose max confidence after net_i exceeds the decoded thr_i
        stop there; the rest are passed on and their predictions are averaged
        over the nets actually used. Returns (-error%, -expected flops).
        """
        max_n_nets = (len(solution) + 1) // 2
        solution_nets, solution_thresholds = solution[:max_n_nets], solution[max_n_nets:]
        n_nets_not_noops = max_n_nets
        if self.if_allow_noop:
            n_noops = sum([1 if g == 0 else 0 for g in solution_nets])
            if n_noops == n_nets_not_noops:
                # degenerate all-noop genome: worst possible fitness
                return (-100, -1e5)
            n_nets_not_noops -= n_noops
        n_nets_used_in_cascade = 0
        preds = torch.tensor(self.subnet_to_output_distrs[solution_nets[0]])
        flops = self.subnet_to_flops[solution_nets[0]]
        n_nets_used_in_cascade += int(solution_nets[0] != 0)
        # bool mask of samples that still need a (better) prediction
        idx_more_predictions_needed = torch.ones(preds.shape[0], dtype=torch.bool)
        for j in range(1, max_n_nets):
            if solution_nets[j] == 0: # noop
                continue
            # decode the integer threshold gene into an actual confidence value
            cur_threshold = threshold_gene_to_value[solution_thresholds[j - 1]]
            idx_more_predictions_needed[torch.max(preds, dim=1).values > cur_threshold] = False
            preds_tmp = preds[idx_more_predictions_needed] #preds_tmp is needed because I wanna do (in the end) x[idx1][idx2] = smth, and that doesn't modify the original x
            not_predicted_idx = torch.max(preds_tmp, dim=1).values <= cur_threshold
            # not_predicted_idx = torch.max(preds, dim=1).values < cur_threshold
            n_not_predicted = torch.sum(not_predicted_idx).item()
            # print(f'{n_not_predicted=}')
            if n_not_predicted == 0:
                break
            n_nets_used_in_cascade += 1 #it's guaranteed to not be a noop
            preds_cur = self.subnet_to_output_distrs[solution_nets[j]]
            if_average_outputs = True
            if if_average_outputs:
                # running average over the nets used so far
                coeff1 = (n_nets_used_in_cascade - 1) / n_nets_used_in_cascade # for the current predictions that may already be an average
                coeff2 = 1 / n_nets_used_in_cascade # for the predictions of the new model
                preds_tmp[not_predicted_idx] = coeff1 * preds_tmp[not_predicted_idx] \
                                               + coeff2 * preds_cur[idx_more_predictions_needed][not_predicted_idx]
                preds[idx_more_predictions_needed] = preds_tmp
            else:
                preds_tmp[not_predicted_idx] = preds_cur[idx_more_predictions_needed][not_predicted_idx]
                preds[idx_more_predictions_needed] = preds_tmp
            # only samples that actually reached net j pay its flops
            flops += self.subnet_to_flops[solution_nets[j]] * (n_not_predicted / len(self.labels))
        output = torch.argmax(preds, dim=1)
        err = (torch.sum(self.labels != output).item() / len(self.labels)) * 100
        obj0_proper_form, obj1_proper_form = -err, -flops
        return (obj0_proper_form, obj1_proper_form)
    def fitness(self, solution):
        """Evaluate one genome; returns (-error%, -flops), both to be maximized."""
        # st = time.time()
        solution = [int(x) for x in solution]
        if self.search_goal == 'ensemble':
            res = self._fitness_ensemble(solution)
        elif self.search_goal == 'cascade':
            res = self._fitness_cascade(solution)
        else:
            raise NotImplementedError(f'Unknown {self.search_goal=}')
        # ed = time.time()
        # self.avg_time.update(ed - st)
        # if self.avg_time.count % 500 == 0:
        #     print(f'Avg fitness time is {self.avg_time.avg} @ {self.avg_time.count}')
        return res | 6,361 | 42.278912 | 172 | py |
ENCAS | ENCAS-main/encas/greedy_search.py | # implementation of the algorithm from the paper http://proceedings.mlr.press/v80/streeter18a/streeter18a.pdf
import time
from concurrent.futures import ProcessPoolExecutor
import numpy as np
import torch
import utils
class GreedySearchWrapperEnsembleClassification:
    def __init__(self, alphabet, subnet_to_output_distrs, subnet_to_flops, labels, if_allow_noop, ensemble_size, logit_gaps_all, **kwargs):
        """Precompute what the greedy cascade search needs: stored outputs,
        their argmaxes, per-subnet sorted unique logit gaps (the candidate
        thresholds), labels and flops."""
        super().__init__()
        # ConfidentModel is implemented as boolean idx atop model's predictions
        torch.multiprocessing.set_start_method('spawn')
        # need to set up logging in subprocesses, so:
        self.log_file_path = kwargs['log_file_path']
        self.subnet_to_output_distrs = torch.tensor(subnet_to_output_distrs)
        self.subnet_to_output_distrs_argmaxed = torch.argmax(self.subnet_to_output_distrs, -1)
        self.subnet_to_logit_gaps = torch.tensor(logit_gaps_all) + 1e-7 # float16 => some are zeroes, and that breaks the 0 threshold
        subnet_to_logit_gaps_fp32 = torch.tensor(self.subnet_to_logit_gaps, dtype=torch.float32)
        self.subnet_to_logit_gaps_unique_sorted = {}
        for i in range(len(self.subnet_to_logit_gaps)):
            tmp = torch.sort(torch.unique(subnet_to_logit_gaps_fp32[i]))[0]
            self.subnet_to_logit_gaps_unique_sorted[i] = tmp
        self.labels = torch.tensor(labels)
        self.subnet_to_flops = subnet_to_flops
        self.size_to_pad_to = ensemble_size # to pad to this size - doesn't influence the algorithm, only the ease of saving
    # In a bunch of places in the code I make the assumption that noop is included
@staticmethod
def acc_constraint_holds(preds1, preds2, labels, multiplier_ref_acc):
def accuracy(preds, labels):
return torch.sum(labels == preds) / len(labels) * 100
acc1 = accuracy(preds1, labels)
acc2 = accuracy(preds2, labels)
if acc1 >= multiplier_ref_acc * acc2 and acc1 != 0:
return acc1.item()
return None
    @staticmethod
    def confident_model_set(validation_unpredicted_idx, idx_subnet_ref_acc, multiplier_ref_acc, already_used_subnets,
                            subnet_to_output_distrs_argmaxed, subnet_to_logit_gaps, subnet_to_logit_gaps_unique_sorted, labels):
        """For every not-yet-used subnet, find the smallest logit-gap threshold
        at which its confident predictions (restricted to still-unpredicted
        samples) satisfy the accuracy constraint w.r.t. the reference subnet.

        Returns three dicts keyed by subnet index: boolean mask of samples the
        subnet would predict, the chosen threshold, and the achieved accuracy
        (None when the constraint cannot be met at any threshold).
        """
        subnet_to_predicted_idx = {}
        subnet_to_threshold = {}
        subnet_to_acc = {}
        for i_model in range(subnet_to_output_distrs_argmaxed.shape[0]):
            if i_model in already_used_subnets:
                continue
            logit_gaps_cur = subnet_to_logit_gaps[i_model] # (n_samples, 1)
            logit_gaps_cur_unique_sorted = subnet_to_logit_gaps_unique_sorted[i_model]
            # first check zero, for speed
            t = 0.0
            predicted_idx = logit_gaps_cur >= t
            predicted_idx = predicted_idx * validation_unpredicted_idx # only interested in predictions on yet-unpredicted images
            cur_subnet_to_output_distr_argmaxed = subnet_to_output_distrs_argmaxed[i_model]
            ref_subnet_to_output_distr_argmaxed = subnet_to_output_distrs_argmaxed[idx_subnet_ref_acc]
            acc = GreedySearchWrapperEnsembleClassification.acc_constraint_holds(cur_subnet_to_output_distr_argmaxed[predicted_idx],
                                                                                 ref_subnet_to_output_distr_argmaxed[predicted_idx],
                                                                                 labels[predicted_idx], multiplier_ref_acc)
            if acc is None:
                # otherwise, sweep candidate thresholds from smallest to largest gap
                for ind in range(logit_gaps_cur_unique_sorted.shape[0]):
                    t = logit_gaps_cur_unique_sorted[ind]
                    predicted_idx = logit_gaps_cur >= t
                    predicted_idx = predicted_idx * validation_unpredicted_idx # only interested in predictions on yet-unpredicted images
                    acc = GreedySearchWrapperEnsembleClassification.acc_constraint_holds(cur_subnet_to_output_distr_argmaxed[predicted_idx],
                                                                                        ref_subnet_to_output_distr_argmaxed[predicted_idx],
                                                                                        labels[predicted_idx], multiplier_ref_acc)
                    if acc is not None:
                        if ind > 0:
                            # presumably nudges just above the previous unique gap
                            # so the `>=` comparisons stay exact — verify
                            t = 1e-7 + logit_gaps_cur_unique_sorted[ind - 1].item()
                        else:
                            t = t.item()
                        break
            subnet_to_predicted_idx[i_model] = predicted_idx
            subnet_to_threshold[i_model] = t
            subnet_to_acc[i_model] = acc
        return subnet_to_predicted_idx, subnet_to_threshold, subnet_to_acc
    @staticmethod
    def _search_for_model_and_multiplier(kwargs):
        """Build one greedy cascade for a fixed (reference subnet, accuracy
        multiplier) pair; returns ([solution], [(true_err, flops)]).

        Runs in a worker process, hence the single dict argument and the
        per-process logging/thread setup.
        """
        torch.set_num_threads(1)
        self, multiplier_ref_acc, idx_subnet_ref_acc = kwargs['self'], kwargs['multiplier_ref_acc'], kwargs['idx_subnet_ref_acc']
        utils.setup_logging(self.log_file_path)
        st = time.time()
        subnet_to_output_distrs = self.subnet_to_output_distrs
        subnet_to_output_distrs_argmaxed = self.subnet_to_output_distrs_argmaxed
        subnet_to_logit_gaps = self.subnet_to_logit_gaps
        labels = self.labels
        all_solutions = []
        all_objectives = []
        validation_unpredicted_idx = torch.ones_like(labels, dtype=bool)
        cur_cascade = [0] # don't actually need noop, but helpful for saving (i.e. this is a crutch)
        cur_thresholds = []
        cur_flops = 0
        cur_predictions = torch.zeros_like(
            subnet_to_output_distrs[idx_subnet_ref_acc]) # idx is not important, they all have the same shape
        # keep appending the subnet with the best predictions-per-flop ratio
        # until every validation sample has been predicted
        while torch.sum(validation_unpredicted_idx) > 0:
            subnet_to_predicted_idx, subnet_to_threshold, subnet_to_acc = \
                GreedySearchWrapperEnsembleClassification.confident_model_set(validation_unpredicted_idx,
                                                                              idx_subnet_ref_acc,
                                                                              multiplier_ref_acc, set(cur_cascade),
                                                                              subnet_to_output_distrs_argmaxed,
                                                                              subnet_to_logit_gaps, self.subnet_to_logit_gaps_unique_sorted, labels)
            best_new_subnet_index = -1
            best_r = 0
            for i_model in subnet_to_predicted_idx.keys():
                n_predicted_cur = torch.sum(subnet_to_predicted_idx[i_model])
                if n_predicted_cur == 0:
                    continue
                # filter to M_useful
                if subnet_to_acc[i_model] is None:
                    continue
                # the "confident_model_set", as described in the paper, already generates only models that satisfy
                # the accuracy constraint, thus M_useful and M_accurate are the same
                r = n_predicted_cur / self.subnet_to_flops[i_model]
                if r > best_r:
                    best_r = r
                    best_new_subnet_index = i_model
            cur_cascade.append(best_new_subnet_index)
            cur_thresholds.append(subnet_to_threshold[best_new_subnet_index])
            # only the still-unpredicted fraction of samples pays this subnet's flops
            cur_flops += (torch.sum(validation_unpredicted_idx) / len(labels)) * self.subnet_to_flops[
                best_new_subnet_index]
            predicted_by_best_subnet_idx = subnet_to_predicted_idx[best_new_subnet_index]
            cur_predictions[predicted_by_best_subnet_idx] = subnet_to_output_distrs[best_new_subnet_index][
                predicted_by_best_subnet_idx]
            validation_unpredicted_idx[predicted_by_best_subnet_idx] = False
        cur_flops = cur_flops.item()
        cur_cascade = cur_cascade[1:] # I think this should work
        # pad with noops so every stored solution has the same length
        if len(cur_cascade) < self.size_to_pad_to:
            n_to_add = self.size_to_pad_to - len(cur_cascade)
            cur_cascade += [0] * n_to_add
            cur_thresholds += [0] * n_to_add
        all_solutions.append(cur_cascade + cur_thresholds)
        # compute true error for the constructed cascade
        output = torch.argmax(cur_predictions, 1)
        true_err = (torch.sum(labels != output) / len(labels) * 100).item()
        all_objectives.append((true_err, cur_flops))
        ed = time.time()
        print(f'{multiplier_ref_acc=} {idx_subnet_ref_acc=} {cur_cascade=} {cur_thresholds=} {cur_flops=:.2f} time={ed-st}')
        return all_solutions, all_objectives
    def search(self, seed):
        """Run the greedy cascade construction for every (reference subnet,
        accuracy multiplier) combination in parallel; returns stacked
        solutions and their (true_err, flops) objectives."""
        # Seed is not useful and not used because the algorithm is deterministic.
        st = time.time()
        all_solutions = []
        all_objectives = []
        kwargs = []
        for idx_subnet_ref_acc in range(1, len(self.subnet_to_output_distrs)):
            for multiplier_ref_acc in [1 - i / 100 for i in range(0, 5 + 1)]:
                kwargs.append({'self': self, 'multiplier_ref_acc': multiplier_ref_acc,
                               'idx_subnet_ref_acc': idx_subnet_ref_acc})
        n_workers = 32
        with ProcessPoolExecutor(max_workers=n_workers) as executor:
            futures = [executor.submit(GreedySearchWrapperEnsembleClassification._search_for_model_and_multiplier, kws) for kws in kwargs]
            for f in futures:
                cur_solutions, cur_objectives = f.result() # the order is not important
                all_solutions += cur_solutions
                all_objectives += cur_objectives
        all_solutions = np.vstack(all_solutions)
        all_objectives = np.array(all_objectives)
        print(all_solutions)
        print(all_objectives)
        ed = time.time()
        print(f'GreedyCascade time = {ed - st}')
        return all_solutions, all_objectives | 9,745 | 50.294737 | 148 | py |
ENCAS | ENCAS-main/data_utils/store_labels.py | import numpy as np
import torch
import yaml
from run_manager import get_run_config
from utils import NAT_PATH, NAT_DATA_PATH
import os
'''
Store labels of val & test sets for all the datasets.
To not specify all data-related information again, simply use any NAT config that uses the appropriate dataset
'''
def get_val_and_test_labels(nat_config_name):
    """Recreate the data splits of a NAT config and return the labels of the
    validation and test loaders as numpy arrays."""
    nat_config = yaml.safe_load(open(os.path.join(NAT_PATH, 'configs_nat', nat_config_name), 'r'))
    run_config = get_run_config(dataset=nat_config['dataset'], data_path=nat_config['data'],
                                train_batch_size=nat_config['trn_batch_size'], total_epochs=0,
                                test_batch_size=nat_config['vld_batch_size'], n_worker=4,
                                cutout_size=nat_config['cutout_size'], image_size=32,
                                valid_size=nat_config['vld_size'], dataset_name=nat_config['dataset'])
    # images are not used downstream, but the collate function needs some resolution
    run_config.valid_loader.collate_fn.set_resolutions([32])

    def _labels_of(loader):
        # concatenate the label part of every batch into one flat numpy array
        return torch.cat([batch[1] for batch in loader]).detach().numpy()

    return _labels_of(run_config.valid_loader), _labels_of(run_config.test_loader)
if __name__ == '__main__':
    # Dump val/test labels for every dataset (the file names encode the
    # validation-set sizes).
    lbls_val, lbls_test = get_val_and_test_labels('cifar10_r0_ofa10_sep.yml')
    np.save(os.path.join(NAT_DATA_PATH, 'labels_cifar10_val10000'), lbls_val)
    np.save(os.path.join(NAT_DATA_PATH, 'labels_cifar10_test'), lbls_test)
    lbls_val, lbls_test = get_val_and_test_labels('cifar100_r0_ofa10_sep.yml')
    np.save(os.path.join(NAT_DATA_PATH, 'labels_cifar100_val10000'), lbls_val)
    np.save(os.path.join(NAT_DATA_PATH, 'labels_cifar100_test'), lbls_test)
    lbls_val, lbls_test = get_val_and_test_labels('imagenet_r0_ofa10_sep.yml')
    np.save(os.path.join(NAT_DATA_PATH, 'labels_imagenet_val20683'), lbls_val)
    np.save(os.path.join(NAT_DATA_PATH, 'labels_imagenet_test'), lbls_test) | 2,011 | 48.073171 | 110 | py |
ENCAS | ENCAS-main/data_providers/auto_augment_tf.py | # taken verbatim from https://github.com/facebookresearch/AttentiveNAS
""" Auto Augment
Implementation adapted from timm: https://github.com/rwightman/pytorch-image-models
"""
import random
import math
from PIL import Image, ImageOps, ImageEnhance
import PIL
_PIL_VER = tuple([int(x) for x in PIL.__version__.split('.')[:2]])  # (major, minor) of the installed PIL
_FILL = (128, 128, 128)  # default fill color (mid gray) for geometric transforms
# This signifies the max integer that the controller RNN could predict for the
# augmentation scheme.
_MAX_LEVEL = 10.
_HPARAMS_DEFAULT = dict(
    translate_const=250,
    img_mean=_FILL,
)
# candidate resampling filters used when no explicit interpolation is requested
_RANDOM_INTERPOLATION = (Image.NEAREST, Image.BILINEAR, Image.BICUBIC)
def _interpolation(kwargs):
interpolation = kwargs.pop('resample', Image.NEAREST)
if isinstance(interpolation, (list, tuple)):
return random.choice(interpolation)
else:
return interpolation
def _check_args_tf(kwargs):
    """Normalize transform kwargs in place: drop 'fillcolor' on PIL < 5.0
    (unsupported there) and resolve 'resample' to one concrete filter."""
    if _PIL_VER < (5, 0):
        kwargs.pop('fillcolor', None)
    kwargs['resample'] = _interpolation(kwargs)
def shear_x(img, factor, **kwargs):
    """Horizontal shear by `factor` via an affine transform."""
    _check_args_tf(kwargs)
    affine = (1, factor, 0, 0, 1, 0)
    return img.transform(img.size, Image.AFFINE, affine, **kwargs)
def shear_y(img, factor, **kwargs):
    """Vertical shear by `factor` via an affine transform."""
    _check_args_tf(kwargs)
    affine = (1, 0, 0, factor, 1, 0)
    return img.transform(img.size, Image.AFFINE, affine, **kwargs)
def translate_x_rel(img, pct, **kwargs):
    """Translate horizontally by a fraction `pct` of the image width."""
    _check_args_tf(kwargs)
    pixels = pct * img.size[0]
    return img.transform(img.size, Image.AFFINE, (1, 0, pixels, 0, 1, 0), **kwargs)
def translate_y_rel(img, pct, **kwargs):
    """Translate vertically by a fraction `pct` of the image height."""
    _check_args_tf(kwargs)
    pixels = pct * img.size[1]
    return img.transform(img.size, Image.AFFINE, (1, 0, 0, 0, 1, pixels), **kwargs)
def translate_x_abs(img, pixels, **kwargs):
    """Translate horizontally by an absolute number of `pixels`."""
    _check_args_tf(kwargs)
    affine = (1, 0, pixels, 0, 1, 0)
    return img.transform(img.size, Image.AFFINE, affine, **kwargs)
def translate_y_abs(img, pixels, **kwargs):
    """Translate vertically by an absolute number of `pixels`."""
    _check_args_tf(kwargs)
    affine = (1, 0, 0, 0, 1, pixels)
    return img.transform(img.size, Image.AFFINE, affine, **kwargs)
def rotate(img, degrees, **kwargs):
    """Rotate `img` by `degrees`, dispatching on the installed PIL version
    (PIL < 5.2 lacks kwargs on rotate, so the affine matrix is built manually)."""
    _check_args_tf(kwargs)
    if _PIL_VER >= (5, 2):
        return img.rotate(degrees, **kwargs)
    elif _PIL_VER >= (5, 0):
        w, h = img.size
        post_trans = (0, 0)
        rotn_center = (w / 2.0, h / 2.0)
        angle = -math.radians(degrees)
        # 2x3 affine rotation matrix, rounded to tame float noise
        matrix = [
            round(math.cos(angle), 15),
            round(math.sin(angle), 15),
            0.0,
            round(-math.sin(angle), 15),
            round(math.cos(angle), 15),
            0.0,
        ]
        def transform(x, y, matrix):
            (a, b, c, d, e, f) = matrix
            return a * x + b * y + c, d * x + e * y + f
        # shift the rotation center to the image center
        matrix[2], matrix[5] = transform(
            -rotn_center[0] - post_trans[0], -rotn_center[1] - post_trans[1], matrix
        )
        matrix[2] += rotn_center[0]
        matrix[5] += rotn_center[1]
        return img.transform(img.size, Image.AFFINE, matrix, **kwargs)
    else:
        return img.rotate(degrees, resample=kwargs['resample'])
def auto_contrast(img, **__):
    """Maximize image contrast (takes no magnitude argument)."""
    return ImageOps.autocontrast(img)
def invert(img, **__):
    """Invert all pixel values (takes no magnitude argument)."""
    return ImageOps.invert(img)
def equalize(img, **__):
    """Equalize the image histogram (takes no magnitude argument)."""
    return ImageOps.equalize(img)
def solarize(img, thresh, **__):
    """Invert all pixels above `thresh`."""
    return ImageOps.solarize(img, thresh)
def solarize_add(img, add, thresh=128, **__):
    """Add `add` (clamped at 255) to every pixel below `thresh` via a LUT;
    images in modes other than L/RGB are returned unchanged."""
    lut = [min(255, i + add) if i < thresh else i for i in range(256)]
    if img.mode in ("L", "RGB"):
        if img.mode == "RGB" and len(lut) == 256:
            # replicate the table for the three channels
            lut = lut + lut + lut
        return img.point(lut)
    else:
        return img
def posterize(img, bits_to_keep, **__):
    """Reduce each channel to `bits_to_keep` bits (no-op for >= 8)."""
    if bits_to_keep >= 8:
        return img
    return ImageOps.posterize(img, max(1, bits_to_keep))  # prevent all 0 images
def contrast(img, factor, **__):
    """Adjust contrast; factor 1.0 leaves the image unchanged."""
    enhancer = ImageEnhance.Contrast(img)
    return enhancer.enhance(factor)
def color(img, factor, **__):
    """Adjust color saturation; factor 1.0 leaves the image unchanged."""
    enhancer = ImageEnhance.Color(img)
    return enhancer.enhance(factor)
def brightness(img, factor, **__):
    """Adjust brightness; factor 1.0 leaves the image unchanged."""
    enhancer = ImageEnhance.Brightness(img)
    return enhancer.enhance(factor)
def sharpness(img, factor, **__):
    """Adjust sharpness; factor 1.0 leaves the image unchanged."""
    enhancer = ImageEnhance.Sharpness(img)
    return enhancer.enhance(factor)
def _randomly_negate(v):
"""With 50% prob, negate the value"""
return -v if random.random() > 0.5 else v
def _rotate_level_to_arg(level):
    """Map magnitude in [0, _MAX_LEVEL] to a rotation angle in [-30, 30]."""
    degrees = _randomly_negate((level / _MAX_LEVEL) * 30.)
    return (degrees,)
def _enhance_level_to_arg(level):
    """Map magnitude to an enhancement factor in [0.1, 1.9]."""
    factor = (level / _MAX_LEVEL) * 1.8 + 0.1
    return (factor,)
def _shear_level_to_arg(level):
    """Map magnitude to a shear factor in [-0.3, 0.3]."""
    shear = _randomly_negate((level / _MAX_LEVEL) * 0.3)
    return (shear,)
def _translate_abs_level_to_arg(level, translate_const):
    """Map magnitude to an absolute translation in [-translate_const, translate_const]."""
    amount = _randomly_negate((level / _MAX_LEVEL) * float(translate_const))
    return (amount,)
def _translate_abs_level_to_arg2(level):
    """Like `_translate_abs_level_to_arg`, using the default translate_const."""
    amount = (level / _MAX_LEVEL) * float(_HPARAMS_DEFAULT['translate_const'])
    return (_randomly_negate(amount),)
def _translate_rel_level_to_arg(level):
    """Map magnitude to a relative translation in [-0.45, 0.45]."""
    pct = _randomly_negate((level / _MAX_LEVEL) * 0.45)
    return (pct,)
# def level_to_arg(hparams):
# return {
# 'AutoContrast': lambda level: (),
# 'Equalize': lambda level: (),
# 'Invert': lambda level: (),
# 'Rotate': _rotate_level_to_arg,
# # FIXME these are both different from original impl as I believe there is a bug,
# # not sure what is the correct alternative, hence 2 options that look better
# 'Posterize': lambda level: (int((level / _MAX_LEVEL) * 4) + 4,), # range [4, 8]
# 'Posterize2': lambda level: (4 - int((level / _MAX_LEVEL) * 4),), # range [4, 0]
# 'Solarize': lambda level: (int((level / _MAX_LEVEL) * 256),), # range [0, 256]
# 'SolarizeAdd': lambda level: (int((level / _MAX_LEVEL) * 110),), # range [0, 110]
# 'Color': _enhance_level_to_arg,
# 'Contrast': _enhance_level_to_arg,
# 'Brightness': _enhance_level_to_arg,
# 'Sharpness': _enhance_level_to_arg,
# 'ShearX': _shear_level_to_arg,
# 'ShearY': _shear_level_to_arg,
# 'TranslateX': lambda level: _translate_abs_level_to_arg(level, hparams['translate_const']),
# 'TranslateY': lambda level: _translate_abs_level_to_arg(level, hparams['translate_const']),
# 'TranslateXRel': lambda level: _translate_rel_level_to_arg(level),
# 'TranslateYRel': lambda level: _translate_rel_level_to_arg(level),
# }
# Registry mapping AutoAugment op names to the image operations defined above.
NAME_TO_OP = {
    'AutoContrast': auto_contrast,
    'Equalize': equalize,
    'Invert': invert,
    'Rotate': rotate,
    'Posterize': posterize,
    'Posterize2': posterize,
    'Solarize': solarize,
    'SolarizeAdd': solarize_add,
    'Color': color,
    'Contrast': contrast,
    'Brightness': brightness,
    'Sharpness': sharpness,
    'ShearX': shear_x,
    'ShearY': shear_y,
    'TranslateX': translate_x_abs,
    'TranslateY': translate_y_abs,
    'TranslateXRel': translate_x_rel,
    'TranslateYRel': translate_y_rel,
}
def pass_fn(input):
    """Level-to-arg hook for ops that take no magnitude argument."""
    return ()
def _conversion0(input):
    """Posterize: magnitude -> bits to keep, in [4, 8]."""
    bits = int((input / _MAX_LEVEL) * 4) + 4
    return (bits,)
def _conversion1(input):
    """Posterize2: magnitude -> bits to keep, decreasing over [4, 0]."""
    bits = int((input / _MAX_LEVEL) * 4)
    return (4 - bits,)
def _conversion2(input):
    """Solarize: map level to a pixel threshold in [0, 256]."""
    threshold = int((input / _MAX_LEVEL) * 256)
    return (threshold,)
def _conversion3(input):
    """SolarizeAdd: map level to an additive amount in [0, 110]."""
    amount = int((input / _MAX_LEVEL) * 110)
    return (amount,)
class AutoAugmentOp:
    """A single AutoAugment operation.

    Applies the transform registered under `name` with probability `prob`
    at the given `magnitude`. The magnitude is jittered with gaussian noise
    (std `magnitude_std`), clipped to [0, _MAX_LEVEL], and converted to the
    transform's positional arguments by a per-op level function. Module-level
    functions (not lambdas) are used as level functions so instances remain
    picklable.
    """

    # Groups of op names sharing the same level-to-arg conversion.
    _NO_LEVEL = ('AutoContrast', 'Equalize', 'Invert')
    _ENHANCE = ('Color', 'Contrast', 'Brightness', 'Sharpness')
    _SHEAR = ('ShearX', 'ShearY')
    _TRANSLATE_ABS = ('TranslateX', 'TranslateY')
    _TRANSLATE_REL = ('TranslateXRel', 'TranslateYRel')

    def __init__(self, name, prob, magnitude, hparams=None):
        # avoid a mutable default argument; {} was never mutated but is unsafe style
        if hparams is None:
            hparams = {}
        self.aug_fn = NAME_TO_OP[name]
        if name in self._NO_LEVEL:
            self.level_fn = pass_fn
        elif name == 'Rotate':
            self.level_fn = _rotate_level_to_arg
        elif name == 'Posterize':
            self.level_fn = _conversion0
        elif name == 'Posterize2':
            self.level_fn = _conversion1
        elif name == 'Solarize':
            self.level_fn = _conversion2
        elif name == 'SolarizeAdd':
            self.level_fn = _conversion3
        elif name in self._ENHANCE:
            self.level_fn = _enhance_level_to_arg
        elif name in self._SHEAR:
            self.level_fn = _shear_level_to_arg
        elif name in self._TRANSLATE_ABS:
            self.level_fn = _translate_abs_level_to_arg2
        elif name in self._TRANSLATE_REL:
            self.level_fn = _translate_rel_level_to_arg
        else:
            # BUG FIX: original was "{} not recognized".format({}) which
            # printed a literal "{}" instead of the offending op name.
            print("{} not recognized".format(name))
        self.prob = prob
        self.magnitude = magnitude
        # If std deviation of magnitude is > 0, we introduce some randomness
        # in the usually fixed policy and sample magnitude from normal dist
        # with mean magnitude and std-dev of magnitude_std.
        # NOTE This is being tested as it's not in paper or reference impl.
        self.magnitude_std = 0.5  # FIXME add arg/hparam
        self.kwargs = {
            'fillcolor': hparams['img_mean'] if 'img_mean' in hparams else _FILL,
            'resample': hparams['interpolation'] if 'interpolation' in hparams else _RANDOM_INTERPOLATION
        }

    def __call__(self, img):
        """Apply the op to `img` with probability `prob`; may return `img` unchanged."""
        if self.prob < random.random():
            return img
        magnitude = self.magnitude
        if self.magnitude_std and self.magnitude_std > 0:
            magnitude = random.gauss(magnitude, self.magnitude_std)
        magnitude = min(_MAX_LEVEL, max(0, magnitude))  # clip after jitter
        level_args = self.level_fn(magnitude)
        return self.aug_fn(img, *level_args, **self.kwargs)
def auto_augment_policy_v0(hparams=_HPARAMS_DEFAULT):
    """Build the 'v0' ImageNet AutoAugment policy.

    Each sub-policy is a list of (op name, probability, magnitude) triples;
    at apply time one sub-policy is chosen at random per image.
    Returns a list of lists of AutoAugmentOp.
    """
    # ImageNet policy from TPU EfficientNet impl, cannot find
    # a paper reference.
    policy = [
        [('Equalize', 0.8, 1), ('ShearY', 0.8, 4)],
        [('Color', 0.4, 9), ('Equalize', 0.6, 3)],
        [('Color', 0.4, 1), ('Rotate', 0.6, 8)],
        [('Solarize', 0.8, 3), ('Equalize', 0.4, 7)],
        [('Solarize', 0.4, 2), ('Solarize', 0.6, 2)],
        [('Color', 0.2, 0), ('Equalize', 0.8, 8)],
        [('Equalize', 0.4, 8), ('SolarizeAdd', 0.8, 3)],
        [('ShearX', 0.2, 9), ('Rotate', 0.6, 8)],
        [('Color', 0.6, 1), ('Equalize', 1.0, 2)],
        [('Invert', 0.4, 9), ('Rotate', 0.6, 0)],
        [('Equalize', 1.0, 9), ('ShearY', 0.6, 3)],
        [('Color', 0.4, 7), ('Equalize', 0.6, 0)],
        [('Posterize', 0.4, 6), ('AutoContrast', 0.4, 7)],
        [('Solarize', 0.6, 8), ('Color', 0.6, 9)],
        [('Solarize', 0.2, 4), ('Rotate', 0.8, 9)],
        [('Rotate', 1.0, 7), ('TranslateYRel', 0.8, 9)],
        [('ShearX', 0.0, 0), ('Solarize', 0.8, 4)],
        [('ShearY', 0.8, 0), ('Color', 0.6, 4)],
        [('Color', 1.0, 0), ('Rotate', 0.6, 2)],
        [('Equalize', 0.8, 4), ('Equalize', 0.0, 8)],
        [('Equalize', 1.0, 4), ('AutoContrast', 0.6, 2)],
        [('ShearY', 0.4, 7), ('SolarizeAdd', 0.6, 7)],
        [('Posterize', 0.8, 2), ('Solarize', 0.6, 10)],
        [('Solarize', 0.6, 8), ('Equalize', 0.6, 1)],
        [('Color', 0.8, 6), ('Rotate', 0.4, 5)],
    ]
    # instantiate each (name, prob, magnitude) triple as an AutoAugmentOp
    pc = [[AutoAugmentOp(*a, hparams) for a in sp] for sp in policy]
    return pc
def auto_augment_policy_original(hparams=_HPARAMS_DEFAULT):
    """Build the original ImageNet AutoAugment policy.

    Sub-policies are (op name, probability, magnitude) triples; one
    sub-policy is chosen at random per image at apply time.
    Returns a list of lists of AutoAugmentOp.
    """
    # ImageNet policy from https://arxiv.org/abs/1805.09501
    policy = [
        [('Posterize', 0.4, 8), ('Rotate', 0.6, 9)],
        [('Solarize', 0.6, 5), ('AutoContrast', 0.6, 5)],
        [('Equalize', 0.8, 8), ('Equalize', 0.6, 3)],
        [('Posterize', 0.6, 7), ('Posterize', 0.6, 6)],
        [('Equalize', 0.4, 7), ('Solarize', 0.2, 4)],
        [('Equalize', 0.4, 4), ('Rotate', 0.8, 8)],
        [('Solarize', 0.6, 3), ('Equalize', 0.6, 7)],
        [('Posterize', 0.8, 5), ('Equalize', 1.0, 2)],
        [('Rotate', 0.2, 3), ('Solarize', 0.6, 8)],
        [('Equalize', 0.6, 8), ('Posterize', 0.4, 6)],
        [('Rotate', 0.8, 8), ('Color', 0.4, 0)],
        [('Rotate', 0.4, 9), ('Equalize', 0.6, 2)],
        [('Equalize', 0.0, 7), ('Equalize', 0.8, 8)],
        [('Invert', 0.6, 4), ('Equalize', 1.0, 8)],
        [('Color', 0.6, 4), ('Contrast', 1.0, 8)],
        [('Rotate', 0.8, 8), ('Color', 1.0, 2)],
        [('Color', 0.8, 8), ('Solarize', 0.8, 7)],
        [('Sharpness', 0.4, 7), ('Invert', 0.6, 8)],
        [('ShearX', 0.6, 5), ('Equalize', 1.0, 9)],
        [('Color', 0.4, 0), ('Equalize', 0.6, 3)],
        [('Equalize', 0.4, 7), ('Solarize', 0.2, 4)],
        [('Solarize', 0.6, 5), ('AutoContrast', 0.6, 5)],
        [('Invert', 0.6, 4), ('Equalize', 1.0, 8)],
        [('Color', 0.6, 4), ('Contrast', 1.0, 8)],
        [('Equalize', 0.8, 8), ('Equalize', 0.6, 3)],
    ]
    # instantiate each (name, prob, magnitude) triple as an AutoAugmentOp
    pc = [[AutoAugmentOp(*a, hparams) for a in sp] for sp in policy]
    return pc
def auto_augment_policy(name='v0', hparams=_HPARAMS_DEFAULT):
    """Return the AutoAugment policy registered under `name`.

    Args:
        name: 'v0' (TPU EfficientNet policy) or 'original' (AutoAugment paper).
        hparams: hyper-parameter dict forwarded to the ops.

    Raises:
        AssertionError: if `name` is unknown (type kept for backward
        compatibility with callers catching AssertionError).
    """
    if name == 'original':
        return auto_augment_policy_original(hparams)
    if name == 'v0':
        return auto_augment_policy_v0(hparams)
    # BUG FIX: previously printed the message and raised an empty
    # AssertionError; now the error itself carries the offending name.
    raise AssertionError("Unknown auto_augmentation policy {}".format(name))
class AutoAugment:
    """Applies one randomly chosen sub-policy (a sequence of ops) per image."""

    def __init__(self, policy):
        # `policy` is a list of sub-policies; each sub-policy is a list of callables
        self.policy = policy

    def __call__(self, img):
        ops = random.choice(self.policy)
        for apply_op in ops:
            img = apply_op(img)
        return img
| 13,406 | 32.185644 | 105 | py |
ENCAS | ENCAS-main/data_providers/cifar.py | import functools
import math
import numpy as np
import torchvision
import torch.utils.data
import torchvision.transforms as transforms
from ofa.imagenet_classification.data_providers.imagenet import DataProvider
from timm.data import rand_augment_transform
import utils_train
from utils import _pil_interp
from dynamic_resolution_collator import DynamicResolutionCollator
import utils
from utils_train import Cutout
class CIFARBaseDataProvider(DataProvider):
    """Base data provider for the CIFAR datasets.

    Builds train/valid/test DataLoaders whose batches are resized on the fly
    by DynamicResolutionCollator instances; the datasets themselves carry
    no-op transforms, and the real augmentation pipelines are handed to the
    collators (see build_train_transform / build_valid_transform).
    The validation split is carved out of the training set.

    Subclasses implement name(), n_classes, train_dataset() and test_dataset().
    """
    def __init__(self, save_path=None, train_batch_size=96, test_batch_size=256, valid_size=None,
                 n_worker=2, resize_scale=0.08, distort_color=None, image_size=224, num_replicas=None, rank=None,
                 total_size=None, **kwargs):
        self._save_path = save_path
        self.image_size = image_size
        self.distort_color = distort_color
        self.resize_scale = resize_scale
        # 'cutout_size' is required; the remaining kwargs are optional flags
        self.cutout_size = kwargs['cutout_size']
        self.auto_augment = kwargs.get('auto_augment', 'rand-m9-mstd0.5')
        self.if_flip = kwargs.get('if_flip', True)
        self.if_center_crop = kwargs.get('if_center_crop', True)
        self.if_cutmix = kwargs.get('if_cutmix', False)
        # caches of Compose objects keyed by active image size
        self._valid_transform_dict = {}
        self._train_transform_dict = {}
        self.active_img_size = self.image_size
        # building the transforms also sets the resize lambdas and the
        # *_transforms_after_resize lists consumed by the collators below
        valid_transforms = self.build_valid_transform()
        train_transforms = self.build_train_transform()
        self.train_dataset_actual = self.train_dataset(train_transforms)
        n_datapoints = len(self.train_dataset_actual.data)
        self.cutmix_kwargs = None
        if self.if_cutmix:
            self.cutmix_kwargs = {'beta': 1.0, 'prob': 0.5, 'n_classes': self.n_classes}
        # depending on combinations of flags may need even more than a 1000:
        self.collator_train = DynamicResolutionCollator(1000, if_cutmix=self.if_cutmix, cutmix_kwargs=self.cutmix_kwargs)
        self.collator_val = DynamicResolutionCollator(1)
        self.collator_subtrain = DynamicResolutionCollator(1, if_return_target_idx=False, if_cutmix=self.if_cutmix,
                                                           cutmix_kwargs=self.cutmix_kwargs)
        assert valid_size is not None
        if total_size is not None:
            n_datapoints = total_size
        # a float valid_size is interpreted as a fraction of the train set
        if not isinstance(valid_size, int):
            assert isinstance(valid_size, float) and 0 < valid_size < 1
            valid_size = int(n_datapoints * valid_size)
        self.valid_dataset_actual = self.train_dataset(valid_transforms)
        train_indexes, valid_indexes = self.random_sample_valid_set(n_datapoints, valid_size)
        train_sampler = torch.utils.data.sampler.SubsetRandomSampler(train_indexes)
        # for validation use Subset instead of SubsetRandomSampler to keep the ordering the same
        self.valid_dataset_actual = torch.utils.data.Subset(self.valid_dataset_actual, valid_indexes)
        self.train = torch.utils.data.DataLoader(
            self.train_dataset_actual, batch_size=train_batch_size, sampler=train_sampler,
            num_workers=n_worker, pin_memory=True, collate_fn=self.collator_train,
            worker_init_fn=utils_train.init_dataloader_worker_state, persistent_workers=True
        )
        self.valid = torch.utils.data.DataLoader(
            self.valid_dataset_actual, batch_size=test_batch_size, num_workers=n_worker, pin_memory=True,
            prefetch_factor=1, persistent_workers=True, collate_fn=self.collator_val
        )
        test_dataset = self.test_dataset(valid_transforms)
        self.test = torch.utils.data.DataLoader(
            test_dataset, batch_size=test_batch_size, shuffle=False, num_workers=n_worker, pin_memory=True,
            collate_fn=self.collator_val, prefetch_factor=1
        )
        # hand the actual (post-resize) transform pipelines to the collators
        self.collator_train.set_info_for_transforms(self.resize_class_lambda_train,
                                                    self.train_transforms_after_resize)
        self.collator_val.set_info_for_transforms(self.resize_class_lambda_val,
                                                  self.val_transforms_after_resize)
        self.collator_subtrain.set_info_for_transforms(self.resize_class_lambda_train,
                                                       self.train_transforms_after_resize)
    def set_collator_train_resolutions(self, resolutions):
        # tell the train collator which resolutions to sample batches at
        self.collator_train.set_resolutions(resolutions)
    @staticmethod
    def name():
        raise NotImplementedError
    @property
    def n_classes(self):
        raise NotImplementedError
    def train_dataset(self, _transforms):
        raise NotImplementedError
    def test_dataset(self, _transforms):
        raise NotImplementedError
    @property
    def data_shape(self):
        return 3, self.active_img_size, self.active_img_size # C, H, W
    @property
    def save_path(self):
        return self._save_path
    @property
    def data_url(self):
        raise ValueError('unable to download %s' % self.name())
    @property
    def train_path(self):
        return self.save_path
    @property
    def valid_path(self):
        return self.save_path
    @property
    def normalize(self):
        # CIFAR channel means / stds
        return transforms.Normalize(
            mean=[0.49139968, 0.48215827, 0.44653124], std=[0.24703233, 0.24348505, 0.26158768])
    def build_train_transform(self, image_size=None, print_log=True):
        """Build the training pipeline.

        Side effects: sets self.resize_class_lambda_train and
        self.train_transforms_after_resize (used by the collators). The
        returned Compose is intentionally empty — the real transforms are
        applied by the collator after resizing.
        """
        self.active_img_size = image_size
        if image_size is None:
            image_size = self.image_size
        if print_log:
            print('Color jitter: %s, resize_scale: %s, img_size: %s' %
                  (self.distort_color, self.resize_scale, image_size))
        if self.active_img_size in self._train_transform_dict:
            return self._train_transform_dict[self.active_img_size]
        if self.distort_color == 'torch':
            color_transform = transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4, hue=0.1)
        elif self.distort_color == 'tf':
            color_transform = transforms.ColorJitter(brightness=32. / 255., saturation=0.5)
        else:
            color_transform = None
        if self.resize_scale:
            resize_scale = self.resize_scale
            self.resize_class_lambda_train = functools.partial(utils_train.create_resize_class_lambda_train,
                                                               transforms.RandomResizedCrop, scale=[resize_scale, 1.0],
                                                               interpolation=torchvision.transforms.InterpolationMode.BICUBIC)
        else:
            self.resize_class_lambda_train = functools.partial(utils_train.create_resize_class_lambda_train,
                                                               transforms.Resize,
                                                               interpolation=torchvision.transforms.InterpolationMode.BICUBIC)
        train_transforms = []
        if self.if_flip:
            train_transforms.append(transforms.RandomHorizontalFlip())
        if color_transform is not None:
            train_transforms.append(color_transform)
        if self.auto_augment:
            aa_params = dict(
                translate_const=int(image_size * 0.45),
                img_mean=tuple([min(255, round(255 * x)) for x in [0.49139968, 0.48215827, 0.44653124]]),
            )
            aa_params['interpolation'] = _pil_interp('bicubic')
            train_transforms += [rand_augment_transform(self.auto_augment, aa_params)]
        train_transforms += [
            transforms.ToTensor(),
            self.normalize,
            Cutout(length=self.cutout_size)
        ]
        self.train_transforms_after_resize = train_transforms
        train_transforms = []
        # these transforms are irrelevant
        train_transforms = transforms.Compose(train_transforms)
        self._train_transform_dict[self.active_img_size] = train_transforms
        return train_transforms
    @staticmethod
    def resize_class_lambda_val(if_center_crop, image_size):
        # validation resize: optional resize-then-center-crop (crop ratio 0.875)
        if if_center_crop:
            return transforms.Compose([
                transforms.Resize(int(math.ceil(image_size / 0.875)),
                                  interpolation=torchvision.transforms.InterpolationMode.BICUBIC),
                transforms.CenterCrop(image_size)])
        return transforms.Resize(image_size, interpolation=torchvision.transforms.InterpolationMode.BICUBIC)
    def build_valid_transform(self, image_size=None):
        """Build the validation pipeline; like build_train_transform, the
        returned Compose is empty and the real work happens in the collator."""
        # NOTE(review): referenced via CIFAR100DataProvider, but this is the
        # same inherited staticmethod defined above — presumably intentional.
        self.resize_class_lambda_val = functools.partial(CIFAR100DataProvider.resize_class_lambda_val, self.if_center_crop)
        val_transforms = [transforms.ToTensor(),
                          self.normalize]
        self.val_transforms_after_resize = val_transforms
        val_transforms = []
        return transforms.Compose(val_transforms)
    def assign_active_img_size(self, new_img_size):
        # switch the resolution used by valid/test transforms
        self.active_img_size = new_img_size
        if self.active_img_size not in self._valid_transform_dict:
            self._valid_transform_dict[self.active_img_size] = self.build_valid_transform()
        self.valid.dataset.transform = self._valid_transform_dict[self.active_img_size]
        self.test.dataset.transform = self._valid_transform_dict[self.active_img_size]
    def build_sub_train_loader(self, n_images, batch_size, img_size, num_worker=None, num_replicas=None, rank=None):
        # used for resetting running statistics of BN
        if not hasattr(self, 'sub_data_loader'):
            if num_worker is None:
                num_worker = self.train.num_workers
            new_train_dataset = self.train_dataset(self.build_train_transform(image_size=img_size, print_log=False))
            g = torch.Generator()
            g.manual_seed(DataProvider.SUB_SEED)
            # sample only from the train split (excludes validation indices)
            indices_train = self.train.sampler.indices
            n_indices = len(indices_train)
            rand_permutation = torch.randperm(n_indices, generator=g).numpy()
            indices_train = np.array(indices_train)[rand_permutation]
            chosen_indexes = indices_train[:n_images].tolist()
            sub_sampler = torch.utils.data.sampler.SubsetRandomSampler(chosen_indexes)
            self.collator_subtrain.set_resolutions([img_size])
            self.sub_data_loader = torch.utils.data.DataLoader(
                new_train_dataset, batch_size=batch_size, sampler=sub_sampler,
                num_workers=num_worker, pin_memory=False, collate_fn=self.collator_subtrain, persistent_workers=True
            )
        else:
            self.collator_subtrain.set_resolutions([img_size])
        return self.sub_data_loader
class CIFAR10DataProvider(CIFARBaseDataProvider):
    """Data provider for the CIFAR-10 dataset (10 classes)."""

    @staticmethod
    def name():
        return 'cifar10'

    @property
    def n_classes(self):
        return 10

    def train_dataset(self, _transforms):
        return torchvision.datasets.CIFAR10(
            root=self.train_path, train=True, download=True, transform=_transforms)

    def test_dataset(self, _transforms):
        return torchvision.datasets.CIFAR10(
            root=self.valid_path, train=False, download=True, transform=_transforms)
class CIFAR100DataProvider(CIFARBaseDataProvider):
    """Data provider for the CIFAR-100 dataset (100 classes)."""

    @staticmethod
    def name():
        return 'cifar100'

    @property
    def n_classes(self):
        return 100

    def train_dataset(self, _transforms):
        return torchvision.datasets.CIFAR100(
            root=self.train_path, train=True, download=True, transform=_transforms)

    def test_dataset(self, _transforms):
        return torchvision.datasets.CIFAR100(
            root=self.valid_path, train=False, download=True, transform=_transforms)
| 11,902 | 40.618881 | 126 | py |
ENCAS | ENCAS-main/data_providers/imagenet.py | import functools
import warnings
import os
import math
import numpy as np
import torch.utils.data
import torchvision
import torchvision.transforms as transforms
import torchvision.datasets as datasets
from ofa.imagenet_classification.data_providers.imagenet import DataProvider
import utils
import utils_train
from dynamic_resolution_collator import DynamicResolutionCollator
from .auto_augment_tf import auto_augment_policy, AutoAugment
class ImagenetDataProvider(DataProvider):
    """Data provider for ImageNet.

    Train data comes from 'train_part', validation from 'imagenetv2_all',
    test from 'val' under save_path. As with the CIFAR providers, batches
    are resized by DynamicResolutionCollator instances; the dataset-level
    transforms are empty and the real pipelines are given to the collators.
    The `preproc_alphanet` flag switches between OFA-style and AlphaNet-style
    preprocessing (see the comment in build_train_transform).
    """
    def __init__(self, save_path=None, train_batch_size=256, test_batch_size=512, valid_size=None, n_worker=32,
                 resize_scale=0.08, distort_color=None, image_size=224,
                 num_replicas=None, rank=None, total_size=None, **kwargs):
        warnings.filterwarnings('ignore')
        self._save_path = save_path
        self.image_size = image_size
        self.distort_color = distort_color
        self.resize_scale = resize_scale
        self.if_flip = kwargs.get('if_flip', True)
        self.auto_augment = kwargs.get('auto_augment', 'v0')
        self.crop_pct = kwargs.get('crop_pct', None)
        self.if_timm = self.crop_pct is not None
        self.preproc_alphanet = kwargs.get('preproc_alphanet', False)
        # caches of Compose objects keyed by active image size
        self._train_transform_dict = {}
        self._valid_transform_dict = {}
        self.active_img_size = self.image_size
        # building the transforms also sets the resize lambdas and the
        # pre/after-resize transform lists consumed by the collators below
        valid_transforms = self.build_valid_transform()
        train_loader_class = torch.utils.data.DataLoader
        train_transforms = self.build_train_transform()
        self.train_dataset_actual = self.train_dataset(train_transforms)
        self.if_segmentation=False
        self.collator_train = DynamicResolutionCollator(1000)
        self.collator_val = DynamicResolutionCollator(1)
        self.collator_subtrain = DynamicResolutionCollator(1, if_return_target_idx=False)
        self.valid_dataset_actual = self.val_dataset(valid_transforms)
        self.train = train_loader_class(
            self.train_dataset_actual, batch_size=train_batch_size, shuffle=True,
            num_workers=n_worker, pin_memory=True, collate_fn=self.collator_train,
            worker_init_fn=utils_train.init_dataloader_worker_state, persistent_workers=True
        )
        self.valid = torch.utils.data.DataLoader(
            self.valid_dataset_actual, batch_size=test_batch_size,
            num_workers=n_worker
            , pin_memory=True, collate_fn=self.collator_val,
            persistent_workers=True
        )
        test_dataset = self.test_dataset(valid_transforms)
        self.test = torch.utils.data.DataLoader(
            test_dataset, batch_size=test_batch_size, shuffle=False, num_workers=n_worker*2, pin_memory=True,
            collate_fn=self.collator_val, persistent_workers=True
        )
        # hand the actual transform pipelines to the collators
        self.collator_train.set_info_for_transforms(self.resize_class_lambda_train,
                                                    self.train_transforms_after_resize,
                                                    self.train_transforms_pre_resize)
        self.collator_val.set_info_for_transforms(self.resize_class_lambda_val,
                                                  self.val_transforms_after_resize, self.val_transforms_pre_resize)
        self.collator_subtrain.set_info_for_transforms(self.resize_class_lambda_train,
                                                       self.train_transforms_after_resize,
                                                       self.train_transforms_pre_resize)
    def set_collator_train_resolutions(self, resolutions):
        # tell the train collator which resolutions to sample batches at
        self.collator_train.set_resolutions(resolutions)
    @staticmethod
    def name():
        return 'imagenet'
    @property
    def data_shape(self):
        return 3, self.active_img_size, self.active_img_size # C, H, W
    @property
    def n_classes(self):
        return 1000
    @property
    def save_path(self):
        return self._save_path
    @property
    def data_url(self):
        raise ValueError('unable to download %s' % self.name())
    def train_dataset(self, _transforms):
        dataset = datasets.ImageFolder(os.path.join(self.save_path, 'train_part'), _transforms)
        return dataset
    def val_dataset(self, _transforms):
        # validation uses ImageNetV2 rather than a split of train
        dataset = datasets.ImageFolder(os.path.join(self.save_path, 'imagenetv2_all'), _transforms)
        return dataset
    def test_dataset(self, _transforms):
        dataset = datasets.ImageFolder(os.path.join(self.save_path, 'val'), _transforms)
        return dataset
    @property
    def normalize(self):
        # standard ImageNet channel means / stds
        return transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    @staticmethod
    def create_resize_class_lambda_train(resize_transform_class, image_size, **kwargs): # pickle can't handle lambdas
        return resize_transform_class((image_size, image_size), **kwargs)
    def build_train_transform(self, image_size=None, print_log=True):
        """Build the training pipeline.

        Side effects: sets self.resize_class_lambda_train and the
        self.train_transforms_pre_resize / _after_resize lists used by the
        collators. The returned Compose is intentionally empty — the real
        transforms are applied in the collator.
        """
        self.active_img_size = image_size
        default_image_size = 224
        if print_log:
            print('Color jitter: %s, resize_scale: %s, img_size: %s' %
                  (self.distort_color, self.resize_scale, default_image_size))
        if self.active_img_size in self._train_transform_dict:
            return self._train_transform_dict[self.active_img_size]
        if self.distort_color == 'torch':
            color_transform = transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4, hue=0.1)
        elif self.distort_color == 'tf':
            color_transform = transforms.ColorJitter(brightness=32. / 255., saturation=0.5)
        else:
            color_transform = None
        # OFA and AlphaNet preprocess ImageNet differently, and leads to significant performance changes
        # => I match the preprocessing to the supernetwork.
        #
        # In Alphanet, there are 2 resizes: when loading, images are always resized to 224*224;
        # and in the call to forward they are resized to the proper resolution of the current subnetwork.
        # I want to resize in advance, but still need to do the 2 resizes to match the preprocessing & performance
        # => for Alphanet, I add the first resize to 224 to the train_transforms, and I run all the transforms before
        # the final resize.
        if not self.preproc_alphanet:
            if self.resize_scale:
                resize_scale = self.resize_scale
                self.resize_class_lambda_train = functools.partial(ImagenetDataProvider.create_resize_class_lambda_train,
                                                                   transforms.RandomResizedCrop, scale=[resize_scale, 1.0],
                                                                   interpolation=torchvision.transforms.InterpolationMode.BICUBIC)
            else:
                self.resize_class_lambda_train = functools.partial(utils_train.create_resize_class_lambda_train,
                                                                   transforms.Resize,
                                                                   interpolation=torchvision.transforms.InterpolationMode.BICUBIC)
            train_transforms = []
        else:
            resize_scale = self.resize_scale
            train_transforms = [transforms.RandomResizedCrop((default_image_size, default_image_size), scale=[resize_scale, 1.0],
                                                             interpolation=torchvision.transforms.InterpolationMode.BICUBIC)]
            self.resize_class_lambda_train = functools.partial(ImagenetDataProvider.resize_class_lambda_val, self.preproc_alphanet)
        if self.if_flip:
            train_transforms.append(transforms.RandomHorizontalFlip())
        if color_transform is not None:
            train_transforms.append(color_transform)
        if self.auto_augment:
            IMAGENET_PIXEL_MEAN = [123.675, 116.280, 103.530]
            aa_params = {
                "translate_const": int(default_image_size * 0.45),
                "img_mean": tuple(round(x) for x in IMAGENET_PIXEL_MEAN),
            }
            aa_policy = AutoAugment(auto_augment_policy(self.auto_augment, aa_params))
            train_transforms.append(aa_policy)
        train_transforms += [
            transforms.ToTensor(),
            self.normalize,
        ]
        if not self.preproc_alphanet:
            self.train_transforms_pre_resize = []
            self.train_transforms_after_resize = train_transforms
        else:
            self.train_transforms_pre_resize = train_transforms
            self.train_transforms_after_resize = []
        train_transforms = []
        # the transforms below are irrelevant (actual transforms will be in the collator)
        train_transforms = transforms.Compose(train_transforms)
        self._train_transform_dict[self.active_img_size] = train_transforms
        return train_transforms
    @staticmethod
    def resize_class_lambda_val(preproc_alphanet, image_size):
        # AlphaNet: plain square resize; OFA: resize (crop ratio 0.875) + center crop
        if preproc_alphanet:
            return transforms.Resize((image_size, image_size),
                                     interpolation=torchvision.transforms.InterpolationMode.BICUBIC)
        return transforms.Compose([transforms.Resize(int(math.ceil(image_size / 0.875))),
                                   transforms.CenterCrop(image_size)])
    def build_valid_transform(self, image_size=None):
        """Build the validation pipeline; returned Compose is empty, the real
        transforms go to the collator via the pre/after-resize lists."""
        self.resize_class_lambda_val = functools.partial(ImagenetDataProvider.resize_class_lambda_val, self.preproc_alphanet)
        if not self.preproc_alphanet: # see the comment about "self.preproc_alphanet" in build_train_transform
            val_transforms = [
                transforms.ToTensor(),
                self.normalize
            ]
            self.val_transforms_pre_resize = []
            self.val_transforms_after_resize = val_transforms
        else:
            val_transforms = [
                transforms.Resize(256, interpolation=torchvision.transforms.InterpolationMode.BICUBIC),
                transforms.CenterCrop(224),
                transforms.ToTensor(),
                self.normalize
            ]
            self.val_transforms_pre_resize = val_transforms
            self.val_transforms_after_resize = []
        val_transforms = []
        return transforms.Compose(val_transforms)
    def assign_active_img_size(self, new_img_size):
        self.active_img_size = new_img_size
        if self.active_img_size not in self._valid_transform_dict:
            self._valid_transform_dict[self.active_img_size] = self.build_valid_transform()
        # change the transform of the valid and test set
        self.valid.dataset.transform = self._valid_transform_dict[self.active_img_size]
        self.test.dataset.transform = self._valid_transform_dict[self.active_img_size]
    # Presumably, this also leads to OOM, but I haven't verified it on ImageNet, just assuming it's the same as with CIFARs.
    # def build_sub_train_loader(self, n_images, batch_size, img_size, num_worker=None, num_replicas=None, rank=None):
    #     # used for resetting running statistics
    #     if self.__dict__.get('sub_train_%d' % img_size, None) is None:
    #         if not hasattr(self, 'sub_data_loader'):
    #             if num_worker is None:
    #                 num_worker = self.train.num_workers
    #
    #             new_train_dataset = self.train_dataset(
    #                 self.build_train_transform(image_size=img_size, print_log=False))
    #
    #             g = torch.Generator()
    #             g.manual_seed(DataProvider.SUB_SEED)
    #
    #             # don't need to change sampling here (unlike in cifars) because val is not part of train
    #             n_samples = len(self.train.dataset.samples)
    #             rand_indexes = torch.randperm(n_samples, generator=g).tolist()
    #             chosen_indexes = rand_indexes[:n_images]
    #
    #             if num_replicas is not None:
    #                 sub_sampler = MyDistributedSampler(new_train_dataset, num_replicas, rank, np.array(chosen_indexes))
    #             else:
    #                 sub_sampler = torch.utils.data.sampler.SubsetRandomSampler(chosen_indexes)
    #             self.collator_subtrain.set_resolutions([img_size])
    #             self.sub_data_loader = torch.utils.data.DataLoader(
    #                 new_train_dataset, batch_size=batch_size, sampler=sub_sampler,
    #                 num_workers=0, pin_memory=False, collate_fn=self.collator_subtrain
    #             )
    #
    #         self.collator_subtrain.set_resolutions([img_size])
    #         self.__dict__['sub_train_%d' % img_size] = []
    #         for images, labels, *_ in self.sub_data_loader:
    #             self.__dict__['sub_train_%d' % img_size].append((images, labels))
    #     return self.__dict__['sub_train_%d' % img_size]
    def build_sub_train_loader(self, n_images, batch_size, img_size, num_worker=None, num_replicas=None, rank=None):
        # used for resetting running statistics of BN
        if not hasattr(self, 'sub_data_loader'):
            if num_worker is None:
                num_worker = self.train.num_workers
            new_train_dataset = self.train_dataset(self.build_train_transform(image_size=img_size, print_log=False))
            g = torch.Generator()
            g.manual_seed(DataProvider.SUB_SEED)
            # don't need to change sampling here (unlike in cifars) because val is not part of train
            n_samples = len(self.train.dataset.samples)
            rand_indexes = torch.randperm(n_samples, generator=g).tolist()
            chosen_indexes = rand_indexes[:n_images]
            sub_sampler = torch.utils.data.sampler.SubsetRandomSampler(chosen_indexes)
            self.collator_subtrain.set_resolutions([img_size])
            self.sub_data_loader = torch.utils.data.DataLoader(
                new_train_dataset, batch_size=batch_size, sampler=sub_sampler,
                num_workers=num_worker, pin_memory=False, collate_fn=self.collator_subtrain, persistent_workers=True
            )
        else:
            self.collator_subtrain.set_resolutions([img_size])
        return self.sub_data_loader
return self.sub_data_loader | 14,256 | 46.052805 | 131 | py |
Likelihood-free_OICA | Likelihood-free_OICA-master/LFOICA.py | import numpy as np
import torch
import torch.nn as nn
from torch import Tensor
import torch.optim as optim
import torch.nn.functional as F
from libs.distance_measure.mmd import mix_rbf_mmd2
from libs.common_utils import cos_act, normalize_mixing_matrix
from scipy.stats import semicircular
from scipy.stats import hypsecant
import math
import matplotlib.pyplot as plt
from torch.utils.data import Dataset, DataLoader
import argparse
from libs.pytorch_pgm import PGM, prox_soft, prox_plus
class dataset_simul(Dataset):
    """Standard PyTorch dataset for the LFOICA simulation experiments.

    Loads an .npz archive whose arrays are, in order: the observed mixtures
    ('arr_0'), the ground-truth independent components ('arr_1'), and the
    true mixing matrix A ('arr_2').
    """

    def __init__(self, data_path):
        arrays = np.load(data_path)
        self.mixtures = Tensor(arrays['arr_0'])
        self.components = arrays['arr_1']
        self.A = Tensor(arrays['arr_2'])
        self.data_size = self.mixtures.shape[0]

    def __len__(self):
        return self.data_size

    def __getitem__(self, idx):
        return self.mixtures[idx, :], self.components[idx, :]

    def get_real_A(self):
        """Return the ground-truth mixing matrix."""
        return self.A

    def get_real_components(self, batch_size):
        """Return `batch_size` ground-truth component rows.

        Note: shuffles self.components in place before slicing.
        """
        assert batch_size <= self.data_size
        np.random.shuffle(self.components)
        return self.components[:batch_size, :]
# transform random noise into components
class Gaussian_transformer(nn.Module):
    """Transforms random noise into non-gaussian independent components.

    Each of the `num_components` channels has its own frozen random-feature
    mapping (random Fourier features with a cosine activation) followed by
    its own small trainable MLP, so the generated channels stay independent.

    NOTE(review): forward() reads the module-level global `device` and the
    project class `cos_act` — it only works after the script has defined
    `device` at module scope; confirm before reusing elsewhere.
    """
    def __init__(self, num_components):
        super().__init__()
        self.num_components = num_components
        self.m = 1 # number of gaussian components for each channel in our non-gaussian noise generation model
        self.random_feature_mapping = nn.ModuleList()
        self.D = 8
        self.models = nn.ModuleList()
        for i in range(num_components):
            # frozen random Fourier feature layer: N(0,1) weights, U[0, 2pi] bias
            random_feature_mapping = nn.Linear(self.m, self.D)
            torch.nn.init.normal_(random_feature_mapping.weight, mean=0, std=1)
            torch.nn.init.uniform_(random_feature_mapping.bias, a=0, b=2 * math.pi)
            random_feature_mapping.weight.requires_grad = False
            random_feature_mapping.bias.requires_grad = False
            self.random_feature_mapping.append(random_feature_mapping)
        for i in range(num_components): # different channels have different networks to guarantee independent
            model = nn.Sequential(
                nn.Linear(self.D, 2 * self.D),
                nn.ELU(),
                # nn.Linear(2*self.D, 4*self.D),
                # nn.ELU(),
                # nn.Linear(4 * self.D, 2*self.D),
                # nn.ELU(),
                nn.Linear(2 * self.D, 1)
            )
            self.models.append(model)
    def forward(self, batch_size):
        # gaussianNoise = Tensor(np.random.normal(0, 1, [batch_size, num_components, self.m])).to(device)
        gaussianNoise = Tensor(np.random.uniform(-1, 1, [batch_size, self.num_components, self.m])).to(device)
        output = Tensor(np.zeros([batch_size, self.num_components])).to(device) # batchSize * k * channels
        cos_act_func = cos_act()
        for i in range(self.num_components): # output shape [batchSize, k, n]
            tmp = self.random_feature_mapping[i](gaussianNoise[:, i, :])
            tmp = cos_act_func(tmp)
            output[:, i] = self.models[i](tmp).squeeze()
        return output
class Generative_net(nn.Module):
    """Linear mixing model: maps components to mixtures through a learnable
    mixing matrix A, mimicking the generative process x = A s."""

    def __init__(self, num_mixtures, num_components, A):
        super().__init__()
        # For the simulation experiments, A is initialised at its true value
        # plus large uniform noise to avoid a trivial local optimum; all
        # compared methods share the same initialisation scheme.
        init_noise = torch.Tensor(np.random.uniform(-0.2, 0.2, [num_mixtures, num_components]))
        self.A = nn.Parameter(A + init_noise)

    def forward(self, components):
        # (batch, num_components) @ (num_components, num_mixtures)
        return torch.mm(components, self.A.t())
# ---- Command-line configuration for the LFOICA simulation experiment ----
parser = argparse.ArgumentParser()
parser.add_argument('--num_mixtures', type=int, default=5)
parser.add_argument('--num_components', type=int, default=10)
parser.add_argument('--batch_size', type=int, default=2000)
parser.add_argument('--num_epochs', type=int, default=1000)
parser.add_argument('--lr_T', type=float, default=0.01)
parser.add_argument('--lr_G', type=float, default=0.001)
parser.add_argument('--reg_lambda', type=float, default=0)
parser.add_argument('--print_int', type=int, default=50)
parser.add_argument('--verbose', action='store_true')
parser.add_argument('--plot', action='store_true')
parser.add_argument('--cuda', action='store_true')
args = parser.parse_args()
# bandwidths for the mixture-of-RBF MMD kernel
sigmaList = [0.001, 0.01]
if (args.cuda):
    assert torch.cuda.is_available()
    device = torch.device('cuda:0')
else:
    device = torch.device('cpu')
# ---- Data, models, optimizers ----
dataset = dataset_simul(
    'data/OICA_data/OICA_data_{}mixtures_{}components.npz'.format(args.num_mixtures, args.num_components))
dataloader = DataLoader(dataset, batch_size=args.batch_size, shuffle=True)
transformer = Gaussian_transformer(args.num_components).to(device)
generator = Generative_net(args.num_mixtures, args.num_components, dataset.get_real_A()).to(device)
transformer_optimizer = optim.Adam(transformer.parameters(), lr=args.lr_T)
generator_optimizer = optim.Adam(generator.parameters(), lr=args.lr_G)
# ---- Training loop: minimize MMD between generated and observed mixtures ----
for epoch in range(args.num_epochs):
    for step, (real_mixtures, real_components) in enumerate(dataloader):
        generator.zero_grad()
        transformer.zero_grad()
        real_mixtures = real_mixtures.to(device)
        batch_size_i = real_mixtures.shape[0]
        # generate fake components from noise, then mix them with the learned A
        fake_components = transformer.forward(batch_size_i)
        fake_mixtures = generator.forward(fake_components)
        # relu guards against tiny negative MMD estimates before the sqrt
        MMD = torch.sqrt(F.relu(mix_rbf_mmd2(fake_mixtures, real_mixtures, sigmaList)))
        MMD.backward()
        transformer_optimizer.step()
        generator_optimizer.step()
    # periodically report the MSE between the (sign-normalized) estimated
    # and ground-truth mixing matrices
    if epoch % args.print_int == 0 and epoch != 0:
        print('##########epoch{}##########'.format(epoch))
        MSE_func = nn.MSELoss()
        real_A = torch.abs(dataset.get_real_A())
        fake_A = torch.abs(list(generator.parameters())[0]).detach().cpu()
        real_A, fake_A = normalize_mixing_matrix(real_A, fake_A)
        # for i in range(num_components):
        #     fake_A[:, i]/=normalize_factor[i]
        print('estimated A', fake_A)
        print('real A', real_A)
        MSE = MSE_func(real_A, fake_A)
        print('MSE: {}, MMD: {}'.format(MSE, MMD))
| 6,446 | 41.695364 | 118 | py |
Likelihood-free_OICA | Likelihood-free_OICA-master/libs/pytorch_pgm.py | from torch.optim.sgd import SGD
import torch
from torch.optim.optimizer import required
class PGM(SGD):
    """Proximal Gradient Method optimizer.

    Performs a standard SGD step (optionally with momentum / Nesterov) and
    then applies a per-parameter-group proximal operator, e.g. soft
    thresholding for L1 regularization or projection onto the non-negative
    orthant.
    """
    def __init__(self, params, proxs, lr=required, reg_lambda=0, momentum=0, dampening=0,
                 nesterov=False):
        # weight_decay is fixed to 0: any regularization is handled by the prox
        kwargs = dict(lr=lr, momentum=momentum, dampening=dampening, weight_decay=0, nesterov=nesterov)
        super().__init__(params, **kwargs)
        self.lr = lr
        self.reg_lambda = reg_lambda
        # exactly one prox per parameter group
        if len(proxs) != len(self.param_groups):
            raise ValueError("Invalid length of argument proxs: {} instead of {}".format(len(proxs), len(self.param_groups)))
        for group, prox in zip(self.param_groups, list(proxs)):
            group.setdefault('prox', prox)
    def step(self, closure=None):
        # this performs a gradient step
        # optionally with momentum or nesterov acceleration
        super().step(closure=closure)
        for group in self.param_groups:
            prox = group['prox']
            # here we apply the proximal operator to each parameter in a group
            # NOTE(review): the prox uses self.lr rather than group['lr'], so
            # with multiple groups or an lr scheduler the prox step size may
            # be stale — confirm this is intended.
            for p in group['params']:
                p.data = prox(p.data, self.lr, self.reg_lambda)
def prox_soft(X, step, thresh=0):
    """Soft-thresholding proximal operator for the L1 penalty.

    Shrinks ``X`` toward zero by ``step * thresh`` and zeroes whatever
    crosses the origin.
    """
    effective_thresh = step_gamma(step, thresh)
    shrunk = torch.abs(X) - effective_thresh
    return torch.sign(X) * prox_plus(shrunk)
def prox_plus(X):
    """Project ``X`` onto the non-negative orthant, in place, and return it."""
    return X.clamp_(min=0)
def step_gamma(step, gamma):
    """Scale a continuous proximal-operator parameter by the step size.

    Every proximal operator of a function with a continuous parameter
    (e.g. ``gamma * ||x||_1``) must rescale that parameter by the
    algorithm's step size.

    Returns:
        gamma * step
    """
    return step * gamma
Likelihood-free_OICA | Likelihood-free_OICA-master/libs/common_utils.py | import torch
import numpy as np
def gumbel_softmax(p, device, sample_num=1, tau=0.2):
    """Draw ``sample_num`` Gumbel-softmax relaxed categorical samples.

    Args:
        p: 1-D tensor of category probabilities (on ``device``).
        device: torch device for the Gumbel noise.
        sample_num: number of rows to draw.
        tau: relaxation temperature; smaller is closer to one-hot.

    Returns:
        (sample_num, len(p)) tensor; each row sums to 1.
    """
    num_cats = p.shape[0]
    # Gumbel(0, 1) noise via the inverse-CDF trick, clipped away from {0, 1}.
    uniform = torch.Tensor(np.random.uniform(0.0001, 0.9999, [sample_num, num_cats]))
    gumbel = -torch.log(-torch.log(uniform))
    scores = torch.exp((torch.log(p).repeat(sample_num, 1) + gumbel.to(device)) / tau)
    row_sums = torch.sum(scores, dim=1)
    return scores / row_sums.repeat(num_cats, 1).t()
class cos_act(torch.nn.Module):
    """Cosine activation scaled by sqrt(2/D), random-Fourier-feature style."""

    def __init__(self):
        super().__init__()

    def forward(self, x):
        # D is the feature dimension (second axis of x).
        feat_dim = x.shape[1]
        scale = (2.0 / feat_dim) ** 0.5
        return scale * torch.cos(x)
def normalize_components(components, fake_components):
    """Per-row std-dev ratio sqrt(var(fake) / var(real)) for each component.

    NOTE(review): the loop bound uses ``components.shape[1]`` but indexes
    rows — this only lines up for square inputs; preserved as-is.
    """
    n_comp = components.shape[1]
    ratios = torch.zeros([n_comp])
    components = torch.Tensor(components.float())
    for idx in range(n_comp):
        ratios[idx] = torch.sqrt(
            torch.var(fake_components[idx, :]) / torch.var(components[idx, :]))
    return ratios
def normalize_mixing_matrix(real_A, estimated_A):
    """Make the two mixing matrices comparable: take absolute values and
    rescale each so its first column has unit Euclidean norm."""
    real_abs = torch.abs(real_A)
    est_abs = torch.abs(estimated_A)
    real_scaled = real_abs / torch.norm(real_abs[:, 0])
    est_scaled = est_abs / torch.norm(est_abs[:, 0])
    return real_scaled, est_scaled
if __name__ == '__main__':
    # Smoke test for gumbel_softmax. The original call passed 100 and 0.05
    # positionally into the (p, device, sample_num, tau) signature, so 100
    # landed in `device` and 0.05 in `sample_num`, which crashes; pass the
    # device explicitly instead.
    print(gumbel_softmax(torch.Tensor(np.array([0.1, 0.9])), torch.device('cpu'), 100, 0.05))
| 1,497 | 33.045455 | 104 | py |
Likelihood-free_OICA | Likelihood-free_OICA-master/libs/distance_measure/kernel_density.py | import torch
def kernel_density_loss(X, Y, sigma_list):
    """Negative average log-likelihood of each row of Y under a Gaussian
    kernel density estimate centred on the rows of X.

    Args:
        X: (m, n) tensor of KDE centres.
        Y: (m, n) tensor of evaluation points (must have the same m).
        sigma_list: iterable of Gaussian bandwidths; densities are summed
            over bandwidths (an unnormalized mixture).

    Returns:
        Scalar tensor: -mean_j log( mean_i k(x_i, y_j) ).
    """
    # Fix: the original referenced `np` without importing numpy (NameError);
    # math covers the scalar constant. Debug prints were also removed.
    import math

    assert (X.size(0) == Y.size(0))
    m = X.size(0)
    n = X.size(1)
    # Broadcast X and Y to (m, m, n) so diff[i, j] = x_i - y_j.
    expand_X = X.unsqueeze(2).expand(-1, -1, m).permute(0, 2, 1)
    expand_Y = Y.unsqueeze(2).expand(-1, -1, m).permute(2, 0, 1)
    diff = expand_X - expand_Y
    sq_dist = torch.zeros((m, m))
    for i in range(n):
        sq_dist = sq_dist + diff[:, :, i] ** 2
    density = torch.zeros((m, m))
    for sigma in sigma_list:
        density = density + torch.exp(-1 * sq_dist / (2 * (sigma ** 2))) / math.sqrt(2 * math.pi * (sigma ** 2))
    loss = -1 * torch.sum(torch.log(torch.sum(density, dim=0) / m)) / m
    return loss
Likelihood-free_OICA | Likelihood-free_OICA-master/libs/distance_measure/mmd.py | #!/usr/bin/env python
# encoding: utf-8
import torch
min_var_est = 1e-8
# Consider linear time MMD with a linear kernel:
# K(f(x), f(y)) = f(x)^Tf(y)
# h(z_i, z_j) = k(x_i, x_j) + k(y_i, y_j) - k(x_i, y_j) - k(x_j, y_i)
# = [f(x_i) - f(y_i)]^T[f(x_j) - f(y_j)]
#
# f_of_X: batch_size * k
# f_of_Y: batch_size * k
def linear_mmd2(f_of_X, f_of_Y):
    """Linear-time MMD^2 estimate with a linear kernel.

    Pairs consecutive samples: mean over i of <d_i, d_{i+1}> where
    d = f(X) - f(Y).
    """
    delta = f_of_X - f_of_Y
    pairwise = (delta[:-1] * delta[1:]).sum(1)
    return torch.mean(pairwise)
# Consider linear time MMD with a polynomial kernel:
# K(f(x), f(y)) = (alpha*f(x)^Tf(y) + c)^d
# f_of_X: batch_size * k
# f_of_Y: batch_size * k
def poly_mmd2(f_of_X, f_of_Y, d=2, alpha=1.0, c=2.0):
    """Linear-time MMD^2 estimate with a polynomial kernel
    k(a, b) = (alpha * <a, b> + c) ** d, pairing consecutive samples."""

    def _mean_kernel(a, b):
        # k(a_i, b_{i+1}), averaged over i.
        k = alpha * (a[:-1] * b[1:]).sum(1) + c
        return torch.mean(k.pow(d))

    return (_mean_kernel(f_of_X, f_of_X) + _mean_kernel(f_of_Y, f_of_Y)
            - _mean_kernel(f_of_X, f_of_Y) - _mean_kernel(f_of_Y, f_of_X))
def _mix_rbf_kernel(X, Y, sigma_list):
assert(X.size(0) == Y.size(0))
m = X.size(0)
Z = torch.cat((X, Y), 0)
ZZT = torch.mm(Z, Z.t())
diag_ZZT = torch.diag(ZZT).unsqueeze(1)
Z_norm_sqr = diag_ZZT.expand_as(ZZT)
exponent = Z_norm_sqr - 2 * ZZT + Z_norm_sqr.t()
K = 0.0
for sigma in sigma_list:
# gamma = 1.0 / (2 * sigma**2)
# K += torch.exp(-gamma * exponent)
K += torch.pow(1+exponent/(2*sigma), -sigma) # rational quadratic kernel
return K[:m, :m], K[:m, m:], K[m:, m:], len(sigma_list)
def mix_rbf_mmd2(X, Y, sigma_list, biased=True):
    """MMD^2 between sample sets X and Y under the mixed kernel.

    `const_diagonal` is deliberately left False (the kernel count returned
    by _mix_rbf_kernel is unused here).
    """
    K_XX, K_XY, K_YY, _ = _mix_rbf_kernel(X, Y, sigma_list)
    return _mmd2(K_XX, K_XY, K_YY, const_diagonal=False, biased=biased)
def mix_rbf_mmd2_and_ratio(X, Y, sigma_list, biased=True):
    """MMD^2 between X and Y plus the variance-normalized test ratio.

    `const_diagonal` is deliberately left False, mirroring mix_rbf_mmd2.
    """
    K_XX, K_XY, K_YY, _ = _mix_rbf_kernel(X, Y, sigma_list)
    return _mmd2_and_ratio(K_XX, K_XY, K_YY, const_diagonal=False, biased=biased)
################################################################################
# Helper functions to compute variances based on kernel matrices
################################################################################
def _mmd2(K_XX, K_XY, K_YY, const_diagonal=False, biased=False):
m = K_XX.size(0) # assume X, Y are same shape
if biased:
mmd2 = K_XX + K_YY - K_XY - K_XY.transpose(0, 1)
mmd2 = mmd2.sum()
mmd2 = mmd2/(m*m)
else:
diag_X = torch.diag(K_XX) # (m,)
diag_Y = torch.diag(K_YY) # (m,)
sum_diag_X = torch.sum(diag_X)/(m*(m-1))
sum_diag_Y = torch.sum(diag_Y)/(m*(m-1))
mmd2 = K_XX/(m*(m-1)) + K_YY(m*(m-1)) - K_XY - K_XY.transpose(0, 1)/(m*m)
mmd2 = mmd2.sum()
mmd2 = mmd2 - sum_diag_X - sum_diag_X
return mmd2
# def _mmd2(K_XX, K_XY, K_YY, const_diagonal=False, biased=False):
# m = K_XX.size(0) # assume X, Y are same shape
# # Get the various sums of kernels that we'll use
# # Kts drop the diagonal, but we don't need to compute them explicitly
# if const_diagonal is not False:
# diag_X = diag_Y = const_diagonal
# sum_diag_X = sum_diag_Y = m * const_diagonal
# else:
# diag_X = torch.diag(K_XX) # (m,)
# diag_Y = torch.diag(K_YY) # (m,)
# sum_diag_X = torch.sum(diag_X)
# sum_diag_Y = torch.sum(diag_Y)
# Kt_XX_sums = K_XX.sum(dim=1) - diag_X # \tilde{K}_XX * e = K_XX * e - diag_X
# Kt_YY_sums = K_YY.sum(dim=1) - diag_Y # \tilde{K}_YY * e = K_YY * e - diag_Y
# K_XY_sums_0 = K_XY.sum(dim=0) # K_{XY}^T * e
# Kt_XX_sum = Kt_XX_sums.sum() # e^T * \tilde{K}_XX * e
# Kt_YY_sum = Kt_YY_sums.sum() # e^T * \tilde{K}_YY * e
# K_XY_sum = K_XY_sums_0.sum() # e^T * K_{XY} * e
# if biased:
# mmd2 = ((Kt_XX_sum + sum_diag_X) / (m * m)
# + (Kt_YY_sum + sum_diag_Y) / (m * m)
# - 2.0 * K_XY_sum / (m * m))
# else:
# mmd2 = (Kt_XX_sum / (m * (m - 1))
# + Kt_YY_sum / (m * (m - 1))
# - 2.0 * K_XY_sum / (m * m))
# return mmd2
def _mmd2_and_ratio(K_XX, K_XY, K_YY, const_diagonal=False, biased=False):
    """Return (ratio, mmd2, var_est): the variance-normalized MMD statistic
    along with the raw estimate and its variance. The variance is clamped
    below by the module-level `min_var_est` to keep the ratio finite."""
    mmd2, var_est = _mmd2_and_variance(K_XX, K_XY, K_YY,
                                       const_diagonal=const_diagonal,
                                       biased=biased)
    ratio = mmd2 / torch.sqrt(torch.clamp(var_est, min=min_var_est))
    return ratio, mmd2, var_est
def _mmd2_and_variance(K_XX, K_XY, K_YY, const_diagonal=False, biased=False):
    """Return (mmd2, var_est): the quadratic-time MMD^2 estimate and an
    estimate of the variance of that estimate, computed from the kernel
    blocks. The variance formula follows the MMD test-power literature
    (presumably Sutherland et al. 2017, eq. for \\hat{sigma}^2 — verify).

    Args:
        K_XX, K_YY: (m, m) within-sample kernel matrices.
        K_XY: (m, m) cross kernel matrix.
        const_diagonal: if not False, k(z, z) equals this known constant.
        biased: biased vs unbiased MMD^2 estimator (variance formula is the
            same either way).
    """
    m = K_XX.size(0)    # assume X, Y are same shape
    # Get the various sums of kernels that we'll use
    # Kts drop the diagonal, but we don't need to compute them explicitly
    if const_diagonal is not False:
        diag_X = diag_Y = const_diagonal
        sum_diag_X = sum_diag_Y = m * const_diagonal
        sum_diag2_X = sum_diag2_Y = m * const_diagonal**2
    else:
        diag_X = torch.diag(K_XX)                       # (m,)
        diag_Y = torch.diag(K_YY)                       # (m,)
        sum_diag_X = torch.sum(diag_X)
        sum_diag_Y = torch.sum(diag_Y)
        sum_diag2_X = diag_X.dot(diag_X)
        sum_diag2_Y = diag_Y.dot(diag_Y)
    Kt_XX_sums = K_XX.sum(dim=1) - diag_X             # \tilde{K}_XX * e = K_XX * e - diag_X
    Kt_YY_sums = K_YY.sum(dim=1) - diag_Y             # \tilde{K}_YY * e = K_YY * e - diag_Y
    K_XY_sums_0 = K_XY.sum(dim=0)                     # K_{XY}^T * e
    K_XY_sums_1 = K_XY.sum(dim=1)                     # K_{XY} * e
    Kt_XX_sum = Kt_XX_sums.sum()                       # e^T * \tilde{K}_XX * e
    Kt_YY_sum = Kt_YY_sums.sum()                       # e^T * \tilde{K}_YY * e
    K_XY_sum = K_XY_sums_0.sum()                       # e^T * K_{XY} * e
    Kt_XX_2_sum = (K_XX ** 2).sum() - sum_diag2_X      # \| \tilde{K}_XX \|_F^2
    Kt_YY_2_sum = (K_YY ** 2).sum() - sum_diag2_Y      # \| \tilde{K}_YY \|_F^2
    K_XY_2_sum = (K_XY ** 2).sum()                    # \| K_{XY} \|_F^2
    if biased:
        mmd2 = ((Kt_XX_sum + sum_diag_X) / (m * m)
                + (Kt_YY_sum + sum_diag_Y) / (m * m)
                - 2.0 * K_XY_sum / (m * m))
    else:
        mmd2 = (Kt_XX_sum / (m * (m - 1))
                + Kt_YY_sum / (m * (m - 1))
                - 2.0 * K_XY_sum / (m * m))
    # Plug-in estimate of the asymptotic variance of the MMD^2 estimator,
    # assembled from the block sums above.
    var_est = (
        2.0 / (m**2 * (m - 1.0)**2) * (2 * Kt_XX_sums.dot(Kt_XX_sums) - Kt_XX_2_sum + 2 * Kt_YY_sums.dot(Kt_YY_sums) - Kt_YY_2_sum)
        - (4.0*m - 6.0) / (m**3 * (m - 1.0)**3) * (Kt_XX_sum**2 + Kt_YY_sum**2)
        + 4.0*(m - 2.0) / (m**3 * (m - 1.0)**2) * (K_XY_sums_1.dot(K_XY_sums_1) + K_XY_sums_0.dot(K_XY_sums_0))
        - 4.0*(m - 3.0) / (m**3 * (m - 1.0)**2) * (K_XY_2_sum) - (8 * m - 12) / (m**5 * (m - 1)) * K_XY_sum**2
        + 8.0 / (m**3 * (m - 1.0)) * (
            1.0 / m * (Kt_XX_sum + Kt_YY_sum) * K_XY_sum
            - Kt_XX_sums.dot(K_XY_sums_1)
            - Kt_YY_sums.dot(K_XY_sums_0))
        )
    return mmd2, var_est
| 7,282 | 37.739362 | 131 | py |
DSLA-DSLA | DSLA-DSLA/setup.py | #!/usr/bin/env python
# Copyright (c) OpenMMLab. All rights reserved.
import os
import os.path as osp
import platform
import shutil
import sys
import warnings
from setuptools import find_packages, setup
import torch
from torch.utils.cpp_extension import (BuildExtension, CppExtension,
CUDAExtension)
def readme():
    """Return the contents of README.md (used as the long description)."""
    with open('README.md', encoding='utf-8') as f:
        return f.read()
# Module (relative to this setup.py) that defines the package __version__.
version_file = 'mmdet/version.py'
def get_version():
    """Read __version__ from `version_file` without importing the package
    (importing mmdet at build time would require its dependencies)."""
    with open(version_file, 'r') as f:
        # exec populates this function's locals() snapshot with __version__
        # (relies on CPython's exec-into-locals behavior).
        exec(compile(f.read(), version_file, 'exec'))
    return locals()['__version__']
def make_cuda_ext(name, module, sources, sources_cuda=None):
    """Build a CppExtension/CUDAExtension named ``{module}.{name}``.

    Args:
        name (str): extension name within the module.
        module (str): dotted package path; source paths are resolved
            relative to it.
        sources (list[str]): C++ source files relative to the module dir.
        sources_cuda (list[str] | None): extra CUDA sources, compiled only
            when CUDA is available or FORCE_CUDA=1.

    Returns:
        setuptools.Extension: the configured extension.

    Fixes: the mutable default argument ``sources_cuda=[]`` (shared across
    calls) and the in-place ``sources += sources_cuda`` that mutated the
    caller's list.
    """
    if sources_cuda is None:
        sources_cuda = []
    # Copy so the caller's list is never mutated.
    sources = list(sources)
    define_macros = []
    extra_compile_args = {'cxx': []}
    if torch.cuda.is_available() or os.getenv('FORCE_CUDA', '0') == '1':
        define_macros += [('WITH_CUDA', None)]
        extension = CUDAExtension
        extra_compile_args['nvcc'] = [
            '-D__CUDA_NO_HALF_OPERATORS__',
            '-D__CUDA_NO_HALF_CONVERSIONS__',
            '-D__CUDA_NO_HALF2_OPERATORS__',
        ]
        sources += sources_cuda
    else:
        print(f'Compiling {name} without CUDA')
        extension = CppExtension
    return extension(
        name=f'{module}.{name}',
        sources=[os.path.join(*module.split('.'), p) for p in sources],
        define_macros=define_macros,
        extra_compile_args=extra_compile_args)
def parse_requirements(fname='requirements.txt', with_version=True):
    """Parse the package dependencies listed in a requirements file but strips
    specific versioning information.
    Args:
        fname (str): path to requirements file
        with_version (bool, default=False): if True include version specs
    Returns:
        List[str]: list of requirements items
    CommandLine:
        python -c "import setup; print(setup.parse_requirements())"
    """
    import sys
    from os.path import exists
    import re
    require_fpath = fname
    def parse_line(line):
        """Parse information from a line in a requirements text file."""
        if line.startswith('-r '):
            # Allow specifying requirements in other files
            target = line.split(' ')[1]
            for info in parse_require_file(target):
                yield info
        else:
            # Each yielded dict has at least 'line' and 'package'; 'version'
            # is an (operator, version) pair and 'platform_deps' the text
            # after ';', when present.
            info = {'line': line}
            if line.startswith('-e '):
                info['package'] = line.split('#egg=')[1]
            elif '@git+' in line:
                info['package'] = line
            else:
                # Remove versioning from the package
                pat = '(' + '|'.join(['>=', '==', '>']) + ')'
                parts = re.split(pat, line, maxsplit=1)
                parts = [p.strip() for p in parts]
                info['package'] = parts[0]
                if len(parts) > 1:
                    op, rest = parts[1:]
                    if ';' in rest:
                        # Handle platform specific dependencies
                        # http://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-platform-specific-dependencies
                        version, platform_deps = map(str.strip,
                                                     rest.split(';'))
                        info['platform_deps'] = platform_deps
                    else:
                        version = rest  # NOQA
                    info['version'] = (op, version)
            yield info
    def parse_require_file(fpath):
        # Yield parsed entries for every non-empty, non-comment line.
        with open(fpath, 'r') as f:
            for line in f.readlines():
                line = line.strip()
                if line and not line.startswith('#'):
                    for info in parse_line(line):
                        yield info
    def gen_packages_items():
        # Reassemble each entry into a requirement string such as
        # "pkg>=1.0;platform_system=='Linux'".
        if exists(require_fpath):
            for info in parse_require_file(require_fpath):
                parts = [info['package']]
                if with_version and 'version' in info:
                    parts.extend(info['version'])
                if not sys.version.startswith('3.4'):
                    # apparently package_deps are broken in 3.4
                    platform_deps = info.get('platform_deps')
                    if platform_deps is not None:
                        parts.append(';' + platform_deps)
                item = ''.join(parts)
                yield item
    packages = list(gen_packages_items())
    return packages
def add_mim_extension():
    """Add extra files that are required to support MIM into the package.
    These files will be added by creating a symlink to the originals if the
    package is installed in `editable` mode (e.g. pip install -e .), or by
    copying from the originals otherwise.
    """
    # parse installment mode
    if 'develop' in sys.argv:
        # installed by `pip install -e .`
        if platform.system() == 'Windows':
            # set `copy` mode here since symlink fails on Windows.
            mode = 'copy'
        else:
            mode = 'symlink'
    elif 'sdist' in sys.argv or 'bdist_wheel' in sys.argv:
        # installed by `pip install .`
        # or create source distribution by `python setup.py sdist`
        mode = 'copy'
    else:
        # any other setup.py command: nothing to do
        return
    filenames = ['tools', 'configs', 'demo', 'model-index.yml']
    repo_path = osp.dirname(__file__)
    # MIM looks for these assets under the installed package's `.mim` dir.
    mim_path = osp.join(repo_path, 'mmdet', '.mim')
    os.makedirs(mim_path, exist_ok=True)
    for filename in filenames:
        if osp.exists(filename):
            src_path = osp.join(repo_path, filename)
            tar_path = osp.join(mim_path, filename)
            # Remove any stale file/link/dir before recreating it.
            if osp.isfile(tar_path) or osp.islink(tar_path):
                os.remove(tar_path)
            elif osp.isdir(tar_path):
                shutil.rmtree(tar_path)
            if mode == 'symlink':
                src_relpath = osp.relpath(src_path, osp.dirname(tar_path))
                os.symlink(src_relpath, tar_path)
            elif mode == 'copy':
                if osp.isfile(src_path):
                    shutil.copyfile(src_path, tar_path)
                elif osp.isdir(src_path):
                    shutil.copytree(src_path, tar_path)
                else:
                    warnings.warn(f'Cannot copy file {src_path}.')
            else:
                raise ValueError(f'Invalid mode {mode}')
if __name__ == '__main__':
    # Stage the MIM support files first so they are bundled by setup().
    add_mim_extension()
    setup(
        name='mmdet',
        version=get_version(),
        description='OpenMMLab Detection Toolbox and Benchmark',
        long_description=readme(),
        long_description_content_type='text/markdown',
        author='MMDetection Contributors',
        author_email='openmmlab@gmail.com',
        keywords='computer vision, object detection',
        url='https://github.com/open-mmlab/mmdetection',
        packages=find_packages(exclude=('configs', 'tools', 'demo')),
        include_package_data=True,
        classifiers=[
            'Development Status :: 5 - Production/Stable',
            'License :: OSI Approved :: Apache Software License',
            'Operating System :: OS Independent',
            'Programming Language :: Python :: 3',
            'Programming Language :: Python :: 3.6',
            'Programming Language :: Python :: 3.7',
            'Programming Language :: Python :: 3.8',
            'Programming Language :: Python :: 3.9',
        ],
        license='Apache License 2.0',
        setup_requires=parse_requirements('requirements/build.txt'),
        tests_require=parse_requirements('requirements/tests.txt'),
        install_requires=parse_requirements('requirements/runtime.txt'),
        # Optional extras: `pip install mmdet[all]`, `mmdet[tests]`, ...
        extras_require={
            'all': parse_requirements('requirements.txt'),
            'tests': parse_requirements('requirements/tests.txt'),
            'build': parse_requirements('requirements/build.txt'),
            'optional': parse_requirements('requirements/optional.txt'),
        },
        # C/CUDA extensions are registered elsewhere; keep the custom
        # build_ext so torch's BuildExtension handles any that appear.
        ext_modules=[],
        cmdclass={'build_ext': BuildExtension},
        zip_safe=False)
| 8,008 | 34.914798 | 125 | py |
DSLA-DSLA | DSLA-DSLA/tools/test.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import os.path as osp
import time
import warnings
import mmcv
import torch
from mmcv import Config, DictAction
from mmcv.cnn import fuse_conv_bn
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmcv.runner import (get_dist_info, init_dist, load_checkpoint,
wrap_fp16_model)
from mmdet.apis import multi_gpu_test, single_gpu_test
from mmdet.datasets import (build_dataloader, build_dataset,
replace_ImageToTensor)
from mmdet.models import build_detector
def parse_args():
    """Parse command-line arguments for MMDet testing/evaluation.

    Returns the parsed argparse.Namespace; also resolves the deprecated
    --options alias into --eval-options and exports LOCAL_RANK for
    distributed launchers.
    """
    parser = argparse.ArgumentParser(
        description='MMDet test (and eval) a model')
    parser.add_argument('config', help='test config file path')
    parser.add_argument('checkpoint', help='checkpoint file')
    parser.add_argument(
        '--work-dir',
        help='the directory to save the file containing evaluation metrics')
    parser.add_argument('--out', help='output result file in pickle format')
    parser.add_argument(
        '--fuse-conv-bn',
        action='store_true',
        help='Whether to fuse conv and bn, this will slightly increase'
        'the inference speed')
    parser.add_argument(
        '--gpu-ids',
        type=int,
        nargs='+',
        help='ids of gpus to use '
        '(only applicable to non-distributed testing)')
    parser.add_argument(
        '--format-only',
        action='store_true',
        help='Format the output results without perform evaluation. It is'
        'useful when you want to format the result to a specific format and '
        'submit it to the test server')
    parser.add_argument(
        '--eval',
        type=str,
        nargs='+',
        help='evaluation metrics, which depends on the dataset, e.g., "bbox",'
        ' "segm", "proposal" for COCO, and "mAP", "recall" for PASCAL VOC')
    parser.add_argument('--show', action='store_true', help='show results')
    parser.add_argument(
        '--show-dir', help='directory where painted images will be saved')
    parser.add_argument(
        '--show-score-thr',
        type=float,
        default=0.3,
        help='score threshold (default: 0.3)')
    parser.add_argument(
        '--gpu-collect',
        action='store_true',
        help='whether to use gpu to collect results.')
    parser.add_argument(
        '--tmpdir',
        help='tmp directory used for collecting results from multiple '
        'workers, available when gpu-collect is not specified')
    parser.add_argument(
        '--cfg-options',
        nargs='+',
        action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file. If the value to '
        'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
        'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
        'Note that the quotation marks are necessary and that no white space '
        'is allowed.')
    parser.add_argument(
        '--options',
        nargs='+',
        action=DictAction,
        help='custom options for evaluation, the key-value pair in xxx=yyy '
        'format will be kwargs for dataset.evaluate() function (deprecate), '
        'change to --eval-options instead.')
    parser.add_argument(
        '--eval-options',
        nargs='+',
        action=DictAction,
        help='custom options for evaluation, the key-value pair in xxx=yyy '
        'format will be kwargs for dataset.evaluate() function')
    parser.add_argument(
        '--launcher',
        choices=['none', 'pytorch', 'slurm', 'mpi'],
        default='none',
        help='job launcher')
    parser.add_argument('--local_rank', type=int, default=0)
    args = parser.parse_args()
    # torch.distributed launchers pass --local_rank; mirror it into the env.
    if 'LOCAL_RANK' not in os.environ:
        os.environ['LOCAL_RANK'] = str(args.local_rank)
    # --options is a deprecated alias of --eval-options; they are exclusive.
    if args.options and args.eval_options:
        raise ValueError(
            '--options and --eval-options cannot be both '
            'specified, --options is deprecated in favor of --eval-options')
    if args.options:
        warnings.warn('--options is deprecated in favor of --eval-options')
        args.eval_options = args.options
    return args
def main():
    """Run single- or multi-GPU testing of a detector and optionally save,
    format, show or evaluate the results."""
    args = parse_args()
    # At least one output action must be requested.
    assert args.out or args.eval or args.format_only or args.show \
        or args.show_dir, \
        ('Please specify at least one operation (save/eval/format/show the '
         'results / save the results) with the argument "--out", "--eval"'
         ', "--format-only", "--show" or "--show-dir"')
    if args.eval and args.format_only:
        raise ValueError('--eval and --format_only cannot be both specified')
    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')
    cfg = Config.fromfile(args.config)
    if args.cfg_options is not None:
        cfg.merge_from_dict(args.cfg_options)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # Drop any pretrained weights in the config: the checkpoint is loaded
    # explicitly below (including DetectoRS-style rfp_backbone necks).
    cfg.model.pretrained = None
    if cfg.model.get('neck'):
        if isinstance(cfg.model.neck, list):
            for neck_cfg in cfg.model.neck:
                if neck_cfg.get('rfp_backbone'):
                    if neck_cfg.rfp_backbone.get('pretrained'):
                        neck_cfg.rfp_backbone.pretrained = None
        elif cfg.model.neck.get('rfp_backbone'):
            if cfg.model.neck.rfp_backbone.get('pretrained'):
                cfg.model.neck.rfp_backbone.pretrained = None
    # in case the test dataset is concatenated
    samples_per_gpu = 1
    if isinstance(cfg.data.test, dict):
        cfg.data.test.test_mode = True
        samples_per_gpu = cfg.data.test.pop('samples_per_gpu', 1)
        if samples_per_gpu > 1:
            # Replace 'ImageToTensor' to 'DefaultFormatBundle'
            cfg.data.test.pipeline = replace_ImageToTensor(
                cfg.data.test.pipeline)
    elif isinstance(cfg.data.test, list):
        for ds_cfg in cfg.data.test:
            ds_cfg.test_mode = True
        samples_per_gpu = max(
            [ds_cfg.pop('samples_per_gpu', 1) for ds_cfg in cfg.data.test])
        if samples_per_gpu > 1:
            for ds_cfg in cfg.data.test:
                ds_cfg.pipeline = replace_ImageToTensor(ds_cfg.pipeline)
    if args.gpu_ids is not None:
        cfg.gpu_ids = args.gpu_ids
    else:
        cfg.gpu_ids = range(1)
    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
        if len(cfg.gpu_ids) > 1:
            warnings.warn(
                f'We treat {cfg.gpu_ids} as gpu-ids, and reset to '
                f'{cfg.gpu_ids[0:1]} as gpu-ids to avoid potential error in '
                'non-distribute testing time.')
            cfg.gpu_ids = cfg.gpu_ids[0:1]
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)
    rank, _ = get_dist_info()
    # allows not to create
    if args.work_dir is not None and rank == 0:
        mmcv.mkdir_or_exist(osp.abspath(args.work_dir))
        timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
        json_file = osp.join(args.work_dir, f'eval_{timestamp}.json')
    # build the dataloader
    dataset = build_dataset(cfg.data.test)
    data_loader = build_dataloader(
        dataset,
        samples_per_gpu=samples_per_gpu,
        workers_per_gpu=cfg.data.workers_per_gpu,
        dist=distributed,
        shuffle=False)
    # build the model and load checkpoint
    cfg.model.train_cfg = None
    model = build_detector(cfg.model, test_cfg=cfg.get('test_cfg'))
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        wrap_fp16_model(model)
    checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
    if args.fuse_conv_bn:
        model = fuse_conv_bn(model)
    # old versions did not save class info in checkpoints, this walkaround is
    # for backward compatibility
    if 'CLASSES' in checkpoint.get('meta', {}):
        model.CLASSES = checkpoint['meta']['CLASSES']
    else:
        model.CLASSES = dataset.CLASSES
    if not distributed:
        model = MMDataParallel(model, device_ids=cfg.gpu_ids)
        outputs = single_gpu_test(model, data_loader, args.show, args.show_dir,
                                  args.show_score_thr)
    else:
        model = MMDistributedDataParallel(
            model.cuda(),
            device_ids=[torch.cuda.current_device()],
            broadcast_buffers=False)
        outputs = multi_gpu_test(model, data_loader, args.tmpdir,
                                 args.gpu_collect)
    rank, _ = get_dist_info()
    # Only rank 0 writes/evaluates results.
    if rank == 0:
        if args.out:
            print(f'\nwriting results to {args.out}')
            mmcv.dump(outputs, args.out)
        kwargs = {} if args.eval_options is None else args.eval_options
        if args.format_only:
            dataset.format_results(outputs, **kwargs)
        if args.eval:
            eval_kwargs = cfg.get('evaluation', {}).copy()
            # hard-code way to remove EvalHook args
            for key in [
                    'interval', 'tmpdir', 'start', 'gpu_collect', 'save_best',
                    'rule', 'dynamic_intervals'
            ]:
                eval_kwargs.pop(key, None)
            eval_kwargs.update(dict(metric=args.eval, **kwargs))
            metric = dataset.evaluate(outputs, **eval_kwargs)
            print(metric)
            metric_dict = dict(config=args.config, metric=metric)
            if args.work_dir is not None and rank == 0:
                mmcv.dump(metric_dict, json_file)
# Script entry point.
if __name__ == '__main__':
    main()
| 9,774 | 37.789683 | 79 | py |
DSLA-DSLA | DSLA-DSLA/tools/train.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import copy
import os
import os.path as osp
import time
import warnings
import mmcv
import torch
from mmcv import Config, DictAction
from mmcv.runner import get_dist_info, init_dist
from mmcv.utils import get_git_hash
from mmdet import __version__
from mmdet.apis import init_random_seed, set_random_seed, train_detector
from mmdet.datasets import build_dataset
from mmdet.models import build_detector
from mmdet.utils import collect_env, get_root_logger
def parse_args():
    """Parse command-line arguments for MMDet training.

    Returns the parsed argparse.Namespace; also resolves the deprecated
    --options alias into --cfg-options and exports LOCAL_RANK for
    distributed launchers.
    """
    parser = argparse.ArgumentParser(description='Train a detector')
    parser.add_argument('config', help='train config file path')
    parser.add_argument('--work-dir', help='the dir to save logs and models')
    parser.add_argument(
        '--resume-from', help='the checkpoint file to resume from')
    parser.add_argument(
        '--auto-resume',
        action='store_true',
        help='resume from the latest checkpoint automatically')
    parser.add_argument(
        '--no-validate',
        action='store_true',
        help='whether not to evaluate the checkpoint during training')
    # --gpus and --gpu-ids are mutually exclusive ways to pick devices.
    group_gpus = parser.add_mutually_exclusive_group()
    group_gpus.add_argument(
        '--gpus',
        type=int,
        help='number of gpus to use '
        '(only applicable to non-distributed training)')
    group_gpus.add_argument(
        '--gpu-ids',
        type=int,
        nargs='+',
        help='ids of gpus to use '
        '(only applicable to non-distributed training)')
    parser.add_argument('--seed', type=int, default=None, help='random seed')
    parser.add_argument(
        '--deterministic',
        action='store_true',
        help='whether to set deterministic options for CUDNN backend.')
    parser.add_argument(
        '--options',
        nargs='+',
        action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file (deprecate), '
        'change to --cfg-options instead.')
    parser.add_argument(
        '--cfg-options',
        nargs='+',
        action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file. If the value to '
        'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
        'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
        'Note that the quotation marks are necessary and that no white space '
        'is allowed.')
    parser.add_argument(
        '--launcher',
        choices=['none', 'pytorch', 'slurm', 'mpi'],
        default='none',
        help='job launcher')
    parser.add_argument('--local_rank', type=int, default=0)
    args = parser.parse_args()
    # torch.distributed launchers pass --local_rank; mirror it into the env.
    if 'LOCAL_RANK' not in os.environ:
        os.environ['LOCAL_RANK'] = str(args.local_rank)
    # --options is a deprecated alias of --cfg-options; they are exclusive.
    if args.options and args.cfg_options:
        raise ValueError(
            '--options and --cfg-options cannot be both '
            'specified, --options is deprecated in favor of --cfg-options')
    if args.options:
        warnings.warn('--options is deprecated in favor of --cfg-options')
        args.cfg_options = args.options
    return args
def main():
    """Set up config, environment, logging, model and data, then train."""
    args = parse_args()
    cfg = Config.fromfile(args.config)
    if args.cfg_options is not None:
        cfg.merge_from_dict(args.cfg_options)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # work_dir is determined in this priority: CLI > segment in file > filename
    if args.work_dir is not None:
        # update configs according to CLI args if args.work_dir is not None
        cfg.work_dir = args.work_dir + '_' + time.strftime('%Y%m%d_%H%M00', time.localtime())  # append a timestamp suffix to the work dir
    elif cfg.get('work_dir', None) is None:
        # use config filename as default work_dir if cfg.work_dir is None
        cfg.work_dir = osp.join('./work_dirs',
                                osp.splitext(osp.basename(args.config))[0])
    if args.resume_from is not None:
        cfg.resume_from = args.resume_from
    cfg.auto_resume = args.auto_resume
    if args.gpu_ids is not None:
        cfg.gpu_ids = args.gpu_ids
    else:
        cfg.gpu_ids = range(1) if args.gpus is None else range(args.gpus)
    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
        if len(cfg.gpu_ids) > 1:
            warnings.warn(
                f'We treat {cfg.gpu_ids} as gpu-ids, and reset to '
                f'{cfg.gpu_ids[0:1]} as gpu-ids to avoid potential error in '
                'non-distribute training time.')
            cfg.gpu_ids = cfg.gpu_ids[0:1]
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)
        # re-set gpu_ids with distributed training mode
        _, world_size = get_dist_info()
        cfg.gpu_ids = range(world_size)
    # create work_dir
    mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
    # dump config
    cfg.dump(osp.join(cfg.work_dir, osp.basename(args.config)))
    # init the logger before other steps
    timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    log_file = osp.join(cfg.work_dir, f'{timestamp}.log')
    logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)
    # init the meta dict to record some important information such as
    # environment info and seed, which will be logged
    meta = dict()
    # log env info
    env_info_dict = collect_env()
    env_info = '\n'.join([(f'{k}: {v}') for k, v in env_info_dict.items()])
    dash_line = '-' * 60 + '\n'
    logger.info('Environment info:\n' + dash_line + env_info + '\n' +
                dash_line)
    meta['env_info'] = env_info
    meta['config'] = cfg.pretty_text
    # log some basic info
    logger.info(f'Distributed training: {distributed}')
    logger.info(f'Config:\n{cfg.pretty_text}')
    # set random seeds
    seed = init_random_seed(args.seed)
    logger.info(f'Set random seed to {seed}, '
                f'deterministic: {args.deterministic}')
    set_random_seed(seed, deterministic=args.deterministic)
    cfg.seed = seed
    meta['seed'] = seed
    meta['exp_name'] = osp.basename(args.config)
    model = build_detector(
        cfg.model,
        train_cfg=cfg.get('train_cfg'),
        test_cfg=cfg.get('test_cfg'))
    model.init_weights()
    datasets = [build_dataset(cfg.data.train)]
    # A 2-step workflow means validation runs through the train pipeline too.
    if len(cfg.workflow) == 2:
        val_dataset = copy.deepcopy(cfg.data.val)
        val_dataset.pipeline = cfg.data.train.pipeline
        datasets.append(build_dataset(val_dataset))
    if cfg.checkpoint_config is not None:
        # save mmdet version, config file content and class names in
        # checkpoints as meta data
        cfg.checkpoint_config.meta = dict(
            mmdet_version=__version__ + get_git_hash()[:7],
            CLASSES=datasets[0].CLASSES)
    # add an attribute for visualization convenience
    model.CLASSES = datasets[0].CLASSES
    train_detector(
        model,
        datasets,
        cfg,
        distributed=distributed,
        validate=(not args.no_validate),
        timestamp=timestamp,
        meta=meta)
# Script entry point.
if __name__ == '__main__':
    main()
| 7,317 | 36.147208 | 115 | py |
DSLA-DSLA | DSLA-DSLA/tools/deployment/test_torchserver.py | from argparse import ArgumentParser
import numpy as np
import requests
from mmdet.apis import inference_detector, init_detector, show_result_pyplot
from mmdet.core import bbox2result
def parse_args():
    """Parse CLI arguments for comparing local inference with a TorchServe
    deployment of the same model."""
    parser = ArgumentParser()
    parser.add_argument('img', help='Image file')
    parser.add_argument('config', help='Config file')
    parser.add_argument('checkpoint', help='Checkpoint file')
    parser.add_argument('model_name', help='The model name in the server')
    parser.add_argument(
        '--inference-addr',
        default='127.0.0.1:8080',
        help='Address and port of the inference server')
    parser.add_argument(
        '--device', default='cuda:0', help='Device used for inference')
    parser.add_argument(
        '--score-thr', type=float, default=0.5, help='bbox score threshold')
    args = parser.parse_args()
    return args
def parse_result(input, model_class):
    """Convert TorchServe JSON detections to mmdet's per-class bbox lists.

    Args:
        input: list of dicts with 'bbox', 'class_name' and 'score' keys
            (name kept for interface compatibility although it shadows the
            builtin).
        model_class: sequence of class names; indices become labels.

    Returns:
        list of per-class (k, 5) arrays [x1, y1, x2, y2, score].
    """
    boxes, labels, scores = [], [], []
    for det in input:
        boxes.append(det['bbox'])
        labels.append(model_class.index(det['class_name']))
        scores.append([det['score']])
    stacked = np.append(boxes, scores, axis=1)
    return bbox2result(stacked, np.array(labels), len(model_class))
def main(args):
    """Run one image through a local model and a TorchServe deployment and
    assert that both produce numerically identical detections."""
    # build the model from a config file and a checkpoint file
    model = init_detector(args.config, args.checkpoint, device=args.device)
    # test a single image
    model_result = inference_detector(model, args.img)
    # Keep only confident local detections.
    # NOTE(review): 0.5 is hard-coded here; presumably it must match the
    # server-side handler threshold for the final allclose check — confirm.
    for i, anchor_set in enumerate(model_result):
        anchor_set = anchor_set[anchor_set[:, 4] >= 0.5]
        model_result[i] = anchor_set
    # show the results
    show_result_pyplot(
        model,
        args.img,
        model_result,
        score_thr=args.score_thr,
        title='pytorch_result')
    url = 'http://' + args.inference_addr + '/predictions/' + args.model_name
    # POST the raw image bytes to the TorchServe prediction endpoint.
    with open(args.img, 'rb') as image:
        response = requests.post(url, image)
    server_result = parse_result(response.json(), model.CLASSES)
    show_result_pyplot(
        model,
        args.img,
        server_result,
        score_thr=args.score_thr,
        title='server_result')
    # Per-class numerical comparison between local and served results.
    for i in range(len(model.CLASSES)):
        assert np.allclose(model_result[i], server_result[i])
# CLI entry point.
if __name__ == '__main__':
    args = parse_args()
    main(args)
| 2,357 | 30.44 | 77 | py |
DSLA-DSLA | DSLA-DSLA/tools/deployment/mmdet2torchserve.py | # Copyright (c) OpenMMLab. All rights reserved.
from argparse import ArgumentParser, Namespace
from pathlib import Path
from tempfile import TemporaryDirectory
import mmcv
try:
from model_archiver.model_packaging import package_model
from model_archiver.model_packaging_utils import ModelExportUtils
except ImportError:
package_model = None
def mmdet2torchserve(
    config_file: str,
    checkpoint_file: str,
    output_folder: str,
    model_name: str,
    model_version: str = '1.0',
    force: bool = False,
):
    """Package an MMDetection model (config + checkpoint) as a TorchServe
    ``.mar`` archive.

    Args:
        config_file: MMDetection config file path.
        checkpoint_file: MMDetection checkpoint file path.
        model_name: Archive name; falls back to the checkpoint stem if falsy.
        model_version: Version string recorded in the archive manifest.
        force: Overwrite an existing ``{model_name}.mar`` in
            ``output_folder``.

    The archive ``{model_name}.mar`` is written into ``output_folder``.
    """
    mmcv.mkdir_or_exist(output_folder)
    cfg = mmcv.Config.fromfile(config_file)
    with TemporaryDirectory() as tmpdir:
        # The archiver wants the config as a file, so dump it next to nothing.
        dumped_config = f'{tmpdir}/config.py'
        cfg.dump(dumped_config)
        archive_opts = {
            'model_file': dumped_config,
            'serialized_file': checkpoint_file,
            'handler': f'{Path(__file__).parent}/mmdet_handler.py',
            'model_name': model_name or Path(checkpoint_file).stem,
            'version': model_version,
            'export_path': output_folder,
            'force': force,
            'requirements_file': None,
            'extra_files': None,
            'runtime': 'python',
            'archive_format': 'default'
        }
        args = Namespace(**archive_opts)
        manifest = ModelExportUtils.generate_manifest_json(args)
        package_model(args, manifest)
def parse_args():
    """Collect the CLI options for the `.mar` conversion."""
    ap = ArgumentParser(
        description='Convert MMDetection models to TorchServe `.mar` format.')
    # Required positional inputs.
    ap.add_argument('config', type=str, help='config file path')
    ap.add_argument('checkpoint', type=str, help='checkpoint file path')
    ap.add_argument(
        '--output-folder',
        type=str,
        required=True,
        help='Folder where `{model_name}.mar` will be created.')
    ap.add_argument(
        '--model-name',
        type=str,
        default=None,
        help='If not None, used for naming the `{model_name}.mar`'
        'file that will be created under `output_folder`.'
        'If None, `{Path(checkpoint_file).stem}` will be used.')
    ap.add_argument(
        '--model-version',
        type=str,
        default='1.0',
        help='Number used for versioning.')
    ap.add_argument(
        '-f',
        '--force',
        action='store_true',
        help='overwrite the existing `{model_name}.mar`')
    return ap.parse_args()
if __name__ == '__main__':
    args = parse_args()
    # `torch-model-archiver` is an optional dependency; fail with guidance.
    # NOTE(review): the two message fragments concatenate without a space
    # ("required.Try:") — cosmetic, but worth fixing at the source.
    if package_model is None:
        raise ImportError('`torch-model-archiver` is required.'
                          'Try: pip install torch-model-archiver')
    mmdet2torchserve(args.config, args.checkpoint, args.output_folder,
                     args.model_name, args.model_version, args.force)
| 3,693 | 32.279279 | 78 | py |
DSLA-DSLA | DSLA-DSLA/tools/deployment/onnx2tensorrt.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import os.path as osp
import warnings
import numpy as np
import onnx
import torch
from mmcv import Config
from mmcv.tensorrt import is_tensorrt_plugin_loaded, onnx2trt, save_trt_engine
from mmdet.core.export import preprocess_example_input
from mmdet.core.export.model_wrappers import (ONNXRuntimeDetector,
TensorRTDetector)
from mmdet.datasets import DATASETS
def get_GiB(x: int):
    """Return the number of bytes in ``x`` gibibytes."""
    bytes_per_gib = 1 << 30
    return x * bytes_per_gib
def onnx2tensorrt(onnx_file,
                  trt_file,
                  input_config,
                  verify=False,
                  show=False,
                  workspace_size=1,
                  verbose=False):
    """Build a TensorRT engine from an ONNX model and optionally verify it.

    Args:
        onnx_file (str): Path of the input ONNX model.
        trt_file (str): Path where the serialized engine is written.
        input_config (dict): Must contain 'min_shape', 'opt_shape',
            'max_shape' and (for verification) the keys consumed by
            ``preprocess_example_input``.
        verify (bool): Also run both ONNXRuntime and TensorRT on an example
            image and compare the outputs numerically.
        show (bool): If True, do not write visualization images to disk.
        workspace_size (int): Max builder workspace in GiB.
        verbose (bool): Use verbose TensorRT logging.

    Note: the verification branch reads the module-level ``CLASSES`` global
    set up in the ``__main__`` block and requires a CUDA device.
    """
    # Imported lazily so the rest of the script works without TensorRT.
    import tensorrt as trt
    onnx_model = onnx.load(onnx_file)
    max_shape = input_config['max_shape']
    min_shape = input_config['min_shape']
    opt_shape = input_config['opt_shape']
    fp16_mode = False
    # create trt engine and wrapper
    opt_shape_dict = {'input': [min_shape, opt_shape, max_shape]}
    max_workspace_size = get_GiB(workspace_size)
    trt_engine = onnx2trt(
        onnx_model,
        opt_shape_dict,
        log_level=trt.Logger.VERBOSE if verbose else trt.Logger.ERROR,
        fp16_mode=fp16_mode,
        max_workspace_size=max_workspace_size)
    save_dir, _ = osp.split(trt_file)
    if save_dir:
        os.makedirs(save_dir, exist_ok=True)
    save_trt_engine(trt_engine, trt_file)
    print(f'Successfully created TensorRT engine: {trt_file}')
    if verify:
        # prepare input
        one_img, one_meta = preprocess_example_input(input_config)
        img_list, img_meta_list = [one_img], [[one_meta]]
        img_list = [_.cuda().contiguous() for _ in img_list]
        # wrap ONNX and TensorRT model
        onnx_model = ONNXRuntimeDetector(onnx_file, CLASSES, device_id=0)
        trt_model = TensorRTDetector(trt_file, CLASSES, device_id=0)
        # inference with wrapped model
        with torch.no_grad():
            onnx_results = onnx_model(
                img_list, img_metas=img_meta_list, return_loss=False)[0]
            trt_results = trt_model(
                img_list, img_metas=img_meta_list, return_loss=False)[0]
        # When --show is given, display interactively; otherwise save PNGs.
        if show:
            out_file_ort, out_file_trt = None, None
        else:
            out_file_ort, out_file_trt = 'show-ort.png', 'show-trt.png'
        show_img = one_meta['show_img']
        score_thr = 0.3
        onnx_model.show_result(
            show_img,
            onnx_results,
            score_thr=score_thr,
            show=True,
            win_name='ONNXRuntime',
            out_file=out_file_ort)
        trt_model.show_result(
            show_img,
            trt_results,
            score_thr=score_thr,
            show=True,
            win_name='TensorRT',
            out_file=out_file_trt)
        with_mask = trt_model.with_masks
        # compare a part of result
        if with_mask:
            compare_pairs = list(zip(onnx_results, trt_results))
        else:
            compare_pairs = [(onnx_results, trt_results)]
        # NOTE(review): message says "Pytorch and ONNX" but this branch
        # compares ONNXRuntime with TensorRT — the wording looks stale.
        err_msg = 'The numerical values are different between Pytorch' + \
            ' and ONNX, but it does not necessarily mean the' + \
            ' exported ONNX model is problematic.'
        # check the numerical value
        for onnx_res, pytorch_res in compare_pairs:
            for o_res, p_res in zip(onnx_res, pytorch_res):
                np.testing.assert_allclose(
                    o_res, p_res, rtol=1e-03, atol=1e-05, err_msg=err_msg)
        print('The numerical values are the same between Pytorch and ONNX')
def parse_normalize_cfg(test_pipeline):
    """Extract the single ``Normalize`` transform config from a test
    pipeline.

    Looks at the first pipeline step that carries a ``transforms`` list and
    requires exactly one ``Normalize`` entry inside it.
    """
    transforms = next(
        (step['transforms'] for step in test_pipeline if 'transforms' in step),
        None)
    assert transforms is not None, 'Failed to find `transforms`'
    norm_cfgs = [t for t in transforms if t['type'] == 'Normalize']
    assert len(norm_cfgs) == 1, '`norm_config` should only have one'
    return norm_cfgs[0]
def parse_args():
    """Collect the CLI options for the ONNX -> TensorRT conversion."""
    ap = argparse.ArgumentParser(
        description='Convert MMDetection models from ONNX to TensorRT')
    # Required positional inputs.
    ap.add_argument('config', help='test config file path')
    ap.add_argument('model', help='Filename of input ONNX model')
    ap.add_argument(
        '--trt-file',
        type=str,
        default='tmp.trt',
        help='Filename of output TensorRT engine')
    ap.add_argument(
        '--input-img', type=str, default='', help='Image for test')
    ap.add_argument(
        '--show', action='store_true', help='Whether to show output results')
    ap.add_argument(
        '--dataset',
        type=str,
        default='coco',
        help='Dataset name. This argument is deprecated and will be \
            removed in future releases.')
    ap.add_argument(
        '--verify',
        action='store_true',
        help='Verify the outputs of ONNXRuntime and TensorRT')
    ap.add_argument(
        '--verbose',
        action='store_true',
        help='Whether to verbose logging messages while creating \
            TensorRT engine. Defaults to False.')
    ap.add_argument(
        '--to-rgb',
        action='store_false',
        help='Feed model with RGB or BGR image. Default is RGB. This \
            argument is deprecated and will be removed in future releases.')
    ap.add_argument(
        '--shape',
        type=int,
        nargs='+',
        default=[400, 600],
        help='Input size of the model')
    ap.add_argument(
        '--mean',
        type=float,
        nargs='+',
        default=[123.675, 116.28, 103.53],
        help='Mean value used for preprocess input data. This argument \
            is deprecated and will be removed in future releases.')
    ap.add_argument(
        '--std',
        type=float,
        nargs='+',
        default=[58.395, 57.12, 57.375],
        help='Variance value used for preprocess input data. \
            This argument is deprecated and will be removed in future releases.')
    ap.add_argument(
        '--min-shape',
        type=int,
        nargs='+',
        default=None,
        help='Minimum input size of the model in TensorRT')
    ap.add_argument(
        '--max-shape',
        type=int,
        nargs='+',
        default=None,
        help='Maximum input size of the model in TensorRT')
    ap.add_argument(
        '--workspace-size',
        type=int,
        default=1,
        help='Max workspace size in GiB')
    return ap.parse_args()
# Script entry: resolve shapes/classes from the config, then build the engine.
if __name__ == '__main__':
    assert is_tensorrt_plugin_loaded(), 'TensorRT plugin should be compiled.'
    args = parse_args()
    warnings.warn(
        'Arguments like `--to-rgb`, `--mean`, `--std`, `--dataset` would be \
        parsed directly from config file and are deprecated and will be \
        removed in future releases.')
    if not args.input_img:
        args.input_img = osp.join(osp.dirname(__file__), '../demo/demo.jpg')
    cfg = Config.fromfile(args.config)
    # Normalize a 1- or 2-element CLI size into an NCHW shape tuple.
    # NOTE(review): the elif tests `len(args.shape)` instead of `len(shape)`;
    # for --min-shape/--max-shape with a different length than --shape this
    # looks wrong — confirm and fix upstream.
    def parse_shape(shape):
        if len(shape) == 1:
            shape = (1, 3, shape[0], shape[0])
        elif len(args.shape) == 2:
            shape = (1, 3) + tuple(shape)
        else:
            raise ValueError('invalid input shape')
        return shape
    if args.shape:
        input_shape = parse_shape(args.shape)
    else:
        # Fall back to the test pipeline's img_scale (w, h) -> (1, 3, h, w).
        img_scale = cfg.test_pipeline[1]['img_scale']
        input_shape = (1, 3, img_scale[1], img_scale[0])
    if not args.max_shape:
        max_shape = input_shape
    else:
        max_shape = parse_shape(args.max_shape)
    if not args.min_shape:
        min_shape = input_shape
    else:
        min_shape = parse_shape(args.min_shape)
    dataset = DATASETS.get(cfg.data.test['type'])
    assert (dataset is not None)
    # Module-level global consumed by onnx2tensorrt()'s verification branch.
    CLASSES = dataset.CLASSES
    normalize_cfg = parse_normalize_cfg(cfg.test_pipeline)
    input_config = {
        'min_shape': min_shape,
        'opt_shape': input_shape,
        'max_shape': max_shape,
        'input_shape': input_shape,
        'input_path': args.input_img,
        'normalize_cfg': normalize_cfg
    }
    # Create TensorRT engine
    onnx2tensorrt(
        args.model,
        args.trt_file,
        input_config,
        verify=args.verify,
        show=args.show,
        workspace_size=args.workspace_size,
        verbose=args.verbose)
| 8,516 | 32.4 | 78 | py |
DSLA-DSLA | DSLA-DSLA/tools/deployment/mmdet_handler.py | # Copyright (c) OpenMMLab. All rights reserved.
import base64
import os
import mmcv
import torch
from ts.torch_handler.base_handler import BaseHandler
from mmdet.apis import inference_detector, init_detector
class MMdetHandler(BaseHandler):
    """TorchServe handler that wraps an MMDetection detector.

    Loads the model from the archive's `config.py` + serialized checkpoint,
    decodes request images, runs inference, and emits a JSON-friendly list
    of detections per image.
    """
    # Minimum confidence for a detection to be included in the response.
    threshold = 0.5
    def initialize(self, context):
        """Load the detector from the TorchServe model directory."""
        properties = context.system_properties
        self.map_location = 'cuda' if torch.cuda.is_available() else 'cpu'
        # NOTE(review): the conditional expression binds as
        # torch.device((... if cuda else ...)); the string concatenation is
        # only applied on the CUDA branch — confirm this matches intent.
        self.device = torch.device(self.map_location + ':' +
                                   str(properties.get('gpu_id')) if torch.cuda.
                                   is_available() else self.map_location)
        self.manifest = context.manifest
        model_dir = properties.get('model_dir')
        serialized_file = self.manifest['model']['serializedFile']
        checkpoint = os.path.join(model_dir, serialized_file)
        # The config is archived under a fixed name by mmdet2torchserve.
        self.config_file = os.path.join(model_dir, 'config.py')
        self.model = init_detector(self.config_file, checkpoint, self.device)
        self.initialized = True
    def preprocess(self, data):
        """Decode request payloads (raw bytes or base64 strings) to images."""
        images = []
        for row in data:
            image = row.get('data') or row.get('body')
            if isinstance(image, str):
                # Clients may send the image as a base64 string.
                image = base64.b64decode(image)
            image = mmcv.imfrombytes(image)
            images.append(image)
        return images
    def inference(self, data, *args, **kwargs):
        """Run the detector on the decoded images."""
        results = inference_detector(self.model, data)
        return results
    def postprocess(self, data):
        """Convert raw mmdet results to a per-image list of detection dicts."""
        # Format output following the example ObjectDetectionHandler format
        output = []
        for image_index, image_result in enumerate(data):
            output.append([])
            # Mask models return (bbox_result, segm_result) tuples.
            if isinstance(image_result, tuple):
                bbox_result, segm_result = image_result
                if isinstance(segm_result, tuple):
                    segm_result = segm_result[0]  # ms rcnn
            else:
                bbox_result, segm_result = image_result, None
            for class_index, class_result in enumerate(bbox_result):
                class_name = self.model.CLASSES[class_index]
                for bbox in class_result:
                    # Each row is [x1, y1, x2, y2, score].
                    bbox_coords = bbox[:-1].tolist()
                    score = float(bbox[-1])
                    if score >= self.threshold:
                        output[image_index].append({
                            'class_name': class_name,
                            'bbox': bbox_coords,
                            'score': score
                        })
        return output
| 2,560 | 34.569444 | 79 | py |
DSLA-DSLA | DSLA-DSLA/tools/deployment/pytorch2onnx.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os.path as osp
import warnings
from functools import partial
import numpy as np
import onnx
import torch
from mmcv import Config, DictAction
from mmdet.core.export import build_model_from_cfg, preprocess_example_input
from mmdet.core.export.model_wrappers import ONNXRuntimeDetector
def pytorch2onnx(model,
                 input_img,
                 input_shape,
                 normalize_cfg,
                 opset_version=11,
                 show=False,
                 output_file='tmp.onnx',
                 verify=False,
                 test_img=None,
                 do_simplify=False,
                 dynamic_export=None,
                 skip_postprocess=False):
    """Export an MMDetection model to ONNX and optionally verify the export.

    Args:
        model: Detector whose ``forward`` supports mmdet's test-mode call.
        input_img (str): Image used to build the example input.
        input_shape (tuple): NCHW shape of the example input.
        normalize_cfg (dict): Normalization config for preprocessing.
        opset_version (int): ONNX opset to export with.
        show (bool): Verbose export / show visualizations interactively.
        output_file (str): Destination ONNX file.
        verify (bool): Compare PyTorch and ONNXRuntime outputs numerically.
        test_img (str | None): Optional separate image for verification;
            falls back to ``input_img``.
        do_simplify (bool): Run onnx-simplifier on the exported model.
        dynamic_export (bool | None): Export with dynamic batch/size axes.
        skip_postprocess (bool): Export ``forward_dummy`` without any
            post-processing (experimental).
    """
    input_config = {
        'input_shape': input_shape,
        'input_path': input_img,
        'normalize_cfg': normalize_cfg
    }
    # prepare input
    one_img, one_meta = preprocess_example_input(input_config)
    img_list, img_meta_list = [one_img], [[one_meta]]
    if skip_postprocess:
        warnings.warn('Not all models support export onnx without post '
                      'process, especially two stage detectors!')
        model.forward = model.forward_dummy
        torch.onnx.export(
            model,
            one_img,
            output_file,
            input_names=['input'],
            export_params=True,
            keep_initializers_as_inputs=True,
            do_constant_folding=True,
            verbose=show,
            opset_version=opset_version)
        print(f'Successfully exported ONNX model without '
              f'post process: {output_file}')
        return
    # replace original forward function so tracing runs the test-mode path
    origin_forward = model.forward
    model.forward = partial(
        model.forward,
        img_metas=img_meta_list,
        return_loss=False,
        rescale=False)
    output_names = ['dets', 'labels']
    if model.with_mask:
        output_names.append('masks')
    input_name = 'input'
    dynamic_axes = None
    if dynamic_export:
        dynamic_axes = {
            input_name: {
                0: 'batch',
                2: 'height',
                3: 'width'
            },
            'dets': {
                0: 'batch',
                1: 'num_dets',
            },
            'labels': {
                0: 'batch',
                1: 'num_dets',
            },
        }
        if model.with_mask:
            dynamic_axes['masks'] = {0: 'batch', 1: 'num_dets'}
    torch.onnx.export(
        model,
        img_list,
        output_file,
        input_names=[input_name],
        output_names=output_names,
        export_params=True,
        keep_initializers_as_inputs=True,
        do_constant_folding=True,
        verbose=show,
        opset_version=opset_version,
        dynamic_axes=dynamic_axes)
    # restore the original forward so the model stays usable for verification
    model.forward = origin_forward
    # get the custom op path
    ort_custom_op_path = ''
    try:
        from mmcv.ops import get_onnxruntime_op_path
        ort_custom_op_path = get_onnxruntime_op_path()
    except (ImportError, ModuleNotFoundError):
        warnings.warn('If input model has custom op from mmcv, \
            you may have to build mmcv with ONNXRuntime from source.')
    if do_simplify:
        import onnxsim
        from mmdet import digit_version
        min_required_version = '0.3.0'
        assert digit_version(onnxsim.__version__) >= digit_version(
            min_required_version
        ), f'Requires to install onnx-simplify>={min_required_version}'
        input_dic = {'input': img_list[0].detach().cpu().numpy()}
        model_opt, check_ok = onnxsim.simplify(
            output_file,
            input_data=input_dic,
            custom_lib=ort_custom_op_path,
            dynamic_input_shape=dynamic_export)
        if check_ok:
            onnx.save(model_opt, output_file)
            print(f'Successfully simplified ONNX model: {output_file}')
        else:
            warnings.warn('Failed to simplify ONNX model.')
    print(f'Successfully exported ONNX model: {output_file}')
    if verify:
        # check by onnx
        onnx_model = onnx.load(output_file)
        onnx.checker.check_model(onnx_model)
        # wrap onnx model
        onnx_model = ONNXRuntimeDetector(output_file, model.CLASSES, 0)
        if dynamic_export:
            # scale up to test dynamic shape
            h, w = [int((_ * 1.5) // 32 * 32) for _ in input_shape[2:]]
            h, w = min(1344, h), min(1344, w)
            input_config['input_shape'] = (1, 3, h, w)
        # FIX: previously `test_img` was accepted but never used, so
        # `--test-img` had no effect; verify on it when provided.
        if test_img is None:
            input_config['input_path'] = input_img
        else:
            input_config['input_path'] = test_img
        # prepare input once again
        one_img, one_meta = preprocess_example_input(input_config)
        img_list, img_meta_list = [one_img], [[one_meta]]
        # get pytorch output
        with torch.no_grad():
            pytorch_results = model(
                img_list,
                img_metas=img_meta_list,
                return_loss=False,
                rescale=True)[0]
        img_list = [_.cuda().contiguous() for _ in img_list]
        if dynamic_export:
            # Duplicate (flipped) input to exercise the dynamic batch axis.
            img_list = img_list + [_.flip(-1).contiguous() for _ in img_list]
            img_meta_list = img_meta_list * 2
        # get onnx output
        onnx_results = onnx_model(
            img_list, img_metas=img_meta_list, return_loss=False)[0]
        # visualize predictions
        score_thr = 0.3
        if show:
            out_file_ort, out_file_pt = None, None
        else:
            out_file_ort, out_file_pt = 'show-ort.png', 'show-pt.png'
        show_img = one_meta['show_img']
        model.show_result(
            show_img,
            pytorch_results,
            score_thr=score_thr,
            show=True,
            win_name='PyTorch',
            out_file=out_file_pt)
        onnx_model.show_result(
            show_img,
            onnx_results,
            score_thr=score_thr,
            show=True,
            win_name='ONNXRuntime',
            out_file=out_file_ort)
        # compare a part of result
        if model.with_mask:
            compare_pairs = list(zip(onnx_results, pytorch_results))
        else:
            compare_pairs = [(onnx_results, pytorch_results)]
        err_msg = 'The numerical values are different between Pytorch' + \
            ' and ONNX, but it does not necessarily mean the' + \
            ' exported ONNX model is problematic.'
        # check the numerical value
        for onnx_res, pytorch_res in compare_pairs:
            for o_res, p_res in zip(onnx_res, pytorch_res):
                np.testing.assert_allclose(
                    o_res, p_res, rtol=1e-03, atol=1e-05, err_msg=err_msg)
        print('The numerical values are the same between Pytorch and ONNX')
def parse_normalize_cfg(test_pipeline):
    """Return the single ``Normalize`` transform config of a test pipeline.

    Uses the first pipeline step carrying a ``transforms`` list; exactly one
    ``Normalize`` entry must be present inside it.
    """
    transforms = next(
        (step['transforms'] for step in test_pipeline if 'transforms' in step),
        None)
    assert transforms is not None, 'Failed to find `transforms`'
    norm_cfgs = [t for t in transforms if t['type'] == 'Normalize']
    assert len(norm_cfgs) == 1, '`norm_config` should only have one'
    return norm_cfgs[0]
def parse_args():
    """Collect the CLI options for the PyTorch -> ONNX export."""
    ap = argparse.ArgumentParser(
        description='Convert MMDetection models to ONNX')
    # Required positional inputs.
    ap.add_argument('config', help='test config file path')
    ap.add_argument('checkpoint', help='checkpoint file')
    ap.add_argument('--input-img', type=str, help='Images for input')
    ap.add_argument(
        '--show',
        action='store_true',
        help='Show onnx graph and detection outputs')
    ap.add_argument('--output-file', type=str, default='tmp.onnx')
    ap.add_argument('--opset-version', type=int, default=11)
    ap.add_argument(
        '--test-img', type=str, default=None, help='Images for test')
    ap.add_argument(
        '--dataset',
        type=str,
        default='coco',
        help='Dataset name. This argument is deprecated and will be removed \
            in future releases.')
    ap.add_argument(
        '--verify',
        action='store_true',
        help='verify the onnx model output against pytorch output')
    ap.add_argument(
        '--simplify',
        action='store_true',
        help='Whether to simplify onnx model.')
    ap.add_argument(
        '--shape',
        type=int,
        nargs='+',
        default=[800, 1216],
        help='input image size')
    ap.add_argument(
        '--mean',
        type=float,
        nargs='+',
        default=[123.675, 116.28, 103.53],
        help='mean value used for preprocess input data.This argument \
            is deprecated and will be removed in future releases.')
    ap.add_argument(
        '--std',
        type=float,
        nargs='+',
        default=[58.395, 57.12, 57.375],
        help='variance value used for preprocess input data. '
        'This argument is deprecated and will be removed in future releases.')
    ap.add_argument(
        '--cfg-options',
        nargs='+',
        action=DictAction,
        help='Override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file. If the value to '
        'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
        'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
        'Note that the quotation marks are necessary and that no white space '
        'is allowed.')
    ap.add_argument(
        '--dynamic-export',
        action='store_true',
        help='Whether to export onnx with dynamic axis.')
    ap.add_argument(
        '--skip-postprocess',
        action='store_true',
        help='Whether to export model without post process. Experimental '
        'option. We do not guarantee the correctness of the exported '
        'model.')
    return ap.parse_args()
# Script entry: resolve input shape and model, then export to ONNX.
if __name__ == '__main__':
    args = parse_args()
    warnings.warn('Arguments like `--mean`, `--std`, `--dataset` would be \
        parsed directly from config file and are deprecated and \
        will be removed in future releases.')
    assert args.opset_version == 11, 'MMDet only support opset 11 now'
    try:
        from mmcv.onnx.symbolic import register_extra_symbolics
    except ModuleNotFoundError:
        raise NotImplementedError('please update mmcv to version>=v1.0.4')
    register_extra_symbolics(args.opset_version)
    cfg = Config.fromfile(args.config)
    if args.cfg_options is not None:
        cfg.merge_from_dict(args.cfg_options)
    # Normalize --shape (1 or 2 ints) into NCHW; fall back to the config's
    # img_scale (w, h) -> (1, 3, h, w).
    if args.shape is None:
        img_scale = cfg.test_pipeline[1]['img_scale']
        input_shape = (1, 3, img_scale[1], img_scale[0])
    elif len(args.shape) == 1:
        input_shape = (1, 3, args.shape[0], args.shape[0])
    elif len(args.shape) == 2:
        input_shape = (1, 3) + tuple(args.shape)
    else:
        raise ValueError('invalid input shape')
    # build the model and load checkpoint
    model = build_model_from_cfg(args.config, args.checkpoint,
                                 args.cfg_options)
    if not args.input_img:
        args.input_img = osp.join(osp.dirname(__file__), '../../demo/demo.jpg')
    normalize_cfg = parse_normalize_cfg(cfg.test_pipeline)
    # convert model to onnx file
    pytorch2onnx(
        model,
        args.input_img,
        input_shape,
        normalize_cfg,
        opset_version=args.opset_version,
        show=args.show,
        output_file=args.output_file,
        verify=args.verify,
        test_img=args.test_img,
        do_simplify=args.simplify,
        dynamic_export=args.dynamic_export,
        skip_postprocess=args.skip_postprocess)
| 11,781 | 33.052023 | 79 | py |
DSLA-DSLA | DSLA-DSLA/tools/model_converters/selfsup2mmdet.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
from collections import OrderedDict
import torch
def moco_convert(src, dst):
    """Translate a MoCo pretrained checkpoint into mmdet backbone naming.

    Only ``module.encoder_q.*`` keys are kept; the prefix is stripped and
    the remaining state dict is saved under ``state_dict`` in *dst*.
    """
    prefix = 'module.encoder_q.'
    blobs = torch.load(src)['state_dict']
    converted = OrderedDict()
    for name, tensor in blobs.items():
        # drop momentum encoder / queue entries
        if not name.startswith(prefix):
            continue
        stripped = name.replace(prefix, '')
        converted[stripped] = tensor
        print(name, '->', stripped)
    torch.save({'state_dict': converted}, dst)
def main():
    """CLI entry: dispatch checkpoint conversion by the --selfsup flavour."""
    ap = argparse.ArgumentParser(description='Convert model keys')
    ap.add_argument('src', help='src detectron model path')
    ap.add_argument('dst', help='save path')
    ap.add_argument(
        '--selfsup', type=str, choices=['moco', 'swav'], help='save path')
    opts = ap.parse_args()
    if opts.selfsup == 'moco':
        moco_convert(opts.src, opts.dst)
    elif opts.selfsup == 'swav':
        # SWAV checkpoints already use compatible key names.
        print('SWAV does not need to convert the keys')
# CLI entry point.
if __name__ == '__main__':
    main()
| 1,243 | 27.930233 | 74 | py |
DSLA-DSLA | DSLA-DSLA/tools/model_converters/publish_model.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import hashlib
import os
import subprocess
import torch
def parse_args():
    """Read the input/output checkpoint paths from the command line."""
    ap = argparse.ArgumentParser(
        description='Process a checkpoint to be published')
    ap.add_argument('in_file', help='input checkpoint filename')
    ap.add_argument('out_file', help='output checkpoint filename')
    return ap.parse_args()
def process_checkpoint(in_file, out_file):
    """Strip training-only state from a checkpoint and publish it.

    The optimizer state is dropped to shrink the file, the checkpoint is
    saved with the legacy (non-zipfile) serialization for wider
    compatibility, and the result is renamed to
    ``<out_file stem>-<sha256 prefix>.pth``.

    Args:
        in_file (str): Path of the checkpoint to publish.
        out_file (str): Intermediate output path; the published file gets a
            ``-<sha256[:8]>`` suffix inserted before ``.pth``.
    """
    checkpoint = torch.load(in_file, map_location='cpu')
    # remove optimizer for smaller file size
    if 'optimizer' in checkpoint:
        del checkpoint['optimizer']
    # if it is necessary to remove some sensitive data in checkpoint['meta'],
    # add the code here.
    # FIX: compare parsed version components — the old string comparison
    # ('1.10' < '1.6' lexicographically) misclassified torch >= 1.10.
    major, minor = (int(part) for part in torch.__version__.split('.')[:2])
    if (major, minor) >= (1, 6):
        torch.save(checkpoint, out_file, _use_new_zipfile_serialization=False)
    else:
        torch.save(checkpoint, out_file)
    # FIX: compute the digest in-process instead of shelling out to
    # `sha256sum`, which is unavailable on some platforms.
    with open(out_file, 'rb') as f:
        sha = hashlib.sha256(f.read()).hexdigest()
    if out_file.endswith('.pth'):
        out_file_name = out_file[:-4]
    else:
        out_file_name = out_file
    final_file = out_file_name + f'-{sha[:8]}.pth'
    # FIX: os.rename is synchronous and portable, unlike the previous
    # fire-and-forget `subprocess.Popen(['mv', ...])`.
    os.rename(out_file, final_file)
def main():
    """Parse the CLI arguments and publish the checkpoint."""
    args = parse_args()
    process_checkpoint(args.in_file, args.out_file)
if __name__ == '__main__':
    main()
| 1,301 | 28.590909 | 78 | py |
DSLA-DSLA | DSLA-DSLA/tools/model_converters/regnet2mmdet.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
from collections import OrderedDict
import torch
def convert_stem(model_key, model_weight, state_dict, converted_names):
    """Map a pycls stem parameter name onto mmdet's ResNet stem layout."""
    mapped = model_key.replace('stem.conv', 'conv1').replace('stem.bn', 'bn1')
    state_dict[mapped] = model_weight
    converted_names.add(model_key)
    print(f'Convert {model_key} to {mapped}')
def convert_head(model_key, model_weight, state_dict, converted_names):
    """Map the pycls classifier head parameter onto mmdet's ``fc`` naming."""
    mapped = model_key.replace('head.fc', 'fc')
    state_dict[mapped] = model_weight
    converted_names.add(model_key)
    print(f'Convert {model_key} to {mapped}')
def convert_reslayer(model_key, model_weight, state_dict, converted_names):
    """Translate one pycls residual-stage parameter name to mmdet style.

    pycls keys look like ``s<stage>.b<block>.<module>...``; mmdet uses
    ``layer<stage>.<block-1>.<conv/bn/downsample>...``.
    """
    parts = model_key.split('.')
    stage, block, module = parts[:3]
    block_id = int(block[1:])
    layer_name = f'layer{int(stage[1:])}'
    block_name = f'{block_id - 1}'
    if block_id == 1 and module == 'bn':
        # batch norm of the downsample (shortcut) branch
        new_key = f'{layer_name}.{block_name}.downsample.1.{parts[-1]}'
    elif block_id == 1 and module == 'proj':
        # projection conv of the downsample (shortcut) branch
        new_key = f'{layer_name}.{block_name}.downsample.0.{parts[-1]}'
    elif module == 'f':
        # residual function branch: a/b/c convs and their batch norms
        if parts[3] == 'a':
            module_name = 'conv1'
        elif parts[3] == 'a_bn':
            module_name = 'bn1'
        elif parts[3] == 'b':
            module_name = 'conv2'
        elif parts[3] == 'b_bn':
            module_name = 'bn2'
        elif parts[3] == 'c':
            module_name = 'conv3'
        elif parts[3] == 'c_bn':
            module_name = 'bn3'
        new_key = f'{layer_name}.{block_name}.{module_name}.{parts[-1]}'
    else:
        raise ValueError(f'Unsupported conversion of key {model_key}')
    print(f'Convert {model_key} to {new_key}')
    state_dict[new_key] = model_weight
    converted_names.add(model_key)
def convert(src, dst):
    """Convert keys in pycls pretrained RegNet models to mmdet style.

    Loads the pycls ``model_state`` from *src*, remaps stem/head/stage
    parameter names, reports anything left unconverted, and saves the
    result under ``state_dict`` in *dst*.
    """
    blobs = torch.load(src)['model_state']
    state_dict = OrderedDict()
    converted_names = set()
    for key, weight in blobs.items():
        if 'stem' in key:
            convert_stem(key, weight, state_dict, converted_names)
        elif 'head' in key:
            convert_head(key, weight, state_dict, converted_names)
        elif key.startswith('s'):
            convert_reslayer(key, weight, state_dict, converted_names)
    # report every parameter that was left behind
    for key in blobs:
        if key not in converted_names:
            print(f'not converted: {key}')
    torch.save({'state_dict': state_dict}, dst)
def main():
    """Parse CLI arguments and run the pycls -> mmdet key conversion."""
    parser = argparse.ArgumentParser(description='Convert model keys')
    parser.add_argument('src', help='src detectron model path')
    parser.add_argument('dst', help='save path')
    args = parser.parse_args()
    convert(args.src, args.dst)
if __name__ == '__main__':
    main()
| 3,063 | 32.67033 | 77 | py |
DSLA-DSLA | DSLA-DSLA/tools/model_converters/upgrade_model_version.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import re
import tempfile
from collections import OrderedDict
import torch
from mmcv import Config
def is_head(key):
    """Return True if *key* belongs to one of the detector head modules."""
    head_prefixes = ('bbox_head', 'mask_head', 'semantic_head', 'grid_head',
                     'mask_iou_head')
    return key.startswith(head_prefixes)
def parse_config(config_strings):
    """Infer detector characteristics from a dumped config string.

    Returns:
        tuple: ``(is_two_stage, is_ssd, is_retina, reg_cls_agnostic)``.
    """
    temp_file = tempfile.NamedTemporaryFile()
    # Config.fromfile requires a real `.py` path, so write next to the
    # temp file. NOTE(review): this sibling `.py` file is never deleted.
    config_path = f'{temp_file.name}.py'
    with open(config_path, 'w') as f:
        f.write(config_strings)
    config = Config.fromfile(config_path)
    is_two_stage = True
    is_ssd = False
    is_retina = False
    reg_cls_agnostic = False
    if 'rpn_head' not in config.model:
        is_two_stage = False
    # check whether it is SSD
    if config.model.bbox_head.type == 'SSDHead':
        is_ssd = True
    elif config.model.bbox_head.type == 'RetinaHead':
        is_retina = True
    # A list of bbox heads indicates a cascade-style (class-agnostic) setup
    # in the legacy v1 config layout this tool upgrades from.
    elif isinstance(config.model['bbox_head'], list):
        reg_cls_agnostic = True
    elif 'reg_class_agnostic' in config.model.bbox_head:
        reg_cls_agnostic = config.model.bbox_head \
            .reg_class_agnostic
    temp_file.close()
    return is_two_stage, is_ssd, is_retina, reg_cls_agnostic
def reorder_cls_channel(val, num_classes=81):
    """Move the background class channel from position 0 to the end.

    Handles bias vectors, ``conv_cls`` weights (channels are a multiple of
    ``num_classes`` per anchor) and ``fc_cls`` weights (one row per class);
    anything else (class-agnostic / rpn / retina cls) is returned as-is.
    """
    if val.dim() == 1:
        # bias: rotate the leading (background) entry to the end
        return torch.cat((val[1:], val[:1]), dim=0)
    out_channels, in_channels = val.shape[:2]
    if out_channels != num_classes and out_channels % num_classes == 0:
        # conv_cls for softmax output: rotate within each anchor group
        grouped = val.reshape(-1, num_classes, in_channels, *val.shape[2:])
        grouped = torch.cat((grouped[:, 1:], grouped[:, :1]), dim=1)
        return grouped.reshape(val.size())
    if out_channels == num_classes:
        # fc_cls: one output row per class
        return torch.cat((val[1:], val[:1]), dim=0)
    # agnostic | retina_cls | rpn_cls: unchanged
    return val
def truncate_cls_channel(val, num_classes=81):
    """Drop the background channel from class-aware mask weights.

    Class-agnostic tensors (channel count not a multiple of ``num_classes``)
    are returned untouched.
    """
    if val.dim() == 1:
        # bias: keep only the foreground entries
        return val[:num_classes - 1] if val.size(0) % num_classes == 0 else val
    out_channels, in_channels = val.shape[:2]
    if out_channels % num_classes != 0:
        # agnostic weights stay as they are
        return val
    # conv_logits: strip the leading background slice per class group
    per_class = val.reshape(num_classes, in_channels, *val.shape[2:])[1:]
    return per_class.reshape(-1, *val.shape[1:])
def truncate_reg_channel(val, num_classes=81):
    """Drop one class's regression channels from fc_reg/rpn_reg tensors.

    Tensors whose leading size is not a multiple of ``num_classes`` are
    treated as class-agnostic and returned unchanged. Note the historical
    asymmetry preserved here: biases drop the *last* class group while
    weights drop the *first*.
    """
    if val.dim() == 1:
        # fc_reg | rpn_reg bias
        if val.size(0) % num_classes != 0:
            return val
        kept = val.reshape(num_classes, -1)[:num_classes - 1]
        return kept.reshape(-1)
    out_channels, in_channels = val.shape[:2]
    if out_channels % num_classes != 0:
        # agnostic
        return val
    # fc_reg | rpn_reg weight
    kept = val.reshape(num_classes, -1, in_channels, *val.shape[2:])[1:]
    return kept.reshape(-1, *val.shape[1:])
def convert(in_file, out_file, num_classes):
    """Convert keys in checkpoints.

    There can be some breaking changes during the development of mmdetection,
    and this tool is used for upgrading checkpoints trained with old versions
    to the latest one.

    Args:
        in_file (str): Path of the checkpoint to upgrade.
        out_file (str): Path where the upgraded checkpoint is saved.
        num_classes (int): Class count (including background) of the
            original model; drives channel reordering/truncation.
    """
    checkpoint = torch.load(in_file)
    in_state_dict = checkpoint.pop('state_dict')
    out_state_dict = OrderedDict()
    meta_info = checkpoint['meta']
    # Detector characteristics come from the config embedded in the meta.
    is_two_stage, is_ssd, is_retina, reg_cls_agnostic = parse_config(
        '#' + meta_info['config'])
    # NOTE(review): these lexicographic version comparisons are fragile for
    # multi-digit components (e.g. '0.10.0' vs '0.5.3') — confirm the
    # version strings this tool must support.
    if meta_info['mmdet_version'] <= '0.5.3' and is_retina:
        upgrade_retina = True
    else:
        upgrade_retina = False
    # MMDetection v2.5.0 unifies the class order in RPN
    # if the model is trained in version<v2.5.0
    # The RPN model should be upgraded to be used in version>=2.5.0
    if meta_info['mmdet_version'] < '2.5.0':
        upgrade_rpn = True
    else:
        upgrade_rpn = False
    for key, val in in_state_dict.items():
        new_key = key
        new_val = val
        # Two-stage heads moved under the `roi_head` namespace in v2.
        if is_two_stage and is_head(key):
            new_key = 'roi_head.{}'.format(key)
        # classification
        if upgrade_rpn:
            m = re.search(
                r'(conv_cls|retina_cls|rpn_cls|fc_cls|fcos_cls|'
                r'fovea_cls).(weight|bias)', new_key)
        else:
            m = re.search(
                r'(conv_cls|retina_cls|fc_cls|fcos_cls|'
                r'fovea_cls).(weight|bias)', new_key)
        if m is not None:
            print(f'reorder cls channels of {new_key}')
            new_val = reorder_cls_channel(val, num_classes)
        # regression
        if upgrade_rpn:
            m = re.search(r'(fc_reg).(weight|bias)', new_key)
        else:
            m = re.search(r'(fc_reg|rpn_reg).(weight|bias)', new_key)
        if m is not None and not reg_cls_agnostic:
            print(f'truncate regression channels of {new_key}')
            new_val = truncate_reg_channel(val, num_classes)
        # mask head
        m = re.search(r'(conv_logits).(weight|bias)', new_key)
        if m is not None:
            print(f'truncate mask prediction channels of {new_key}')
            new_val = truncate_cls_channel(val, num_classes)
        m = re.search(r'(cls_convs|reg_convs).\d.(weight|bias)', key)
        # Legacy issues in RetinaNet since V1.x
        # Use ConvModule instead of nn.Conv2d in RetinaNet
        # cls_convs.0.weight -> cls_convs.0.conv.weight
        if m is not None and upgrade_retina:
            param = m.groups()[1]
            new_key = key.replace(param, f'conv.{param}')
            out_state_dict[new_key] = val
            print(f'rename the name of {key} to {new_key}')
            continue
        m = re.search(r'(cls_convs).\d.(weight|bias)', key)
        if m is not None and is_ssd:
            print(f'reorder cls channels of {new_key}')
            new_val = reorder_cls_channel(val, num_classes)
        out_state_dict[new_key] = new_val
    checkpoint['state_dict'] = out_state_dict
    torch.save(checkpoint, out_file)
def main():
    """Command line entry: parse arguments and upgrade the checkpoint."""
    arg_parser = argparse.ArgumentParser(description='Upgrade model version')
    arg_parser.add_argument('in_file', help='input checkpoint file')
    arg_parser.add_argument('out_file', help='output checkpoint file')
    arg_parser.add_argument(
        '--num-classes',
        default=81,
        type=int,
        help='number of classes of the original model')
    parsed = arg_parser.parse_args()
    convert(parsed.in_file, parsed.out_file, parsed.num_classes)


if __name__ == '__main__':
    main()
| 6,848 | 31.459716 | 79 | py |
DSLA-DSLA | DSLA-DSLA/tools/model_converters/upgrade_ssd_version.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import tempfile
from collections import OrderedDict
import torch
from mmcv import Config
def parse_config(config_strings):
    """Parse a config string stored in a checkpoint and verify it is SSD.

    Args:
        config_strings (str): Content of the config stored in the
            checkpoint's meta information.

    Raises:
        AssertionError: If the parsed model's bbox head is not ``SSDHead``.
    """
    import os
    temp_file = tempfile.NamedTemporaryFile()
    # ``Config.fromfile`` needs a real ``.py`` path, so write a sidecar file
    # next to the temporary file.  The original code leaked this sidecar on
    # disk; remove it (and close the temp file) once parsing is done.
    config_path = f'{temp_file.name}.py'
    try:
        with open(config_path, 'w') as f:
            f.write(config_strings)
        config = Config.fromfile(config_path)
    finally:
        temp_file.close()
        if os.path.exists(config_path):
            os.remove(config_path)
    # check whether it is SSD
    if config.model.bbox_head.type != 'SSDHead':
        raise AssertionError('This is not a SSD model.')
def convert(in_file, out_file):
    """Upgrade an old SSD checkpoint to the refactored mmdet layout.

    Legacy checkpoints keep the extra feature layers and the L2-norm layer
    outside the neck; they are remapped here, and the bbox head convs gain
    an extra ``.0`` module level.

    Args:
        in_file (str): Path to the legacy checkpoint.
        out_file (str): Path where the upgraded checkpoint is written.
    """
    checkpoint = torch.load(in_file)
    in_state_dict = checkpoint.pop('state_dict')
    out_state_dict = OrderedDict()
    meta_info = checkpoint['meta']
    # Validate that the stored config really describes an SSD model.
    parse_config('#' + meta_info['config'])
    for key, value in in_state_dict.items():
        if 'extra' in key:
            layer_idx = int(key.split('.')[2])
            new_key = 'neck.extra_layers.{}.{}.conv.'.format(
                layer_idx // 2, layer_idx % 2) + key.split('.')[-1]
        elif 'l2_norm' in key:
            new_key = 'neck.l2_norm.weight'
        elif 'bbox_head' in key:
            # 21 is the length of the legacy key prefix being kept before
            # the inserted '.0' level -- TODO confirm against old key format.
            new_key = key[:21] + '.0' + key[21:]
        else:
            new_key = key
        out_state_dict[new_key] = value
    checkpoint['state_dict'] = out_state_dict
    # Compare parsed (major, minor) tuples: the original naive string
    # comparison treats e.g. '1.10' as older than '1.6' and silently
    # picks the wrong serialization branch on modern torch.
    version_parts = []
    for part in torch.__version__.split('.')[:2]:
        digits = ''.join(ch for ch in part if ch.isdigit())
        version_parts.append(int(digits) if digits else 0)
    if tuple(version_parts) >= (1, 6):
        # Keep the legacy (non-zipfile) format so older torch can load it.
        torch.save(checkpoint, out_file, _use_new_zipfile_serialization=False)
    else:
        torch.save(checkpoint, out_file)
def main():
    """Command line entry: upgrade an SSD checkpoint."""
    cli = argparse.ArgumentParser(description='Upgrade SSD version')
    cli.add_argument('in_file', help='input checkpoint file')
    cli.add_argument('out_file', help='output checkpoint file')
    opts = cli.parse_args()
    convert(opts.in_file, opts.out_file)


if __name__ == '__main__':
    main()
| 1,789 | 29.338983 | 78 | py |
DSLA-DSLA | DSLA-DSLA/tools/model_converters/detectron2pytorch.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
from collections import OrderedDict
import mmcv
import torch
# Stage-wise residual block counts for the supported ResNet depths.
arch_settings = {50: (3, 4, 6, 3), 101: (3, 4, 23, 3)}
def convert_bn(blobs, state_dict, caffe_name, torch_name, converted_names):
    """Map a detectron affine-channel layer onto PyTorch BN parameters.

    Detectron replaces frozen batch norm with an affine channel layer that
    has a scale (``_s``) and shift (``_b``) blob.  These become the BN
    weight and bias; identity running statistics are synthesized.
    """
    scale = torch.from_numpy(blobs[f'{caffe_name}_s'])
    shift = torch.from_numpy(blobs[f'{caffe_name}_b'])
    state_dict[f'{torch_name}.bias'] = shift
    state_dict[f'{torch_name}.weight'] = scale
    # Affine channels carry no statistics, so use mean 0 / variance 1.
    num_channels = scale.size()
    state_dict[f'{torch_name}.running_mean'] = torch.zeros(num_channels)
    state_dict[f'{torch_name}.running_var'] = torch.ones(num_channels)
    converted_names.add(f'{caffe_name}_b')
    converted_names.add(f'{caffe_name}_s')
def convert_conv_fc(blobs, state_dict, caffe_name, torch_name,
                    converted_names):
    """Copy a detectron conv/fc layer's weight (and optional bias) blob."""
    weight_key = f'{caffe_name}_w'
    state_dict[f'{torch_name}.weight'] = torch.from_numpy(blobs[weight_key])
    converted_names.add(weight_key)
    bias_key = f'{caffe_name}_b'
    # Not every layer has a bias blob.
    if bias_key in blobs:
        state_dict[f'{torch_name}.bias'] = torch.from_numpy(blobs[bias_key])
        converted_names.add(bias_key)
def convert(src, dst, depth):
    """Convert keys in detectron pretrained ResNet models to pytorch style.

    Args:
        src (str): Path to the detectron (caffe2) weight file.
        dst (str): Path where the converted pytorch checkpoint is saved.
        depth (int): ResNet depth; must be a key of ``arch_settings``.

    Raises:
        ValueError: If ``depth`` is not 50 or 101.
    """
    # load arch_settings
    if depth not in arch_settings:
        raise ValueError('Only support ResNet-50 and ResNet-101 currently')
    block_nums = arch_settings[depth]
    # load caffe model
    caffe_model = mmcv.load(src, encoding='latin1')
    blobs = caffe_model['blobs'] if 'blobs' in caffe_model else caffe_model
    # convert to pytorch style
    state_dict = OrderedDict()
    converted_names = set()
    # stem: conv1 + its (affine-channel) batch norm
    convert_conv_fc(blobs, state_dict, 'conv1', 'conv1', converted_names)
    convert_bn(blobs, state_dict, 'res_conv1_bn', 'bn1', converted_names)
    for i in range(1, len(block_nums) + 1):
        for j in range(block_nums[i - 1]):
            # first block of each stage has a downsample (branch1) path
            if j == 0:
                convert_conv_fc(blobs, state_dict, f'res{i + 1}_{j}_branch1',
                                f'layer{i}.{j}.downsample.0', converted_names)
                convert_bn(blobs, state_dict, f'res{i + 1}_{j}_branch1_bn',
                           f'layer{i}.{j}.downsample.1', converted_names)
            # branch2a/b/c map onto conv1/2/3 of the bottleneck block
            for k, letter in enumerate(['a', 'b', 'c']):
                convert_conv_fc(blobs, state_dict,
                                f'res{i + 1}_{j}_branch2{letter}',
                                f'layer{i}.{j}.conv{k+1}', converted_names)
                convert_bn(blobs, state_dict,
                           f'res{i + 1}_{j}_branch2{letter}_bn',
                           f'layer{i}.{j}.bn{k + 1}', converted_names)
    # check if all layers are converted
    for key in blobs:
        if key not in converted_names:
            print(f'Not Convert: {key}')
    # save checkpoint
    checkpoint = dict()
    checkpoint['state_dict'] = state_dict
    torch.save(checkpoint, dst)
def main():
    """Command line entry: convert a detectron ResNet to pytorch style."""
    cli = argparse.ArgumentParser(description='Convert model keys')
    cli.add_argument('src', help='src detectron model path')
    cli.add_argument('dst', help='save path')
    cli.add_argument('depth', type=int, help='ResNet model depth')
    opts = cli.parse_args()
    convert(opts.src, opts.dst, opts.depth)


if __name__ == '__main__':
    main()
| 3,578 | 41.607143 | 78 | py |
DSLA-DSLA | DSLA-DSLA/tools/analysis_tools/benchmark.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import copy
import os
import time
import torch
from mmcv import Config, DictAction
from mmcv.cnn import fuse_conv_bn
from mmcv.parallel import MMDistributedDataParallel
from mmcv.runner import init_dist, load_checkpoint, wrap_fp16_model
from mmdet.datasets import (build_dataloader, build_dataset,
replace_ImageToTensor)
from mmdet.models import build_detector
def parse_args():
    """Parse command line arguments for the benchmark script."""
    cli = argparse.ArgumentParser(description='MMDet benchmark a model')
    cli.add_argument('config', help='test config file path')
    cli.add_argument('checkpoint', help='checkpoint file')
    cli.add_argument(
        '--repeat-num',
        default=1,
        type=int,
        help='number of repeat times of measurement for averaging the results')
    cli.add_argument(
        '--max-iter', default=2000, type=int, help='num of max iter')
    cli.add_argument(
        '--log-interval', default=50, type=int, help='interval of logging')
    cli.add_argument(
        '--fuse-conv-bn',
        action='store_true',
        help='Whether to fuse conv and bn, this will slightly increase'
        'the inference speed')
    cli.add_argument(
        '--cfg-options',
        nargs='+',
        action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file. If the value to '
        'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
        'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
        'Note that the quotation marks are necessary and that no white space '
        'is allowed.')
    cli.add_argument(
        '--launcher',
        default='none',
        choices=['none', 'pytorch', 'slurm', 'mpi'],
        help='job launcher')
    cli.add_argument('--local_rank', type=int, default=0)
    parsed = cli.parse_args()
    # Make the launcher's rank visible to libraries that read the env var.
    if 'LOCAL_RANK' not in os.environ:
        os.environ['LOCAL_RANK'] = str(parsed.local_rank)
    return parsed
def measure_inference_speed(cfg, checkpoint, max_iter, log_interval,
                            is_fuse_conv_bn):
    """Run one timed inference pass over the test dataloader and return FPS.

    Args:
        cfg (mmcv.Config): Test config; its model/data fields are modified
            in place, so pass a copy when calling repeatedly.
        checkpoint (str): Path to the checkpoint to load.
        max_iter (int): Stop and report after this many images.
        log_interval (int): Print running FPS every this many iterations.
        is_fuse_conv_bn (bool): Whether to fuse conv and bn layers before
            timing.

    Returns:
        float: Final measured frames per second (0 if never updated).
    """
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True
    # build the dataloader
    samples_per_gpu = cfg.data.test.pop('samples_per_gpu', 1)
    if samples_per_gpu > 1:
        # Replace 'ImageToTensor' to 'DefaultFormatBundle'
        cfg.data.test.pipeline = replace_ImageToTensor(cfg.data.test.pipeline)
    dataset = build_dataset(cfg.data.test)
    data_loader = build_dataloader(
        dataset,
        samples_per_gpu=1,
        # Because multiple processes will occupy additional CPU resources,
        # FPS statistics will be more unstable when workers_per_gpu is not 0.
        # It is reasonable to set workers_per_gpu to 0.
        workers_per_gpu=0,
        dist=True,
        shuffle=False)
    # build the model and load checkpoint
    cfg.model.train_cfg = None
    model = build_detector(cfg.model, test_cfg=cfg.get('test_cfg'))
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        wrap_fp16_model(model)
    load_checkpoint(model, checkpoint, map_location='cpu')
    if is_fuse_conv_bn:
        model = fuse_conv_bn(model)
    model = MMDistributedDataParallel(
        model.cuda(),
        device_ids=[torch.cuda.current_device()],
        broadcast_buffers=False)
    model.eval()
    # the first several iterations may be very slow so skip them
    num_warmup = 5
    pure_inf_time = 0
    fps = 0
    # benchmark with 2000 image and take the average
    for i, data in enumerate(data_loader):
        # synchronize so the timer covers all queued GPU work
        torch.cuda.synchronize()
        start_time = time.perf_counter()
        with torch.no_grad():
            model(return_loss=False, rescale=True, **data)
        torch.cuda.synchronize()
        elapsed = time.perf_counter() - start_time
        if i >= num_warmup:
            pure_inf_time += elapsed
            if (i + 1) % log_interval == 0:
                fps = (i + 1 - num_warmup) / pure_inf_time
                print(
                    f'Done image [{i + 1:<3}/ {max_iter}], '
                    f'fps: {fps:.1f} img / s, '
                    f'times per image: {1000 / fps:.1f} ms / img',
                    flush=True)
        if (i + 1) == max_iter:
            fps = (i + 1 - num_warmup) / pure_inf_time
            print(
                f'Overall fps: {fps:.1f} img / s, '
                f'times per image: {1000 / fps:.1f} ms / img',
                flush=True)
            break
    return fps
def repeat_measure_inference_speed(cfg,
                                   checkpoint,
                                   max_iter,
                                   log_interval,
                                   is_fuse_conv_bn,
                                   repeat_num=1):
    """Measure inference speed ``repeat_num`` times and report the average.

    Returns:
        list[float] | float: The per-run FPS values when ``repeat_num`` > 1,
        otherwise the single measured FPS.
    """
    assert repeat_num >= 1

    measured_fps = []
    for _ in range(repeat_num):
        # Work on a deep copy so one run cannot mutate the shared config.
        measured_fps.append(
            measure_inference_speed(
                copy.deepcopy(cfg), checkpoint, max_iter, log_interval,
                is_fuse_conv_bn))

    if repeat_num == 1:
        return measured_fps[0]

    rounded_fps = [round(fps, 1) for fps in measured_fps]
    per_image_ms = [round(1000 / fps, 1) for fps in measured_fps]
    mean_fps = sum(rounded_fps) / len(rounded_fps)
    mean_ms = sum(per_image_ms) / len(per_image_ms)
    print(
        f'Overall fps: {rounded_fps}[{mean_fps:.1f}] img / s, '
        f'times per image: '
        f'{per_image_ms}[{mean_ms:.1f}] ms / img',
        flush=True)
    return measured_fps
def main():
    """Entry point: benchmark the model under a distributed launcher."""
    parsed = parse_args()

    cfg = Config.fromfile(parsed.config)
    if parsed.cfg_options is not None:
        cfg.merge_from_dict(parsed.cfg_options)

    # Only the distributed code path is implemented; bail out early.
    if parsed.launcher == 'none':
        raise NotImplementedError('Only supports distributed mode')
    init_dist(parsed.launcher, **cfg.dist_params)

    repeat_measure_inference_speed(cfg, parsed.checkpoint, parsed.max_iter,
                                   parsed.log_interval, parsed.fuse_conv_bn,
                                   parsed.repeat_num)


if __name__ == '__main__':
    main()
| 6,416 | 33.132979 | 79 | py |
DSLA-DSLA | DSLA-DSLA/tools/analysis_tools/optimize_anchors.py | # Copyright (c) OpenMMLab. All rights reserved.
"""Optimize anchor settings on a specific dataset.
This script provides two method to optimize YOLO anchors including k-means
anchor cluster and differential evolution. You can use ``--algorithm k-means``
and ``--algorithm differential_evolution`` to switch two method.
Example:
Use k-means anchor cluster::
python tools/analysis_tools/optimize_anchors.py ${CONFIG} \
--algorithm k-means --input-shape ${INPUT_SHAPE [WIDTH HEIGHT]} \
--output-dir ${OUTPUT_DIR}
Use differential evolution to optimize anchors::
python tools/analysis_tools/optimize_anchors.py ${CONFIG} \
--algorithm differential_evolution \
--input-shape ${INPUT_SHAPE [WIDTH HEIGHT]} \
--output-dir ${OUTPUT_DIR}
"""
import argparse
import os.path as osp
import mmcv
import numpy as np
import torch
from mmcv import Config
from scipy.optimize import differential_evolution
from mmdet.core import bbox_cxcywh_to_xyxy, bbox_overlaps, bbox_xyxy_to_cxcywh
from mmdet.datasets import build_dataset
from mmdet.utils import get_root_logger
def parse_args():
    """Parse command line arguments for anchor optimization."""
    ap = argparse.ArgumentParser(description='Optimize anchor parameters.')
    ap.add_argument('config', help='Train config file path.')
    ap.add_argument(
        '--device', help='Device used for calculating.', default='cuda:0')
    ap.add_argument(
        '--input-shape',
        nargs='+',
        type=int,
        default=[608, 608],
        help='input image size')
    ap.add_argument(
        '--algorithm',
        default='differential_evolution',
        help='Algorithm used for anchor optimizing.'
        'Support k-means and differential_evolution for YOLO.')
    ap.add_argument(
        '--iters',
        default=1000,
        type=int,
        help='Maximum iterations for optimizer.')
    ap.add_argument(
        '--output-dir',
        default=None,
        type=str,
        help='Path to save anchor optimize result.')
    return ap.parse_args()
class BaseAnchorOptimizer:
    """Base class for anchor optimizer.

    Args:
        dataset (obj:`Dataset`): Dataset object.
        input_shape (list[int]): Input image shape of the model.
            Format in [width, height].
        logger (obj:`logging.Logger`): The logger for logging.
        device (str, optional): Device used for calculating.
            Default: 'cuda:0'
        out_dir (str, optional): Path to save anchor optimize result.
            Default: None
    """

    def __init__(self,
                 dataset,
                 input_shape,
                 logger,
                 device='cuda:0',
                 out_dir=None):
        self.dataset = dataset
        self.input_shape = input_shape
        self.logger = logger
        self.device = device
        self.out_dir = out_dir
        bbox_whs, img_shapes = self.get_whs_and_shapes()
        # Per-bbox ratio of the image's longer side to the target size;
        # presumably mirrors keep-ratio resizing -- TODO confirm.
        ratios = img_shapes.max(1, keepdims=True) / np.array([input_shape])

        # resize to input shape
        self.bbox_whs = bbox_whs / ratios

    def get_whs_and_shapes(self):
        """Get widths and heights of bboxes and shapes of images.

        Returns:
            tuple[np.ndarray]: Array of bbox shapes and array of image
                shapes with shape (num_bboxes, 2) in [width, height] format.
        """
        self.logger.info('Collecting bboxes from annotation...')
        bbox_whs = []
        img_shapes = []
        prog_bar = mmcv.ProgressBar(len(self.dataset))
        for idx in range(len(self.dataset)):
            ann = self.dataset.get_ann_info(idx)
            data_info = self.dataset.data_infos[idx]
            img_shape = np.array([data_info['width'], data_info['height']])
            gt_bboxes = ann['bboxes']
            for bbox in gt_bboxes:
                wh = bbox[2:4] - bbox[0:2]
                # one image-shape entry per bbox so both arrays stay aligned
                img_shapes.append(img_shape)
                bbox_whs.append(wh)
            prog_bar.update()
        print('\n')
        bbox_whs = np.array(bbox_whs)
        img_shapes = np.array(img_shapes)
        self.logger.info(f'Collected {bbox_whs.shape[0]} bboxes.')
        return bbox_whs, img_shapes

    def get_zero_center_bbox_tensor(self):
        """Get a tensor of bboxes centered at (0, 0).

        Returns:
            Tensor: Tensor of bboxes with shape (num_bboxes, 4)
                in [xmin, ymin, xmax, ymax] format.
        """
        whs = torch.from_numpy(self.bbox_whs).to(
            self.device, dtype=torch.float32)
        bboxes = bbox_cxcywh_to_xyxy(
            torch.cat([torch.zeros_like(whs), whs], dim=1))
        return bboxes

    def optimize(self):
        # Subclasses implement the concrete optimization strategy.
        raise NotImplementedError

    def save_result(self, anchors, path=None):
        """Log the optimized anchors and optionally dump them as json."""
        anchor_results = []
        for w, h in anchors:
            anchor_results.append([round(w), round(h)])
        self.logger.info(f'Anchor optimize result:{anchor_results}')
        if path:
            json_path = osp.join(path, 'anchor_optimize_result.json')
            mmcv.dump(anchor_results, json_path)
            self.logger.info(f'Result saved in {json_path}')
class YOLOKMeansAnchorOptimizer(BaseAnchorOptimizer):
    r"""YOLO anchor optimizer using k-means. Code refer to `AlexeyAB/darknet.
    <https://github.com/AlexeyAB/darknet/blob/master/src/detector.c>`_.

    Args:
        num_anchors (int) : Number of anchors.
        iters (int): Maximum iterations for k-means.
    """

    def __init__(self, num_anchors, iters, **kwargs):
        super(YOLOKMeansAnchorOptimizer, self).__init__(**kwargs)
        self.num_anchors = num_anchors
        self.iters = iters

    def optimize(self):
        """Run k-means clustering and save the resulting anchors."""
        anchors = self.kmeans_anchors()
        self.save_result(anchors, self.out_dir)

    def kmeans_anchors(self):
        """Cluster bbox shapes into ``num_anchors`` anchors via k-means.

        Returns:
            list: Anchor (width, height) pairs sorted by area.
        """
        self.logger.info(
            f'Start cluster {self.num_anchors} YOLO anchors with K-means...')
        bboxes = self.get_zero_center_bbox_tensor()
        # Initialize centers from randomly chosen boxes (non-deterministic).
        cluster_center_idx = torch.randint(
            0, bboxes.shape[0], (self.num_anchors, )).to(self.device)

        assignments = torch.zeros((bboxes.shape[0], )).to(self.device)
        cluster_centers = bboxes[cluster_center_idx]
        if self.num_anchors == 1:
            # A single anchor is just the mean box; no EM loop needed.
            cluster_centers = self.kmeans_maximization(bboxes, assignments,
                                                       cluster_centers)
            anchors = bbox_xyxy_to_cxcywh(cluster_centers)[:, 2:].cpu().numpy()
            anchors = sorted(anchors, key=lambda x: x[0] * x[1])
            return anchors

        prog_bar = mmcv.ProgressBar(self.iters)
        for i in range(self.iters):
            # E-step assigns boxes to centers; convergence means no change.
            converged, assignments = self.kmeans_expectation(
                bboxes, assignments, cluster_centers)
            if converged:
                self.logger.info(f'K-means process has converged at iter {i}.')
                break
            cluster_centers = self.kmeans_maximization(bboxes, assignments,
                                                       cluster_centers)
            prog_bar.update()
        print('\n')
        avg_iou = bbox_overlaps(bboxes,
                                cluster_centers).max(1)[0].mean().item()

        anchors = bbox_xyxy_to_cxcywh(cluster_centers)[:, 2:].cpu().numpy()
        anchors = sorted(anchors, key=lambda x: x[0] * x[1])
        self.logger.info(f'Anchor cluster finish. Average IOU: {avg_iou}')

        return anchors

    def kmeans_maximization(self, bboxes, assignments, centers):
        """Maximization part of EM algorithm(Expectation-Maximization)"""
        new_centers = torch.zeros_like(centers)
        for i in range(centers.shape[0]):
            mask = (assignments == i)
            # empty clusters keep a zero center rather than dividing by zero
            if mask.sum():
                new_centers[i, :] = bboxes[mask].mean(0)
        return new_centers

    def kmeans_expectation(self, bboxes, assignments, centers):
        """Expectation part of EM algorithm(Expectation-Maximization)"""
        ious = bbox_overlaps(bboxes, centers)
        closest = ious.argmax(1)
        converged = (closest == assignments).all()
        return converged, closest
class YOLODEAnchorOptimizer(BaseAnchorOptimizer):
    """YOLO anchor optimizer using differential evolution algorithm.

    Args:
        num_anchors (int) : Number of anchors.
        iters (int): Maximum iterations for k-means.
        strategy (str): The differential evolution strategy to use.
            Should be one of:

            - 'best1bin'
            - 'best1exp'
            - 'rand1exp'
            - 'randtobest1exp'
            - 'currenttobest1exp'
            - 'best2exp'
            - 'rand2exp'
            - 'randtobest1bin'
            - 'currenttobest1bin'
            - 'best2bin'
            - 'rand2bin'
            - 'rand1bin'

            Default: 'best1bin'.
        population_size (int): Total population size of evolution algorithm.
            Default: 15.
        convergence_thr (float): Tolerance for convergence, the
            optimizing stops when ``np.std(pop) <= abs(convergence_thr)
            + convergence_thr * np.abs(np.mean(population_energies))``,
            respectively. Default: 0.0001.
        mutation (tuple[float]): Range of dithering randomly changes the
            mutation constant. Default: (0.5, 1).
        recombination (float): Recombination constant of crossover probability.
            Default: 0.7.
    """

    def __init__(self,
                 num_anchors,
                 iters,
                 strategy='best1bin',
                 population_size=15,
                 convergence_thr=0.0001,
                 mutation=(0.5, 1),
                 recombination=0.7,
                 **kwargs):
        super(YOLODEAnchorOptimizer, self).__init__(**kwargs)
        self.num_anchors = num_anchors
        self.iters = iters
        self.strategy = strategy
        self.population_size = population_size
        self.convergence_thr = convergence_thr
        self.mutation = mutation
        self.recombination = recombination

    def optimize(self):
        """Run differential evolution and save the resulting anchors."""
        anchors = self.differential_evolution()
        self.save_result(anchors, self.out_dir)

    def differential_evolution(self):
        """Minimize ``avg_iou_cost`` over flattened anchor (w, h) pairs.

        Returns:
            list[tuple]: Anchor (width, height) pairs sorted by area.
        """
        bboxes = self.get_zero_center_bbox_tensor()

        # Each anchor contributes a (width, height) pair of search bounds.
        bounds = []
        for i in range(self.num_anchors):
            bounds.extend([(0, self.input_shape[0]), (0, self.input_shape[1])])

        result = differential_evolution(
            func=self.avg_iou_cost,
            bounds=bounds,
            args=(bboxes, ),
            strategy=self.strategy,
            maxiter=self.iters,
            popsize=self.population_size,
            tol=self.convergence_thr,
            mutation=self.mutation,
            recombination=self.recombination,
            updating='immediate',
            disp=True)
        self.logger.info(
            f'Anchor evolution finish. Average IOU: {1 - result.fun}')
        # result.x interleaves widths and heights: [w0, h0, w1, h1, ...]
        anchors = [(w, h) for w, h in zip(result.x[::2], result.x[1::2])]
        anchors = sorted(anchors, key=lambda x: x[0] * x[1])
        return anchors

    @staticmethod
    def avg_iou_cost(anchor_params, bboxes):
        # Cost = 1 - mean best IoU of each gt box with its closest anchor.
        assert len(anchor_params) % 2 == 0
        anchor_whs = torch.tensor(
            [[w, h]
             for w, h in zip(anchor_params[::2], anchor_params[1::2])]).to(
                 bboxes.device, dtype=bboxes.dtype)
        anchor_boxes = bbox_cxcywh_to_xyxy(
            torch.cat([torch.zeros_like(anchor_whs), anchor_whs], dim=1))
        ious = bbox_overlaps(bboxes, anchor_boxes)
        max_ious, _ = ious.max(1)
        cost = 1 - max_ious.mean().item()
        return cost
def main():
    """Entry point: cluster or evolve YOLO anchors for the given config."""
    logger = get_root_logger()
    args = parse_args()
    cfg = Config.fromfile(args.config)

    input_shape = args.input_shape
    assert len(input_shape) == 2

    anchor_type = cfg.model.bbox_head.anchor_generator.type
    assert anchor_type == 'YOLOAnchorGenerator', \
        f'Only support optimize YOLOAnchor, but get {anchor_type}.'

    base_sizes = cfg.model.bbox_head.anchor_generator.base_sizes
    num_anchors = sum(len(sizes) for sizes in base_sizes)

    # Unwrap nested dataset wrappers to reach the underlying dataset config.
    train_data_cfg = cfg.data.train
    while 'dataset' in train_data_cfg:
        train_data_cfg = train_data_cfg['dataset']
    dataset = build_dataset(train_data_cfg)

    # Both optimizers share the same constructor keywords.
    common_kwargs = dict(
        dataset=dataset,
        input_shape=input_shape,
        device=args.device,
        num_anchors=num_anchors,
        iters=args.iters,
        logger=logger,
        out_dir=args.output_dir)
    if args.algorithm == 'k-means':
        optimizer = YOLOKMeansAnchorOptimizer(**common_kwargs)
    elif args.algorithm == 'differential_evolution':
        optimizer = YOLODEAnchorOptimizer(**common_kwargs)
    else:
        raise NotImplementedError(
            f'Only support k-means and differential_evolution, '
            f'but get {args.algorithm}')

    optimizer.optimize()


if __name__ == '__main__':
    main()
| 13,161 | 34.477089 | 79 | py |
DSLA-DSLA | DSLA-DSLA/tools/analysis_tools/get_flops.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import numpy as np
import torch
from mmcv import Config, DictAction
from mmdet.models import build_detector
try:
from mmcv.cnn import get_model_complexity_info
except ImportError:
raise ImportError('Please upgrade mmcv to >0.6.2')
def parse_args():
    """Parse command line arguments for the FLOPs counting script."""
    cli = argparse.ArgumentParser(description='Train a detector')
    cli.add_argument('config', help='train config file path')
    cli.add_argument(
        '--shape',
        nargs='+',
        type=int,
        default=[1280, 800],
        help='input image size')
    cli.add_argument(
        '--cfg-options',
        nargs='+',
        action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file. If the value to '
        'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
        'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
        'Note that the quotation marks are necessary and that no white space '
        'is allowed.')
    cli.add_argument(
        '--size-divisor',
        default=32,
        type=int,
        help='Pad the input image, the minimum size that is divisible '
        'by size_divisor, -1 means do not pad the image.')
    return cli.parse_args()
def main():
    """Compute and print the FLOPs and parameter count of a detector.

    The input resolution comes from ``--shape``; unless ``--size-divisor``
    is negative, the height/width are padded up to the nearest multiple of
    the divisor before counting.
    """
    args = parse_args()

    if len(args.shape) == 1:
        h = w = args.shape[0]
    elif len(args.shape) == 2:
        h, w = args.shape
    else:
        raise ValueError('invalid input shape')
    orig_shape = (3, h, w)
    divisor = args.size_divisor
    if divisor > 0:
        h = int(np.ceil(h / divisor)) * divisor
        w = int(np.ceil(w / divisor)) * divisor

    input_shape = (3, h, w)

    cfg = Config.fromfile(args.config)
    if args.cfg_options is not None:
        cfg.merge_from_dict(args.cfg_options)

    model = build_detector(
        cfg.model,
        train_cfg=cfg.get('train_cfg'),
        test_cfg=cfg.get('test_cfg'))
    if torch.cuda.is_available():
        model.cuda()
    model.eval()

    # FLOPs counting relies on a plain single-tensor forward pass.
    if hasattr(model, 'forward_dummy'):
        model.forward = model.forward_dummy
    else:
        raise NotImplementedError(
            'FLOPs counter is currently not supported with {}'.format(
                model.__class__.__name__))

    flops, params = get_model_complexity_info(model, input_shape)
    split_line = '=' * 30

    if divisor > 0 and \
            input_shape != orig_shape:
        print(f'{split_line}\nUse size divisor set input shape '
              f'from {orig_shape} to {input_shape}\n')
    print(f'{split_line}\nInput shape: {input_shape}\n'
          f'Flops: {flops}\nParams: {params}\n{split_line}')
    print('!!!Please be cautious if you use the results in papers. '
          'You may need to check if all ops are supported and verify that the '
          'flops computation is correct.')


if __name__ == '__main__':
    main()
| 2,995 | 29.571429 | 79 | py |
DSLA-DSLA | DSLA-DSLA/tools/analysis_tools/test_robustness.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import copy
import os
import os.path as osp
import mmcv
import torch
from mmcv import DictAction
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmcv.runner import (get_dist_info, init_dist, load_checkpoint,
wrap_fp16_model)
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from tools.analysis_tools.robustness_eval import get_results
from mmdet import datasets
from mmdet.apis import multi_gpu_test, set_random_seed, single_gpu_test
from mmdet.core import eval_map
from mmdet.datasets import build_dataloader, build_dataset
from mmdet.models import build_detector
def coco_eval_with_return(result_files,
                          result_types,
                          coco,
                          max_dets=(100, 300, 1000)):
    """Run COCO evaluation and also return the metric values.

    Args:
        result_files (dict): Mapping from result type to a json result file.
        result_types (list[str]): Metrics to evaluate; each must be one of
            'proposal', 'bbox', 'segm' or 'keypoints'.
        coco (COCO | str): A COCO api object or a path to the annotations.
        max_dets (tuple[int]): Max detections per image for the 'proposal'
            metric.

    Returns:
        dict: For 'bbox'/'segm' a name->value dict of the 12 summary
        metrics; otherwise the raw ``cocoEval.stats`` array, keyed by
        result type.
    """
    for res_type in result_types:
        assert res_type in ['proposal', 'bbox', 'segm', 'keypoints']

    if mmcv.is_str(coco):
        coco = COCO(coco)
    assert isinstance(coco, COCO)

    eval_results = {}
    for res_type in result_types:
        result_file = result_files[res_type]
        assert result_file.endswith('.json')

        coco_dets = coco.loadRes(result_file)
        img_ids = coco.getImgIds()
        # proposals are scored with the bbox IoU type, class-agnostic below
        iou_type = 'bbox' if res_type == 'proposal' else res_type
        cocoEval = COCOeval(coco, coco_dets, iou_type)
        cocoEval.params.imgIds = img_ids
        if res_type == 'proposal':
            cocoEval.params.useCats = 0
            cocoEval.params.maxDets = list(max_dets)
        cocoEval.evaluate()
        cocoEval.accumulate()
        cocoEval.summarize()
        if res_type == 'segm' or res_type == 'bbox':
            # names of the 12 standard COCO summary metrics, in stats order
            metric_names = [
                'AP', 'AP50', 'AP75', 'APs', 'APm', 'APl', 'AR1', 'AR10',
                'AR100', 'ARs', 'ARm', 'ARl'
            ]
            eval_results[res_type] = {
                metric_names[i]: cocoEval.stats[i]
                for i in range(len(metric_names))
            }
        else:
            eval_results[res_type] = cocoEval.stats

    return eval_results
def voc_eval_with_return(result_file,
                         dataset,
                         iou_thr=0.5,
                         logger='print',
                         only_ap=True):
    """Evaluate PASCAL VOC detections and return the metric values.

    Returns:
        tuple: (mean_ap, eval_results) where eval_results is reduced to the
        per-class AP entries when ``only_ap`` is True.
    """
    det_results = mmcv.load(result_file)
    annotations = [dataset.get_ann_info(idx) for idx in range(len(dataset))]
    # VOC2007 datasets select the dedicated 'voc07' metric name.
    is_voc07 = hasattr(dataset, 'year') and dataset.year == 2007
    dataset_name = 'voc07' if is_voc07 else dataset.CLASSES
    mean_ap, eval_results = eval_map(
        det_results,
        annotations,
        scale_ranges=None,
        iou_thr=iou_thr,
        dataset=dataset_name,
        logger=logger)
    if only_ap:
        eval_results = [{'ap': entry['ap']} for entry in eval_results]

    return mean_ap, eval_results
def parse_args():
    """Parse command line arguments for corruption-robustness testing.

    Also exports ``--local_rank`` to the ``LOCAL_RANK`` environment
    variable for distributed launchers that read it.
    """
    parser = argparse.ArgumentParser(description='MMDet test detector')
    parser.add_argument('config', help='test config file path')
    parser.add_argument('checkpoint', help='checkpoint file')
    parser.add_argument('--out', help='output result file')
    # accepts either named groups (all/benchmark/noise/...) or individual
    # corruption names
    parser.add_argument(
        '--corruptions',
        type=str,
        nargs='+',
        default='benchmark',
        choices=[
            'all', 'benchmark', 'noise', 'blur', 'weather', 'digital',
            'holdout', 'None', 'gaussian_noise', 'shot_noise', 'impulse_noise',
            'defocus_blur', 'glass_blur', 'motion_blur', 'zoom_blur', 'snow',
            'frost', 'fog', 'brightness', 'contrast', 'elastic_transform',
            'pixelate', 'jpeg_compression', 'speckle_noise', 'gaussian_blur',
            'spatter', 'saturate'
        ],
        help='corruptions')
    # severity 0 means no corruption applied
    parser.add_argument(
        '--severities',
        type=int,
        nargs='+',
        default=[0, 1, 2, 3, 4, 5],
        help='corruption severity levels')
    parser.add_argument(
        '--eval',
        type=str,
        nargs='+',
        choices=['proposal', 'proposal_fast', 'bbox', 'segm', 'keypoints'],
        help='eval types')
    parser.add_argument(
        '--iou-thr',
        type=float,
        default=0.5,
        help='IoU threshold for pascal voc evaluation')
    parser.add_argument(
        '--summaries',
        type=bool,
        default=False,
        help='Print summaries for every corruption and severity')
    parser.add_argument(
        '--workers', type=int, default=32, help='workers per gpu')
    parser.add_argument('--show', action='store_true', help='show results')
    parser.add_argument(
        '--show-dir', help='directory where painted images will be saved')
    parser.add_argument(
        '--show-score-thr',
        type=float,
        default=0.3,
        help='score threshold (default: 0.3)')
    parser.add_argument('--tmpdir', help='tmp dir for writing some results')
    parser.add_argument('--seed', type=int, default=None, help='random seed')
    parser.add_argument(
        '--launcher',
        choices=['none', 'pytorch', 'slurm', 'mpi'],
        default='none',
        help='job launcher')
    parser.add_argument('--local_rank', type=int, default=0)
    parser.add_argument(
        '--final-prints',
        type=str,
        nargs='+',
        choices=['P', 'mPC', 'rPC'],
        default='mPC',
        help='corruption benchmark metric to print at the end')
    parser.add_argument(
        '--final-prints-aggregate',
        type=str,
        choices=['all', 'benchmark'],
        default='benchmark',
        help='aggregate all results or only those for benchmark corruptions')
    parser.add_argument(
        '--cfg-options',
        nargs='+',
        action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file. If the value to '
        'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
        'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
        'Note that the quotation marks are necessary and that no white space '
        'is allowed.')
    args = parser.parse_args()
    if 'LOCAL_RANK' not in os.environ:
        os.environ['LOCAL_RANK'] = str(args.local_rank)
    return args
def main():
    """Evaluate a detector under image corruptions.

    For every requested corruption type and severity level, a ``Corrupt``
    transform is inserted into the test pipeline, the model is run over the
    dataset, and the selected metrics are accumulated.  Results are dumped
    next to ``args.out`` and, on rank 0, a final robustness summary
    (P/mPC/rPC) is printed.

    Fixes over the original:
      * ``result_file = args.out`` was followed by a stranded ``+ f'.{name}'``
        statement, so the per-task suffix was never appended (and unary ``+``
        on a str raises ``TypeError``); the concatenation is restored.
      * ``osp.makedirs`` does not exist (``os.path`` has no ``makedirs``);
        replaced with ``os.makedirs``.
    """
    args = parse_args()

    assert args.out or args.show or args.show_dir, \
        ('Please specify at least one operation (save or show the results) '
         'with the argument "--out", "--show" or "show-dir"')

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    cfg = mmcv.Config.fromfile(args.config)
    if args.cfg_options is not None:
        cfg.merge_from_dict(args.cfg_options)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    if args.workers == 0:
        args.workers = cfg.data.workers_per_gpu

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # set random seeds
    if args.seed is not None:
        set_random_seed(args.seed)

    if 'all' in args.corruptions:
        corruptions = [
            'gaussian_noise', 'shot_noise', 'impulse_noise', 'defocus_blur',
            'glass_blur', 'motion_blur', 'zoom_blur', 'snow', 'frost', 'fog',
            'brightness', 'contrast', 'elastic_transform', 'pixelate',
            'jpeg_compression', 'speckle_noise', 'gaussian_blur', 'spatter',
            'saturate'
        ]
    elif 'benchmark' in args.corruptions:
        corruptions = [
            'gaussian_noise', 'shot_noise', 'impulse_noise', 'defocus_blur',
            'glass_blur', 'motion_blur', 'zoom_blur', 'snow', 'frost', 'fog',
            'brightness', 'contrast', 'elastic_transform', 'pixelate',
            'jpeg_compression'
        ]
    elif 'noise' in args.corruptions:
        corruptions = ['gaussian_noise', 'shot_noise', 'impulse_noise']
    elif 'blur' in args.corruptions:
        corruptions = [
            'defocus_blur', 'glass_blur', 'motion_blur', 'zoom_blur'
        ]
    elif 'weather' in args.corruptions:
        corruptions = ['snow', 'frost', 'fog', 'brightness']
    elif 'digital' in args.corruptions:
        corruptions = [
            'contrast', 'elastic_transform', 'pixelate', 'jpeg_compression'
        ]
    elif 'holdout' in args.corruptions:
        corruptions = ['speckle_noise', 'gaussian_blur', 'spatter', 'saturate']
    elif 'None' in args.corruptions:
        corruptions = ['None']
        args.severities = [0]
    else:
        corruptions = args.corruptions

    rank, _ = get_dist_info()
    aggregated_results = {}
    for corr_i, corruption in enumerate(corruptions):
        aggregated_results[corruption] = {}
        for sev_i, corruption_severity in enumerate(args.severities):
            # evaluate severity 0 (= no corruption) only once
            if corr_i > 0 and corruption_severity == 0:
                aggregated_results[corruption][0] = \
                    aggregated_results[corruptions[0]][0]
                continue

            test_data_cfg = copy.deepcopy(cfg.data.test)
            # assign corruption and severity
            if corruption_severity > 0:
                corruption_trans = dict(
                    type='Corrupt',
                    corruption=corruption,
                    severity=corruption_severity)
                # TODO: hard coded "1", we assume that the first step is
                # loading images, which needs to be fixed in the future
                test_data_cfg['pipeline'].insert(1, corruption_trans)

            # print info
            print(f'\nTesting {corruption} at severity {corruption_severity}')

            # build the dataloader
            # TODO: support multiple images per gpu
            #       (only minor changes are needed)
            dataset = build_dataset(test_data_cfg)
            data_loader = build_dataloader(
                dataset,
                samples_per_gpu=1,
                workers_per_gpu=args.workers,
                dist=distributed,
                shuffle=False)

            # build the model and load checkpoint
            cfg.model.train_cfg = None
            model = build_detector(cfg.model, test_cfg=cfg.get('test_cfg'))
            fp16_cfg = cfg.get('fp16', None)
            if fp16_cfg is not None:
                wrap_fp16_model(model)
            checkpoint = load_checkpoint(
                model, args.checkpoint, map_location='cpu')
            # old versions did not save class info in checkpoints,
            # this walkaround is for backward compatibility
            if 'CLASSES' in checkpoint.get('meta', {}):
                model.CLASSES = checkpoint['meta']['CLASSES']
            else:
                model.CLASSES = dataset.CLASSES

            if not distributed:
                model = MMDataParallel(model, device_ids=[0])
                show_dir = args.show_dir
                if show_dir is not None:
                    show_dir = osp.join(show_dir, corruption)
                    show_dir = osp.join(show_dir, str(corruption_severity))
                    if not osp.exists(show_dir):
                        os.makedirs(show_dir)
                outputs = single_gpu_test(model, data_loader, args.show,
                                          show_dir, args.show_score_thr)
            else:
                model = MMDistributedDataParallel(
                    model.cuda(),
                    device_ids=[torch.cuda.current_device()],
                    broadcast_buffers=False)
                outputs = multi_gpu_test(model, data_loader, args.tmpdir)

            if args.out and rank == 0:
                eval_results_filename = (
                    osp.splitext(args.out)[0] + '_results' +
                    osp.splitext(args.out)[1])
                mmcv.dump(outputs, args.out)
                eval_types = args.eval
                if cfg.dataset_type == 'VOCDataset':
                    if eval_types:
                        for eval_type in eval_types:
                            if eval_type == 'bbox':
                                test_dataset = mmcv.runner.obj_from_dict(
                                    cfg.data.test, datasets)
                                logger = 'print' if args.summaries else None
                                mean_ap, eval_results = \
                                    voc_eval_with_return(
                                        args.out, test_dataset,
                                        args.iou_thr, logger)
                                aggregated_results[corruption][
                                    corruption_severity] = eval_results
                            else:
                                print('\nOnly "bbox" evaluation '
                                      'is supported for pascal voc')
                    else:
                        print('\nNo task was selected for evaluation;'
                              '\nUse --eval to select a task')
                else:
                    if eval_types:
                        print(f'Starting evaluate {" and ".join(eval_types)}')
                        if eval_types == ['proposal_fast']:
                            result_file = args.out
                        else:
                            if not isinstance(outputs[0], dict):
                                result_files = dataset.results2json(
                                    outputs, args.out)
                            else:
                                for name in outputs[0]:
                                    print(f'\nEvaluating {name}')
                                    outputs_ = [out[name] for out in outputs]
                                    result_file = args.out + f'.{name}'
                                    result_files = dataset.results2json(
                                        outputs_, result_file)
                        eval_results = coco_eval_with_return(
                            result_files, eval_types, dataset.coco)
                        aggregated_results[corruption][
                            corruption_severity] = eval_results
                    else:
                        print('\nNo task was selected for evaluation;'
                              '\nUse --eval to select a task')

                # save results after each evaluation
                mmcv.dump(aggregated_results, eval_results_filename)

    if rank == 0:
        # print final results
        print('\nAggregated results:')
        prints = args.final_prints
        aggregate = args.final_prints_aggregate

        if cfg.dataset_type == 'VOCDataset':
            get_results(
                eval_results_filename,
                dataset='voc',
                prints=prints,
                aggregate=aggregate)
        else:
            get_results(
                eval_results_filename,
                dataset='coco',
                prints=prints,
                aggregate=aggregate)


if __name__ == '__main__':
    main()
| 15,222 | 38.234536 | 79 | py |
DSLA-DSLA | DSLA-DSLA/.dev_scripts/benchmark_filter.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import os.path as osp
def parse_args():
    """Parse the CLI flags that select which benchmark groups to train.

    Returns:
        argparse.Namespace: parsed options (all group flags default to off,
            ``--out`` defaults to ``batch_train_list.txt``).
    """
    cli = argparse.ArgumentParser(description='Filter configs to train')
    cli.add_argument(
        '--basic-arch',
        action='store_true',
        help='to train models in basic arch')
    cli.add_argument(
        '--datasets', action='store_true', help='to train models in dataset')
    cli.add_argument(
        '--data-pipeline',
        action='store_true',
        help='to train models related to data pipeline, e.g. augmentations')
    cli.add_argument(
        '--nn-module',
        action='store_true',
        help='to train models related to neural network modules')
    cli.add_argument(
        '--model-options',
        nargs='+',
        help='custom options to special model benchmark')
    cli.add_argument(
        '--out',
        type=str,
        default='batch_train_list.txt',
        help='output path of gathered metrics to be stored')
    return cli.parse_args()
# Config sub-directories under `configs/` grouped by benchmark category.
# Each name is matched against `benchmark_pool` in main() to pick the
# concrete config files to train.

# Core detector architectures.
basic_arch_root = [
    'atss', 'autoassign', 'cascade_rcnn', 'cascade_rpn', 'centripetalnet',
    'cornernet', 'detectors', 'deformable_detr', 'detr', 'double_heads',
    'dynamic_rcnn', 'faster_rcnn', 'fcos', 'foveabox', 'fp16', 'free_anchor',
    'fsaf', 'gfl', 'ghm', 'grid_rcnn', 'guided_anchoring', 'htc', 'ld',
    'libra_rcnn', 'mask_rcnn', 'ms_rcnn', 'nas_fcos', 'paa', 'pisa',
    'point_rend', 'reppoints', 'retinanet', 'rpn', 'sabl', 'ssd', 'tridentnet',
    'vfnet', 'yolact', 'yolo', 'sparse_rcnn', 'scnet', 'yolof', 'centernet'
]

# Dataset-specific configs (non-COCO benchmarks).
datasets_root = [
    'wider_face', 'pascal_voc', 'cityscapes', 'lvis', 'deepfashion'
]

# Data-pipeline / augmentation related configs.
data_pipeline_root = ['albu_example', 'instaboost']

# Neural-network module (neck/backbone/op) related configs.
nn_module_root = [
    'carafe', 'dcn', 'empirical_attention', 'gcnet', 'gn', 'gn+ws', 'hrnet',
    'pafpn', 'nas_fpn', 'regnet', 'resnest', 'res2net', 'groie'
]
benchmark_pool = [
'configs/albu_example/mask_rcnn_r50_fpn_albu_1x_coco.py',
'configs/atss/atss_r50_fpn_1x_coco.py',
'configs/autoassign/autoassign_r50_fpn_8x2_1x_coco.py',
'configs/carafe/mask_rcnn_r50_fpn_carafe_1x_coco.py',
'configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py',
'configs/cascade_rpn/crpn_faster_rcnn_r50_caffe_fpn_1x_coco.py',
'configs/centernet/centernet_resnet18_dcnv2_140e_coco.py',
'configs/centripetalnet/'
'centripetalnet_hourglass104_mstest_16x6_210e_coco.py',
'configs/cityscapes/mask_rcnn_r50_fpn_1x_cityscapes.py',
'configs/cornernet/'
'cornernet_hourglass104_mstest_8x6_210e_coco.py',
'configs/dcn/mask_rcnn_r50_fpn_mdconv_c3-c5_1x_coco.py',
'configs/dcn/faster_rcnn_r50_fpn_dpool_1x_coco.py',
'configs/dcn/faster_rcnn_r50_fpn_mdpool_1x_coco.py',
'configs/dcn/mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py',
'configs/deformable_detr/deformable_detr_r50_16x2_50e_coco.py',
'configs/detectors/detectors_htc_r50_1x_coco.py',
'configs/detr/detr_r50_8x2_150e_coco.py',
'configs/double_heads/dh_faster_rcnn_r50_fpn_1x_coco.py',
'configs/dynamic_rcnn/dynamic_rcnn_r50_fpn_1x_coco.py',
'configs/empirical_attention/faster_rcnn_r50_fpn_attention_1111_dcn_1x_coco.py', # noqa
'configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py',
'configs/faster_rcnn/faster_rcnn_r50_fpn_ohem_1x_coco.py',
'configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_1x_coco.py',
'configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_1x_coco.py',
'configs/faster_rcnn/faster_rcnn_r50_caffe_dc5_mstrain_1x_coco.py',
'configs/fcos/fcos_center_r50_caffe_fpn_gn-head_4x4_1x_coco.py',
'configs/foveabox/fovea_align_r50_fpn_gn-head_4x4_2x_coco.py',
'configs/fp16/retinanet_r50_fpn_fp16_1x_coco.py',
'configs/mask_rcnn/mask_rcnn_r50_fpn_fp16_1x_coco.py',
'configs/free_anchor/retinanet_free_anchor_r50_fpn_1x_coco.py',
'configs/fsaf/fsaf_r50_fpn_1x_coco.py',
'configs/gcnet/mask_rcnn_r50_fpn_r4_gcb_c3-c5_1x_coco.py',
'configs/gfl/gfl_r50_fpn_1x_coco.py',
'configs/ghm/retinanet_ghm_r50_fpn_1x_coco.py',
'configs/gn/mask_rcnn_r50_fpn_gn-all_2x_coco.py',
'configs/gn+ws/mask_rcnn_r50_fpn_gn_ws-all_2x_coco.py',
'configs/grid_rcnn/grid_rcnn_r50_fpn_gn-head_2x_coco.py',
'configs/groie/faster_rcnn_r50_fpn_groie_1x_coco.py',
'configs/guided_anchoring/ga_faster_r50_caffe_fpn_1x_coco.py',
'configs/hrnet/mask_rcnn_hrnetv2p_w18_1x_coco.py',
'configs/htc/htc_r50_fpn_1x_coco.py',
'configs/instaboost/mask_rcnn_r50_fpn_instaboost_4x_coco.py',
'configs/ld/ld_r18_gflv1_r101_fpn_coco_1x.py',
'configs/libra_rcnn/libra_faster_rcnn_r50_fpn_1x_coco.py',
'configs/lvis/mask_rcnn_r50_fpn_sample1e-3_mstrain_1x_lvis_v1.py',
'configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_coco.py',
'configs/ms_rcnn/ms_rcnn_r50_caffe_fpn_1x_coco.py',
'configs/nas_fcos/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco.py',
'configs/nas_fpn/retinanet_r50_nasfpn_crop640_50e_coco.py',
'configs/paa/paa_r50_fpn_1x_coco.py',
'configs/pafpn/faster_rcnn_r50_pafpn_1x_coco.py',
'configs/pisa/pisa_mask_rcnn_r50_fpn_1x_coco.py',
'configs/point_rend/point_rend_r50_caffe_fpn_mstrain_1x_coco.py',
'configs/regnet/mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py',
'configs/reppoints/reppoints_moment_r50_fpn_gn-neck+head_1x_coco.py',
'configs/res2net/faster_rcnn_r2_101_fpn_2x_coco.py',
'configs/resnest/'
'mask_rcnn_s50_fpn_syncbn-backbone+head_mstrain_1x_coco.py',
'configs/retinanet/retinanet_r50_caffe_fpn_1x_coco.py',
'configs/rpn/rpn_r50_fpn_1x_coco.py',
'configs/sabl/sabl_retinanet_r50_fpn_1x_coco.py',
'configs/ssd/ssd300_coco.py',
'configs/tridentnet/tridentnet_r50_caffe_1x_coco.py',
'configs/vfnet/vfnet_r50_fpn_1x_coco.py',
'configs/yolact/yolact_r50_1x8_coco.py',
'configs/yolo/yolov3_d53_320_273e_coco.py',
'configs/sparse_rcnn/sparse_rcnn_r50_fpn_1x_coco.py',
'configs/scnet/scnet_r50_fpn_1x_coco.py',
'configs/yolof/yolof_r50_c5_8x8_1x_coco.py',
]
def main():
    """Select benchmark configs according to the CLI flags and write the
    chosen config paths, one per line, to ``args.out``."""
    args = parse_args()

    # Collect the requested config sub-directory names.
    selected_roots = []
    for enabled, roots in ((args.basic_arch, basic_arch_root),
                           (args.datasets, datasets_root),
                           (args.data_pipeline, data_pipeline_root),
                           (args.nn_module, nn_module_root)):
        if enabled:
            selected_roots.extend(roots)
    if args.model_options is not None:
        selected_roots.extend(args.model_options)

    # Keep only configs that are both on disk and in the benchmark pool,
    # preserving discovery order and deduplicating.
    picked = []
    for root in selected_roots:
        folder = osp.join('configs/', root)
        for entry in os.scandir(folder):
            candidate = osp.join(folder, entry.name)
            if candidate in benchmark_pool and candidate not in picked:
                picked.append(candidate)

    print(f'Totally found {len(picked)} configs to benchmark')
    with open(args.out, 'w') as f:
        f.writelines(cfg + '\n' for cfg in picked)
# Entry point: build the filtered training list when executed directly.
if __name__ == '__main__':
    main()
| 7,101 | 41.27381 | 92 | py |
DSLA-DSLA | DSLA-DSLA/.dev_scripts/gather_models.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import glob
import hashlib
import json
import os.path as osp
import shutil
import subprocess
from collections import OrderedDict

import mmcv
import torch
import yaml
def ordered_yaml_dump(data, stream=None, Dumper=yaml.SafeDumper, **kwds):
    """Serialize *data* to YAML while preserving ``OrderedDict`` key order.

    A throwaway subclass of *Dumper* is taught to emit ``OrderedDict`` as a
    plain mapping in insertion order, so keys are not re-sorted.
    """

    class _OrderedDumper(Dumper):
        pass

    _OrderedDumper.add_representer(
        OrderedDict,
        lambda dumper, value: dumper.represent_mapping(
            yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, value.items()))
    return yaml.dump(data, stream, _OrderedDumper, **kwds)
def process_checkpoint(in_file, out_file):
    """Strip training-only state from a checkpoint and publish it.

    Removes the optimizer state and EMA weights, re-saves the checkpoint and
    renames it to ``<out_file stem>-<sha256[:8]>.pth``.

    Args:
        in_file (str): Path of the raw training checkpoint.
        out_file (str): Desired output path (typically ending in ``.pth``).

    Returns:
        str: Final path of the published checkpoint.
    """
    checkpoint = torch.load(in_file, map_location='cpu')
    # remove optimizer for smaller file size
    if 'optimizer' in checkpoint:
        del checkpoint['optimizer']
    # remove ema state_dict
    for key in list(checkpoint['state_dict']):
        if key.startswith('ema_'):
            checkpoint['state_dict'].pop(key)
    # if it is necessary to remove some sensitive data in checkpoint['meta'],
    # add the code here.

    # Plain string comparison misorders versions ('1.10' < '1.6'), so compare
    # the parsed (major, minor) tuple instead.
    try:
        major, minor = torch.__version__.split('+')[0].split('.')[:2]
        is_new_torch = (int(major), int(minor)) >= (1, 6)
    except ValueError:
        # dev/unparsable version strings are assumed to be recent
        is_new_torch = True
    if is_new_torch:
        torch.save(checkpoint, out_file, _use_new_zipfile_serialization=False)
    else:
        torch.save(checkpoint, out_file)

    # Hash the published file. hashlib avoids depending on the external
    # `sha256sum` binary and on a racy fire-and-forget `mv` subprocess.
    with open(out_file, 'rb') as f:
        sha = hashlib.sha256(f.read()).hexdigest()
    # str.rstrip('.pth') strips *characters* ('.', 'p', 't', 'h'), not the
    # suffix, and would mangle stems ending in those letters; strip the
    # extension explicitly instead.
    stem = out_file[:-len('.pth')] if out_file.endswith('.pth') else out_file
    final_file = stem + '-{}.pth'.format(sha[:8])
    shutil.move(out_file, final_file)
    return final_file
def get_final_epoch(config):
    """Return the configured last training epoch for *config*."""
    return mmcv.Config.fromfile('./configs/' + config).runner.max_epochs
def get_best_epoch(exp_dir):
    """Locate the last (lexicographically sorted) ``best_*.pth`` checkpoint
    saved in *exp_dir*.

    Returns:
        tuple[str, int]: the checkpoint file name and its epoch number
            (parsed from the ``..._epoch_<N>.pth`` naming scheme).
    """
    candidates = sorted(glob.glob(osp.join(exp_dir, 'best_*.pth')))
    model_file = candidates[-1].split('/')[-1]
    epoch = int(model_file.split('_')[-1].split('.')[0])
    return model_file, epoch
def get_real_epoch(config):
    """Return the effective number of training epochs for *config*,
    accounting for ``RepeatDataset`` repetition."""
    cfg = mmcv.Config.fromfile('./configs/' + config)
    total = cfg.runner.max_epochs
    if cfg.data.train.type == 'RepeatDataset':
        total = total * cfg.data.train.times
    return total
def get_final_results(log_json_path, epoch, results_lut):
    """Extract training memory and eval metrics for *epoch* from a JSON log.

    Args:
        log_json_path (str): Path to a ``*.log.json`` file with one JSON
            object per line.
        epoch (int): Epoch whose entries should be collected.
        results_lut (list[str]): Metric keys to pick from the ``val`` line.

    Returns:
        dict | None: collected values, or ``None`` when the epoch was not
            found. (The caller checks ``is None``; the previous version
            returned an empty dict, which made that check dead code.)
    """
    result_dict = dict()
    with open(log_json_path, 'r') as f:
        for line in f:
            line = line.strip()
            if not line:  # tolerate blank lines in the log
                continue
            log_line = json.loads(line)
            if 'mode' not in log_line:
                continue

            if log_line['mode'] == 'train' and log_line['epoch'] == epoch:
                result_dict['memory'] = log_line['memory']

            if log_line['mode'] == 'val' and log_line['epoch'] == epoch:
                result_dict.update({
                    key: log_line[key]
                    for key in results_lut if key in log_line
                })
    return result_dict or None
def get_dataset_name(config):
    """Return the human-readable dataset name for *config*'s dataset type."""
    # If there are more dataset, add here.
    dataset_names = {
        'CityscapesDataset': 'Cityscapes',
        'CocoDataset': 'COCO',
        'CocoPanopticDataset': 'COCO',
        'DeepFashionDataset': 'Deep Fashion',
        'LVISV05Dataset': 'LVIS v0.5',
        'LVISV1Dataset': 'LVIS v1',
        'VOCDataset': 'Pascal VOC',
        'WIDERFaceDataset': 'WIDER Face',
    }
    cfg = mmcv.Config.fromfile('./configs/' + config)
    return dataset_names[cfg.dataset_type]
def convert_model_info_to_pwc(model_infos):
    """Convert gathered model records into Papers-with-Code metafile entries.

    Args:
        model_infos (list[dict]): Records with ``config``, ``results`` and
            ``model_path`` keys as produced by ``main``.

    Returns:
        dict[str, list[OrderedDict]]: metafile entries grouped by the
            config folder name.
    """
    pwc_files = {}
    for model in model_infos:
        cfg_folder_name = osp.split(model['config'])[-2]
        pwc_model_info = OrderedDict()
        pwc_model_info['Name'] = osp.split(model['config'])[-1].split('.')[0]
        pwc_model_info['In Collection'] = 'Please fill in Collection name'
        pwc_model_info['Config'] = osp.join('configs', model['config'])

        # get metadata
        memory = round(model['results']['memory'] / 1024, 1)
        epochs = get_real_epoch(model['config'])
        meta_data = OrderedDict()
        meta_data['Training Memory (GB)'] = memory
        meta_data['Epochs'] = epochs
        pwc_model_info['Metadata'] = meta_data

        # get dataset name
        dataset_name = get_dataset_name(model['config'])

        # get results; if there are more metrics, add here.
        results = []
        if 'bbox_mAP' in model['results']:
            metric = round(model['results']['bbox_mAP'] * 100, 1)
            results.append(
                OrderedDict(
                    Task='Object Detection',
                    Dataset=dataset_name,
                    Metrics={'box AP': metric}))
        if 'segm_mAP' in model['results']:
            metric = round(model['results']['segm_mAP'] * 100, 1)
            results.append(
                OrderedDict(
                    Task='Instance Segmentation',
                    Dataset=dataset_name,
                    Metrics={'mask AP': metric}))
        if 'PQ' in model['results']:
            metric = round(model['results']['PQ'], 1)
            results.append(
                OrderedDict(
                    Task='Panoptic Segmentation',
                    Dataset=dataset_name,
                    Metrics={'PQ': metric}))
        pwc_model_info['Results'] = results

        # str.rstrip('.py') strips trailing '.', 'p' and 'y' *characters* and
        # can eat part of the config stem (e.g. 'happy.py' -> 'ha'); strip
        # the extension explicitly instead.
        config_stem = model['config']
        if config_stem.endswith('.py'):
            config_stem = config_stem[:-len('.py')]
        link_string = 'https://download.openmmlab.com/mmdetection/v2.0/'
        link_string += '{}/{}'.format(config_stem,
                                      osp.split(model['model_path'])[-1])
        pwc_model_info['Weights'] = link_string

        if cfg_folder_name in pwc_files:
            pwc_files[cfg_folder_name].append(pwc_model_info)
        else:
            pwc_files[cfg_folder_name] = [pwc_model_info]
    return pwc_files
def parse_args():
    """Parse the command line of the model-gathering script."""
    cli = argparse.ArgumentParser(description='Gather benchmarked models')
    cli.add_argument(
        'root',
        type=str,
        help='root path of benchmarked models to be gathered')
    cli.add_argument(
        'out', type=str, help='output path of gathered models to be stored')
    cli.add_argument(
        '--best',
        action='store_true',
        help='whether to gather the best model.')
    return cli.parse_args()
def _remove_suffix(text, suffix):
    """Return *text* without a trailing *suffix* if present.

    ``str.rstrip`` strips a *character set*, not a suffix, so it must not be
    used for this (``'happy.py'.rstrip('.py') == 'ha'``).
    """
    if suffix and text.endswith(suffix):
        return text[:-len(suffix)]
    return text


def main():
    """Collect finished experiments, publish their checkpoints, logs and
    configs under ``args.out`` and dump aggregated model metadata."""
    args = parse_args()
    models_root = args.root
    models_out = args.out
    mmcv.mkdir_or_exist(models_out)

    # find all models in the root directory to be gathered
    raw_configs = list(mmcv.scandir('./configs', '.py', recursive=True))

    # filter configs that is not trained in the experiments dir
    used_configs = []
    for raw_config in raw_configs:
        if osp.exists(osp.join(models_root, raw_config)):
            used_configs.append(raw_config)
    print(f'Find {len(used_configs)} models to be gathered')

    # find final_ckpt and log file for trained each config
    # and parse the best performance
    model_infos = []
    for used_config in used_configs:
        exp_dir = osp.join(models_root, used_config)
        # check whether the exps is finished
        if args.best is True:
            final_model, final_epoch = get_best_epoch(exp_dir)
        else:
            final_epoch = get_final_epoch(used_config)
            final_model = 'epoch_{}.pth'.format(final_epoch)
        model_path = osp.join(exp_dir, final_model)

        # skip if the model is still training
        if not osp.exists(model_path):
            continue

        # get the latest logs
        log_json_path = list(
            sorted(glob.glob(osp.join(exp_dir, '*.log.json'))))[-1]
        log_txt_path = list(sorted(glob.glob(osp.join(exp_dir, '*.log'))))[-1]
        cfg = mmcv.Config.fromfile('./configs/' + used_config)
        results_lut = cfg.evaluation.metric
        if not isinstance(results_lut, list):
            results_lut = [results_lut]
        # case when using VOC, the evaluation key is only 'mAP'
        # when using Panoptic Dataset, the evaluation key is 'PQ'.
        for i, key in enumerate(results_lut):
            if 'mAP' not in key and 'PQ' not in key:
                # COCO-style logs record metrics as '<metric>_mAP'
                # (e.g. 'bbox' -> 'bbox_mAP'); the previous "m_AP" suffix
                # never matched any log key.
                results_lut[i] = key + '_mAP'
        model_performance = get_final_results(log_json_path, final_epoch,
                                              results_lut)

        if model_performance is None:
            continue

        model_time = osp.split(log_txt_path)[-1].split('.')[0]
        model_infos.append(
            dict(
                config=used_config,
                results=model_performance,
                epochs=final_epoch,
                model_time=model_time,
                final_model=final_model,
                log_json_path=osp.split(log_json_path)[-1]))

    # publish model for each checkpoint
    publish_model_infos = []
    for model in model_infos:
        model_publish_dir = osp.join(models_out,
                                     _remove_suffix(model['config'], '.py'))
        mmcv.mkdir_or_exist(model_publish_dir)

        model_name = osp.split(model['config'])[-1].split('.')[0]
        model_name += '_' + model['model_time']
        publish_model_path = osp.join(model_publish_dir, model_name)
        trained_model_path = osp.join(models_root, model['config'],
                                      model['final_model'])

        # convert model
        final_model_path = process_checkpoint(trained_model_path,
                                              publish_model_path)

        # copy log
        shutil.copy(
            osp.join(models_root, model['config'], model['log_json_path']),
            osp.join(model_publish_dir, f'{model_name}.log.json'))
        shutil.copy(
            osp.join(models_root, model['config'],
                     _remove_suffix(model['log_json_path'], '.json')),
            osp.join(model_publish_dir, f'{model_name}.log'))

        # copy config to guarantee reproducibility
        config_path = model['config']
        config_path = osp.join(
            'configs',
            config_path) if 'configs' not in config_path else config_path
        target_config_path = osp.split(config_path)[-1]
        shutil.copy(config_path, osp.join(model_publish_dir,
                                          target_config_path))

        model['model_path'] = final_model_path
        publish_model_infos.append(model)

    models = dict(models=publish_model_infos)
    print(f'Totally gathered {len(publish_model_infos)} models')
    mmcv.dump(models, osp.join(models_out, 'model_info.json'))

    pwc_files = convert_model_info_to_pwc(publish_model_infos)
    for name in pwc_files:
        with open(osp.join(models_out, name + '_metafile.yml'), 'w') as f:
            ordered_yaml_dump(pwc_files[name], f, encoding='utf-8')
# Entry point: gather and publish models when executed directly.
if __name__ == '__main__':
    main()
| 10,489 | 34.924658 | 79 | py |
DSLA-DSLA | DSLA-DSLA/.dev_scripts/benchmark_inference_fps.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import os.path as osp
import mmcv
from mmcv import Config, DictAction
from mmcv.runner import init_dist
from terminaltables import GithubFlavoredMarkdownTable
from tools.analysis_tools.benchmark import repeat_measure_inference_speed
def parse_args():
    """Parse CLI options for the batched FPS benchmark runner.

    Also guarantees that ``LOCAL_RANK`` is present in the environment for
    the distributed launcher.
    """
    cli = argparse.ArgumentParser(
        description='MMDet benchmark a model of FPS')
    cli.add_argument('config', help='test config file path')
    cli.add_argument('checkpoint_root', help='Checkpoint file root path')
    cli.add_argument(
        '--round-num',
        type=int,
        default=1,
        help='round a number to a given precision in decimal digits')
    cli.add_argument(
        '--repeat-num',
        type=int,
        default=1,
        help='number of repeat times of measurement for averaging the results')
    cli.add_argument(
        '--out', type=str, help='output path of gathered fps to be stored')
    cli.add_argument(
        '--max-iter', type=int, default=2000, help='num of max iter')
    cli.add_argument(
        '--log-interval', type=int, default=50, help='interval of logging')
    cli.add_argument(
        '--fuse-conv-bn',
        action='store_true',
        help='Whether to fuse conv and bn, this will slightly increase'
        'the inference speed')
    cli.add_argument(
        '--cfg-options',
        nargs='+',
        action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file. If the value to '
        'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
        'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
        'Note that the quotation marks are necessary and that no white space '
        'is allowed.')
    cli.add_argument(
        '--launcher',
        choices=['none', 'pytorch', 'slurm', 'mpi'],
        default='none',
        help='job launcher')
    cli.add_argument('--local_rank', type=int, default=0)
    args = cli.parse_args()
    os.environ.setdefault('LOCAL_RANK', str(args.local_rank))
    return args
def results2markdown(result_dict):
    """Print the collected FPS measurements as a GitHub-flavoured markdown
    table.

    Entries whose ``fps`` is a list (repeated measurements) get extra
    per-run and mean columns; the header is chosen accordingly.
    """
    rows = []
    has_repeat_runs = False
    for cfg_name, record in result_dict.items():
        model_name = cfg_name.replace('configs/', '')
        fps = record['fps']
        latency = record['ms_times_pre_image']
        if isinstance(fps, list):
            # repeated measurements: show every run plus the mean
            has_repeat_runs = True
            rows.append([
                model_name,
                ','.join(str(v) for v in fps),
                record['mean_fps'],
                ','.join(str(v) for v in latency),
                record['mean_times_pre_image'],
            ])
        else:
            rows.append([model_name, fps, latency])

    if has_repeat_runs:
        header = [
            'model', 'fps', 'mean_fps', 'times_pre_image(ms)',
            'mean_times_pre_image(ms)'
        ]
    else:
        header = ['model', 'fps', 'times_pre_image(ms)']
    print(GithubFlavoredMarkdownTable([header] + rows).table, flush=True)
# Entry point: benchmark inference FPS for every model listed in the batch
# config and dump/print the aggregated results. Requires a distributed
# launcher (single-process mode is rejected below).
if __name__ == '__main__':
    args = parse_args()
    assert args.round_num >= 0
    assert args.repeat_num >= 1

    config = Config.fromfile(args.config)

    if args.launcher == 'none':
        raise NotImplementedError('Only supports distributed mode')
    else:
        init_dist(args.launcher)

    # Maps config path -> fps/latency record (lists + means when repeated).
    result_dict = {}
    for model_key in config:
        model_infos = config[model_key]
        # a key may hold a single model spec or a list of them
        if not isinstance(model_infos, list):
            model_infos = [model_infos]
        for model_info in model_infos:
            # NOTE(review): record_metrics is never used afterwards; it only
            # serves as a presence check for the 'metric' key here.
            record_metrics = model_info['metric']
            cfg_path = model_info['config'].strip()
            cfg = Config.fromfile(cfg_path)
            checkpoint = osp.join(args.checkpoint_root,
                                  model_info['checkpoint'].strip())
            try:
                # scalar fps when repeat_num == 1, list of fps otherwise
                fps = repeat_measure_inference_speed(cfg, checkpoint,
                                                    args.max_iter,
                                                    args.log_interval,
                                                    args.fuse_conv_bn,
                                                    args.repeat_num)
                if args.repeat_num > 1:
                    fps_list = [round(fps_, args.round_num) for fps_ in fps]
                    times_pre_image_list = [
                        round(1000 / fps_, args.round_num) for fps_ in fps
                    ]
                    mean_fps = round(
                        sum(fps_list) / len(fps_list), args.round_num)
                    mean_times_pre_image = round(
                        sum(times_pre_image_list) / len(times_pre_image_list),
                        args.round_num)
                    print(
                        f'{cfg_path} '
                        f'Overall fps: {fps_list}[{mean_fps}] img / s, '
                        f'times per image: '
                        f'{times_pre_image_list}[{mean_times_pre_image}] '
                        f'ms / img',
                        flush=True)
                    result_dict[cfg_path] = dict(
                        fps=fps_list,
                        mean_fps=mean_fps,
                        ms_times_pre_image=times_pre_image_list,
                        mean_times_pre_image=mean_times_pre_image)
                else:
                    print(
                        f'{cfg_path} fps : {fps:.{args.round_num}f} img / s, '
                        f'times per image: {1000 / fps:.{args.round_num}f} '
                        f'ms / img',
                        flush=True)
                    result_dict[cfg_path] = dict(
                        fps=round(fps, args.round_num),
                        ms_times_pre_image=round(1000 / fps, args.round_num))
            except Exception as e:
                # a failed benchmark is recorded as zeros so the table stays
                # complete, and the error is reported
                print(f'{cfg_path} error: {repr(e)}')
                if args.repeat_num > 1:
                    result_dict[cfg_path] = dict(
                        fps=[0],
                        mean_fps=0,
                        ms_times_pre_image=[0],
                        mean_times_pre_image=0)
                else:
                    result_dict[cfg_path] = dict(fps=0, ms_times_pre_image=0)

    if args.out:
        mmcv.mkdir_or_exist(args.out)
        mmcv.dump(result_dict, osp.join(args.out, 'batch_inference_fps.json'))

    results2markdown(result_dict)
| 6,763 | 38.788235 | 79 | py |
DSLA-DSLA | DSLA-DSLA/.dev_scripts/batch_test_list.py | # Copyright (c) OpenMMLab. All rights reserved.
# yapf: disable
# Each entry below describes one batch-test case: the config to run, the
# checkpoint file name, the eval metric type(s), and the expected score(s)
# that the test harness compares against.
atss = dict(
    config='configs/atss/atss_r50_fpn_1x_coco.py',
    checkpoint='atss_r50_fpn_1x_coco_20200209-985f7bd0.pth',
    eval='bbox',
    metric=dict(bbox_mAP=39.4),
)
autoassign = dict(
config='configs/autoassign/autoassign_r50_fpn_8x2_1x_coco.py',
checkpoint='auto_assign_r50_fpn_1x_coco_20210413_115540-5e17991f.pth',
eval='bbox',
metric=dict(bbox_mAP=40.4),
)
carafe = dict(
config='configs/carafe/faster_rcnn_r50_fpn_carafe_1x_coco.py',
checkpoint='faster_rcnn_r50_fpn_carafe_1x_coco_bbox_mAP-0.386_20200504_175733-385a75b7.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=38.6),
)
cascade_rcnn = [
dict(
config='configs/cascade_rcnn/cascade_rcnn_r50_fpn_1x_coco.py',
checkpoint='cascade_rcnn_r50_fpn_1x_coco_20200316-3dc56deb.pth',
eval='bbox',
metric=dict(bbox_mAP=40.3),
),
dict(
config='configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py',
checkpoint='cascade_mask_rcnn_r50_fpn_1x_coco_20200203-9d4dcb24.pth',
eval=['bbox', 'segm'],
metric=dict(bbox_mAP=41.2, segm_mAP=35.9),
),
]
cascade_rpn = dict(
config='configs/cascade_rpn/crpn_faster_rcnn_r50_caffe_fpn_1x_coco.py',
checkpoint='crpn_faster_rcnn_r50_caffe_fpn_1x_coco-c8283cca.pth',
eval='bbox',
metric=dict(bbox_mAP=40.4),
)
centripetalnet = dict(
config='configs/centripetalnet/centripetalnet_hourglass104_mstest_16x6_210e_coco.py', # noqa
checkpoint='centripetalnet_hourglass104_mstest_16x6_210e_coco_20200915_204804-3ccc61e5.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=44.7),
)
cornernet = dict(
config='configs/cornernet/cornernet_hourglass104_mstest_8x6_210e_coco.py',
checkpoint='cornernet_hourglass104_mstest_8x6_210e_coco_20200825_150618-79b44c30.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=41.2),
)
dcn = dict(
config='configs/dcn/faster_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py',
checkpoint='faster_rcnn_r50_fpn_dconv_c3-c5_1x_coco_20200130-d68aed1e.pth',
eval='bbox',
metric=dict(bbox_mAP=41.3),
)
deformable_detr = dict(
config='configs/deformable_detr/deformable_detr_r50_16x2_50e_coco.py',
checkpoint='deformable_detr_r50_16x2_50e_coco_20210419_220030-a12b9512.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=44.5),
)
detectors = dict(
config='configs/detectors/detectors_htc_r50_1x_coco.py',
checkpoint='detectors_htc_r50_1x_coco-329b1453.pth',
eval=['bbox', 'segm'],
metric=dict(bbox_mAP=49.1, segm_mAP=42.6),
)
detr = dict(
config='configs/detr/detr_r50_8x2_150e_coco.py',
checkpoint='detr_r50_8x2_150e_coco_20201130_194835-2c4b8974.pth',
eval='bbox',
metric=dict(bbox_mAP=40.1),
)
double_heads = dict(
config='configs/double_heads/dh_faster_rcnn_r50_fpn_1x_coco.py',
checkpoint='dh_faster_rcnn_r50_fpn_1x_coco_20200130-586b67df.pth',
eval='bbox',
metric=dict(bbox_mAP=40.0),
)
dynamic_rcnn = dict(
config='configs/dynamic_rcnn/dynamic_rcnn_r50_fpn_1x_coco.py',
checkpoint='dynamic_rcnn_r50_fpn_1x-62a3f276.pth',
eval='bbox',
metric=dict(bbox_mAP=38.9),
)
empirical_attention = dict(
config='configs/empirical_attention/faster_rcnn_r50_fpn_attention_1111_1x_coco.py', # noqa
checkpoint='faster_rcnn_r50_fpn_attention_1111_1x_coco_20200130-403cccba.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=40.0),
)
faster_rcnn = dict(
config='configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py',
checkpoint='faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth',
eval='bbox',
metric=dict(bbox_mAP=37.4),
)
fcos = dict(
config='configs/fcos/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_1x_coco.py', # noqa
checkpoint='fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_1x_coco-0a0d75a8.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=38.7),
)
foveabox = dict(
config='configs/foveabox/fovea_align_r50_fpn_gn-head_4x4_2x_coco.py',
checkpoint='fovea_align_r50_fpn_gn-head_4x4_2x_coco_20200203-8987880d.pth',
eval='bbox',
metric=dict(bbox_mAP=37.9),
)
free_anchor = dict(
config='configs/free_anchor/retinanet_free_anchor_r50_fpn_1x_coco.py',
checkpoint='retinanet_free_anchor_r50_fpn_1x_coco_20200130-0f67375f.pth',
eval='bbox',
metric=dict(bbox_mAP=38.7),
)
fsaf = dict(
config='configs/fsaf/fsaf_r50_fpn_1x_coco.py',
checkpoint='fsaf_r50_fpn_1x_coco-94ccc51f.pth',
eval='bbox',
metric=dict(bbox_mAP=37.4),
)
gcnet = dict(
config='configs/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py', # noqa
checkpoint='mask_rcnn_r50_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco_20200202-587b99aa.pth', # noqa
eval=['bbox', 'segm'],
metric=dict(bbox_mAP=40.4, segm_mAP=36.2),
)
gfl = dict(
config='configs/gfl/gfl_r50_fpn_1x_coco.py',
checkpoint='gfl_r50_fpn_1x_coco_20200629_121244-25944287.pth',
eval='bbox',
metric=dict(bbox_mAP=40.2),
)
gn = dict(
config='configs/gn/mask_rcnn_r50_fpn_gn-all_2x_coco.py',
checkpoint='mask_rcnn_r50_fpn_gn-all_2x_coco_20200206-8eee02a6.pth',
eval=['bbox', 'segm'],
metric=dict(bbox_mAP=40.1, segm_mAP=36.4),
)
gn_ws = dict(
config='configs/gn+ws/faster_rcnn_r50_fpn_gn_ws-all_1x_coco.py',
checkpoint='faster_rcnn_r50_fpn_gn_ws-all_1x_coco_20200130-613d9fe2.pth',
eval='bbox',
metric=dict(bbox_mAP=39.7),
)
grid_rcnn = dict(
config='configs/grid_rcnn/grid_rcnn_r50_fpn_gn-head_2x_coco.py',
checkpoint='grid_rcnn_r50_fpn_gn-head_2x_coco_20200130-6cca8223.pth',
eval='bbox',
metric=dict(bbox_mAP=40.4),
)
groie = dict(
config='configs/groie/faster_rcnn_r50_fpn_groie_1x_coco.py',
checkpoint='faster_rcnn_r50_fpn_groie_1x_coco_20200604_211715-66ee9516.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=38.3),
)
guided_anchoring = [
dict(
config='configs/guided_anchoring/ga_retinanet_r50_caffe_fpn_1x_coco.py', # noqa
checkpoint='ga_retinanet_r50_caffe_fpn_1x_coco_20201020-39581c6f.pth',
eval='bbox',
metric=dict(bbox_mAP=36.9),
),
dict(
config='configs/guided_anchoring/ga_faster_r50_caffe_fpn_1x_coco.py',
checkpoint='ga_faster_r50_caffe_fpn_1x_coco_20200702_000718-a11ccfe6.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=39.6),
),
]
hrnet = dict(
config='configs/hrnet/faster_rcnn_hrnetv2p_w18_1x_coco.py',
checkpoint='faster_rcnn_hrnetv2p_w18_1x_coco_20200130-56651a6d.pth',
eval='bbox',
metric=dict(bbox_mAP=36.9),
)
htc = dict(
config='configs/htc/htc_r50_fpn_1x_coco.py',
checkpoint='htc_r50_fpn_1x_coco_20200317-7332cf16.pth',
eval=['bbox', 'segm'],
metric=dict(bbox_mAP=42.3, segm_mAP=37.4),
)
libra_rcnn = dict(
config='configs/libra_rcnn/libra_faster_rcnn_r50_fpn_1x_coco.py',
checkpoint='libra_faster_rcnn_r50_fpn_1x_coco_20200130-3afee3a9.pth',
eval='bbox',
metric=dict(bbox_mAP=38.3),
)
mask_rcnn = dict(
config='configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py',
checkpoint='mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth',
eval=['bbox', 'segm'],
metric=dict(bbox_mAP=38.2, segm_mAP=34.7),
)
ms_rcnn = dict(
config='configs/ms_rcnn/ms_rcnn_r50_caffe_fpn_1x_coco.py',
checkpoint='ms_rcnn_r50_caffe_fpn_1x_coco_20200702_180848-61c9355e.pth',
eval=['bbox', 'segm'],
metric=dict(bbox_mAP=38.2, segm_mAP=36.0),
)
nas_fcos = dict(
config='configs/nas_fcos/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco.py', # noqa
checkpoint='nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco_20200520-1bdba3ce.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=39.4),
)
nas_fpn = dict(
config='configs/nas_fpn/retinanet_r50_nasfpn_crop640_50e_coco.py',
checkpoint='retinanet_r50_nasfpn_crop640_50e_coco-0ad1f644.pth',
eval='bbox',
metric=dict(bbox_mAP=40.5),
)
paa = dict(
config='configs/paa/paa_r50_fpn_1x_coco.py',
checkpoint='paa_r50_fpn_1x_coco_20200821-936edec3.pth',
eval='bbox',
metric=dict(bbox_mAP=40.4),
)
pafpn = dict(
config='configs/pafpn/faster_rcnn_r50_pafpn_1x_coco.py',
checkpoint='faster_rcnn_r50_pafpn_1x_coco_bbox_mAP-0.375_20200503_105836-b7b4b9bd.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=37.5),
)
pisa = dict(
config='configs/pisa/pisa_faster_rcnn_r50_fpn_1x_coco.py',
checkpoint='pisa_faster_rcnn_r50_fpn_1x_coco-dea93523.pth',
eval='bbox',
metric=dict(bbox_mAP=38.4),
)
point_rend = dict(
config='configs/point_rend/point_rend_r50_caffe_fpn_mstrain_1x_coco.py',
checkpoint='point_rend_r50_caffe_fpn_mstrain_1x_coco-1bcb5fb4.pth',
eval=['bbox', 'segm'],
metric=dict(bbox_mAP=38.4, segm_mAP=36.3),
)
regnet = dict(
config='configs/regnet/mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py',
checkpoint='mask_rcnn_regnetx-3.2GF_fpn_1x_coco_20200520_163141-2a9d1814.pth', # noqa
eval=['bbox', 'segm'],
metric=dict(bbox_mAP=40.4, segm_mAP=36.7),
)
reppoints = dict(
config='configs/reppoints/reppoints_moment_r50_fpn_1x_coco.py',
checkpoint='reppoints_moment_r50_fpn_1x_coco_20200330-b73db8d1.pth',
eval='bbox',
metric=dict(bbox_mAP=37.0),
)
res2net = dict(
config='configs/res2net/faster_rcnn_r2_101_fpn_2x_coco.py',
checkpoint='faster_rcnn_r2_101_fpn_2x_coco-175f1da6.pth',
eval='bbox',
metric=dict(bbox_mAP=43.0),
)
resnest = dict(
config='configs/resnest/faster_rcnn_s50_fpn_syncbn-backbone+head_mstrain-range_1x_coco.py', # noqa
checkpoint='faster_rcnn_s50_fpn_syncbn-backbone+head_mstrain-range_1x_coco_20200926_125502-20289c16.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=42.0),
)
retinanet = dict(
config='configs/retinanet/retinanet_r50_fpn_1x_coco.py',
checkpoint='retinanet_r50_fpn_1x_coco_20200130-c2398f9e.pth',
eval='bbox',
metric=dict(bbox_mAP=36.5),
)
rpn = dict(
config='configs/rpn/rpn_r50_fpn_1x_coco.py',
checkpoint='rpn_r50_fpn_1x_coco_20200218-5525fa2e.pth',
eval='proposal_fast',
metric=dict(AR_1000=58.2),
)
sabl = [
dict(
config='configs/sabl/sabl_retinanet_r50_fpn_1x_coco.py',
checkpoint='sabl_retinanet_r50_fpn_1x_coco-6c54fd4f.pth',
eval='bbox',
metric=dict(bbox_mAP=37.7),
),
dict(
config='configs/sabl/sabl_faster_rcnn_r50_fpn_1x_coco.py',
checkpoint='sabl_faster_rcnn_r50_fpn_1x_coco-e867595b.pth',
eval='bbox',
metric=dict(bbox_mAP=39.9),
),
]
scnet = dict(
config='configs/scnet/scnet_r50_fpn_1x_coco.py',
checkpoint='scnet_r50_fpn_1x_coco-c3f09857.pth',
eval='bbox',
metric=dict(bbox_mAP=43.5),
)
sparse_rcnn = dict(
config='configs/sparse_rcnn/sparse_rcnn_r50_fpn_1x_coco.py',
checkpoint='sparse_rcnn_r50_fpn_1x_coco_20201222_214453-dc79b137.pth',
eval='bbox',
metric=dict(bbox_mAP=37.9),
)
ssd = [
dict(
config='configs/ssd/ssd300_coco.py',
checkpoint='ssd300_coco_20210803_015428-d231a06e.pth',
eval='bbox',
metric=dict(bbox_mAP=25.5),
),
dict(
config='configs/ssd/ssdlite_mobilenetv2_scratch_600e_coco.py',
checkpoint='ssdlite_mobilenetv2_scratch_600e_coco_20210629_110627-974d9307.pth',# noqa
eval='bbox',
metric=dict(bbox_mAP=21.3),
),
]
tridentnet = dict(
config='configs/tridentnet/tridentnet_r50_caffe_1x_coco.py',
checkpoint='tridentnet_r50_caffe_1x_coco_20201230_141838-2ec0b530.pth',
eval='bbox',
metric=dict(bbox_mAP=37.6),
)
vfnet = dict(
config='configs/vfnet/vfnet_r50_fpn_1x_coco.py',
checkpoint='vfnet_r50_fpn_1x_coco_20201027-38db6f58.pth',
eval='bbox',
metric=dict(bbox_mAP=41.6),
)
yolact = dict(
config='configs/yolact/yolact_r50_1x8_coco.py',
checkpoint='yolact_r50_1x8_coco_20200908-f38d58df.pth',
eval=['bbox', 'segm'],
metric=dict(bbox_mAP=31.2, segm_mAP=29.0),
)
yolo = dict(
config='configs/yolo/yolov3_d53_320_273e_coco.py',
checkpoint='yolov3_d53_320_273e_coco-421362b6.pth',
eval='bbox',
metric=dict(bbox_mAP=27.9),
)
yolof = dict(
config='configs/yolof/yolof_r50_c5_8x8_1x_coco.py',
checkpoint='yolof_r50_c5_8x8_1x_coco_20210425_024427-8e864411.pth',
eval='bbox',
metric=dict(bbox_mAP=37.5),
)
centernet = dict(
config='configs/centernet/centernet_resnet18_dcnv2_140e_coco.py',
checkpoint='centernet_resnet18_dcnv2_140e_coco_20210702_155131-c8cd631f.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=29.5),
)
yolox = dict(
config='configs/yolox/yolox_tiny_8x8_300e_coco.py',
checkpoint='yolox_tiny_8x8_300e_coco_20210806_234250-4ff3b67e.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=31.5),
)
# yapf: enable
| 12,707 | 34.3 | 117 | py |
DSLA-DSLA | DSLA-DSLA/tests/test_runtime/async_benchmark.py | # Copyright (c) OpenMMLab. All rights reserved.
import asyncio
import os
import shutil
import urllib
import mmcv
import torch
from mmdet.apis import (async_inference_detector, inference_detector,
init_detector)
from mmdet.utils.contextmanagers import concurrent
from mmdet.utils.profiling import profile_time
async def main():
    """Benchmark between async and synchronous inference interfaces.
    Sample runs for 20 demo images on K80 GPU, model - mask_rcnn_r50_fpn_1x:
    async sync
    7981.79 ms 9660.82 ms
    8074.52 ms 9660.94 ms
    7976.44 ms 9406.83 ms
    Async variant takes about 0.83-0.85 of the time of the synchronous
    interface.
    """
    # Resolve the repository root relative to this file so the config,
    # checkpoint and demo-image paths work from any working directory.
    project_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
    project_dir = os.path.join(project_dir, '..')
    config_file = os.path.join(
        project_dir, 'configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py')
    checkpoint_file = os.path.join(
        project_dir,
        'checkpoints/mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth')
    # Download the checkpoint on first use and cache it under checkpoints/.
    if not os.path.exists(checkpoint_file):
        url = ('https://download.openmmlab.com/mmdetection/v2.0'
               '/mask_rcnn/mask_rcnn_r50_fpn_1x_coco'
               '/mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth')
        print(f'Downloading {url} ...')
        local_filename, _ = urllib.request.urlretrieve(url)
        os.makedirs(os.path.dirname(checkpoint_file), exist_ok=True)
        shutil.move(local_filename, checkpoint_file)
        print(f'Saved as {checkpoint_file}')
    else:
        print(f'Using existing checkpoint {checkpoint_file}')
    device = 'cuda:0'
    model = init_detector(
        config_file, checkpoint=checkpoint_file, device=device)
    # queue is used for concurrent inference of multiple images
    streamqueue = asyncio.Queue()
    # queue size defines concurrency level
    streamqueue_size = 4
    # Pre-allocate one CUDA stream per concurrency slot; each in-flight
    # request checks one out of the queue via ``concurrent`` below.
    for _ in range(streamqueue_size):
        streamqueue.put_nowait(torch.cuda.Stream(device=device))
    # test a single image and show the results
    img = mmcv.imread(os.path.join(project_dir, 'demo/demo.jpg'))
    # warmup
    await async_inference_detector(model, img)
    async def detect(img):
        # Concurrency is bounded by the number of streams in the queue.
        async with concurrent(streamqueue):
            return await async_inference_detector(model, img)
    num_of_images = 20
    # Timed async run: all requests are issued at once and gathered.
    with profile_time('benchmark', 'async'):
        tasks = [
            asyncio.create_task(detect(img)) for _ in range(num_of_images)
        ]
        async_results = await asyncio.gather(*tasks)
    # Timed synchronous baseline on the default CUDA stream.
    with torch.cuda.stream(torch.cuda.default_stream()):
        with profile_time('benchmark', 'sync'):
            sync_results = [
                inference_detector(model, img) for _ in range(num_of_images)
            ]
    # Render one result from each mode for a quick visual sanity check.
    result_dir = os.path.join(project_dir, 'demo')
    model.show_result(
        img,
        async_results[0],
        score_thr=0.5,
        show=False,
        out_file=os.path.join(result_dir, 'result_async.jpg'))
    model.show_result(
        img,
        sync_results[0],
        score_thr=0.5,
        show=False,
        out_file=os.path.join(result_dir, 'result_sync.jpg'))
if __name__ == '__main__':
    asyncio.run(main())
| 3,215 | 30.223301 | 77 | py |
DSLA-DSLA | DSLA-DSLA/tests/test_runtime/test_async.py | # Copyright (c) OpenMMLab. All rights reserved.
"""Tests for async interface."""
import asyncio
import os
import sys
import asynctest
import mmcv
import torch
from mmdet.apis import async_inference_detector, init_detector
if sys.version_info >= (3, 7):
from mmdet.utils.contextmanagers import concurrent
class AsyncTestCase(asynctest.TestCase):
    """Base test case that can run ``async def`` test methods.

    If a test method returns a coroutine, it is executed on the case's
    event loop under a hard timeout so a hung awaitable cannot stall the
    whole suite.
    """
    use_default_loop = False
    forbid_get_event_loop = True
    # Per-test timeout in seconds; overridable via the environment.
    TEST_TIMEOUT = int(os.getenv('ASYNCIO_TEST_TIMEOUT', '30'))
    def _run_test_method(self, method):
        outcome = method()
        if not asyncio.iscoroutine(outcome):
            return
        bounded = asyncio.wait_for(outcome, timeout=self.TEST_TIMEOUT)
        self.loop.run_until_complete(bounded)
class MaskRCNNDetector:
    """Async inference wrapper around an mmdet detector.

    Concurrency is bounded by a queue of pre-allocated CUDA streams:
    each in-flight request checks a stream out via ``concurrent`` and
    returns it when the request completes.
    """
    def __init__(self,
                 model_config,
                 checkpoint=None,
                 streamqueue_size=3,
                 device='cuda:0'):
        # ``streamqueue_size`` caps how many requests may run concurrently.
        self.streamqueue_size = streamqueue_size
        self.device = device
        # build the model and load checkpoint
        # NOTE(review): the ``checkpoint`` argument is accepted but never
        # forwarded -- ``init_detector`` is always called with
        # ``checkpoint=None``, so the model keeps its randomly
        # initialized weights. Confirm this is intentional for the test.
        self.model = init_detector(
            model_config, checkpoint=None, device=self.device)
        self.streamqueue = None
    async def init(self):
        # The stream queue must be created while an event loop is
        # running, hence this separate async initializer.
        self.streamqueue = asyncio.Queue()
        for _ in range(self.streamqueue_size):
            stream = torch.cuda.Stream(device=self.device)
            self.streamqueue.put_nowait(stream)
    if sys.version_info >= (3, 7):
        async def apredict(self, img):
            # Accept either a file path or an already-loaded image array.
            if isinstance(img, str):
                img = mmcv.imread(img)
            async with concurrent(self.streamqueue):
                result = await async_inference_detector(self.model, img)
            return result
class AsyncInferenceTestCase(AsyncTestCase):
    """End-to-end check of the async inference API (requires CUDA)."""
    if sys.version_info >= (3, 7):
        async def test_simple_inference(self):
            if not torch.cuda.is_available():
                import pytest
                pytest.skip('test requires GPU and torch+cuda')
            ori_grad_enabled = torch.is_grad_enabled()
            # NOTE(review): this calls ``dirname`` on ``__name__`` (the
            # module *name*), not ``__file__`` -- verify it resolves to
            # the intended repository root.
            root_dir = os.path.dirname(os.path.dirname(__name__))
            model_config = os.path.join(
                root_dir, 'configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py')
            detector = MaskRCNNDetector(model_config)
            await detector.init()
            img_path = os.path.join(root_dir, 'demo/demo.jpg')
            bboxes, _ = await detector.apredict(img_path)
            self.assertTrue(bboxes)
            # async inference detector will hack grad_enabled,
            # so restore it here to avoid influencing other tests
            torch.set_grad_enabled(ori_grad_enabled)
| 2,608 | 30.059524 | 75 | py |
DSLA-DSLA | DSLA-DSLA/tests/test_runtime/test_config.py | # Copyright (c) OpenMMLab. All rights reserved.
from os.path import dirname, exists, join
from unittest.mock import Mock
import pytest
from mmdet.core import BitmapMasks, PolygonMasks
from mmdet.datasets.builder import DATASETS
from mmdet.datasets.utils import NumClassCheckHook
def _get_config_directory():
    """Locate the repository's ``configs`` directory.

    Returns:
        str: Path of the ``configs`` folder at the repo root.

    Raises:
        Exception: If no ``configs`` directory can be found.
    """
    try:
        # Normal case: this test file lives inside the source checkout,
        # so the repo root is one level above the tests package.
        repo_root = join(dirname(dirname(__file__)), '..')
    except NameError:
        # ``__file__`` may be undefined in interactive (IPython)
        # sessions; fall back to the installed ``mmdet`` package.
        import mmdet
        repo_root = dirname(dirname(mmdet.__file__))
    cfg_dir = join(repo_root, 'configs')
    if exists(cfg_dir):
        return cfg_dir
    raise Exception('Cannot find config path')
def _check_numclasscheckhook(detector, config_mod):
    """Exercise ``NumClassCheckHook`` against a built detector.

    Runs the hook's train/val epoch checks twice each: once with the
    dataset's real ``CLASSES`` attached, and once with ``CLASSES=None``
    (which the hook must tolerate without raising).
    """
    dummy_runner = Mock()
    dummy_runner.model = detector
    def get_dataset_name_classes(dataset):
        # deal with `RepeatDataset`,`ConcatDataset`,`ClassBalancedDataset`..
        if isinstance(dataset, (list, tuple)):
            dataset = dataset[0]
        # Wrapper datasets nest the real one under the ``dataset`` key;
        # unwrap until the innermost dataset config is reached.
        while ('dataset' in dataset):
            dataset = dataset['dataset']
            # ConcatDataset
            if isinstance(dataset, (list, tuple)):
                dataset = dataset[0]
        return dataset['type'], dataset.get('classes', None)
    compatible_check = NumClassCheckHook()
    dataset_name, CLASSES = get_dataset_name_classes(
        config_mod['data']['train'])
    if CLASSES is None:
        # Fall back to the default class names registered for the dataset.
        CLASSES = DATASETS.get(dataset_name).CLASSES
    dummy_runner.data_loader.dataset.CLASSES = CLASSES
    compatible_check.before_train_epoch(dummy_runner)
    # The hook must also handle datasets that expose no CLASSES at all.
    dummy_runner.data_loader.dataset.CLASSES = None
    compatible_check.before_train_epoch(dummy_runner)
    dataset_name, CLASSES = get_dataset_name_classes(config_mod['data']['val'])
    if CLASSES is None:
        CLASSES = DATASETS.get(dataset_name).CLASSES
    dummy_runner.data_loader.dataset.CLASSES = CLASSES
    compatible_check.before_val_epoch(dummy_runner)
    dummy_runner.data_loader.dataset.CLASSES = None
    compatible_check.before_val_epoch(dummy_runner)
def _check_roi_head(config, head):
    """Verify a built RoI head against its config section.

    Checks the bbox extractor/head, the optional mask branch, and
    architecture-specific extras (cascade stages, mask-IoU, grid head).
    """
    # check consistency between head_config and roi_head
    assert config['type'] == head.__class__.__name__
    # check roi_align
    bbox_roi_cfg = config.bbox_roi_extractor
    bbox_roi_extractor = head.bbox_roi_extractor
    _check_roi_extractor(bbox_roi_cfg, bbox_roi_extractor)
    # check bbox head infos
    bbox_cfg = config.bbox_head
    bbox_head = head.bbox_head
    _check_bbox_head(bbox_cfg, bbox_head)
    if head.with_mask:
        # check roi_align
        if config.mask_roi_extractor:
            mask_roi_cfg = config.mask_roi_extractor
            mask_roi_extractor = head.mask_roi_extractor
            # The mask extractor must match the bbox extractor's
            # RoI-align settings, hence the third argument.
            _check_roi_extractor(mask_roi_cfg, mask_roi_extractor,
                                 bbox_roi_extractor)
        # check mask head infos
        mask_head = head.mask_head
        mask_cfg = config.mask_head
        _check_mask_head(mask_cfg, mask_head)
    # check arch specific settings, e.g., cascade/htc
    if config['type'] in ['CascadeRoIHead', 'HybridTaskCascadeRoIHead']:
        assert config.num_stages == len(head.bbox_head)
        assert config.num_stages == len(head.bbox_roi_extractor)
        if head.with_mask:
            assert config.num_stages == len(head.mask_head)
            assert config.num_stages == len(head.mask_roi_extractor)
    elif config['type'] in ['MaskScoringRoIHead']:
        assert (hasattr(head, 'mask_iou_head')
                and head.mask_iou_head is not None)
        mask_iou_cfg = config.mask_iou_head
        mask_iou_head = head.mask_iou_head
        assert (mask_iou_cfg.fc_out_channels ==
                mask_iou_head.fc_mask_iou.in_features)
    elif config['type'] in ['GridRoIHead']:
        grid_roi_cfg = config.grid_roi_extractor
        grid_roi_extractor = head.grid_roi_extractor
        _check_roi_extractor(grid_roi_cfg, grid_roi_extractor,
                             bbox_roi_extractor)
        # NOTE(review): this line *assigns* instead of asserting equality
        # -- it looks like a lost ``assert ... ==``; confirm the intent.
        config.grid_head.grid_points = head.grid_head.grid_points
def _check_roi_extractor(config, roi_extractor, prev_roi_extractor=None):
    """Verify a built RoI extractor against its config.

    Args:
        config: The ``*_roi_extractor`` section of the head config.
        roi_extractor: The constructed extractor module (or ModuleList).
        prev_roi_extractor: Optional earlier extractor whose RoI-align
            settings this one must agree with.
    """
    import torch.nn as nn
    # Separate roi_extractor and prev_roi_extractor checks for flexibility
    if isinstance(roi_extractor, nn.ModuleList):
        # Multi-stage heads keep one extractor per stage; checking the
        # first one is representative since the stages share the config.
        roi_extractor = roi_extractor[0]
    if prev_roi_extractor and isinstance(prev_roi_extractor, nn.ModuleList):
        prev_roi_extractor = prev_roi_extractor[0]
    assert (len(config.featmap_strides) == len(roi_extractor.roi_layers))
    assert (config.out_channels == roi_extractor.out_channels)
    from torch.nn.modules.utils import _pair
    # ``output_size`` may be given as an int; _pair normalizes it.
    assert (_pair(config.roi_layer.output_size) ==
            roi_extractor.roi_layers[0].output_size)
    if 'use_torchvision' in config.roi_layer:
        assert (config.roi_layer.use_torchvision ==
                roi_extractor.roi_layers[0].use_torchvision)
    elif 'aligned' in config.roi_layer:
        assert (
            config.roi_layer.aligned == roi_extractor.roi_layers[0].aligned)
    if prev_roi_extractor:
        # Paired extractors must agree on their RoI-align behaviour.
        assert (roi_extractor.roi_layers[0].aligned ==
                prev_roi_extractor.roi_layers[0].aligned)
        assert (roi_extractor.roi_layers[0].use_torchvision ==
                prev_roi_extractor.roi_layers[0].use_torchvision)
def _check_mask_head(mask_cfg, mask_head):
    """Recursively verify mask head(s) against their config(s)."""
    import torch.nn as nn
    if isinstance(mask_cfg, list):
        # Cascade-style heads: one config per stage, checked pairwise.
        for single_mask_cfg, single_mask_head in zip(mask_cfg, mask_head):
            _check_mask_head(single_mask_cfg, single_mask_head)
    elif isinstance(mask_head, nn.ModuleList):
        # One shared config applied to every stage's head.
        for single_mask_head in mask_head:
            _check_mask_head(mask_cfg, single_mask_head)
    else:
        assert mask_cfg['type'] == mask_head.__class__.__name__
        assert mask_cfg.in_channels == mask_head.in_channels
        class_agnostic = mask_cfg.get('class_agnostic', False)
        # Class-agnostic heads predict a single shared mask channel.
        out_dim = (1 if class_agnostic else mask_cfg.num_classes)
        if hasattr(mask_head, 'conv_logits'):
            # FCN-style mask heads end in a conv prediction layer.
            assert (mask_cfg.conv_out_channels ==
                    mask_head.conv_logits.in_channels)
            assert mask_head.conv_logits.out_channels == out_dim
        else:
            # FC-style heads flatten to ``out_dim * output_area`` logits.
            assert mask_cfg.fc_out_channels == mask_head.fc_logits.in_features
            assert (mask_head.fc_logits.out_features == out_dim *
                    mask_head.output_area)
def _check_bbox_head(bbox_cfg, bbox_head):
    """Recursively verify bbox head(s) against their config(s)."""
    import torch.nn as nn
    if isinstance(bbox_cfg, list):
        # Cascade-style heads: one config per stage, checked pairwise.
        for single_bbox_cfg, single_bbox_head in zip(bbox_cfg, bbox_head):
            _check_bbox_head(single_bbox_cfg, single_bbox_head)
    elif isinstance(bbox_head, nn.ModuleList):
        # One shared config applied to every stage's head.
        for single_bbox_head in bbox_head:
            _check_bbox_head(bbox_cfg, single_bbox_head)
    else:
        assert bbox_cfg['type'] == bbox_head.__class__.__name__
        if bbox_cfg['type'] == 'SABLHead':
            assert bbox_cfg.cls_in_channels == bbox_head.cls_in_channels
            assert bbox_cfg.reg_in_channels == bbox_head.reg_in_channels
            cls_out_channels = bbox_cfg.get('cls_out_channels', 1024)
            assert (cls_out_channels == bbox_head.fc_cls.in_features)
            assert (bbox_cfg.num_classes + 1 == bbox_head.fc_cls.out_features)
        elif bbox_cfg['type'] == 'DIIHead':
            assert bbox_cfg['num_ffn_fcs'] == bbox_head.ffn.num_fcs
            # 3 means FC and LN and Relu
            assert bbox_cfg['num_cls_fcs'] == len(bbox_head.cls_fcs) // 3
            assert bbox_cfg['num_reg_fcs'] == len(bbox_head.reg_fcs) // 3
            assert bbox_cfg['in_channels'] == bbox_head.in_channels
            assert bbox_cfg['in_channels'] == bbox_head.fc_cls.in_features
            assert bbox_cfg['in_channels'] == bbox_head.fc_reg.in_features
            assert bbox_cfg['in_channels'] == bbox_head.attention.embed_dims
            assert bbox_cfg[
                'feedforward_channels'] == bbox_head.ffn.feedforward_channels
        else:
            assert bbox_cfg.in_channels == bbox_head.in_channels
            with_cls = bbox_cfg.get('with_cls', True)
            if with_cls:
                fc_out_channels = bbox_cfg.get('fc_out_channels', 2048)
                assert (fc_out_channels == bbox_head.fc_cls.in_features)
                # Some losses (e.g. softmax variants) declare their own
                # number of classification channels.
                if bbox_head.custom_cls_channels:
                    assert (bbox_head.loss_cls.get_cls_channels(
                        bbox_head.num_classes) == bbox_head.fc_cls.out_features
                            )
                else:
                    assert (bbox_cfg.num_classes +
                            1 == bbox_head.fc_cls.out_features)
            with_reg = bbox_cfg.get('with_reg', True)
            if with_reg:
                # Class-agnostic regression predicts a single box (4
                # values); otherwise one box per class.
                out_dim = (4 if bbox_cfg.reg_class_agnostic else 4 *
                           bbox_cfg.num_classes)
                assert bbox_head.fc_reg.out_features == out_dim
def _check_anchorhead(config, head):
# check consistency between head_config and roi_head
assert config['type'] == head.__class__.__name__
assert config.in_channels == head.in_channels
num_classes = (
config.num_classes -
1 if config.loss_cls.get('use_sigmoid', False) else config.num_classes)
if config['type'] == 'ATSSHead':
assert (config.feat_channels == head.atss_cls.in_channels)
assert (config.feat_channels == head.atss_reg.in_channels)
assert (config.feat_channels == head.atss_centerness.in_channels)
elif config['type'] == 'SABLRetinaHead':
assert (config.feat_channels == head.retina_cls.in_channels)
assert (config.feat_channels == head.retina_bbox_reg.in_channels)
assert (config.feat_channels == head.retina_bbox_cls.in_channels)
else:
assert (config.in_channels == head.conv_cls.in_channels)
assert (config.in_channels == head.conv_reg.in_channels)
assert (head.conv_cls.out_channels == num_classes * head.num_anchors)
assert head.fc_reg.out_channels == 4 * head.num_anchors
# Only tests a representative subset of configurations
# TODO: test pipelines using Albu, current Albu throw None given empty GT
@pytest.mark.parametrize(
    'config_rpath',
    [
        'wider_face/ssd300_wider_face.py',
        'pascal_voc/ssd300_voc0712.py',
        'pascal_voc/ssd512_voc0712.py',
        # 'albu_example/mask_rcnn_r50_fpn_1x.py',
        'foveabox/fovea_align_r50_fpn_gn-head_mstrain_640-800_4x4_2x_coco.py',
        'mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_coco.py',
        'mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain_1x_coco.py',
        'mask_rcnn/mask_rcnn_r50_fpn_fp16_1x_coco.py'
    ])
def test_config_data_pipeline(config_rpath):
    """Test whether the data pipeline is valid and can process corner cases.
    CommandLine:
        xdoctest -m tests/test_runtime/
            test_config.py test_config_build_data_pipeline
    """
    from mmcv import Config
    from mmdet.datasets.pipelines import Compose
    import numpy as np
    config_dpath = _get_config_directory()
    print(f'Found config_dpath = {config_dpath}')
    def dummy_masks(h, w, num_obj=3, mode='bitmap'):
        # Fabricate random GT masks in either bitmap or polygon format.
        assert mode in ('polygon', 'bitmap')
        if mode == 'bitmap':
            masks = np.random.randint(0, 2, (num_obj, h, w), dtype=np.uint8)
            masks = BitmapMasks(masks, h, w)
        else:
            masks = []
            for i in range(num_obj):
                masks.append([])
                masks[-1].append(
                    np.random.uniform(0, min(h - 1, w - 1), (8 + 4 * i, )))
                masks[-1].append(
                    np.random.uniform(0, min(h - 1, w - 1), (10 + 4 * i, )))
            masks = PolygonMasks(masks, h, w)
        return masks
    config_fpath = join(config_dpath, config_rpath)
    cfg = Config.fromfile(config_fpath)
    # remove loading pipeline
    # (the in-memory dummy image below replaces LoadImage/LoadAnnotations)
    loading_pipeline = cfg.train_pipeline.pop(0)
    loading_ann_pipeline = cfg.train_pipeline.pop(0)
    cfg.test_pipeline.pop(0)
    train_pipeline = Compose(cfg.train_pipeline)
    test_pipeline = Compose(cfg.test_pipeline)
    print(f'Building data pipeline, config_fpath = {config_fpath}')
    print(f'Test training data pipeline: \n{train_pipeline!r}')
    img = np.random.randint(0, 255, size=(888, 666, 3), dtype=np.uint8)
    # Mirror what the removed loading steps would have produced.
    if loading_pipeline.get('to_float32', False):
        img = img.astype(np.float32)
    mode = 'bitmap' if loading_ann_pipeline.get('poly2mask',
                                                True) else 'polygon'
    results = dict(
        filename='test_img.png',
        ori_filename='test_img.png',
        img=img,
        img_shape=img.shape,
        ori_shape=img.shape,
        gt_bboxes=np.array([[35.2, 11.7, 39.7, 15.7]], dtype=np.float32),
        gt_labels=np.array([1], dtype=np.int64),
        gt_masks=dummy_masks(img.shape[0], img.shape[1], mode=mode),
    )
    results['img_fields'] = ['img']
    results['bbox_fields'] = ['gt_bboxes']
    results['mask_fields'] = ['gt_masks']
    output_results = train_pipeline(results)
    assert output_results is not None
    print(f'Test testing data pipeline: \n{test_pipeline!r}')
    results = dict(
        filename='test_img.png',
        ori_filename='test_img.png',
        img=img,
        img_shape=img.shape,
        ori_shape=img.shape,
        gt_bboxes=np.array([[35.2, 11.7, 39.7, 15.7]], dtype=np.float32),
        gt_labels=np.array([1], dtype=np.int64),
        gt_masks=dummy_masks(img.shape[0], img.shape[1], mode=mode),
    )
    results['img_fields'] = ['img']
    results['bbox_fields'] = ['gt_bboxes']
    results['mask_fields'] = ['gt_masks']
    output_results = test_pipeline(results)
    assert output_results is not None
    # test empty GT
    # (pipelines must not crash on images with zero annotations)
    print('Test empty GT with training data pipeline: '
          f'\n{train_pipeline!r}')
    results = dict(
        filename='test_img.png',
        ori_filename='test_img.png',
        img=img,
        img_shape=img.shape,
        ori_shape=img.shape,
        gt_bboxes=np.zeros((0, 4), dtype=np.float32),
        gt_labels=np.array([], dtype=np.int64),
        gt_masks=dummy_masks(img.shape[0], img.shape[1], num_obj=0, mode=mode),
    )
    results['img_fields'] = ['img']
    results['bbox_fields'] = ['gt_bboxes']
    results['mask_fields'] = ['gt_masks']
    output_results = train_pipeline(results)
    assert output_results is not None
    print(f'Test empty GT with testing data pipeline: \n{test_pipeline!r}')
    results = dict(
        filename='test_img.png',
        ori_filename='test_img.png',
        img=img,
        img_shape=img.shape,
        ori_shape=img.shape,
        gt_bboxes=np.zeros((0, 4), dtype=np.float32),
        gt_labels=np.array([], dtype=np.int64),
        gt_masks=dummy_masks(img.shape[0], img.shape[1], num_obj=0, mode=mode),
    )
    results['img_fields'] = ['img']
    results['bbox_fields'] = ['gt_bboxes']
    results['mask_fields'] = ['gt_masks']
    output_results = test_pipeline(results)
    assert output_results is not None
| 15,152 | 39.733871 | 79 | py |
DSLA-DSLA | DSLA-DSLA/tests/test_runtime/test_eval_hook.py | # Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import tempfile
import unittest.mock as mock
from collections import OrderedDict
from unittest.mock import MagicMock, patch
import pytest
import torch
import torch.nn as nn
from mmcv.runner import EpochBasedRunner, build_optimizer
from mmcv.utils import get_logger
from torch.utils.data import DataLoader, Dataset
from mmdet.core import DistEvalHook, EvalHook
class ExampleDataset(Dataset):
    """Minimal single-item dataset stub used to drive the eval-hook tests.

    ``evaluate`` is replaced by an autospecced mock so tests can observe
    how the hook invokes it; ``eval_result`` holds the per-epoch scores
    that :class:`EvalDataset` replays.
    """
    def __init__(self):
        self.index = 0
        self.eval_result = [0.1, 0.4, 0.3, 0.7, 0.2, 0.05, 0.4, 0.6]
    def __getitem__(self, idx):
        # Every sample is the same single-element tensor batch.
        return {'imgs': torch.tensor([1])}
    def __len__(self):
        # The dataset always exposes exactly one sample.
        return 1
    @mock.create_autospec
    def evaluate(self, results, logger=None):
        pass
class EvalDataset(ExampleDataset):
    """Dataset whose ``evaluate`` replays scripted mAP values in order."""
    def evaluate(self, results, logger=None):
        # Serve the next scripted score, then advance the cursor so each
        # successive evaluation sees a different mAP.
        score = self.eval_result[self.index]
        output = OrderedDict(mAP=score, index=self.index, score=score)
        self.index += 1
        return output
class ExampleModel(nn.Module):
    """Trivial model whose training step reports constant loss/accuracy."""
    def __init__(self):
        super().__init__()
        self.conv = nn.Linear(1, 1)
        self.test_cfg = None
    def forward(self, imgs, rescale=False, return_loss=False):
        # Pass-through: inference simply echoes the input batch.
        return imgs
    def train_step(self, data_batch, optimizer, **kwargs):
        # Constant outputs keep the runner's bookkeeping deterministic.
        return {
            'loss': 0.5,
            'log_vars': {'accuracy': 0.98},
            'num_samples': 1,
        }
@pytest.mark.skipif(
    not torch.cuda.is_available(), reason='requires CUDA support')
@patch('mmdet.apis.single_gpu_test', MagicMock)
@patch('mmdet.apis.multi_gpu_test', MagicMock)
@pytest.mark.parametrize('EvalHookCls', (EvalHook, DistEvalHook))
def test_eval_hook(EvalHookCls):
    """End-to-end checks of (Dist)EvalHook argument validation and the
    ``save_best``/``rule`` best-checkpoint tracking across epochs."""
    with pytest.raises(TypeError):
        # dataloader must be a pytorch DataLoader
        test_dataset = ExampleDataset()
        # NOTE(review): ``num_worker`` is a misspelling of
        # ``num_workers`` -- the DataLoader call itself raises TypeError
        # before EvalHookCls does; confirm which failure is intended.
        data_loader = [
            DataLoader(
                test_dataset,
                batch_size=1,
                sampler=None,
                num_worker=0,
                shuffle=False)
        ]
        EvalHookCls(data_loader)
    with pytest.raises(KeyError):
        # rule must be in keys of rule_map
        test_dataset = ExampleDataset()
        data_loader = DataLoader(
            test_dataset,
            batch_size=1,
            sampler=None,
            num_workers=0,
            shuffle=False)
        EvalHookCls(data_loader, save_best='auto', rule='unsupport')
    with pytest.raises(ValueError):
        # key_indicator must be valid when rule_map is None
        test_dataset = ExampleDataset()
        data_loader = DataLoader(
            test_dataset,
            batch_size=1,
            sampler=None,
            num_workers=0,
            shuffle=False)
        EvalHookCls(data_loader, save_best='unsupport')
    optimizer_cfg = dict(
        type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
    test_dataset = ExampleDataset()
    loader = DataLoader(test_dataset, batch_size=1)
    model = ExampleModel()
    optimizer = build_optimizer(model, optimizer_cfg)
    # Case 1: save_best disabled -- no best-checkpoint metadata recorded.
    data_loader = DataLoader(test_dataset, batch_size=1)
    eval_hook = EvalHookCls(data_loader, save_best=None)
    with tempfile.TemporaryDirectory() as tmpdir:
        logger = get_logger('test_eval')
        runner = EpochBasedRunner(
            model=model,
            batch_processor=None,
            optimizer=optimizer,
            work_dir=tmpdir,
            logger=logger)
        runner.register_hook(eval_hook)
        runner.run([loader], [('train', 1)], 1)
        assert runner.meta is None or 'best_score' not in runner.meta[
            'hook_msgs']
        assert runner.meta is None or 'best_ckpt' not in runner.meta[
            'hook_msgs']
    # when `save_best` is set to 'auto', first metric will be used.
    # EvalDataset's scripted scores peak at 0.7 in epoch 4.
    loader = DataLoader(EvalDataset(), batch_size=1)
    model = ExampleModel()
    data_loader = DataLoader(EvalDataset(), batch_size=1)
    eval_hook = EvalHookCls(data_loader, interval=1, save_best='auto')
    with tempfile.TemporaryDirectory() as tmpdir:
        logger = get_logger('test_eval')
        runner = EpochBasedRunner(
            model=model,
            batch_processor=None,
            optimizer=optimizer,
            work_dir=tmpdir,
            logger=logger)
        runner.register_checkpoint_hook(dict(interval=1))
        runner.register_hook(eval_hook)
        runner.run([loader], [('train', 1)], 8)
        real_path = osp.join(tmpdir, 'best_mAP_epoch_4.pth')
        assert runner.meta['hook_msgs']['best_ckpt'] == osp.realpath(real_path)
        assert runner.meta['hook_msgs']['best_score'] == 0.7
    # Case 3: explicitly tracking the 'mAP' key behaves like 'auto' here.
    loader = DataLoader(EvalDataset(), batch_size=1)
    model = ExampleModel()
    data_loader = DataLoader(EvalDataset(), batch_size=1)
    eval_hook = EvalHookCls(data_loader, interval=1, save_best='mAP')
    with tempfile.TemporaryDirectory() as tmpdir:
        logger = get_logger('test_eval')
        runner = EpochBasedRunner(
            model=model,
            batch_processor=None,
            optimizer=optimizer,
            work_dir=tmpdir,
            logger=logger)
        runner.register_checkpoint_hook(dict(interval=1))
        runner.register_hook(eval_hook)
        runner.run([loader], [('train', 1)], 8)
        real_path = osp.join(tmpdir, 'best_mAP_epoch_4.pth')
        assert runner.meta['hook_msgs']['best_ckpt'] == osp.realpath(real_path)
        assert runner.meta['hook_msgs']['best_score'] == 0.7
    # Case 4: an explicit 'greater' rule on the 'score' key.
    data_loader = DataLoader(EvalDataset(), batch_size=1)
    eval_hook = EvalHookCls(
        data_loader, interval=1, save_best='score', rule='greater')
    with tempfile.TemporaryDirectory() as tmpdir:
        logger = get_logger('test_eval')
        runner = EpochBasedRunner(
            model=model,
            batch_processor=None,
            optimizer=optimizer,
            work_dir=tmpdir,
            logger=logger)
        runner.register_checkpoint_hook(dict(interval=1))
        runner.register_hook(eval_hook)
        runner.run([loader], [('train', 1)], 8)
        real_path = osp.join(tmpdir, 'best_score_epoch_4.pth')
        assert runner.meta['hook_msgs']['best_ckpt'] == osp.realpath(real_path)
        assert runner.meta['hook_msgs']['best_score'] == 0.7
    # Case 5: 'less' rule -- the minimum score (0.05) in epoch 6 wins.
    data_loader = DataLoader(EvalDataset(), batch_size=1)
    eval_hook = EvalHookCls(data_loader, save_best='mAP', rule='less')
    with tempfile.TemporaryDirectory() as tmpdir:
        logger = get_logger('test_eval')
        runner = EpochBasedRunner(
            model=model,
            batch_processor=None,
            optimizer=optimizer,
            work_dir=tmpdir,
            logger=logger)
        runner.register_checkpoint_hook(dict(interval=1))
        runner.register_hook(eval_hook)
        runner.run([loader], [('train', 1)], 8)
        real_path = osp.join(tmpdir, 'best_mAP_epoch_6.pth')
        assert runner.meta['hook_msgs']['best_ckpt'] == osp.realpath(real_path)
        assert runner.meta['hook_msgs']['best_score'] == 0.05
    # Case 6: train 2 epochs, then resume and verify the best checkpoint
    # is re-tracked correctly across the resumed run.
    data_loader = DataLoader(EvalDataset(), batch_size=1)
    eval_hook = EvalHookCls(data_loader, save_best='mAP')
    with tempfile.TemporaryDirectory() as tmpdir:
        logger = get_logger('test_eval')
        runner = EpochBasedRunner(
            model=model,
            batch_processor=None,
            optimizer=optimizer,
            work_dir=tmpdir,
            logger=logger)
        runner.register_checkpoint_hook(dict(interval=1))
        runner.register_hook(eval_hook)
        runner.run([loader], [('train', 1)], 2)
        real_path = osp.join(tmpdir, 'best_mAP_epoch_2.pth')
        assert runner.meta['hook_msgs']['best_ckpt'] == osp.realpath(real_path)
        assert runner.meta['hook_msgs']['best_score'] == 0.4
        resume_from = osp.join(tmpdir, 'latest.pth')
        loader = DataLoader(ExampleDataset(), batch_size=1)
        eval_hook = EvalHookCls(data_loader, save_best='mAP')
        runner = EpochBasedRunner(
            model=model,
            batch_processor=None,
            optimizer=optimizer,
            work_dir=tmpdir,
            logger=logger)
        runner.register_checkpoint_hook(dict(interval=1))
        runner.register_hook(eval_hook)
        runner.resume(resume_from)
        runner.run([loader], [('train', 1)], 8)
        real_path = osp.join(tmpdir, 'best_mAP_epoch_4.pth')
        assert runner.meta['hook_msgs']['best_ckpt'] == osp.realpath(real_path)
        assert runner.meta['hook_msgs']['best_score'] == 0.7
| 8,590 | 32.956522 | 79 | py |
DSLA-DSLA | DSLA-DSLA/tests/test_runtime/test_fp16.py | # Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import pytest
import torch
import torch.nn as nn
from mmcv.runner import auto_fp16, force_fp32
from mmcv.runner.fp16_utils import cast_tensor_type
def test_cast_tensor_type():
    """Check ``cast_tensor_type`` across tensors, leaves and containers."""
    # A float tensor is converted to the requested dtype.
    tensor_in = torch.FloatTensor([5.])
    tensor_out = cast_tensor_type(tensor_in, torch.float32, torch.int32)
    assert isinstance(tensor_out, torch.Tensor)
    assert tensor_out.dtype == torch.int32
    # Non-tensor leaves pass through with their type preserved.
    str_out = cast_tensor_type('tensor', str, str)
    assert isinstance(str_out, str)
    arr_out = cast_tensor_type(np.array([5.]), np.ndarray, np.ndarray)
    assert isinstance(arr_out, np.ndarray)
    # Dicts are converted value-wise.
    dict_in = dict(
        tensor_a=torch.FloatTensor([1.]), tensor_b=torch.FloatTensor([2.]))
    dict_out = cast_tensor_type(dict_in, torch.float32, torch.int32)
    assert isinstance(dict_out, dict)
    assert dict_out['tensor_a'].dtype == torch.int32
    assert dict_out['tensor_b'].dtype == torch.int32
    # Lists are converted element-wise.
    list_in = [torch.FloatTensor([1.]), torch.FloatTensor([2.])]
    list_out = cast_tensor_type(list_in, torch.float32, torch.int32)
    assert isinstance(list_out, list)
    assert list_out[0].dtype == torch.int32
    assert list_out[1].dtype == torch.int32
    # Plain scalars are returned untouched.
    scalar_out = cast_tensor_type(5, None, None)
    assert isinstance(scalar_out, int)
def test_auto_fp16():
    """Check the ``auto_fp16`` decorator's input-casting behaviour.

    Casting only takes effect once ``fp16_enabled`` is set on the
    module; ``apply_to`` restricts which arguments are cast and
    ``out_fp32`` converts the outputs back to fp32.
    """
    with pytest.raises(TypeError):
        # ExampleObject is not a subclass of nn.Module
        class ExampleObject:
            @auto_fp16()
            def __call__(self, x):
                return x
        model = ExampleObject()
        input_x = torch.ones(1, dtype=torch.float32)
        model(input_x)
    # apply to all input args
    class ExampleModule(nn.Module):
        @auto_fp16()
        def forward(self, x, y):
            return x, y
    model = ExampleModule()
    input_x = torch.ones(1, dtype=torch.float32)
    input_y = torch.ones(1, dtype=torch.float32)
    # Without fp16_enabled, inputs pass through unchanged.
    output_x, output_y = model(input_x, input_y)
    assert output_x.dtype == torch.float32
    assert output_y.dtype == torch.float32
    model.fp16_enabled = True
    output_x, output_y = model(input_x, input_y)
    assert output_x.dtype == torch.half
    assert output_y.dtype == torch.half
    if torch.cuda.is_available():
        model.cuda()
        output_x, output_y = model(input_x.cuda(), input_y.cuda())
        assert output_x.dtype == torch.half
        assert output_y.dtype == torch.half
    # apply to specified input args
    class ExampleModule(nn.Module):
        @auto_fp16(apply_to=('x', ))
        def forward(self, x, y):
            return x, y
    model = ExampleModule()
    input_x = torch.ones(1, dtype=torch.float32)
    input_y = torch.ones(1, dtype=torch.float32)
    output_x, output_y = model(input_x, input_y)
    assert output_x.dtype == torch.float32
    assert output_y.dtype == torch.float32
    model.fp16_enabled = True
    # Only 'x' is listed in apply_to, so 'y' keeps its dtype.
    output_x, output_y = model(input_x, input_y)
    assert output_x.dtype == torch.half
    assert output_y.dtype == torch.float32
    if torch.cuda.is_available():
        model.cuda()
        output_x, output_y = model(input_x.cuda(), input_y.cuda())
        assert output_x.dtype == torch.half
        assert output_y.dtype == torch.float32
    # apply to optional input args
    class ExampleModule(nn.Module):
        @auto_fp16(apply_to=('x', 'y'))
        def forward(self, x, y=None, z=None):
            return x, y, z
    model = ExampleModule()
    input_x = torch.ones(1, dtype=torch.float32)
    input_y = torch.ones(1, dtype=torch.float32)
    input_z = torch.ones(1, dtype=torch.float32)
    output_x, output_y, output_z = model(input_x, y=input_y, z=input_z)
    assert output_x.dtype == torch.float32
    assert output_y.dtype == torch.float32
    assert output_z.dtype == torch.float32
    model.fp16_enabled = True
    # Keyword arguments listed in apply_to are cast; 'z' is not listed.
    output_x, output_y, output_z = model(input_x, y=input_y, z=input_z)
    assert output_x.dtype == torch.half
    assert output_y.dtype == torch.half
    assert output_z.dtype == torch.float32
    if torch.cuda.is_available():
        model.cuda()
        output_x, output_y, output_z = model(
            input_x.cuda(), y=input_y.cuda(), z=input_z.cuda())
        assert output_x.dtype == torch.half
        assert output_y.dtype == torch.half
        assert output_z.dtype == torch.float32
    # out_fp32=True
    class ExampleModule(nn.Module):
        @auto_fp16(apply_to=('x', 'y'), out_fp32=True)
        def forward(self, x, y=None, z=None):
            return x, y, z
    model = ExampleModule()
    input_x = torch.ones(1, dtype=torch.half)
    input_y = torch.ones(1, dtype=torch.float32)
    input_z = torch.ones(1, dtype=torch.float32)
    output_x, output_y, output_z = model(input_x, y=input_y, z=input_z)
    assert output_x.dtype == torch.half
    assert output_y.dtype == torch.float32
    assert output_z.dtype == torch.float32
    model.fp16_enabled = True
    # With out_fp32=True the cast args are converted back on output.
    output_x, output_y, output_z = model(input_x, y=input_y, z=input_z)
    assert output_x.dtype == torch.float32
    assert output_y.dtype == torch.float32
    assert output_z.dtype == torch.float32
    if torch.cuda.is_available():
        model.cuda()
        output_x, output_y, output_z = model(
            input_x.cuda(), y=input_y.cuda(), z=input_z.cuda())
        assert output_x.dtype == torch.float32
        assert output_y.dtype == torch.float32
        assert output_z.dtype == torch.float32
def test_force_fp32():
    """Check the ``force_fp32`` decorator's input-casting behaviour.

    The mirror image of ``auto_fp16``: once ``fp16_enabled`` is set,
    half-precision inputs listed in ``apply_to`` are promoted to fp32,
    and ``out_fp16`` converts the outputs back to half precision.
    """
    with pytest.raises(TypeError):
        # ExampleObject is not a subclass of nn.Module
        class ExampleObject:
            @force_fp32()
            def __call__(self, x):
                return x
        model = ExampleObject()
        input_x = torch.ones(1, dtype=torch.float32)
        model(input_x)
    # apply to all input args
    class ExampleModule(nn.Module):
        @force_fp32()
        def forward(self, x, y):
            return x, y
    model = ExampleModule()
    input_x = torch.ones(1, dtype=torch.half)
    input_y = torch.ones(1, dtype=torch.half)
    # Without fp16_enabled, half inputs pass through unchanged.
    output_x, output_y = model(input_x, input_y)
    assert output_x.dtype == torch.half
    assert output_y.dtype == torch.half
    model.fp16_enabled = True
    output_x, output_y = model(input_x, input_y)
    assert output_x.dtype == torch.float32
    assert output_y.dtype == torch.float32
    if torch.cuda.is_available():
        model.cuda()
        output_x, output_y = model(input_x.cuda(), input_y.cuda())
        assert output_x.dtype == torch.float32
        assert output_y.dtype == torch.float32
    # apply to specified input args
    class ExampleModule(nn.Module):
        @force_fp32(apply_to=('x', ))
        def forward(self, x, y):
            return x, y
    model = ExampleModule()
    input_x = torch.ones(1, dtype=torch.half)
    input_y = torch.ones(1, dtype=torch.half)
    output_x, output_y = model(input_x, input_y)
    assert output_x.dtype == torch.half
    assert output_y.dtype == torch.half
    model.fp16_enabled = True
    # Only 'x' is listed in apply_to, so 'y' keeps its half dtype.
    output_x, output_y = model(input_x, input_y)
    assert output_x.dtype == torch.float32
    assert output_y.dtype == torch.half
    if torch.cuda.is_available():
        model.cuda()
        output_x, output_y = model(input_x.cuda(), input_y.cuda())
        assert output_x.dtype == torch.float32
        assert output_y.dtype == torch.half
    # apply to optional input args
    class ExampleModule(nn.Module):
        @force_fp32(apply_to=('x', 'y'))
        def forward(self, x, y=None, z=None):
            return x, y, z
    model = ExampleModule()
    input_x = torch.ones(1, dtype=torch.half)
    input_y = torch.ones(1, dtype=torch.half)
    input_z = torch.ones(1, dtype=torch.half)
    output_x, output_y, output_z = model(input_x, y=input_y, z=input_z)
    assert output_x.dtype == torch.half
    assert output_y.dtype == torch.half
    assert output_z.dtype == torch.half
    model.fp16_enabled = True
    # Keyword arguments listed in apply_to are promoted; 'z' is not.
    output_x, output_y, output_z = model(input_x, y=input_y, z=input_z)
    assert output_x.dtype == torch.float32
    assert output_y.dtype == torch.float32
    assert output_z.dtype == torch.half
    if torch.cuda.is_available():
        model.cuda()
        output_x, output_y, output_z = model(
            input_x.cuda(), y=input_y.cuda(), z=input_z.cuda())
        assert output_x.dtype == torch.float32
        assert output_y.dtype == torch.float32
        assert output_z.dtype == torch.half
    # out_fp16=True
    class ExampleModule(nn.Module):
        @force_fp32(apply_to=('x', 'y'), out_fp16=True)
        def forward(self, x, y=None, z=None):
            return x, y, z
    model = ExampleModule()
    input_x = torch.ones(1, dtype=torch.float32)
    input_y = torch.ones(1, dtype=torch.half)
    input_z = torch.ones(1, dtype=torch.half)
    output_x, output_y, output_z = model(input_x, y=input_y, z=input_z)
    assert output_x.dtype == torch.float32
    assert output_y.dtype == torch.half
    assert output_z.dtype == torch.half
    model.fp16_enabled = True
    # With out_fp16=True all outputs are converted to half precision.
    output_x, output_y, output_z = model(input_x, y=input_y, z=input_z)
    assert output_x.dtype == torch.half
    assert output_y.dtype == torch.half
    assert output_z.dtype == torch.half
    if torch.cuda.is_available():
        model.cuda()
        output_x, output_y, output_z = model(
            input_x.cuda(), y=input_y.cuda(), z=input_z.cuda())
        assert output_x.dtype == torch.half
        assert output_y.dtype == torch.half
        assert output_z.dtype == torch.half
| 9,746 | 31.274834 | 75 | py |
DSLA-DSLA | DSLA-DSLA/tests/test_models/test_loss.py | # Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmcv.utils import digit_version
from mmdet.models.losses import (BalancedL1Loss, CrossEntropyLoss, DiceLoss,
DistributionFocalLoss, FocalLoss,
GaussianFocalLoss,
KnowledgeDistillationKLDivLoss, L1Loss,
MSELoss, QualityFocalLoss, SeesawLoss,
SmoothL1Loss, VarifocalLoss)
from mmdet.models.losses.ghm_loss import GHMC, GHMR
from mmdet.models.losses.iou_loss import (BoundedIoULoss, CIoULoss, DIoULoss,
GIoULoss, IoULoss)
@pytest.mark.parametrize(
    'loss_class', [IoULoss, BoundedIoULoss, GIoULoss, DIoULoss, CIoULoss])
def test_iou_type_loss_zeros_weight(loss_class):
    """An all-zero weight vector must zero out every IoU-style loss."""
    num_boxes = 10
    pred = torch.rand((num_boxes, 4))
    target = torch.rand((num_boxes, 4))
    weight = torch.zeros(num_boxes)
    assert loss_class()(pred, target, weight) == 0.
@pytest.mark.parametrize('loss_class', [
    BalancedL1Loss, BoundedIoULoss, CIoULoss, CrossEntropyLoss, DIoULoss,
    FocalLoss, DistributionFocalLoss, MSELoss, SeesawLoss, GaussianFocalLoss,
    GIoULoss, IoULoss, L1Loss, QualityFocalLoss, VarifocalLoss, GHMR, GHMC,
    SmoothL1Loss, KnowledgeDistillationKLDivLoss, DiceLoss
])
def test_loss_with_reduction_override(loss_class):
    """An invalid ``reduction_override`` must raise ``AssertionError``."""
    pred = torch.rand((10, 4))
    # Fixed: a stray trailing comma previously turned `target` into a 1-tuple.
    target = torch.rand((10, 4))
    weight = None
    with pytest.raises(AssertionError):
        # only reduction_override from [None, 'none', 'mean', 'sum']
        # is allowed; any other value must be rejected
        reduction_override = True
        loss_class()(
            pred, target, weight, reduction_override=reduction_override)
@pytest.mark.parametrize('loss_class', [
    IoULoss, BoundedIoULoss, GIoULoss, DIoULoss, CIoULoss, MSELoss, L1Loss,
    SmoothL1Loss, BalancedL1Loss
])
@pytest.mark.parametrize('input_shape', [(10, 4), (0, 4)])
def test_regression_losses(loss_class, input_shape):
    """Forward regression losses with/without weight, reduction overrides and
    ``avg_factor``, for both non-empty and empty batches."""
    pred = torch.rand(input_shape)
    target = torch.rand(input_shape)
    weight = torch.rand(input_shape)
    # Test loss forward
    loss = loss_class()(pred, target)
    assert isinstance(loss, torch.Tensor)
    # Test loss forward with weight
    loss = loss_class()(pred, target, weight)
    assert isinstance(loss, torch.Tensor)
    # Test loss forward with reduction_override
    loss = loss_class()(pred, target, reduction_override='mean')
    assert isinstance(loss, torch.Tensor)
    # Test loss forward with avg_factor
    loss = loss_class()(pred, target, avg_factor=10)
    assert isinstance(loss, torch.Tensor)
    with pytest.raises(ValueError):
        # loss can evaluate with avg_factor only if
        # reduction is None, 'none' or 'mean'.
        reduction_override = 'sum'
        loss_class()(
            pred, target, avg_factor=10, reduction_override=reduction_override)
    # Test loss forward with avg_factor and reduction
    for reduction_override in [None, 'none', 'mean']:
        # Fixed: the result was previously discarded, so the assert below
        # re-checked a stale `loss` from before the loop.
        loss = loss_class()(
            pred, target, avg_factor=10, reduction_override=reduction_override)
        assert isinstance(loss, torch.Tensor)
@pytest.mark.parametrize('loss_class', [FocalLoss, CrossEntropyLoss])
@pytest.mark.parametrize('input_shape', [(10, 5), (0, 5)])
def test_classification_losses(loss_class, input_shape):
    """Forward classification losses with reduction overrides and
    ``avg_factor``, for both non-empty and empty batches."""
    if input_shape[0] == 0 and digit_version(
            torch.__version__) < digit_version('1.5.0'):
        # Fixed: the implicit string concatenation was missing a space
        # ("...emptytensor.").
        pytest.skip(
            f'CELoss in PyTorch {torch.__version__} does not support empty '
            f'tensor.')
    pred = torch.rand(input_shape)
    target = torch.randint(0, 5, (input_shape[0], ))
    # Test loss forward
    loss = loss_class()(pred, target)
    assert isinstance(loss, torch.Tensor)
    # Test loss forward with reduction_override
    loss = loss_class()(pred, target, reduction_override='mean')
    assert isinstance(loss, torch.Tensor)
    # Test loss forward with avg_factor
    loss = loss_class()(pred, target, avg_factor=10)
    assert isinstance(loss, torch.Tensor)
    with pytest.raises(ValueError):
        # loss can evaluate with avg_factor only if
        # reduction is None, 'none' or 'mean'.
        reduction_override = 'sum'
        loss_class()(
            pred, target, avg_factor=10, reduction_override=reduction_override)
    # Test loss forward with avg_factor and reduction
    for reduction_override in [None, 'none', 'mean']:
        # Fixed: the result was previously discarded, so the assert below
        # re-checked a stale `loss` from before the loop.
        loss = loss_class()(
            pred, target, avg_factor=10, reduction_override=reduction_override)
        assert isinstance(loss, torch.Tensor)
@pytest.mark.parametrize('loss_class', [GHMR])
@pytest.mark.parametrize('input_shape', [(10, 4), (0, 4)])
def test_GHMR_loss(loss_class, input_shape):
    """GHMR forward should return a tensor for non-empty and empty input."""
    pred = torch.rand(input_shape)
    target = torch.rand(input_shape)
    weight = torch.rand(input_shape)
    # GHMR requires an explicit weight tensor on every call.
    result = loss_class()(pred, target, weight)
    assert isinstance(result, torch.Tensor)
@pytest.mark.parametrize('use_sigmoid', [True, False])
def test_loss_with_ignore_index(use_sigmoid):
    """Cross-entropy must exclude ignored targets, whether the ignore index
    comes from the constructor (default 255) or from the forward call."""
    criterion = CrossEntropyLoss(
        use_sigmoid=use_sigmoid, use_mask=False, ignore_index=255)
    scores = torch.rand((10, 5))
    labels = torch.randint(0, 5, (10, ))
    drop_positions = torch.randint(0, 10, (2, ), dtype=torch.long)
    labels[drop_positions] = 255
    # Loss with the constructor-level (default) ignore index.
    loss_with_ignore = criterion(scores, labels, reduction_override='sum')
    assert isinstance(loss_with_ignore, torch.Tensor)
    # Loss with an ignore index passed at forward time.
    labels[drop_positions] = 250
    loss_with_forward_ignore = criterion(
        scores, labels, ignore_index=250, reduction_override='sum')
    assert isinstance(loss_with_forward_ignore, torch.Tensor)
    # Both must equal the loss computed on the kept entries only.
    keep = (labels != 250)
    scores = scores[keep]
    labels = labels[keep]
    reference = criterion(scores, labels, reduction_override='sum')
    assert torch.allclose(reference, loss_with_ignore)
    assert torch.allclose(reference, loss_with_forward_ignore)
def test_dice_loss():
    """Forward DiceLoss with/without weight, reduction overrides and
    ``avg_factor``, plus its argument-validation failure modes."""
    loss_class = DiceLoss
    pred = torch.rand((10, 4, 4))
    target = torch.rand((10, 4, 4))
    weight = torch.rand((10))
    # Test loss forward
    loss = loss_class()(pred, target)
    assert isinstance(loss, torch.Tensor)
    # Test loss forward with weight
    loss = loss_class()(pred, target, weight)
    assert isinstance(loss, torch.Tensor)
    # Test loss forward with reduction_override
    loss = loss_class()(pred, target, reduction_override='mean')
    assert isinstance(loss, torch.Tensor)
    # Test loss forward with avg_factor
    loss = loss_class()(pred, target, avg_factor=10)
    assert isinstance(loss, torch.Tensor)
    with pytest.raises(ValueError):
        # loss can evaluate with avg_factor only if
        # reduction is None, 'none' or 'mean'.
        reduction_override = 'sum'
        loss_class()(
            pred, target, avg_factor=10, reduction_override=reduction_override)
    # Test loss forward with avg_factor and reduction
    for reduction_override in [None, 'none', 'mean']:
        # Fixed: the result was previously discarded, so the assert below
        # re-checked a stale `loss` from before the loop.
        loss = loss_class()(
            pred, target, avg_factor=10, reduction_override=reduction_override)
        assert isinstance(loss, torch.Tensor)
    # Test loss forward with has_acted=False and use_sigmoid=False
    with pytest.raises(NotImplementedError):
        loss_class(use_sigmoid=False, activate=True)(pred, target)
    # Test loss forward with weight.ndim != loss.ndim
    with pytest.raises(AssertionError):
        weight = torch.rand((2, 8))
        loss_class()(pred, target, weight)
    # Test loss forward with len(weight) != len(pred)
    with pytest.raises(AssertionError):
        weight = torch.rand((8))
        loss_class()(pred, target, weight)
| 7,860 | 35.393519 | 79 | py |
DSLA-DSLA | DSLA-DSLA/tests/test_models/test_forward.py | # Copyright (c) OpenMMLab. All rights reserved.
"""pytest tests/test_forward.py."""
import copy
from os.path import dirname, exists, join
import numpy as np
import pytest
import torch
def _get_config_directory():
    """Locate the repository's ``configs`` directory.

    Raises:
        Exception: if the directory cannot be found.
    """
    try:
        # Normal case: this file lives inside the mmdetection checkout.
        repo_root = dirname(dirname(dirname(__file__)))
    except NameError:
        # __file__ is undefined (e.g. interactive IPython development).
        import mmdet
        repo_root = dirname(dirname(mmdet.__file__))
    candidate = join(repo_root, 'configs')
    if not exists(candidate):
        raise Exception('Cannot find config path')
    return candidate
def _get_config_module(fname):
    """Load config file ``fname`` (relative to the configs dir) as a module."""
    from mmcv import Config
    return Config.fromfile(join(_get_config_directory(), fname))
def _get_detector_cfg(fname):
    """Grab configs necessary to create a detector.

    These are deep copied to allow for safe modification of parameters without
    influencing other tests.
    """
    return copy.deepcopy(_get_config_module(fname).model)
def _replace_r50_with_r18(model):
"""Replace ResNet50 with ResNet18 in config."""
model = copy.deepcopy(model)
if model.backbone.type == 'ResNet':
model.backbone.depth = 18
model.backbone.base_channels = 2
model.neck.in_channels = [2, 4, 8, 16]
return model
def test_sparse_rcnn_forward():
    """Smoke-test Sparse R-CNN forward passes.

    Covers train forward with non-empty and empty ground truth, the dummy
    forward used for FLOPs counting, single-image inference, and the RoI head
    fed with an empty proposal set.
    """
    config_path = 'sparse_rcnn/sparse_rcnn_r50_fpn_1x_coco.py'
    model = _get_detector_cfg(config_path)
    model = _replace_r50_with_r18(model)
    model.backbone.init_cfg = None
    from mmdet.models import build_detector
    detector = build_detector(model)
    detector.init_weights()
    input_shape = (1, 3, 100, 100)
    mm_inputs = _demo_mm_inputs(input_shape, num_items=[5])
    imgs = mm_inputs.pop('imgs')
    img_metas = mm_inputs.pop('img_metas')
    # Test forward train with non-empty truth batch
    detector.train()
    gt_bboxes = mm_inputs['gt_bboxes']
    gt_bboxes = [item for item in gt_bboxes]
    gt_labels = mm_inputs['gt_labels']
    gt_labels = [item for item in gt_labels]
    losses = detector.forward(
        imgs,
        img_metas,
        gt_bboxes=gt_bboxes,
        gt_labels=gt_labels,
        return_loss=True)
    assert isinstance(losses, dict)
    loss, _ = detector._parse_losses(losses)
    assert float(loss.item()) > 0
    # Dummy forward used by tools/analysis_tools/get_flops.py
    detector.forward_dummy(imgs)
    # Test forward train with an empty truth batch
    mm_inputs = _demo_mm_inputs(input_shape, num_items=[0])
    imgs = mm_inputs.pop('imgs')
    img_metas = mm_inputs.pop('img_metas')
    gt_bboxes = mm_inputs['gt_bboxes']
    gt_bboxes = [item for item in gt_bboxes]
    gt_labels = mm_inputs['gt_labels']
    gt_labels = [item for item in gt_labels]
    losses = detector.forward(
        imgs,
        img_metas,
        gt_bboxes=gt_bboxes,
        gt_labels=gt_labels,
        return_loss=True)
    assert isinstance(losses, dict)
    loss, _ = detector._parse_losses(losses)
    assert float(loss.item()) > 0
    # Test forward test (inference), one image at a time
    detector.eval()
    with torch.no_grad():
        img_list = [g[None, :] for g in imgs]
        batch_results = []
        for one_img, one_meta in zip(img_list, img_metas):
            result = detector.forward([one_img], [[one_meta]],
                                      rescale=True,
                                      return_loss=False)
            batch_results.append(result)
    # test empty proposal in roi_head
    with torch.no_grad():
        # test no proposal in the whole batch
        detector.roi_head.simple_test([imgs[0][None, :]], torch.empty(
            (1, 0, 4)), torch.empty((1, 100, 4)), [img_metas[0]],
                                      torch.ones((1, 4)))
def test_rpn_forward():
    """Smoke-test RPN train forward and single-image inference."""
    model = _get_detector_cfg('rpn/rpn_r50_fpn_1x_coco.py')
    model = _replace_r50_with_r18(model)
    model.backbone.init_cfg = None
    from mmdet.models import build_detector
    detector = build_detector(model)
    input_shape = (1, 3, 100, 100)
    mm_inputs = _demo_mm_inputs(input_shape)
    imgs = mm_inputs.pop('imgs')
    img_metas = mm_inputs.pop('img_metas')
    # Test forward train (RPN is class-agnostic, so no gt_labels are passed)
    gt_bboxes = mm_inputs['gt_bboxes']
    losses = detector.forward(
        imgs, img_metas, gt_bboxes=gt_bboxes, return_loss=True)
    assert isinstance(losses, dict)
    # Test forward test (inference), one image at a time
    with torch.no_grad():
        img_list = [g[None, :] for g in imgs]
        batch_results = []
        for one_img, one_meta in zip(img_list, img_metas):
            result = detector.forward([one_img], [[one_meta]],
                                      return_loss=False)
            batch_results.append(result)
@pytest.mark.parametrize(
    'cfg_file',
    [
        'reppoints/reppoints_moment_r50_fpn_1x_coco.py',
        'retinanet/retinanet_r50_fpn_1x_coco.py',
        'guided_anchoring/ga_retinanet_r50_fpn_1x_coco.py',
        'ghm/retinanet_ghm_r50_fpn_1x_coco.py',
        'fcos/fcos_center_r50_caffe_fpn_gn-head_1x_coco.py',
        'foveabox/fovea_align_r50_fpn_gn-head_4x4_2x_coco.py',
        # 'free_anchor/retinanet_free_anchor_r50_fpn_1x_coco.py',
        # 'atss/atss_r50_fpn_1x_coco.py', # not ready for topk
        'yolo/yolov3_mobilenetv2_320_300e_coco.py',
        'yolox/yolox_tiny_8x8_300e_coco.py'
    ])
def test_single_stage_forward_gpu(cfg_file):
    """Smoke-test single-stage detectors (train + inference) on GPU."""
    if not torch.cuda.is_available():
        import pytest
        pytest.skip('test requires GPU and torch+cuda')
    model = _get_detector_cfg(cfg_file)
    model = _replace_r50_with_r18(model)
    model.backbone.init_cfg = None
    from mmdet.models import build_detector
    detector = build_detector(model)
    input_shape = (2, 3, 128, 128)
    mm_inputs = _demo_mm_inputs(input_shape)
    imgs = mm_inputs.pop('imgs')
    img_metas = mm_inputs.pop('img_metas')
    detector = detector.cuda()
    imgs = imgs.cuda()
    # Test forward train (ground truth must be moved to GPU as well)
    gt_bboxes = [b.cuda() for b in mm_inputs['gt_bboxes']]
    gt_labels = [g.cuda() for g in mm_inputs['gt_labels']]
    losses = detector.forward(
        imgs,
        img_metas,
        gt_bboxes=gt_bboxes,
        gt_labels=gt_labels,
        return_loss=True)
    assert isinstance(losses, dict)
    # Test forward test (inference), one image at a time
    detector.eval()
    with torch.no_grad():
        img_list = [g[None, :] for g in imgs]
        batch_results = []
        for one_img, one_meta in zip(img_list, img_metas):
            result = detector.forward([one_img], [[one_meta]],
                                      return_loss=False)
            batch_results.append(result)
def test_faster_rcnn_ohem_forward():
    """Smoke-test Faster R-CNN with OHEM sampling.

    Covers train forward with non-empty and empty ground truth, plus the RoI
    head trained on an empty proposal list.
    """
    model = _get_detector_cfg(
        'faster_rcnn/faster_rcnn_r50_fpn_ohem_1x_coco.py')
    model = _replace_r50_with_r18(model)
    model.backbone.init_cfg = None
    from mmdet.models import build_detector
    detector = build_detector(model)
    input_shape = (1, 3, 100, 100)
    # Test forward train with a non-empty truth batch
    mm_inputs = _demo_mm_inputs(input_shape, num_items=[10])
    imgs = mm_inputs.pop('imgs')
    img_metas = mm_inputs.pop('img_metas')
    gt_bboxes = mm_inputs['gt_bboxes']
    gt_labels = mm_inputs['gt_labels']
    losses = detector.forward(
        imgs,
        img_metas,
        gt_bboxes=gt_bboxes,
        gt_labels=gt_labels,
        return_loss=True)
    assert isinstance(losses, dict)
    loss, _ = detector._parse_losses(losses)
    assert float(loss.item()) > 0
    # Test forward train with an empty truth batch
    mm_inputs = _demo_mm_inputs(input_shape, num_items=[0])
    imgs = mm_inputs.pop('imgs')
    img_metas = mm_inputs.pop('img_metas')
    gt_bboxes = mm_inputs['gt_bboxes']
    gt_labels = mm_inputs['gt_labels']
    losses = detector.forward(
        imgs,
        img_metas,
        gt_bboxes=gt_bboxes,
        gt_labels=gt_labels,
        return_loss=True)
    assert isinstance(losses, dict)
    loss, _ = detector._parse_losses(losses)
    assert float(loss.item()) > 0
    # Test RoI forward train with an empty proposals
    feature = detector.extract_feat(imgs[0][None, :])
    losses = detector.roi_head.forward_train(
        feature,
        img_metas, [torch.empty((0, 5))],
        gt_bboxes=gt_bboxes,
        gt_labels=gt_labels)
    assert isinstance(losses, dict)
@pytest.mark.parametrize(
    'cfg_file',
    [
        # 'cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py',
        'mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py',
        # 'grid_rcnn/grid_rcnn_r50_fpn_gn-head_2x_coco.py',
        # 'ms_rcnn/ms_rcnn_r50_fpn_1x_coco.py',
        # 'htc/htc_r50_fpn_1x_coco.py',
        # 'panoptic_fpn/panoptic_fpn_r50_fpn_1x_coco.py',
        # 'scnet/scnet_r50_fpn_20e_coco.py',
        # 'seesaw_loss/mask_rcnn_r50_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py' # noqa: E501
    ])
def test_two_stage_forward(cfg_file):
    """Smoke-test two-stage detectors.

    Covers train forward (non-empty/empty ground truth, with semantic heads
    for the configs that need them), inference, and the RoI head's behavior
    on empty proposal sets — for the whole batch, for a single image in the
    batch, in ``aug_test``, and with ``rcnn_test_cfg=None``.
    """
    models_with_semantic = [
        'htc/htc_r50_fpn_1x_coco.py',
        'panoptic_fpn/panoptic_fpn_r50_fpn_1x_coco.py',
        'scnet/scnet_r50_fpn_20e_coco.py',
    ]
    if cfg_file in models_with_semantic:
        with_semantic = True
    else:
        with_semantic = False
    model = _get_detector_cfg(cfg_file)
    model = _replace_r50_with_r18(model)
    model.backbone.init_cfg = None
    # Save cost
    if cfg_file in [
            'seesaw_loss/mask_rcnn_r50_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py'  # noqa: E501
    ]:
        model.roi_head.bbox_head.num_classes = 80
        model.roi_head.bbox_head.loss_cls.num_classes = 80
        model.roi_head.mask_head.num_classes = 80
        model.test_cfg.rcnn.score_thr = 0.05
        model.test_cfg.rcnn.max_per_img = 100
    from mmdet.models import build_detector
    detector = build_detector(model)
    input_shape = (1, 3, 128, 128)
    # Test forward train with a non-empty truth batch
    mm_inputs = _demo_mm_inputs(
        input_shape, num_items=[10], with_semantic=with_semantic)
    imgs = mm_inputs.pop('imgs')
    img_metas = mm_inputs.pop('img_metas')
    losses = detector.forward(imgs, img_metas, return_loss=True, **mm_inputs)
    assert isinstance(losses, dict)
    loss, _ = detector._parse_losses(losses)
    loss.requires_grad_(True)
    assert float(loss.item()) > 0
    loss.backward()
    # Test forward train with an empty truth batch
    mm_inputs = _demo_mm_inputs(
        input_shape, num_items=[0], with_semantic=with_semantic)
    imgs = mm_inputs.pop('imgs')
    img_metas = mm_inputs.pop('img_metas')
    losses = detector.forward(imgs, img_metas, return_loss=True, **mm_inputs)
    assert isinstance(losses, dict)
    loss, _ = detector._parse_losses(losses)
    loss.requires_grad_(True)
    assert float(loss.item()) > 0
    loss.backward()
    # Test RoI forward train with an empty proposals
    if cfg_file in [
            'panoptic_fpn/panoptic_fpn_r50_fpn_1x_coco.py'  # noqa: E501
    ]:
        mm_inputs.pop('gt_semantic_seg')
    feature = detector.extract_feat(imgs[0][None, :])
    losses = detector.roi_head.forward_train(feature, img_metas,
                                             [torch.empty(
                                                 (0, 5))], **mm_inputs)
    assert isinstance(losses, dict)
    # Test forward test (inference), one image at a time
    with torch.no_grad():
        img_list = [g[None, :] for g in imgs]
        batch_results = []
        for one_img, one_meta in zip(img_list, img_metas):
            result = detector.forward([one_img], [[one_meta]],
                                      return_loss=False)
            batch_results.append(result)
    cascade_models = [
        'cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py',
        'htc/htc_r50_fpn_1x_coco.py',
        'scnet/scnet_r50_fpn_20e_coco.py',
    ]
    # test empty proposal in roi_head
    with torch.no_grad():
        # test no proposal in the whole batch
        detector.simple_test(
            imgs[0][None, :], [img_metas[0]], proposals=[torch.empty((0, 4))])
        # test no proposal of aug
        features = detector.extract_feats([imgs[0][None, :]] * 2)
        detector.roi_head.aug_test(features, [torch.empty((0, 4))] * 2,
                                   [[img_metas[0]]] * 2)
        # test rcnn_test_cfg is None
        if cfg_file not in cascade_models:
            feature = detector.extract_feat(imgs[0][None, :])
            bboxes, scores = detector.roi_head.simple_test_bboxes(
                feature, [img_metas[0]], [torch.empty((0, 4))], None)
            assert all([bbox.shape == torch.Size((0, 4)) for bbox in bboxes])
            assert all([
                score.shape == torch.Size(
                    (0, detector.roi_head.bbox_head.fc_cls.out_features))
                for score in scores
            ])
        # test no proposal in some images of the batch
        x1y1 = torch.randint(1, 100, (10, 2)).float()
        # x2y2 must be greater than x1y1
        x2y2 = x1y1 + torch.randint(1, 100, (10, 2))
        detector.simple_test(
            imgs[0][None, :].repeat(2, 1, 1, 1), [img_metas[0]] * 2,
            proposals=[torch.empty((0, 4)),
                       torch.cat([x1y1, x2y2], dim=-1)])
        # test no proposal of aug
        detector.roi_head.aug_test(
            features, [torch.cat([x1y1, x2y2], dim=-1),
                       torch.empty((0, 4))], [[img_metas[0]]] * 2)
        # test rcnn_test_cfg is None
        if cfg_file not in cascade_models:
            feature = detector.extract_feat(imgs[0][None, :].repeat(
                2, 1, 1, 1))
            bboxes, scores = detector.roi_head.simple_test_bboxes(
                feature, [img_metas[0]] * 2,
                [torch.empty((0, 4)),
                 torch.cat([x1y1, x2y2], dim=-1)], None)
            assert bboxes[0].shape == torch.Size((0, 4))
            assert scores[0].shape == torch.Size(
                (0, detector.roi_head.bbox_head.fc_cls.out_features))
@pytest.mark.parametrize(
    'cfg_file', ['ghm/retinanet_ghm_r50_fpn_1x_coco.py', 'ssd/ssd300_coco.py'])
def test_single_stage_forward_cpu(cfg_file):
    """Smoke-test single-stage detectors (train + inference) on CPU."""
    model = _get_detector_cfg(cfg_file)
    model = _replace_r50_with_r18(model)
    model.backbone.init_cfg = None
    from mmdet.models import build_detector
    detector = build_detector(model)
    input_shape = (1, 3, 300, 300)
    mm_inputs = _demo_mm_inputs(input_shape)
    imgs = mm_inputs.pop('imgs')
    img_metas = mm_inputs.pop('img_metas')
    # Test forward train
    gt_bboxes = mm_inputs['gt_bboxes']
    gt_labels = mm_inputs['gt_labels']
    losses = detector.forward(
        imgs,
        img_metas,
        gt_bboxes=gt_bboxes,
        gt_labels=gt_labels,
        return_loss=True)
    assert isinstance(losses, dict)
    # Test forward test (inference), one image at a time
    detector.eval()
    with torch.no_grad():
        img_list = [g[None, :] for g in imgs]
        batch_results = []
        for one_img, one_meta in zip(img_list, img_metas):
            result = detector.forward([one_img], [[one_meta]],
                                      return_loss=False)
            batch_results.append(result)
def _demo_mm_inputs(input_shape=(1, 3, 300, 300),
                    num_items=None, num_classes=10,
                    with_semantic=False):  # yapf: disable
    """Create a superset of inputs needed to run test or train batches.

    Args:
        input_shape (tuple):
            input batch dimensions (N, C, H, W)
        num_items (None | List[int]):
            specifies the number of boxes in each batch item; random 1-9
            boxes per image when None
        num_classes (int):
            number of different labels a box might have
        with_semantic (bool):
            additionally generate a 1/8-scale semantic segmentation map

    Returns:
        dict: keys ``imgs``, ``img_metas``, ``gt_bboxes``, ``gt_labels``,
            ``gt_bboxes_ignore`` (always None), ``gt_masks`` and, when
            requested, ``gt_semantic_seg``.
    """
    from mmdet.core import BitmapMasks
    (N, C, H, W) = input_shape
    # Fixed-seed RNG so boxes/labels are reproducible across runs.
    rng = np.random.RandomState(0)
    imgs = rng.rand(*input_shape)
    img_metas = [{
        'img_shape': (H, W, C),
        'ori_shape': (H, W, C),
        'pad_shape': (H, W, C),
        'filename': '<demo>.png',
        'scale_factor': np.array([1.1, 1.2, 1.1, 1.2]),
        'flip': False,
        'flip_direction': None,
    } for _ in range(N)]
    gt_bboxes = []
    gt_labels = []
    gt_masks = []
    for batch_idx in range(N):
        if num_items is None:
            num_boxes = rng.randint(1, 10)
        else:
            num_boxes = num_items[batch_idx]
        # Sample center/size in [0, 1) and convert to clipped corner coords.
        cx, cy, bw, bh = rng.rand(num_boxes, 4).T
        tl_x = ((cx * W) - (W * bw / 2)).clip(0, W)
        tl_y = ((cy * H) - (H * bh / 2)).clip(0, H)
        br_x = ((cx * W) + (W * bw / 2)).clip(0, W)
        br_y = ((cy * H) + (H * bh / 2)).clip(0, H)
        boxes = np.vstack([tl_x, tl_y, br_x, br_y]).T
        class_idxs = rng.randint(1, num_classes, size=num_boxes)
        gt_bboxes.append(torch.FloatTensor(boxes))
        gt_labels.append(torch.LongTensor(class_idxs))
        # NOTE(review): uses the global np.random, not the seeded `rng`, so
        # masks are not reproducible even though boxes are — confirm intent.
        mask = np.random.randint(0, 2, (len(boxes), H, W), dtype=np.uint8)
        gt_masks.append(BitmapMasks(mask, H, W))
    mm_inputs = {
        'imgs': torch.FloatTensor(imgs).requires_grad_(True),
        'img_metas': img_metas,
        'gt_bboxes': gt_bboxes,
        'gt_labels': gt_labels,
        'gt_bboxes_ignore': None,
        'gt_masks': gt_masks,
    }
    if with_semantic:
        # assume gt_semantic_seg using scale 1/8 of the img
        gt_semantic_seg = np.random.randint(
            0, num_classes, (1, 1, H // 8, W // 8), dtype=np.uint8)
        mm_inputs.update(
            {'gt_semantic_seg': torch.ByteTensor(gt_semantic_seg)})
    return mm_inputs
def test_yolact_forward():
    """Smoke-test YOLACT train forward, dummy forward and inference."""
    model = _get_detector_cfg('yolact/yolact_r50_1x8_coco.py')
    model = _replace_r50_with_r18(model)
    model.backbone.init_cfg = None
    from mmdet.models import build_detector
    detector = build_detector(model)
    input_shape = (1, 3, 100, 100)
    mm_inputs = _demo_mm_inputs(input_shape)
    imgs = mm_inputs.pop('imgs')
    img_metas = mm_inputs.pop('img_metas')
    # Test forward train (YOLACT additionally consumes instance masks)
    detector.train()
    gt_bboxes = mm_inputs['gt_bboxes']
    gt_labels = mm_inputs['gt_labels']
    gt_masks = mm_inputs['gt_masks']
    losses = detector.forward(
        imgs,
        img_metas,
        gt_bboxes=gt_bboxes,
        gt_labels=gt_labels,
        gt_masks=gt_masks,
        return_loss=True)
    assert isinstance(losses, dict)
    # Test forward dummy for get_flops
    detector.forward_dummy(imgs)
    # Test forward test (inference), one image at a time
    detector.eval()
    with torch.no_grad():
        img_list = [g[None, :] for g in imgs]
        batch_results = []
        for one_img, one_meta in zip(img_list, img_metas):
            result = detector.forward([one_img], [[one_meta]],
                                      rescale=True,
                                      return_loss=False)
            batch_results.append(result)
def test_detr_forward():
    """Smoke-test DETR train forward (non-empty/empty GT) and inference."""
    model = _get_detector_cfg('detr/detr_r50_8x2_150e_coco.py')
    # DETR has no neck, so shrink the backbone directly and match the head's
    # input channels to ResNet18's final stage.
    model.backbone.depth = 18
    model.bbox_head.in_channels = 512
    model.backbone.init_cfg = None
    from mmdet.models import build_detector
    detector = build_detector(model)
    input_shape = (1, 3, 100, 100)
    mm_inputs = _demo_mm_inputs(input_shape)
    imgs = mm_inputs.pop('imgs')
    img_metas = mm_inputs.pop('img_metas')
    # Test forward train with non-empty truth batch
    detector.train()
    gt_bboxes = mm_inputs['gt_bboxes']
    gt_labels = mm_inputs['gt_labels']
    losses = detector.forward(
        imgs,
        img_metas,
        gt_bboxes=gt_bboxes,
        gt_labels=gt_labels,
        return_loss=True)
    assert isinstance(losses, dict)
    loss, _ = detector._parse_losses(losses)
    assert float(loss.item()) > 0
    # Test forward train with an empty truth batch
    mm_inputs = _demo_mm_inputs(input_shape, num_items=[0])
    imgs = mm_inputs.pop('imgs')
    img_metas = mm_inputs.pop('img_metas')
    gt_bboxes = mm_inputs['gt_bboxes']
    gt_labels = mm_inputs['gt_labels']
    losses = detector.forward(
        imgs,
        img_metas,
        gt_bboxes=gt_bboxes,
        gt_labels=gt_labels,
        return_loss=True)
    assert isinstance(losses, dict)
    loss, _ = detector._parse_losses(losses)
    assert float(loss.item()) > 0
    # Test forward test (inference), one image at a time
    detector.eval()
    with torch.no_grad():
        img_list = [g[None, :] for g in imgs]
        batch_results = []
        for one_img, one_meta in zip(img_list, img_metas):
            result = detector.forward([one_img], [[one_meta]],
                                      rescale=True,
                                      return_loss=False)
            batch_results.append(result)
def test_inference_detector():
    """Check ``inference_detector`` on a single image and on a list of images
    using a tiny hand-built RetinaNet."""
    from mmdet.apis import inference_detector
    from mmdet.models import build_detector
    from mmcv import ConfigDict
    # small RetinaNet
    num_class = 3
    model_dict = dict(
        type='RetinaNet',
        backbone=dict(
            type='ResNet',
            depth=18,
            num_stages=4,
            out_indices=(3, ),
            norm_cfg=dict(type='BN', requires_grad=False),
            norm_eval=True,
            style='pytorch'),
        neck=None,
        bbox_head=dict(
            type='RetinaHead',
            num_classes=num_class,
            in_channels=512,
            stacked_convs=1,
            feat_channels=256,
            anchor_generator=dict(
                type='AnchorGenerator',
                octave_base_scale=4,
                scales_per_octave=3,
                ratios=[0.5],
                strides=[32]),
            bbox_coder=dict(
                type='DeltaXYWHBBoxCoder',
                target_means=[.0, .0, .0, .0],
                target_stds=[1.0, 1.0, 1.0, 1.0]),
        ),
        test_cfg=dict(
            nms_pre=1000,
            min_bbox_size=0,
            score_thr=0.05,
            nms=dict(type='nms', iou_threshold=0.5),
            max_per_img=100))
    rng = np.random.RandomState(0)
    img1 = rng.rand(100, 100, 3)
    img2 = rng.rand(100, 100, 3)
    model = build_detector(ConfigDict(model_dict))
    # inference_detector reads the test pipeline from model.cfg
    config = _get_config_module('retinanet/retinanet_r50_fpn_1x_coco.py')
    model.cfg = config
    # test single image
    result = inference_detector(model, img1)
    assert len(result) == num_class
    # test multiple image
    result = inference_detector(model, [img1, img2])
    assert len(result) == 2 and len(result[0]) == num_class
@pytest.mark.skipif(
    not torch.cuda.is_available(), reason='requires CUDA support')
def test_yolox_random_size():
    """Check YOLOX's multi-scale training resize: after one train forward
    with ``random_size_interval=1``, ``_input_size`` equals ``input_size``
    scaled by the (fixed, range (2, 2)) random factor."""
    from mmdet.models import build_detector
    model = _get_detector_cfg('yolox/yolox_tiny_8x8_300e_coco.py')
    model.random_size_range = (2, 2)
    model.input_size = (64, 96)
    model.random_size_interval = 1
    detector = build_detector(model)
    input_shape = (1, 3, 64, 64)
    mm_inputs = _demo_mm_inputs(input_shape)
    imgs = mm_inputs.pop('imgs')
    img_metas = mm_inputs.pop('img_metas')
    # Test forward train with non-empty truth batch
    detector.train()
    gt_bboxes = mm_inputs['gt_bboxes']
    gt_labels = mm_inputs['gt_labels']
    detector.forward(
        imgs,
        img_metas,
        gt_bboxes=gt_bboxes,
        gt_labels=gt_labels,
        return_loss=True)
    assert detector._input_size == (64, 96)
| 23,230 | 32.092593 | 110 | py |
DSLA-DSLA | DSLA-DSLA/tests/test_models/test_necks.py | # Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from torch.nn.modules.batchnorm import _BatchNorm
from mmdet.models.necks import (FPN, YOLOXPAFPN, ChannelMapper, CTResNetNeck,
DilatedEncoder, SSDNeck, YOLOV3Neck)
def test_fpn():
    """Tests FPN argument validation and output shapes.

    Fixed: the per-output shape checks were bare comparisons with no
    ``assert`` (no-ops), and the expected spatial size was written as
    ``s // (2**i)`` although with ``start_level=1`` output ``i`` sits at
    stride ``2**(i + 1)`` of the input image.
    """
    s = 64
    in_channels = [8, 16, 32, 64]
    feat_sizes = [s // 2**i for i in range(4)]  # [64, 32, 16, 8]
    out_channels = 8
    # `num_outs` is not equal to len(in_channels) - start_level
    with pytest.raises(AssertionError):
        FPN(in_channels=in_channels,
            out_channels=out_channels,
            start_level=1,
            num_outs=2)
    # `end_level` is larger than len(in_channels) - 1
    with pytest.raises(AssertionError):
        FPN(in_channels=in_channels,
            out_channels=out_channels,
            start_level=1,
            end_level=4,
            num_outs=2)
    # `num_outs` is not equal to end_level - start_level
    with pytest.raises(AssertionError):
        FPN(in_channels=in_channels,
            out_channels=out_channels,
            start_level=1,
            end_level=3,
            num_outs=1)
    # Invalid `add_extra_convs` option
    with pytest.raises(AssertionError):
        FPN(in_channels=in_channels,
            out_channels=out_channels,
            start_level=1,
            add_extra_convs='on_xxx',
            num_outs=5)

    def _check_outs(fpn_model, outs):
        # With start_level=1 output i is at stride 2**(i + 1): 32,16,8,4,2.
        assert len(outs) == fpn_model.num_outs
        for i in range(fpn_model.num_outs):
            assert outs[i].shape[1] == out_channels
            assert outs[i].shape[2] == outs[i].shape[3] == s // (2**(i + 1))

    fpn_model = FPN(
        in_channels=in_channels,
        out_channels=out_channels,
        start_level=1,
        add_extra_convs=True,
        num_outs=5)
    # FPN expects a multiple levels of features per image
    feats = [
        torch.rand(1, in_channels[i], feat_sizes[i], feat_sizes[i])
        for i in range(len(in_channels))
    ]
    outs = fpn_model(feats)
    # add_extra_convs=True is normalized to the 'on_input' mode
    assert fpn_model.add_extra_convs == 'on_input'
    _check_outs(fpn_model, outs)
    # Tests for fpn with no extra convs (pooling is used instead)
    fpn_model = FPN(
        in_channels=in_channels,
        out_channels=out_channels,
        start_level=1,
        add_extra_convs=False,
        num_outs=5)
    outs = fpn_model(feats)
    assert not fpn_model.add_extra_convs
    _check_outs(fpn_model, outs)
    # Tests for fpn with lateral bns
    fpn_model = FPN(
        in_channels=in_channels,
        out_channels=out_channels,
        start_level=1,
        add_extra_convs=True,
        no_norm_on_lateral=False,
        norm_cfg=dict(type='BN', requires_grad=True),
        num_outs=5)
    outs = fpn_model(feats)
    assert fpn_model.add_extra_convs == 'on_input'
    _check_outs(fpn_model, outs)
    bn_exist = False
    for m in fpn_model.modules():
        if isinstance(m, _BatchNorm):
            bn_exist = True
    assert bn_exist
    # Bilinear upsample
    fpn_model = FPN(
        in_channels=in_channels,
        out_channels=out_channels,
        start_level=1,
        add_extra_convs=True,
        upsample_cfg=dict(mode='bilinear', align_corners=True),
        num_outs=5)
    outs = fpn_model(feats)
    assert fpn_model.add_extra_convs == 'on_input'
    _check_outs(fpn_model, outs)
    # Scale factor instead of fixed upsample size upsample
    fpn_model = FPN(
        in_channels=in_channels,
        out_channels=out_channels,
        start_level=1,
        add_extra_convs=True,
        upsample_cfg=dict(scale_factor=2),
        num_outs=5)
    outs = fpn_model(feats)
    _check_outs(fpn_model, outs)
    # Extra convs source is 'inputs'
    fpn_model = FPN(
        in_channels=in_channels,
        out_channels=out_channels,
        add_extra_convs='on_input',
        start_level=1,
        num_outs=5)
    assert fpn_model.add_extra_convs == 'on_input'
    outs = fpn_model(feats)
    _check_outs(fpn_model, outs)
    # Extra convs source is 'laterals'
    fpn_model = FPN(
        in_channels=in_channels,
        out_channels=out_channels,
        add_extra_convs='on_lateral',
        start_level=1,
        num_outs=5)
    assert fpn_model.add_extra_convs == 'on_lateral'
    outs = fpn_model(feats)
    _check_outs(fpn_model, outs)
    # Extra convs source is 'outputs'
    fpn_model = FPN(
        in_channels=in_channels,
        out_channels=out_channels,
        add_extra_convs='on_output',
        start_level=1,
        num_outs=5)
    assert fpn_model.add_extra_convs == 'on_output'
    outs = fpn_model(feats)
    _check_outs(fpn_model, outs)
def test_channel_mapper():
    """Tests ChannelMapper argument validation and output shapes.

    Fixed: the per-output shape checks were bare comparisons with no
    ``assert`` (no-ops).
    """
    s = 64
    in_channels = [8, 16, 32, 64]
    feat_sizes = [s // 2**i for i in range(4)]  # [64, 32, 16, 8]
    out_channels = 8
    kernel_size = 3
    feats = [
        torch.rand(1, in_channels[i], feat_sizes[i], feat_sizes[i])
        for i in range(len(in_channels))
    ]
    # in_channels must be a list
    with pytest.raises(AssertionError):
        channel_mapper = ChannelMapper(
            in_channels=10, out_channels=out_channels, kernel_size=kernel_size)
    # the length of channel_mapper's inputs must be equal to the length of
    # in_channels
    with pytest.raises(AssertionError):
        channel_mapper = ChannelMapper(
            in_channels=in_channels[:-1],
            out_channels=out_channels,
            kernel_size=kernel_size)
        channel_mapper(feats)
    channel_mapper = ChannelMapper(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=kernel_size)
    outs = channel_mapper(feats)
    assert len(outs) == len(feats)
    for i in range(len(feats)):
        # ChannelMapper changes channels only; spatial sizes are preserved.
        assert outs[i].shape[1] == out_channels
        assert outs[i].shape[2] == outs[i].shape[3] == s // (2**i)
def test_dilated_encoder():
    """DilatedEncoder must keep spatial size while remapping channels."""
    num_in, num_out, spatial = 16, 32, 34
    encoder = DilatedEncoder(num_in, num_out, 16, 2)
    inputs = [torch.rand(1, num_in, spatial, spatial)]
    result = encoder(inputs)[0]
    assert result.shape == (1, num_out, spatial, spatial)
def test_ct_resnet_neck():
    """Test CTResNetNeck argument validation and output shapes.

    Covers: type/length checks on the deconv filter/kernel arguments,
    list/tuple input requirement, the CPU forward path, and (when CUDA is
    available) the DCN-based variant.
    """
    # num_filters/num_kernels must be a list
    with pytest.raises(TypeError):
        CTResNetNeck(
            in_channel=10, num_deconv_filters=10, num_deconv_kernels=4)
    # num_filters/num_kernels must be same length
    with pytest.raises(AssertionError):
        CTResNetNeck(
            in_channel=10,
            num_deconv_filters=(10, 10),
            num_deconv_kernels=(4, ))
    in_channels = 16
    num_filters = (8, 8)
    num_kernels = (4, 4)
    feat = torch.rand(1, 16, 4, 4)
    ct_resnet_neck = CTResNetNeck(
        in_channel=in_channels,
        num_deconv_filters=num_filters,
        num_deconv_kernels=num_kernels,
        use_dcn=False)
    # feat must be list or tuple
    with pytest.raises(AssertionError):
        ct_resnet_neck(feat)
    # Two deconv stages upsample the 4x4 input to 16x16 (4x per the
    # asserted output shape below).
    out_feat = ct_resnet_neck([feat])[0]
    assert out_feat.shape == (1, num_filters[-1], 16, 16)
    if torch.cuda.is_available():
        # test dcn (default use_dcn=True path requires a CUDA device)
        ct_resnet_neck = CTResNetNeck(
            in_channel=in_channels,
            num_deconv_filters=num_filters,
            num_deconv_kernels=num_kernels)
        ct_resnet_neck = ct_resnet_neck.cuda()
        feat = feat.cuda()
        out_feat = ct_resnet_neck([feat])[0]
        assert out_feat.shape == (1, num_filters[-1], 16, 16)
def test_yolov3_neck():
    """Test YOLOV3Neck argument validation and output shapes.

    Inputs are built from the smallest to the largest spatial size (the
    list comprehensions iterate the channel indices in reverse), matching
    the neck's expected input ordering.
    """
    # num_scales, in_channels, out_channels must be same length
    with pytest.raises(AssertionError):
        YOLOV3Neck(num_scales=3, in_channels=[16, 8, 4], out_channels=[8, 4])
    # len(feats) must equal to num_scales
    with pytest.raises(AssertionError):
        neck = YOLOV3Neck(
            num_scales=3, in_channels=[16, 8, 4], out_channels=[8, 4, 2])
        feats = (torch.rand(1, 4, 16, 16), torch.rand(1, 8, 16, 16))
        neck(feats)
    # test normal channels
    s = 32
    in_channels = [16, 8, 4]
    out_channels = [8, 4, 2]
    # feat_sizes[i] is the spatial size matching in_channels[i]
    feat_sizes = [s // 2**i for i in range(len(in_channels) - 1, -1, -1)]
    feats = [
        torch.rand(1, in_channels[i], feat_sizes[i], feat_sizes[i])
        for i in range(len(in_channels) - 1, -1, -1)
    ]
    neck = YOLOV3Neck(
        num_scales=3, in_channels=in_channels, out_channels=out_channels)
    outs = neck(feats)
    assert len(outs) == len(feats)
    for i in range(len(outs)):
        assert outs[i].shape == \
            (1, out_channels[i], feat_sizes[i], feat_sizes[i])
    # test more flexible setting: channel counts need not be monotonic
    s = 32
    in_channels = [32, 8, 16]
    out_channels = [19, 21, 5]
    feat_sizes = [s // 2**i for i in range(len(in_channels) - 1, -1, -1)]
    feats = [
        torch.rand(1, in_channels[i], feat_sizes[i], feat_sizes[i])
        for i in range(len(in_channels) - 1, -1, -1)
    ]
    neck = YOLOV3Neck(
        num_scales=3, in_channels=in_channels, out_channels=out_channels)
    outs = neck(feats)
    assert len(outs) == len(feats)
    for i in range(len(outs)):
        assert outs[i].shape == \
            (1, out_channels[i], feat_sizes[i], feat_sizes[i])
def test_ssd_neck():
    """Test SSDNeck argument validation, forward shapes and SSD-Lite mode."""
    # level_strides/level_paddings must be same length
    with pytest.raises(AssertionError):
        SSDNeck(
            in_channels=[8, 16],
            out_channels=[8, 16, 32],
            level_strides=[2],
            level_paddings=[2, 1])
    # length of out_channels must larger than in_channels
    with pytest.raises(AssertionError):
        SSDNeck(
            in_channels=[8, 16],
            out_channels=[8],
            level_strides=[2],
            level_paddings=[2])
    # len(out_channels) - len(in_channels) must equal to len(level_strides)
    with pytest.raises(AssertionError):
        SSDNeck(
            in_channels=[8, 16],
            out_channels=[4, 16, 64],
            level_strides=[2, 2],
            level_paddings=[2, 2])
    # in_channels must be same with out_channels[:len(in_channels)]
    with pytest.raises(AssertionError):
        SSDNeck(
            in_channels=[8, 16],
            out_channels=[4, 16, 64],
            level_strides=[2],
            level_paddings=[2])
    # Valid config: one input level, two extra levels appended by the neck.
    ssd_neck = SSDNeck(
        in_channels=[4],
        out_channels=[4, 8, 16],
        level_strides=[2, 1],
        level_paddings=[1, 0])
    feats = (torch.rand(1, 4, 16, 16), )
    outs = ssd_neck(feats)
    assert outs[0].shape == (1, 4, 16, 16)
    assert outs[1].shape == (1, 8, 8, 8)
    assert outs[2].shape == (1, 16, 6, 6)
    # test SSD-Lite Neck (depthwise convs, no L2 norm layer)
    ssd_neck = SSDNeck(
        in_channels=[4, 8],
        out_channels=[4, 8, 16],
        level_strides=[1],
        level_paddings=[1],
        l2_norm_scale=None,
        use_depthwise=True,
        norm_cfg=dict(type='BN'),
        act_cfg=dict(type='ReLU6'))
    assert not hasattr(ssd_neck, 'l2_norm')
    from mmcv.cnn.bricks import DepthwiseSeparableConvModule
    assert isinstance(ssd_neck.extra_layers[0][-1],
                      DepthwiseSeparableConvModule)
    feats = (torch.rand(1, 4, 8, 8), torch.rand(1, 8, 8, 8))
    outs = ssd_neck(feats)
    assert outs[0].shape == (1, 4, 8, 8)
    assert outs[1].shape == (1, 8, 8, 8)
    assert outs[2].shape == (1, 16, 8, 8)
def test_yolox_pafpn():
    """Test YOLOXPAFPN output shapes for the regular and depthwise variants."""
    s = 64
    in_channels = [8, 16, 32, 64]
    feat_sizes = [s // 2**i for i in range(4)]  # [64, 32, 16, 8]
    out_channels = 24
    feats = [
        torch.rand(1, in_channels[i], feat_sizes[i], feat_sizes[i])
        for i in range(len(in_channels))
    ]
    neck = YOLOXPAFPN(in_channels=in_channels, out_channels=out_channels)
    outs = neck(feats)
    assert len(outs) == len(feats)
    for i in range(len(feats)):
        # All levels are mapped to out_channels; spatial sizes are preserved.
        assert outs[i].shape[1] == out_channels
        assert outs[i].shape[2] == outs[i].shape[3] == s // (2**i)
    # test depth-wise
    neck = YOLOXPAFPN(
        in_channels=in_channels, out_channels=out_channels, use_depthwise=True)
    from mmcv.cnn.bricks import DepthwiseSeparableConvModule
    assert isinstance(neck.downsamples[0], DepthwiseSeparableConvModule)
    outs = neck(feats)
    assert len(outs) == len(feats)
    for i in range(len(feats)):
        assert outs[i].shape[1] == out_channels
        assert outs[i].shape[2] == outs[i].shape[3] == s // (2**i)
| 13,273 | 31.614251 | 79 | py |
DSLA-DSLA | DSLA-DSLA/tests/test_models/test_plugins.py | # Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmdet.models.plugins import DropBlock
def test_dropblock():
    """Check DropBlock forward behaviour and constructor validation."""
    inputs = torch.rand(1, 1, 11, 11)
    # drop_prob == 1 with a block covering the whole map zeroes everything
    # while preserving the shape.
    layer = DropBlock(1.0, block_size=11, warmup_iters=0)
    result = layer(inputs)
    assert (result == 0).all() and result.shape == inputs.shape
    # A partial drop keeps the shape unchanged.
    layer = DropBlock(0.5, block_size=5, warmup_iters=0)
    assert layer(inputs).shape == inputs.shape
    # drop_prob must be (0,1]
    with pytest.raises(AssertionError):
        DropBlock(1.5, 3)
    # block_size cannot be an even number
    with pytest.raises(AssertionError):
        DropBlock(0.5, 2)
    # warmup_iters cannot be less than 0
    with pytest.raises(AssertionError):
        DropBlock(0.5, 3, -1)
| 840 | 27.033333 | 67 | py |
DSLA-DSLA | DSLA-DSLA/tests/test_models/test_loss_compatibility.py | # Copyright (c) OpenMMLab. All rights reserved.
"""pytest tests/test_loss_compatibility.py."""
import copy
from os.path import dirname, exists, join
import numpy as np
import pytest
import torch
def _get_config_directory():
    """Locate the repository's predefined ``configs`` directory.

    Raises:
        Exception: If no ``configs`` directory exists at the expected path.
    """
    try:
        # Normal case: running from a source checkout of mmdetection.
        repo_root = dirname(dirname(dirname(__file__)))
    except NameError:
        # Fallback for interactive sessions where __file__ is undefined.
        import mmdet
        repo_root = dirname(dirname(mmdet.__file__))
    cfg_dir = join(repo_root, 'configs')
    if not exists(cfg_dir):
        raise Exception('Cannot find config path')
    return cfg_dir
def _get_config_module(fname):
    """Load the named configuration file as an mmcv ``Config`` object."""
    from mmcv import Config
    cfg_path = join(_get_config_directory(), fname)
    return Config.fromfile(cfg_path)
def _get_detector_cfg(fname):
    """Grab configs necessary to create a detector.

    These are deep copied to allow for safe modification of parameters
    without influencing other tests.
    """
    cfg = _get_config_module(fname)
    return copy.deepcopy(cfg.model)
@pytest.mark.parametrize('loss_bbox', [
    dict(type='L1Loss', loss_weight=1.0),
    dict(type='GHMR', mu=0.02, bins=10, momentum=0.7, loss_weight=10.0),
    dict(type='IoULoss', loss_weight=1.0),
    dict(type='BoundedIoULoss', loss_weight=1.0),
    dict(type='GIoULoss', loss_weight=1.0),
    dict(type='DIoULoss', loss_weight=1.0),
    dict(type='CIoULoss', loss_weight=1.0),
    dict(type='MSELoss', loss_weight=1.0),
    dict(type='SmoothL1Loss', loss_weight=1.0),
    dict(type='BalancedL1Loss', loss_weight=1.0)
])
def test_bbox_loss_compatibility(loss_bbox):
    """Test loss_bbox compatibility.

    Using Faster R-CNN as a sample, modifying the loss function in the config
    file to verify the compatibility of the loss APIs. Each parametrized
    ``loss_bbox`` config is plugged into the bbox head and a full forward
    pass must produce a positive scalar loss.
    """
    # Faster R-CNN config dict
    config_path = '_base_/models/faster_rcnn_r50_fpn.py'
    cfg_model = _get_detector_cfg(config_path)
    input_shape = (1, 3, 256, 256)
    mm_inputs = _demo_mm_inputs(input_shape, num_items=[10])
    imgs = mm_inputs.pop('imgs')
    img_metas = mm_inputs.pop('img_metas')
    # IoU-family losses operate on decoded boxes, so the head must be
    # switched to regress decoded coordinates.
    if 'IoULoss' in loss_bbox['type']:
        cfg_model.roi_head.bbox_head.reg_decoded_bbox = True
    cfg_model.roi_head.bbox_head.loss_bbox = loss_bbox
    from mmdet.models import build_detector
    detector = build_detector(cfg_model)
    loss = detector.forward(imgs, img_metas, return_loss=True, **mm_inputs)
    assert isinstance(loss, dict)
    loss, _ = detector._parse_losses(loss)
    assert float(loss.item()) > 0
@pytest.mark.parametrize('loss_cls', [
    dict(type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
    dict(
        type='FocalLoss',
        use_sigmoid=True,
        gamma=2.0,
        alpha=0.25,
        loss_weight=1.0),
    dict(
        type='GHMC', bins=30, momentum=0.75, use_sigmoid=True, loss_weight=1.0)
])
def test_cls_loss_compatibility(loss_cls):
    """Test loss_cls compatibility.

    Using Faster R-CNN as a sample, modifying the loss function in the config
    file to verify the compatibility of the loss APIs. Each parametrized
    ``loss_cls`` config is plugged into the bbox head and a full forward
    pass must produce a positive scalar loss.
    """
    # Faster R-CNN config dict
    config_path = '_base_/models/faster_rcnn_r50_fpn.py'
    cfg_model = _get_detector_cfg(config_path)
    input_shape = (1, 3, 256, 256)
    mm_inputs = _demo_mm_inputs(input_shape, num_items=[10])
    imgs = mm_inputs.pop('imgs')
    img_metas = mm_inputs.pop('img_metas')
    # verify class loss function compatibility
    # for loss_cls in loss_clses:
    cfg_model.roi_head.bbox_head.loss_cls = loss_cls
    from mmdet.models import build_detector
    detector = build_detector(cfg_model)
    loss = detector.forward(imgs, img_metas, return_loss=True, **mm_inputs)
    assert isinstance(loss, dict)
    loss, _ = detector._parse_losses(loss)
    assert float(loss.item()) > 0
def _demo_mm_inputs(input_shape=(1, 3, 300, 300),
                    num_items=None, num_classes=10,
                    with_semantic=False):  # yapf: disable
    """Create a superset of inputs needed to run test or train batches.

    Args:
        input_shape (tuple):
            input batch dimensions as (N, C, H, W)
        num_items (None | List[int]):
            specifies the number of boxes in each batch item; a random count
            in [1, 10) is drawn per item when None
        num_classes (int):
            number of different labels a box might have
        with_semantic (bool):
            if True, also include a ``gt_semantic_seg`` map at 1/8 scale

    Returns:
        dict: imgs, img_metas, gt_bboxes, gt_labels, gt_bboxes_ignore and
        gt_masks (plus gt_semantic_seg when requested).
    """
    from mmdet.core import BitmapMasks
    (N, C, H, W) = input_shape
    # Seeded RNG makes images/boxes reproducible across calls.
    rng = np.random.RandomState(0)
    imgs = rng.rand(*input_shape)
    img_metas = [{
        'img_shape': (H, W, C),
        'ori_shape': (H, W, C),
        'pad_shape': (H, W, C),
        'filename': '<demo>.png',
        'scale_factor': np.array([1.1, 1.2, 1.1, 1.2]),
        'flip': False,
        'flip_direction': None,
    } for _ in range(N)]
    gt_bboxes = []
    gt_labels = []
    gt_masks = []
    for batch_idx in range(N):
        if num_items is None:
            num_boxes = rng.randint(1, 10)
        else:
            num_boxes = num_items[batch_idx]
        # Random centers/extents converted to clipped (tl, br) corners.
        cx, cy, bw, bh = rng.rand(num_boxes, 4).T
        tl_x = ((cx * W) - (W * bw / 2)).clip(0, W)
        tl_y = ((cy * H) - (H * bh / 2)).clip(0, H)
        br_x = ((cx * W) + (W * bw / 2)).clip(0, W)
        br_y = ((cy * H) + (H * bh / 2)).clip(0, H)
        boxes = np.vstack([tl_x, tl_y, br_x, br_y]).T
        class_idxs = rng.randint(1, num_classes, size=num_boxes)
        gt_bboxes.append(torch.FloatTensor(boxes))
        gt_labels.append(torch.LongTensor(class_idxs))
        # NOTE(review): masks use the global np.random, not the seeded rng,
        # so they are not reproducible — presumably intentional; verify.
        mask = np.random.randint(0, 2, (len(boxes), H, W), dtype=np.uint8)
        gt_masks.append(BitmapMasks(mask, H, W))
    mm_inputs = {
        'imgs': torch.FloatTensor(imgs).requires_grad_(True),
        'img_metas': img_metas,
        'gt_bboxes': gt_bboxes,
        'gt_labels': gt_labels,
        'gt_bboxes_ignore': None,
        'gt_masks': gt_masks,
    }
    if with_semantic:
        # assume gt_semantic_seg using scale 1/8 of the img
        gt_semantic_seg = np.random.randint(
            0, num_classes, (1, 1, H // 8, W // 8), dtype=np.uint8)
        mm_inputs.update(
            {'gt_semantic_seg': torch.ByteTensor(gt_semantic_seg)})
    return mm_inputs
| 6,361 | 30.49505 | 79 | py |
DSLA-DSLA | DSLA-DSLA/tests/test_models/test_backbones/test_pvt.py | import pytest
import torch
from mmdet.models.backbones.pvt import (PVTEncoderLayer,
PyramidVisionTransformer,
PyramidVisionTransformerV2)
def test_pvt_block():
    """Verify PVTEncoderLayer wiring and its forward output shape."""
    layer = PVTEncoderLayer(
        embed_dims=64, num_heads=4, feedforward_channels=256)
    # Constructor arguments must be forwarded to the sub-modules.
    assert layer.ffn.embed_dims == 64
    assert layer.attn.num_heads == 4
    assert layer.ffn.feedforward_channels == 256
    tokens = torch.randn(1, 56 * 56, 64)
    out = layer(tokens, (56, 56))
    assert out.shape == torch.Size([1, 56 * 56, 64])
def test_pvt():
    """Test PVT backbone.

    Covers constructor validation, absolute position embedding, and output
    shapes for divisible and non-divisible input sizes.
    """
    with pytest.raises(TypeError):
        # Pretrained arg must be str or None.
        PyramidVisionTransformer(pretrained=123)
    # test pretrained image size
    with pytest.raises(AssertionError):
        PyramidVisionTransformer(pretrain_img_size=(224, 224, 224))
    # Test absolute position embedding
    temp = torch.randn((1, 3, 224, 224))
    model = PyramidVisionTransformer(
        pretrain_img_size=224, use_abs_pos_embed=True)
    model.init_weights()
    model(temp)
    # Test normal inference
    temp = torch.randn((1, 3, 32, 32))
    model = PyramidVisionTransformer()
    outs = model(temp)
    assert outs[0].shape == (1, 64, 8, 8)
    assert outs[1].shape == (1, 128, 4, 4)
    assert outs[2].shape == (1, 320, 2, 2)
    assert outs[3].shape == (1, 512, 1, 1)
    # Test abnormal inference size (not divisible by the patch size)
    temp = torch.randn((1, 3, 33, 33))
    model = PyramidVisionTransformer()
    outs = model(temp)
    assert outs[0].shape == (1, 64, 8, 8)
    assert outs[1].shape == (1, 128, 4, 4)
    assert outs[2].shape == (1, 320, 2, 2)
    assert outs[3].shape == (1, 512, 1, 1)
    # Test abnormal inference size (non-square input)
    temp = torch.randn((1, 3, 112, 137))
    model = PyramidVisionTransformer()
    outs = model(temp)
    assert outs[0].shape == (1, 64, 28, 34)
    assert outs[1].shape == (1, 128, 14, 17)
    assert outs[2].shape == (1, 320, 7, 8)
    assert outs[3].shape == (1, 512, 3, 4)
def test_pvtv2():
    """Test PVTv2 backbone.

    Covers constructor validation and output shapes for divisible and
    non-divisible input sizes.
    """
    with pytest.raises(TypeError):
        # Pretrained arg must be str or None.
        PyramidVisionTransformerV2(pretrained=123)
    # test pretrained image size
    with pytest.raises(AssertionError):
        PyramidVisionTransformerV2(pretrain_img_size=(224, 224, 224))
    # Test normal inference
    temp = torch.randn((1, 3, 32, 32))
    model = PyramidVisionTransformerV2()
    outs = model(temp)
    assert outs[0].shape == (1, 64, 8, 8)
    assert outs[1].shape == (1, 128, 4, 4)
    assert outs[2].shape == (1, 320, 2, 2)
    assert outs[3].shape == (1, 512, 1, 1)
    # Test abnormal inference size (not divisible by the patch size)
    temp = torch.randn((1, 3, 31, 31))
    model = PyramidVisionTransformerV2()
    outs = model(temp)
    assert outs[0].shape == (1, 64, 8, 8)
    assert outs[1].shape == (1, 128, 4, 4)
    assert outs[2].shape == (1, 320, 2, 2)
    assert outs[3].shape == (1, 512, 1, 1)
    # Test abnormal inference size (non-square input)
    temp = torch.randn((1, 3, 112, 137))
    model = PyramidVisionTransformerV2()
    outs = model(temp)
    assert outs[0].shape == (1, 64, 28, 35)
    assert outs[1].shape == (1, 128, 14, 18)
    assert outs[2].shape == (1, 320, 7, 9)
    assert outs[3].shape == (1, 512, 4, 5)
| 3,332 | 31.048077 | 69 | py |
DSLA-DSLA | DSLA-DSLA/tests/test_models/test_backbones/test_hourglass.py | # Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmdet.models.backbones.hourglass import HourglassNet
def test_hourglass_backbone():
    """Check HourglassNet argument validation and feature shapes."""
    with pytest.raises(AssertionError):
        # HourglassNet's num_stacks should larger than 0
        HourglassNet(num_stacks=0)
    with pytest.raises(AssertionError):
        # len(stage_channels) should equal len(stage_blocks)
        HourglassNet(
            stage_channels=[256, 256, 384, 384, 384],
            stage_blocks=[2, 2, 2, 2, 2, 4])
    with pytest.raises(AssertionError):
        # len(stage_channels) should lagrer than downsample_times
        HourglassNet(
            downsample_times=5,
            stage_channels=[256, 256, 384, 384, 384],
            stage_blocks=[2, 2, 2, 2, 2])
    # HourglassNet-52 vs HourglassNet-104 differ only in num_stacks; the
    # backbone returns one 64x64 feature map per stack.
    imgs = torch.randn(1, 3, 256, 256)
    for num_stacks in (1, 2):
        model = HourglassNet(
            num_stacks=num_stacks,
            stage_channels=(64, 64, 96, 96, 96, 128),
            feat_channel=64)
        model.train()
        feats = model(imgs)
        assert len(feats) == num_stacks
        for feat in feats:
            assert feat.shape == torch.Size([1, 64, 64, 64])
| 1,464 | 28.3 | 65 | py |
DSLA-DSLA | DSLA-DSLA/tests/test_models/test_backbones/test_res2net.py | # Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmdet.models.backbones import Res2Net
from mmdet.models.backbones.res2net import Bottle2neck
from .utils import is_block
def test_res2net_bottle2neck():
    """Test Bottle2neck argument validation, DCN config and forward shape."""
    with pytest.raises(AssertionError):
        # Style must be in ['pytorch', 'caffe']
        Bottle2neck(64, 64, base_width=26, scales=4, style='tensorflow')
    with pytest.raises(AssertionError):
        # Scale must be larger than 1
        Bottle2neck(64, 64, base_width=26, scales=1, style='pytorch')
    # Test Res2Net Bottle2neck structure
    block = Bottle2neck(
        64, 64, base_width=26, stride=2, scales=4, style='pytorch')
    assert block.scales == 4
    # Test Res2Net Bottle2neck with DCN
    dcn = dict(type='DCN', deform_groups=1, fallback_on_stride=False)
    with pytest.raises(AssertionError):
        # conv_cfg must be None if dcn is not None
        Bottle2neck(
            64,
            64,
            base_width=26,
            scales=4,
            dcn=dcn,
            conv_cfg=dict(type='Conv'))
    # DCN alone (without conv_cfg) must construct without error.
    Bottle2neck(64, 64, dcn=dcn)
    # Test Res2Net Bottle2neck forward: output keeps the input shape.
    block = Bottle2neck(64, 16, base_width=26, scales=4)
    x = torch.randn(1, 64, 56, 56)
    x_out = block(x)
    assert x_out.shape == torch.Size([1, 64, 56, 56])
def test_res2net_backbone():
    """Check Res2Net depth validation and multi-level output shapes."""
    with pytest.raises(KeyError):
        # Res2Net depth should be in [50, 101, 152]
        Res2Net(depth=18)
    # Res2Net-50 with scales 4, base_width 26: every Bottle2neck must carry
    # the configured number of scales.
    model = Res2Net(depth=50, scales=4, base_width=26)
    assert all(m.scales == 4 for m in model.modules() if is_block(m))
    model.train()
    inputs = torch.randn(1, 3, 32, 32)
    outs = model(inputs)
    expected = [(1, 256, 8, 8), (1, 512, 4, 4), (1, 1024, 2, 2),
                (1, 2048, 1, 1)]
    assert len(outs) == len(expected)
    for out, shape in zip(outs, expected):
        assert out.shape == torch.Size(shape)
| 1,976 | 30.380952 | 72 | py |
DSLA-DSLA | DSLA-DSLA/tests/test_models/test_backbones/test_swin.py | import pytest
import torch
from mmdet.models.backbones.swin import SwinBlock, SwinTransformer
def test_swin_block():
    """Check SwinBlock wiring and forward shape, with and without with_cp."""
    block = SwinBlock(embed_dims=64, num_heads=4, feedforward_channels=256)
    # Constructor arguments must be forwarded to the sub-modules.
    assert block.ffn.embed_dims == 64
    assert block.attn.w_msa.num_heads == 4
    assert block.ffn.feedforward_channels == 256
    tokens = torch.randn(1, 56 * 56, 64)
    assert block(tokens, (56, 56)).shape == torch.Size([1, 56 * 56, 64])
    # The gradient-checkpointed variant must produce the same output shape.
    block = SwinBlock(
        embed_dims=64, num_heads=4, feedforward_channels=256, with_cp=True)
    assert block.with_cp
    tokens = torch.randn(1, 56 * 56, 64)
    assert block(tokens, (56, 56)).shape == torch.Size([1, 56 * 56, 64])
def test_swin_transformer():
    """Test Swin Transformer backbone.

    Covers constructor validation, absolute position embedding, patch norm,
    output shapes for various input sizes, and stage freezing.
    """
    with pytest.raises(TypeError):
        # Pretrained arg must be str or None.
        SwinTransformer(pretrained=123)
    with pytest.raises(AssertionError):
        # Because swin uses non-overlapping patch embed, so the stride of patch
        # embed must be equal to patch size.
        SwinTransformer(strides=(2, 2, 2, 2), patch_size=4)
    # test pretrained image size
    with pytest.raises(AssertionError):
        SwinTransformer(pretrain_img_size=(224, 224, 224))
    # Test absolute position embedding
    temp = torch.randn((1, 3, 224, 224))
    model = SwinTransformer(pretrain_img_size=224, use_abs_pos_embed=True)
    model.init_weights()
    model(temp)
    # Test patch norm
    model = SwinTransformer(patch_norm=False)
    model(temp)
    # Test normal inference
    temp = torch.randn((1, 3, 32, 32))
    model = SwinTransformer()
    outs = model(temp)
    assert outs[0].shape == (1, 96, 8, 8)
    assert outs[1].shape == (1, 192, 4, 4)
    assert outs[2].shape == (1, 384, 2, 2)
    assert outs[3].shape == (1, 768, 1, 1)
    # Test abnormal inference size (not divisible by the patch size)
    temp = torch.randn((1, 3, 31, 31))
    model = SwinTransformer()
    outs = model(temp)
    assert outs[0].shape == (1, 96, 8, 8)
    assert outs[1].shape == (1, 192, 4, 4)
    assert outs[2].shape == (1, 384, 2, 2)
    assert outs[3].shape == (1, 768, 1, 1)
    # Test abnormal inference size (non-square input)
    temp = torch.randn((1, 3, 112, 137))
    model = SwinTransformer()
    outs = model(temp)
    assert outs[0].shape == (1, 96, 28, 35)
    assert outs[1].shape == (1, 192, 14, 18)
    assert outs[2].shape == (1, 384, 7, 9)
    assert outs[3].shape == (1, 768, 4, 5)
    # Freezing all four stages must leave no trainable parameters.
    model = SwinTransformer(frozen_stages=4)
    model.train()
    for p in model.parameters():
        assert not p.requires_grad
| 2,648 | 30.915663 | 79 | py |
DSLA-DSLA | DSLA-DSLA/tests/test_models/test_backbones/test_resnet.py | # Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmcv import assert_params_all_zeros
from mmcv.ops import DeformConv2dPack
from torch.nn.modules import AvgPool2d, GroupNorm
from torch.nn.modules.batchnorm import _BatchNorm
from mmdet.models.backbones import ResNet, ResNetV1d
from mmdet.models.backbones.resnet import BasicBlock, Bottleneck
from mmdet.models.utils import ResLayer, SimplifiedBasicBlock
from .utils import check_norm_state, is_block, is_norm
def test_resnet_basic_block():
    """Test ResNet BasicBlock: unsupported features, structure and forward."""
    with pytest.raises(AssertionError):
        # Not implemented yet.
        dcn = dict(type='DCN', deform_groups=1, fallback_on_stride=False)
        BasicBlock(64, 64, dcn=dcn)
    with pytest.raises(AssertionError):
        # Not implemented yet.
        plugins = [
            dict(
                cfg=dict(type='ContextBlock', ratio=1. / 16),
                position='after_conv3')
        ]
        BasicBlock(64, 64, plugins=plugins)
    with pytest.raises(AssertionError):
        # Not implemented yet
        plugins = [
            dict(
                cfg=dict(
                    type='GeneralizedAttention',
                    spatial_range=-1,
                    num_heads=8,
                    attention_type='0010',
                    kv_stride=2),
                position='after_conv2')
        ]
        BasicBlock(64, 64, plugins=plugins)
    # test BasicBlock structure and forward
    block = BasicBlock(64, 64)
    assert block.conv1.in_channels == 64
    assert block.conv1.out_channels == 64
    assert block.conv1.kernel_size == (3, 3)
    assert block.conv2.in_channels == 64
    assert block.conv2.out_channels == 64
    assert block.conv2.kernel_size == (3, 3)
    x = torch.randn(1, 64, 56, 56)
    x_out = block(x)
    assert x_out.shape == torch.Size([1, 64, 56, 56])
    # Test BasicBlock with checkpoint forward
    block = BasicBlock(64, 64, with_cp=True)
    assert block.with_cp
    x = torch.randn(1, 64, 56, 56)
    x_out = block(x)
    assert x_out.shape == torch.Size([1, 64, 56, 56])
def test_resnet_bottleneck():
    """Test ResNet Bottleneck: validation, styles, DCN, plugins and forward.

    Plugin checks cover positions, duplicate-name handling, unknown types,
    and several combinations of ContextBlock / GeneralizedAttention /
    NonLocal2d inserted after conv2/conv3.
    """
    with pytest.raises(AssertionError):
        # Style must be in ['pytorch', 'caffe']
        Bottleneck(64, 64, style='tensorflow')
    with pytest.raises(AssertionError):
        # Allowed positions are 'after_conv1', 'after_conv2', 'after_conv3'
        plugins = [
            dict(
                cfg=dict(type='ContextBlock', ratio=1. / 16),
                position='after_conv4')
        ]
        Bottleneck(64, 16, plugins=plugins)
    with pytest.raises(AssertionError):
        # Need to specify different postfix to avoid duplicate plugin name
        plugins = [
            dict(
                cfg=dict(type='ContextBlock', ratio=1. / 16),
                position='after_conv3'),
            dict(
                cfg=dict(type='ContextBlock', ratio=1. / 16),
                position='after_conv3')
        ]
        Bottleneck(64, 16, plugins=plugins)
    with pytest.raises(KeyError):
        # Plugin type is not supported
        plugins = [dict(cfg=dict(type='WrongPlugin'), position='after_conv3')]
        Bottleneck(64, 16, plugins=plugins)
    # Test Bottleneck with checkpoint forward
    block = Bottleneck(64, 16, with_cp=True)
    assert block.with_cp
    x = torch.randn(1, 64, 56, 56)
    x_out = block(x)
    assert x_out.shape == torch.Size([1, 64, 56, 56])
    # Test Bottleneck style: 'pytorch' strides in conv2, 'caffe' in conv1
    block = Bottleneck(64, 64, stride=2, style='pytorch')
    assert block.conv1.stride == (1, 1)
    assert block.conv2.stride == (2, 2)
    block = Bottleneck(64, 64, stride=2, style='caffe')
    assert block.conv1.stride == (2, 2)
    assert block.conv2.stride == (1, 1)
    # Test Bottleneck DCN
    dcn = dict(type='DCN', deform_groups=1, fallback_on_stride=False)
    with pytest.raises(AssertionError):
        Bottleneck(64, 64, dcn=dcn, conv_cfg=dict(type='Conv'))
    block = Bottleneck(64, 64, dcn=dcn)
    assert isinstance(block.conv2, DeformConv2dPack)
    # Test Bottleneck forward
    block = Bottleneck(64, 16)
    x = torch.randn(1, 64, 56, 56)
    x_out = block(x)
    assert x_out.shape == torch.Size([1, 64, 56, 56])
    # Test Bottleneck with 1 ContextBlock after conv3
    plugins = [
        dict(
            cfg=dict(type='ContextBlock', ratio=1. / 16),
            position='after_conv3')
    ]
    block = Bottleneck(64, 16, plugins=plugins)
    assert block.context_block.in_channels == 64
    x = torch.randn(1, 64, 56, 56)
    x_out = block(x)
    assert x_out.shape == torch.Size([1, 64, 56, 56])
    # Test Bottleneck with 1 GeneralizedAttention after conv2
    plugins = [
        dict(
            cfg=dict(
                type='GeneralizedAttention',
                spatial_range=-1,
                num_heads=8,
                attention_type='0010',
                kv_stride=2),
            position='after_conv2')
    ]
    block = Bottleneck(64, 16, plugins=plugins)
    assert block.gen_attention_block.in_channels == 16
    x = torch.randn(1, 64, 56, 56)
    x_out = block(x)
    assert x_out.shape == torch.Size([1, 64, 56, 56])
    # Test Bottleneck with 1 GeneralizedAttention after conv2, 1 NonLocal2D
    # after conv2, 1 ContextBlock after conv3
    plugins = [
        dict(
            cfg=dict(
                type='GeneralizedAttention',
                spatial_range=-1,
                num_heads=8,
                attention_type='0010',
                kv_stride=2),
            position='after_conv2'),
        dict(cfg=dict(type='NonLocal2d'), position='after_conv2'),
        dict(
            cfg=dict(type='ContextBlock', ratio=1. / 16),
            position='after_conv3')
    ]
    block = Bottleneck(64, 16, plugins=plugins)
    assert block.gen_attention_block.in_channels == 16
    assert block.nonlocal_block.in_channels == 16
    assert block.context_block.in_channels == 64
    x = torch.randn(1, 64, 56, 56)
    x_out = block(x)
    assert x_out.shape == torch.Size([1, 64, 56, 56])
    # Test Bottleneck with 1 ContextBlock after conv2, 2 ContextBlock after
    # conv3
    plugins = [
        dict(
            cfg=dict(type='ContextBlock', ratio=1. / 16, postfix=1),
            position='after_conv2'),
        dict(
            cfg=dict(type='ContextBlock', ratio=1. / 16, postfix=2),
            position='after_conv3'),
        dict(
            cfg=dict(type='ContextBlock', ratio=1. / 16, postfix=3),
            position='after_conv3')
    ]
    block = Bottleneck(64, 16, plugins=plugins)
    assert block.context_block1.in_channels == 16
    assert block.context_block2.in_channels == 64
    assert block.context_block3.in_channels == 64
    x = torch.randn(1, 64, 56, 56)
    x_out = block(x)
    assert x_out.shape == torch.Size([1, 64, 56, 56])
def test_simplied_basic_block():
    """Test SimplifiedBasicBlock: unsupported features, structure, forward.

    NOTE(review): the function name has a typo ('simplied'); kept as-is so
    test selection by name is unaffected.
    """
    with pytest.raises(AssertionError):
        # Not implemented yet.
        dcn = dict(type='DCN', deform_groups=1, fallback_on_stride=False)
        SimplifiedBasicBlock(64, 64, dcn=dcn)
    with pytest.raises(AssertionError):
        # Not implemented yet.
        plugins = [
            dict(
                cfg=dict(type='ContextBlock', ratio=1. / 16),
                position='after_conv3')
        ]
        SimplifiedBasicBlock(64, 64, plugins=plugins)
    with pytest.raises(AssertionError):
        # Not implemented yet
        plugins = [
            dict(
                cfg=dict(
                    type='GeneralizedAttention',
                    spatial_range=-1,
                    num_heads=8,
                    attention_type='0010',
                    kv_stride=2),
                position='after_conv2')
        ]
        SimplifiedBasicBlock(64, 64, plugins=plugins)
    with pytest.raises(AssertionError):
        # Not implemented yet
        SimplifiedBasicBlock(64, 64, with_cp=True)
    # test SimplifiedBasicBlock structure and forward
    block = SimplifiedBasicBlock(64, 64)
    assert block.conv1.in_channels == 64
    assert block.conv1.out_channels == 64
    assert block.conv1.kernel_size == (3, 3)
    assert block.conv2.in_channels == 64
    assert block.conv2.out_channels == 64
    assert block.conv2.kernel_size == (3, 3)
    x = torch.randn(1, 64, 56, 56)
    x_out = block(x)
    assert x_out.shape == torch.Size([1, 64, 56, 56])
    # test SimplifiedBasicBlock without norm
    block = SimplifiedBasicBlock(64, 64, norm_cfg=None)
    assert block.norm1 is None
    assert block.norm2 is None
    x_out = block(x)
    assert x_out.shape == torch.Size([1, 64, 56, 56])
def test_resnet_res_layer():
    """Test ResLayer: downsample placement, stride, avg_down and forward."""
    # Test ResLayer of 3 Bottleneck w\o downsample
    layer = ResLayer(Bottleneck, 64, 16, 3)
    assert len(layer) == 3
    assert layer[0].conv1.in_channels == 64
    assert layer[0].conv1.out_channels == 16
    for i in range(1, len(layer)):
        assert layer[i].conv1.in_channels == 64
        assert layer[i].conv1.out_channels == 16
    for i in range(len(layer)):
        assert layer[i].downsample is None
    x = torch.randn(1, 64, 56, 56)
    x_out = layer(x)
    assert x_out.shape == torch.Size([1, 64, 56, 56])
    # Test ResLayer of 3 Bottleneck with downsample (only the first block)
    layer = ResLayer(Bottleneck, 64, 64, 3)
    assert layer[0].downsample[0].out_channels == 256
    for i in range(1, len(layer)):
        assert layer[i].downsample is None
    x = torch.randn(1, 64, 56, 56)
    x_out = layer(x)
    assert x_out.shape == torch.Size([1, 256, 56, 56])
    # Test ResLayer of 3 Bottleneck with stride=2
    layer = ResLayer(Bottleneck, 64, 64, 3, stride=2)
    assert layer[0].downsample[0].out_channels == 256
    assert layer[0].downsample[0].stride == (2, 2)
    for i in range(1, len(layer)):
        assert layer[i].downsample is None
    x = torch.randn(1, 64, 56, 56)
    x_out = layer(x)
    assert x_out.shape == torch.Size([1, 256, 28, 28])
    # Test ResLayer of 3 Bottleneck with stride=2 and average downsample
    # (avg_down puts an AvgPool2d before a stride-1 conv)
    layer = ResLayer(Bottleneck, 64, 64, 3, stride=2, avg_down=True)
    assert isinstance(layer[0].downsample[0], AvgPool2d)
    assert layer[0].downsample[1].out_channels == 256
    assert layer[0].downsample[1].stride == (1, 1)
    for i in range(1, len(layer)):
        assert layer[i].downsample is None
    x = torch.randn(1, 64, 56, 56)
    x_out = layer(x)
    assert x_out.shape == torch.Size([1, 256, 28, 28])
    # Test ResLayer of 3 BasicBlock with stride=2 and downsample_first=False
    # (downsample moves to the last block instead of the first)
    layer = ResLayer(BasicBlock, 64, 64, 3, stride=2, downsample_first=False)
    assert layer[2].downsample[0].out_channels == 64
    assert layer[2].downsample[0].stride == (2, 2)
    for i in range(len(layer) - 1):
        assert layer[i].downsample is None
    x = torch.randn(1, 64, 56, 56)
    x_out = layer(x)
    assert x_out.shape == torch.Size([1, 64, 28, 28])
def test_resnest_stem():
    """Test stem channel configuration for ResNet and ResNetV1d.

    Checks how ``stem_channels`` defaults from ``base_channels`` and how an
    explicit value propagates to conv1/norm1 and the first residual layer.
    """
    # Test default stem_channels
    model = ResNet(50)
    assert model.stem_channels == 64
    assert model.conv1.out_channels == 64
    assert model.norm1.num_features == 64
    # Test default stem_channels, with base_channels=3
    model = ResNet(50, base_channels=3)
    assert model.stem_channels == 3
    assert model.conv1.out_channels == 3
    assert model.norm1.num_features == 3
    assert model.layer1[0].conv1.in_channels == 3
    # Test stem_channels=3
    model = ResNet(50, stem_channels=3)
    assert model.stem_channels == 3
    assert model.conv1.out_channels == 3
    assert model.norm1.num_features == 3
    assert model.layer1[0].conv1.in_channels == 3
    # Test stem_channels=3, with base_channels=2 (explicit value wins)
    model = ResNet(50, stem_channels=3, base_channels=2)
    assert model.stem_channels == 3
    assert model.conv1.out_channels == 3
    assert model.norm1.num_features == 3
    assert model.layer1[0].conv1.in_channels == 3
    # Test V1d stem_channels: the deep stem's first two convs use half the
    # stem channels, the last conv uses the full stem_channels.
    model = ResNetV1d(depth=50, stem_channels=6)
    model.train()
    assert model.stem[0].out_channels == 3
    assert model.stem[1].num_features == 3
    assert model.stem[3].out_channels == 3
    assert model.stem[4].num_features == 3
    assert model.stem[6].out_channels == 6
    assert model.stem[7].num_features == 6
    assert model.layer1[0].conv1.in_channels == 6
def test_resnet_backbone():
    """Test resnet backbone.

    Covers constructor validation, norm_eval / frozen stages, forward output
    shapes for several configurations (small ``base_channels`` keeps the test
    fast), plugin insertion, and zero-init of residual branches.
    """
    with pytest.raises(KeyError):
        # ResNet depth should be in [18, 34, 50, 101, 152]
        ResNet(20)
    with pytest.raises(AssertionError):
        # In ResNet: 1 <= num_stages <= 4
        ResNet(50, num_stages=0)
    with pytest.raises(AssertionError):
        # len(stage_with_dcn) == num_stages
        dcn = dict(type='DCN', deform_groups=1, fallback_on_stride=False)
        ResNet(50, dcn=dcn, stage_with_dcn=(True, ))
    with pytest.raises(AssertionError):
        # len(stage_with_plugin) == num_stages
        plugins = [
            dict(
                cfg=dict(type='ContextBlock', ratio=1. / 16),
                stages=(False, True, True),
                position='after_conv3')
        ]
        ResNet(50, plugins=plugins)
    with pytest.raises(AssertionError):
        # In ResNet: 1 <= num_stages <= 4
        ResNet(50, num_stages=5)
    with pytest.raises(AssertionError):
        # len(strides) == len(dilations) == num_stages
        ResNet(50, strides=(1, ), dilations=(1, 1), num_stages=3)
    with pytest.raises(TypeError):
        # pretrained must be a string path
        model = ResNet(50, pretrained=0)
    with pytest.raises(AssertionError):
        # Style must be in ['pytorch', 'caffe']
        ResNet(50, style='tensorflow')
    # Test ResNet50 norm_eval=True: BN layers stay in eval mode after train()
    model = ResNet(50, norm_eval=True, base_channels=1)
    model.train()
    assert check_norm_state(model.modules(), False)
    # Test ResNet50 with torchvision pretrained weight
    model = ResNet(
        depth=50, norm_eval=True, pretrained='torchvision://resnet50')
    model.train()
    assert check_norm_state(model.modules(), False)
    # Test ResNet50 with first stage frozen: stem + layer1 params must not
    # require grad and their BN layers must be in eval mode.
    frozen_stages = 1
    model = ResNet(50, frozen_stages=frozen_stages, base_channels=1)
    model.train()
    assert model.norm1.training is False
    for layer in [model.conv1, model.norm1]:
        for param in layer.parameters():
            assert param.requires_grad is False
    for i in range(1, frozen_stages + 1):
        layer = getattr(model, f'layer{i}')
        for mod in layer.modules():
            if isinstance(mod, _BatchNorm):
                assert mod.training is False
        for param in layer.parameters():
            assert param.requires_grad is False
    # Test ResNet50V1d with first stage frozen (V1d uses a deep stem)
    model = ResNetV1d(depth=50, frozen_stages=frozen_stages, base_channels=2)
    assert len(model.stem) == 9
    model.train()
    assert check_norm_state(model.stem, False)
    for param in model.stem.parameters():
        assert param.requires_grad is False
    for i in range(1, frozen_stages + 1):
        layer = getattr(model, f'layer{i}')
        for mod in layer.modules():
            if isinstance(mod, _BatchNorm):
                assert mod.training is False
        for param in layer.parameters():
            assert param.requires_grad is False
    # Test ResNet18 forward
    model = ResNet(18)
    model.train()
    imgs = torch.randn(1, 3, 32, 32)
    feat = model(imgs)
    assert len(feat) == 4
    assert feat[0].shape == torch.Size([1, 64, 8, 8])
    assert feat[1].shape == torch.Size([1, 128, 4, 4])
    assert feat[2].shape == torch.Size([1, 256, 2, 2])
    assert feat[3].shape == torch.Size([1, 512, 1, 1])
    # Test ResNet18 with checkpoint forward
    model = ResNet(18, with_cp=True)
    for m in model.modules():
        if is_block(m):
            assert m.with_cp
    # Test ResNet50 with BatchNorm forward
    model = ResNet(50, base_channels=1)
    for m in model.modules():
        if is_norm(m):
            assert isinstance(m, _BatchNorm)
    model.train()
    imgs = torch.randn(1, 3, 32, 32)
    feat = model(imgs)
    assert len(feat) == 4
    assert feat[0].shape == torch.Size([1, 4, 8, 8])
    assert feat[1].shape == torch.Size([1, 8, 4, 4])
    assert feat[2].shape == torch.Size([1, 16, 2, 2])
    assert feat[3].shape == torch.Size([1, 32, 1, 1])
    # Test ResNet50 with layers 1, 2, 3 out forward
    model = ResNet(50, out_indices=(0, 1, 2), base_channels=1)
    model.train()
    imgs = torch.randn(1, 3, 32, 32)
    feat = model(imgs)
    assert len(feat) == 3
    assert feat[0].shape == torch.Size([1, 4, 8, 8])
    assert feat[1].shape == torch.Size([1, 8, 4, 4])
    assert feat[2].shape == torch.Size([1, 16, 2, 2])
    # Test ResNet50 with checkpoint forward
    model = ResNet(50, with_cp=True, base_channels=1)
    for m in model.modules():
        if is_block(m):
            assert m.with_cp
    model.train()
    imgs = torch.randn(1, 3, 32, 32)
    feat = model(imgs)
    assert len(feat) == 4
    assert feat[0].shape == torch.Size([1, 4, 8, 8])
    assert feat[1].shape == torch.Size([1, 8, 4, 4])
    assert feat[2].shape == torch.Size([1, 16, 2, 2])
    assert feat[3].shape == torch.Size([1, 32, 1, 1])
    # Test ResNet50 with GroupNorm forward
    model = ResNet(
        50,
        base_channels=4,
        norm_cfg=dict(type='GN', num_groups=2, requires_grad=True))
    for m in model.modules():
        if is_norm(m):
            assert isinstance(m, GroupNorm)
    model.train()
    imgs = torch.randn(1, 3, 32, 32)
    feat = model(imgs)
    assert len(feat) == 4
    assert feat[0].shape == torch.Size([1, 16, 8, 8])
    assert feat[1].shape == torch.Size([1, 32, 4, 4])
    assert feat[2].shape == torch.Size([1, 64, 2, 2])
    assert feat[3].shape == torch.Size([1, 128, 1, 1])
    # Test ResNet50 with 1 GeneralizedAttention after conv2, 1 NonLocal2D
    # after conv2, 1 ContextBlock after conv3 in layers 2, 3, 4
    plugins = [
        dict(
            cfg=dict(
                type='GeneralizedAttention',
                spatial_range=-1,
                num_heads=8,
                attention_type='0010',
                kv_stride=2),
            stages=(False, True, True, True),
            position='after_conv2'),
        dict(cfg=dict(type='NonLocal2d'), position='after_conv2'),
        dict(
            cfg=dict(type='ContextBlock', ratio=1. / 16),
            stages=(False, True, True, False),
            position='after_conv3')
    ]
    model = ResNet(50, plugins=plugins, base_channels=8)
    # Plugins without a `stages` entry (NonLocal2d) go into every stage;
    # staged plugins only appear where their stage flag is True.
    for m in model.layer1.modules():
        if is_block(m):
            assert not hasattr(m, 'context_block')
            assert not hasattr(m, 'gen_attention_block')
            assert m.nonlocal_block.in_channels == 8
    for m in model.layer2.modules():
        if is_block(m):
            assert m.nonlocal_block.in_channels == 16
            assert m.gen_attention_block.in_channels == 16
            assert m.context_block.in_channels == 64
    for m in model.layer3.modules():
        if is_block(m):
            assert m.nonlocal_block.in_channels == 32
            assert m.gen_attention_block.in_channels == 32
            assert m.context_block.in_channels == 128
    for m in model.layer4.modules():
        if is_block(m):
            assert m.nonlocal_block.in_channels == 64
            assert m.gen_attention_block.in_channels == 64
            assert not hasattr(m, 'context_block')
    model.train()
    imgs = torch.randn(1, 3, 32, 32)
    feat = model(imgs)
    assert len(feat) == 4
    assert feat[0].shape == torch.Size([1, 32, 8, 8])
    assert feat[1].shape == torch.Size([1, 64, 4, 4])
    assert feat[2].shape == torch.Size([1, 128, 2, 2])
    assert feat[3].shape == torch.Size([1, 256, 1, 1])
    # Test ResNet50 with 1 ContextBlock after conv2, 1 ContextBlock after
    # conv3 in layers 2, 3, 4
    plugins = [
        dict(
            cfg=dict(type='ContextBlock', ratio=1. / 16, postfix=1),
            stages=(False, True, True, False),
            position='after_conv3'),
        dict(
            cfg=dict(type='ContextBlock', ratio=1. / 16, postfix=2),
            stages=(False, True, True, False),
            position='after_conv3')
    ]
    model = ResNet(50, plugins=plugins, base_channels=8)
    # The postfix disambiguates attribute names: context_block1/2, never the
    # bare `context_block`.
    for m in model.layer1.modules():
        if is_block(m):
            assert not hasattr(m, 'context_block')
            assert not hasattr(m, 'context_block1')
            assert not hasattr(m, 'context_block2')
    for m in model.layer2.modules():
        if is_block(m):
            assert not hasattr(m, 'context_block')
            assert m.context_block1.in_channels == 64
            assert m.context_block2.in_channels == 64
    for m in model.layer3.modules():
        if is_block(m):
            assert not hasattr(m, 'context_block')
            assert m.context_block1.in_channels == 128
            assert m.context_block2.in_channels == 128
    for m in model.layer4.modules():
        if is_block(m):
            assert not hasattr(m, 'context_block')
            assert not hasattr(m, 'context_block1')
            assert not hasattr(m, 'context_block2')
    model.train()
    imgs = torch.randn(1, 3, 32, 32)
    feat = model(imgs)
    assert len(feat) == 4
    assert feat[0].shape == torch.Size([1, 32, 8, 8])
    assert feat[1].shape == torch.Size([1, 64, 4, 4])
    assert feat[2].shape == torch.Size([1, 128, 2, 2])
    assert feat[3].shape == torch.Size([1, 256, 1, 1])
    # Test ResNet50 zero initialization of residual: the last norm of each
    # residual branch must be all-zeros after init_weights().
    model = ResNet(50, zero_init_residual=True, base_channels=1)
    model.init_weights()
    for m in model.modules():
        if isinstance(m, Bottleneck):
            assert assert_params_all_zeros(m.norm3)
        elif isinstance(m, BasicBlock):
            assert assert_params_all_zeros(m.norm2)
    model.train()
    imgs = torch.randn(1, 3, 32, 32)
    feat = model(imgs)
    assert len(feat) == 4
    assert feat[0].shape == torch.Size([1, 4, 8, 8])
    assert feat[1].shape == torch.Size([1, 8, 4, 4])
    assert feat[2].shape == torch.Size([1, 16, 2, 2])
    assert feat[3].shape == torch.Size([1, 32, 1, 1])
    # Test ResNetV1d forward
    model = ResNetV1d(depth=50, base_channels=2)
    model.train()
    imgs = torch.randn(1, 3, 32, 32)
    feat = model(imgs)
    assert len(feat) == 4
    assert feat[0].shape == torch.Size([1, 8, 8, 8])
    assert feat[1].shape == torch.Size([1, 16, 4, 4])
    assert feat[2].shape == torch.Size([1, 32, 2, 2])
    assert feat[3].shape == torch.Size([1, 64, 1, 1])
| 22,380 | 34.35703 | 78 | py |
DSLA-DSLA | DSLA-DSLA/tests/test_models/test_backbones/utils.py | # Copyright (c) OpenMMLab. All rights reserved.
from torch.nn.modules import GroupNorm
from torch.nn.modules.batchnorm import _BatchNorm
from mmdet.models.backbones.res2net import Bottle2neck
from mmdet.models.backbones.resnet import BasicBlock, Bottleneck
from mmdet.models.backbones.resnext import Bottleneck as BottleneckX
from mmdet.models.utils import SimplifiedBasicBlock
def is_block(modules):
    """Return True when *modules* is one of the known ResNet-style blocks."""
    block_types = (BasicBlock, Bottleneck, BottleneckX, Bottle2neck,
                   SimplifiedBasicBlock)
    return isinstance(modules, block_types)
def is_norm(modules):
    """Return True when *modules* is a GroupNorm or any BatchNorm variant."""
    norm_types = (GroupNorm, _BatchNorm)
    return isinstance(modules, norm_types)
def check_norm_state(modules, train_state):
    """Return True if every BatchNorm in *modules* matches *train_state*."""
    return all(mod.training == train_state for mod in modules
               if isinstance(mod, _BatchNorm))
| 1,026 | 30.121212 | 77 | py |
DSLA-DSLA | DSLA-DSLA/tests/test_models/test_backbones/test_mobilenet_v2.py | # Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from torch.nn.modules import GroupNorm
from torch.nn.modules.batchnorm import _BatchNorm
from mmdet.models.backbones.mobilenet_v2 import MobileNetV2
from .utils import check_norm_state, is_block, is_norm
def test_mobilenetv2_backbone():
    """Test MobileNetV2 backbone: validation, freezing, norm types, and
    forward output shapes for several widen factors and out_indices."""
    with pytest.raises(ValueError):
        # frozen_stages must in range(-1, 8)
        MobileNetV2(frozen_stages=8)
    with pytest.raises(ValueError):
        # out_indices in range(-1, 8)
        MobileNetV2(out_indices=[8])
    # Test MobileNetV2 with first stage frozen: conv1 and layer1 params must
    # not require grad, and their BN layers must stay in eval mode.
    frozen_stages = 1
    model = MobileNetV2(frozen_stages=frozen_stages)
    model.train()
    for mod in model.conv1.modules():
        for param in mod.parameters():
            assert param.requires_grad is False
    for i in range(1, frozen_stages + 1):
        layer = getattr(model, f'layer{i}')
        for mod in layer.modules():
            if isinstance(mod, _BatchNorm):
                assert mod.training is False
        for param in layer.parameters():
            assert param.requires_grad is False
    # Test MobileNetV2 with norm_eval=True
    model = MobileNetV2(norm_eval=True)
    model.train()
    assert check_norm_state(model.modules(), False)
    # Test MobileNetV2 forward with widen_factor=1.0
    model = MobileNetV2(widen_factor=1.0, out_indices=range(0, 8))
    model.train()
    assert check_norm_state(model.modules(), True)
    imgs = torch.randn(1, 3, 224, 224)
    feat = model(imgs)
    assert len(feat) == 8
    assert feat[0].shape == torch.Size((1, 16, 112, 112))
    assert feat[1].shape == torch.Size((1, 24, 56, 56))
    assert feat[2].shape == torch.Size((1, 32, 28, 28))
    assert feat[3].shape == torch.Size((1, 64, 14, 14))
    assert feat[4].shape == torch.Size((1, 96, 14, 14))
    assert feat[5].shape == torch.Size((1, 160, 7, 7))
    assert feat[6].shape == torch.Size((1, 320, 7, 7))
    assert feat[7].shape == torch.Size((1, 1280, 7, 7))
    # Test MobileNetV2 forward with widen_factor=0.5
    model = MobileNetV2(widen_factor=0.5, out_indices=range(0, 7))
    model.train()
    imgs = torch.randn(1, 3, 224, 224)
    feat = model(imgs)
    assert len(feat) == 7
    assert feat[0].shape == torch.Size((1, 8, 112, 112))
    assert feat[1].shape == torch.Size((1, 16, 56, 56))
    assert feat[2].shape == torch.Size((1, 16, 28, 28))
    assert feat[3].shape == torch.Size((1, 32, 14, 14))
    assert feat[4].shape == torch.Size((1, 48, 14, 14))
    assert feat[5].shape == torch.Size((1, 80, 7, 7))
    assert feat[6].shape == torch.Size((1, 160, 7, 7))
    # Test MobileNetV2 forward with widen_factor=2.0
    model = MobileNetV2(widen_factor=2.0, out_indices=range(0, 8))
    model.train()
    imgs = torch.randn(1, 3, 224, 224)
    feat = model(imgs)
    assert feat[0].shape == torch.Size((1, 32, 112, 112))
    assert feat[1].shape == torch.Size((1, 48, 56, 56))
    assert feat[2].shape == torch.Size((1, 64, 28, 28))
    assert feat[3].shape == torch.Size((1, 128, 14, 14))
    assert feat[4].shape == torch.Size((1, 192, 14, 14))
    assert feat[5].shape == torch.Size((1, 320, 7, 7))
    assert feat[6].shape == torch.Size((1, 640, 7, 7))
    assert feat[7].shape == torch.Size((1, 2560, 7, 7))
    # Test MobileNetV2 forward with dict(type='ReLU')
    model = MobileNetV2(
        widen_factor=1.0, act_cfg=dict(type='ReLU'), out_indices=range(0, 7))
    model.train()
    imgs = torch.randn(1, 3, 224, 224)
    feat = model(imgs)
    assert len(feat) == 7
    assert feat[0].shape == torch.Size((1, 16, 112, 112))
    assert feat[1].shape == torch.Size((1, 24, 56, 56))
    assert feat[2].shape == torch.Size((1, 32, 28, 28))
    assert feat[3].shape == torch.Size((1, 64, 14, 14))
    assert feat[4].shape == torch.Size((1, 96, 14, 14))
    assert feat[5].shape == torch.Size((1, 160, 7, 7))
    assert feat[6].shape == torch.Size((1, 320, 7, 7))
    # Test MobileNetV2 with BatchNorm forward
    model = MobileNetV2(widen_factor=1.0, out_indices=range(0, 7))
    for m in model.modules():
        if is_norm(m):
            assert isinstance(m, _BatchNorm)
    model.train()
    imgs = torch.randn(1, 3, 224, 224)
    feat = model(imgs)
    assert len(feat) == 7
    assert feat[0].shape == torch.Size((1, 16, 112, 112))
    assert feat[1].shape == torch.Size((1, 24, 56, 56))
    assert feat[2].shape == torch.Size((1, 32, 28, 28))
    assert feat[3].shape == torch.Size((1, 64, 14, 14))
    assert feat[4].shape == torch.Size((1, 96, 14, 14))
    assert feat[5].shape == torch.Size((1, 160, 7, 7))
    assert feat[6].shape == torch.Size((1, 320, 7, 7))
    # Test MobileNetV2 with GroupNorm forward
    model = MobileNetV2(
        widen_factor=1.0,
        norm_cfg=dict(type='GN', num_groups=2, requires_grad=True),
        out_indices=range(0, 7))
    for m in model.modules():
        if is_norm(m):
            assert isinstance(m, GroupNorm)
    model.train()
    imgs = torch.randn(1, 3, 224, 224)
    feat = model(imgs)
    assert len(feat) == 7
    assert feat[0].shape == torch.Size((1, 16, 112, 112))
    assert feat[1].shape == torch.Size((1, 24, 56, 56))
    assert feat[2].shape == torch.Size((1, 32, 28, 28))
    assert feat[3].shape == torch.Size((1, 64, 14, 14))
    assert feat[4].shape == torch.Size((1, 96, 14, 14))
    assert feat[5].shape == torch.Size((1, 160, 7, 7))
    assert feat[6].shape == torch.Size((1, 320, 7, 7))
    # Test MobileNetV2 with layers 1, 3, 5 out forward
    model = MobileNetV2(widen_factor=1.0, out_indices=(0, 2, 4))
    model.train()
    imgs = torch.randn(1, 3, 224, 224)
    feat = model(imgs)
    assert len(feat) == 3
    assert feat[0].shape == torch.Size((1, 16, 112, 112))
    assert feat[1].shape == torch.Size((1, 32, 28, 28))
    assert feat[2].shape == torch.Size((1, 96, 14, 14))
    # Test MobileNetV2 with checkpoint forward
    model = MobileNetV2(
        widen_factor=1.0, with_cp=True, out_indices=range(0, 7))
    for m in model.modules():
        if is_block(m):
            assert m.with_cp
    model.train()
    imgs = torch.randn(1, 3, 224, 224)
    feat = model(imgs)
    assert len(feat) == 7
    assert feat[0].shape == torch.Size((1, 16, 112, 112))
    assert feat[1].shape == torch.Size((1, 24, 56, 56))
    assert feat[2].shape == torch.Size((1, 32, 28, 28))
    assert feat[3].shape == torch.Size((1, 64, 14, 14))
    assert feat[4].shape == torch.Size((1, 96, 14, 14))
    assert feat[5].shape == torch.Size((1, 160, 7, 7))
    assert feat[6].shape == torch.Size((1, 320, 7, 7))
| 6,546 | 36.626437 | 77 | py |
DSLA-DSLA | DSLA-DSLA/tests/test_models/test_backbones/test_hrnet.py | # Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmdet.models.backbones.hrnet import HRModule, HRNet
from mmdet.models.backbones.resnet import BasicBlock, Bottleneck
@pytest.mark.parametrize('block', [BasicBlock, Bottleneck])
def test_hrmodule(block):
    """Forward HRModule in multi-scale and single-scale modes, checking that
    output count and per-branch feature shapes are preserved."""
    branch_channels = (32, 64)
    in_channels = [block.expansion * c for c in branch_channels]

    # Multi-scale output: one feature map per branch comes back.
    hrmodule = HRModule(
        num_branches=2,
        blocks=block,
        in_channels=in_channels,
        num_blocks=(4, 4),
        num_channels=branch_channels,
    )
    inputs = [
        torch.randn(1, in_channels[0], 64, 64),
        torch.randn(1, in_channels[1], 32, 32),
    ]
    outputs = hrmodule(inputs)
    assert len(outputs) == 2
    assert outputs[0].shape == torch.Size([1, in_channels[0], 64, 64])
    assert outputs[1].shape == torch.Size([1, in_channels[1], 32, 32])

    # Single-scale output: only the highest-resolution branch is returned.
    hrmodule = HRModule(
        num_branches=2,
        blocks=block,
        in_channels=in_channels,
        num_blocks=(4, 4),
        num_channels=branch_channels,
        multiscale_output=False,
    )
    inputs = [
        torch.randn(1, in_channels[0], 64, 64),
        torch.randn(1, in_channels[1], 32, 32),
    ]
    outputs = hrmodule(inputs)
    assert len(outputs) == 1
    assert outputs[0].shape == torch.Size([1, in_channels[0], 64, 64])
def test_hrnet_backbone():
    """Test HRNet backbone: config validation plus multi-scale and
    single-scale forward output shapes."""
    # only have 3 stages
    extra = dict(
        stage1=dict(
            num_modules=1,
            num_branches=1,
            block='BOTTLENECK',
            num_blocks=(4, ),
            num_channels=(64, )),
        stage2=dict(
            num_modules=1,
            num_branches=2,
            block='BASIC',
            num_blocks=(4, 4),
            num_channels=(32, 64)),
        stage3=dict(
            num_modules=4,
            num_branches=3,
            block='BASIC',
            num_blocks=(4, 4, 4),
            num_channels=(32, 64, 128)))
    with pytest.raises(AssertionError):
        # HRNet now only support 4 stages
        HRNet(extra=extra)
    extra['stage4'] = dict(
        num_modules=3,
        num_branches=3,  # should be 4
        block='BASIC',
        num_blocks=(4, 4, 4, 4),
        num_channels=(32, 64, 128, 256))
    with pytest.raises(AssertionError):
        # len(num_blocks) should equal num_branches
        HRNet(extra=extra)
    # Repair the config so the remaining checks exercise a valid model.
    extra['stage4']['num_branches'] = 4
    # Test hrnetv2p_w32
    model = HRNet(extra=extra)
    model.init_weights()
    model.train()
    imgs = torch.randn(1, 3, 256, 256)
    feats = model(imgs)
    assert len(feats) == 4
    assert feats[0].shape == torch.Size([1, 32, 64, 64])
    assert feats[3].shape == torch.Size([1, 256, 8, 8])
    # Test single scale output
    model = HRNet(extra=extra, multiscale_output=False)
    model.init_weights()
    model.train()
    imgs = torch.randn(1, 3, 256, 256)
    feats = model(imgs)
    assert len(feats) == 1
    assert feats[0].shape == torch.Size([1, 32, 64, 64])
| 3,089 | 26.589286 | 68 | py |
DSLA-DSLA | DSLA-DSLA/tests/test_models/test_backbones/test_csp_darknet.py | # Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from torch.nn.modules.batchnorm import _BatchNorm
from mmdet.models.backbones.csp_darknet import CSPDarknet
from .utils import check_norm_state, is_norm
def test_csp_darknet_backbone():
    """Test CSPDarknet backbone: validation, stage freezing, norm handling,
    and forward output shapes for P5/P6 arches and a custom arch."""
    with pytest.raises(ValueError):
        # frozen_stages must in range(-1, len(arch_setting) + 1)
        CSPDarknet(frozen_stages=6)
    with pytest.raises(AssertionError):
        # out_indices in range(len(arch_setting) + 1)
        CSPDarknet(out_indices=[6])
    # Test CSPDarknet with first stage frozen: stem and stage1 params must not
    # require grad and their BN layers must stay in eval mode.
    frozen_stages = 1
    model = CSPDarknet(frozen_stages=frozen_stages)
    model.train()
    for mod in model.stem.modules():
        for param in mod.parameters():
            assert param.requires_grad is False
    for i in range(1, frozen_stages + 1):
        layer = getattr(model, f'stage{i}')
        for mod in layer.modules():
            if isinstance(mod, _BatchNorm):
                assert mod.training is False
        for param in layer.parameters():
            assert param.requires_grad is False
    # Test CSPDarknet with norm_eval=True
    model = CSPDarknet(norm_eval=True)
    model.train()
    assert check_norm_state(model.modules(), False)
    # Test CSPDarknet-P5 forward with widen_factor=0.5
    model = CSPDarknet(arch='P5', widen_factor=0.25, out_indices=range(0, 5))
    model.train()
    imgs = torch.randn(1, 3, 64, 64)
    feat = model(imgs)
    assert len(feat) == 5
    assert feat[0].shape == torch.Size((1, 16, 32, 32))
    assert feat[1].shape == torch.Size((1, 32, 16, 16))
    assert feat[2].shape == torch.Size((1, 64, 8, 8))
    assert feat[3].shape == torch.Size((1, 128, 4, 4))
    assert feat[4].shape == torch.Size((1, 256, 2, 2))
    # Test CSPDarknet-P6 forward with widen_factor=0.5
    model = CSPDarknet(
        arch='P6',
        widen_factor=0.25,
        out_indices=range(0, 6),
        spp_kernal_sizes=(3, 5, 7))
    model.train()
    imgs = torch.randn(1, 3, 128, 128)
    feat = model(imgs)
    assert feat[0].shape == torch.Size((1, 16, 64, 64))
    assert feat[1].shape == torch.Size((1, 32, 32, 32))
    assert feat[2].shape == torch.Size((1, 64, 16, 16))
    assert feat[3].shape == torch.Size((1, 128, 8, 8))
    assert feat[4].shape == torch.Size((1, 192, 4, 4))
    assert feat[5].shape == torch.Size((1, 256, 2, 2))
    # Test CSPDarknet forward with dict(type='ReLU')
    model = CSPDarknet(
        widen_factor=0.125, act_cfg=dict(type='ReLU'), out_indices=range(0, 5))
    model.train()
    imgs = torch.randn(1, 3, 64, 64)
    feat = model(imgs)
    assert len(feat) == 5
    assert feat[0].shape == torch.Size((1, 8, 32, 32))
    assert feat[1].shape == torch.Size((1, 16, 16, 16))
    assert feat[2].shape == torch.Size((1, 32, 8, 8))
    assert feat[3].shape == torch.Size((1, 64, 4, 4))
    assert feat[4].shape == torch.Size((1, 128, 2, 2))
    # Test CSPDarknet with BatchNorm forward
    model = CSPDarknet(widen_factor=0.125, out_indices=range(0, 5))
    for m in model.modules():
        if is_norm(m):
            assert isinstance(m, _BatchNorm)
    model.train()
    imgs = torch.randn(1, 3, 64, 64)
    feat = model(imgs)
    assert len(feat) == 5
    assert feat[0].shape == torch.Size((1, 8, 32, 32))
    assert feat[1].shape == torch.Size((1, 16, 16, 16))
    assert feat[2].shape == torch.Size((1, 32, 8, 8))
    assert feat[3].shape == torch.Size((1, 64, 4, 4))
    assert feat[4].shape == torch.Size((1, 128, 2, 2))
    # Test CSPDarknet with custom arch forward
    # NOTE: `arch_ovewrite` (sic) is the actual keyword name in CSPDarknet;
    # do not "fix" the spelling here.
    arch_ovewrite = [[32, 56, 3, True, False], [56, 224, 2, True, False],
                     [224, 512, 1, True, False]]
    model = CSPDarknet(
        arch_ovewrite=arch_ovewrite,
        widen_factor=0.25,
        out_indices=(0, 1, 2, 3))
    model.train()
    imgs = torch.randn(1, 3, 32, 32)
    feat = model(imgs)
    assert len(feat) == 4
    assert feat[0].shape == torch.Size((1, 8, 16, 16))
    assert feat[1].shape == torch.Size((1, 14, 8, 8))
    assert feat[2].shape == torch.Size((1, 56, 4, 4))
    assert feat[3].shape == torch.Size((1, 128, 2, 2))
| 4,117 | 34.196581 | 79 | py |
DSLA-DSLA | DSLA-DSLA/tests/test_models/test_backbones/test_renext.py | # Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmdet.models.backbones import ResNeXt
from mmdet.models.backbones.resnext import Bottleneck as BottleneckX
from .utils import is_block
def test_renext_bottleneck():
    """Test ResNeXt bottleneck: style validation, grouped-conv structure,
    DCN config checks, and forward shape (with and without plugins)."""
    with pytest.raises(AssertionError):
        # Style must be in ['pytorch', 'caffe']
        BottleneckX(64, 64, groups=32, base_width=4, style='tensorflow')
    # Test ResNeXt Bottleneck structure
    block = BottleneckX(
        64, 64, groups=32, base_width=4, stride=2, style='pytorch')
    assert block.conv2.stride == (2, 2)
    assert block.conv2.groups == 32
    assert block.conv2.out_channels == 128
    # Test ResNeXt Bottleneck with DCN
    dcn = dict(type='DCN', deform_groups=1, fallback_on_stride=False)
    with pytest.raises(AssertionError):
        # conv_cfg must be None if dcn is not None
        BottleneckX(
            64,
            64,
            groups=32,
            base_width=4,
            dcn=dcn,
            conv_cfg=dict(type='Conv'))
    # A plain DCN config (no conv_cfg) must construct without error.
    BottleneckX(64, 64, dcn=dcn)
    # Test ResNeXt Bottleneck forward
    block = BottleneckX(64, 16, groups=32, base_width=4)
    x = torch.randn(1, 64, 56, 56)
    x_out = block(x)
    assert x_out.shape == torch.Size([1, 64, 56, 56])
    # Test ResNeXt Bottleneck forward with plugins
    plugins = [
        dict(
            cfg=dict(
                type='GeneralizedAttention',
                spatial_range=-1,
                num_heads=8,
                attention_type='0010',
                kv_stride=2),
            stages=(False, False, True, True),
            position='after_conv2')
    ]
    block = BottleneckX(64, 16, groups=32, base_width=4, plugins=plugins)
    x = torch.randn(1, 64, 56, 56)
    x_out = block(x)
    assert x_out.shape == torch.Size([1, 64, 56, 56])
def test_resnext_backbone():
    """Build ResNeXt-50 (32x4d) and check grouped convs and output shapes."""
    # Unsupported depths must be rejected.
    with pytest.raises(KeyError):
        ResNeXt(depth=18)

    model = ResNeXt(depth=50, groups=32, base_width=4)
    # Every bottleneck's 3x3 conv must use the configured group count.
    for module in model.modules():
        if is_block(module):
            assert module.conv2.groups == 32
    model.train()

    inputs = torch.randn(1, 3, 32, 32)
    outs = model(inputs)
    assert len(outs) == 4
    expected_shapes = [(1, 256, 8, 8), (1, 512, 4, 4), (1, 1024, 2, 2),
                       (1, 2048, 1, 1)]
    for out, shape in zip(outs, expected_shapes):
        assert out.shape == torch.Size(shape)
# Each entry: (RegNet arch name, RegNet generation parameters, expected
# per-stage output channel counts).
# NOTE(review): this table duplicates the one in test_regnet.py and is not
# referenced by any test in this file — verify whether it can be removed.
regnet_test_data = [
    ('regnetx_400mf',
     dict(w0=24, wa=24.48, wm=2.54, group_w=16, depth=22,
          bot_mul=1.0), [32, 64, 160, 384]),
    ('regnetx_800mf',
     dict(w0=56, wa=35.73, wm=2.28, group_w=16, depth=16,
          bot_mul=1.0), [64, 128, 288, 672]),
    ('regnetx_1.6gf',
     dict(w0=80, wa=34.01, wm=2.25, group_w=24, depth=18,
          bot_mul=1.0), [72, 168, 408, 912]),
    ('regnetx_3.2gf',
     dict(w0=88, wa=26.31, wm=2.25, group_w=48, depth=25,
          bot_mul=1.0), [96, 192, 432, 1008]),
    ('regnetx_4.0gf',
     dict(w0=96, wa=38.65, wm=2.43, group_w=40, depth=23,
          bot_mul=1.0), [80, 240, 560, 1360]),
    ('regnetx_6.4gf',
     dict(w0=184, wa=60.83, wm=2.07, group_w=56, depth=17,
          bot_mul=1.0), [168, 392, 784, 1624]),
    ('regnetx_8.0gf',
     dict(w0=80, wa=49.56, wm=2.88, group_w=120, depth=23,
          bot_mul=1.0), [80, 240, 720, 1920]),
    ('regnetx_12gf',
     dict(w0=168, wa=73.36, wm=2.37, group_w=112, depth=19,
          bot_mul=1.0), [224, 448, 896, 2240]),
]
| 3,528 | 32.292453 | 73 | py |
DSLA-DSLA | DSLA-DSLA/tests/test_models/test_backbones/test_trident_resnet.py | # Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmdet.models.backbones import TridentResNet
from mmdet.models.backbones.trident_resnet import TridentBottleneck
def test_trident_resnet_bottleneck():
    """Test TridentBottleneck: config validation, style handling, checkpoint
    forward, and plugin insertion; forwards stack the branch outputs on the
    batch dimension (``block.num_branch``)."""
    trident_dilations = (1, 2, 3)
    test_branch_idx = 1
    concat_output = True
    # Positional config shared by every construction below.
    trident_build_config = (trident_dilations, test_branch_idx, concat_output)
    with pytest.raises(AssertionError):
        # Style must be in ['pytorch', 'caffe']
        TridentBottleneck(
            *trident_build_config, inplanes=64, planes=64, style='tensorflow')
    with pytest.raises(AssertionError):
        # Allowed positions are 'after_conv1', 'after_conv2', 'after_conv3'
        plugins = [
            dict(
                cfg=dict(type='ContextBlock', ratio=1. / 16),
                position='after_conv4')
        ]
        TridentBottleneck(
            *trident_build_config, inplanes=64, planes=16, plugins=plugins)
    with pytest.raises(AssertionError):
        # Need to specify different postfix to avoid duplicate plugin name
        plugins = [
            dict(
                cfg=dict(type='ContextBlock', ratio=1. / 16),
                position='after_conv3'),
            dict(
                cfg=dict(type='ContextBlock', ratio=1. / 16),
                position='after_conv3')
        ]
        TridentBottleneck(
            *trident_build_config, inplanes=64, planes=16, plugins=plugins)
    with pytest.raises(KeyError):
        # Plugin type is not supported
        plugins = [dict(cfg=dict(type='WrongPlugin'), position='after_conv3')]
        TridentBottleneck(
            *trident_build_config, inplanes=64, planes=16, plugins=plugins)
    # Test Bottleneck with checkpoint forward
    block = TridentBottleneck(
        *trident_build_config, inplanes=64, planes=16, with_cp=True)
    assert block.with_cp
    x = torch.randn(1, 64, 56, 56)
    x_out = block(x)
    assert x_out.shape == torch.Size([block.num_branch, 64, 56, 56])
    # Test Bottleneck style: 'pytorch' strides in conv2, 'caffe' in conv1.
    block = TridentBottleneck(
        *trident_build_config,
        inplanes=64,
        planes=64,
        stride=2,
        style='pytorch')
    assert block.conv1.stride == (1, 1)
    assert block.conv2.stride == (2, 2)
    block = TridentBottleneck(
        *trident_build_config, inplanes=64, planes=64, stride=2, style='caffe')
    assert block.conv1.stride == (2, 2)
    assert block.conv2.stride == (1, 1)
    # Test Bottleneck forward
    block = TridentBottleneck(*trident_build_config, inplanes=64, planes=16)
    x = torch.randn(1, 64, 56, 56)
    x_out = block(x)
    assert x_out.shape == torch.Size([block.num_branch, 64, 56, 56])
    # Test Bottleneck with 1 ContextBlock after conv3
    plugins = [
        dict(
            cfg=dict(type='ContextBlock', ratio=1. / 16),
            position='after_conv3')
    ]
    block = TridentBottleneck(
        *trident_build_config, inplanes=64, planes=16, plugins=plugins)
    assert block.context_block.in_channels == 64
    x = torch.randn(1, 64, 56, 56)
    x_out = block(x)
    assert x_out.shape == torch.Size([block.num_branch, 64, 56, 56])
    # Test Bottleneck with 1 GeneralizedAttention after conv2
    plugins = [
        dict(
            cfg=dict(
                type='GeneralizedAttention',
                spatial_range=-1,
                num_heads=8,
                attention_type='0010',
                kv_stride=2),
            position='after_conv2')
    ]
    block = TridentBottleneck(
        *trident_build_config, inplanes=64, planes=16, plugins=plugins)
    assert block.gen_attention_block.in_channels == 16
    x = torch.randn(1, 64, 56, 56)
    x_out = block(x)
    assert x_out.shape == torch.Size([block.num_branch, 64, 56, 56])
    # Test Bottleneck with 1 GeneralizedAttention after conv2, 1 NonLocal2D
    # after conv2, 1 ContextBlock after conv3
    plugins = [
        dict(
            cfg=dict(
                type='GeneralizedAttention',
                spatial_range=-1,
                num_heads=8,
                attention_type='0010',
                kv_stride=2),
            position='after_conv2'),
        dict(cfg=dict(type='NonLocal2d'), position='after_conv2'),
        dict(
            cfg=dict(type='ContextBlock', ratio=1. / 16),
            position='after_conv3')
    ]
    block = TridentBottleneck(
        *trident_build_config, inplanes=64, planes=16, plugins=plugins)
    assert block.gen_attention_block.in_channels == 16
    assert block.nonlocal_block.in_channels == 16
    assert block.context_block.in_channels == 64
    x = torch.randn(1, 64, 56, 56)
    x_out = block(x)
    assert x_out.shape == torch.Size([block.num_branch, 64, 56, 56])
    # Test Bottleneck with 1 ContextBlock after conv2, 2 ContextBlock after
    # conv3
    plugins = [
        dict(
            cfg=dict(type='ContextBlock', ratio=1. / 16, postfix=1),
            position='after_conv2'),
        dict(
            cfg=dict(type='ContextBlock', ratio=1. / 16, postfix=2),
            position='after_conv3'),
        dict(
            cfg=dict(type='ContextBlock', ratio=1. / 16, postfix=3),
            position='after_conv3')
    ]
    block = TridentBottleneck(
        *trident_build_config, inplanes=64, planes=16, plugins=plugins)
    assert block.context_block1.in_channels == 16
    assert block.context_block2.in_channels == 64
    assert block.context_block3.in_channels == 64
    x = torch.randn(1, 64, 56, 56)
    x_out = block(x)
    assert x_out.shape == torch.Size([block.num_branch, 64, 56, 56])
def test_trident_resnet_backbone():
    """Test tridentresnet backbone.

    Fix: the docstring string previously appeared *after* the first
    statement, so Python did not treat it as the function docstring; it is
    now the first statement. Behavior is otherwise unchanged.
    """
    tridentresnet_config = dict(
        num_branch=3,
        test_branch_idx=1,
        strides=(1, 2, 2),
        dilations=(1, 1, 1),
        trident_dilations=(1, 2, 3),
        out_indices=(2, ),
    )

    with pytest.raises(AssertionError):
        # TridentResNet depth should be in [50, 101, 152]
        TridentResNet(18, **tridentresnet_config)

    with pytest.raises(AssertionError):
        # In TridentResNet: num_stages == 3
        TridentResNet(50, num_stages=4, **tridentresnet_config)

    model = TridentResNet(50, num_stages=3, **tridentresnet_config)
    model.train()

    imgs = torch.randn(1, 3, 32, 32)
    feat = model(imgs)
    assert len(feat) == 1
    # The 3 trident branches are stacked along the batch dimension.
    assert feat[0].shape == torch.Size([3, 1024, 2, 2])
| 6,372 | 34.209945 | 79 | py |
DSLA-DSLA | DSLA-DSLA/tests/test_models/test_backbones/test_resnest.py | # Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmdet.models.backbones import ResNeSt
from mmdet.models.backbones.resnest import Bottleneck as BottleneckS
def test_resnest_bottleneck():
    """Check ResNeSt bottleneck validation, structure and forward shape."""
    # Only 'pytorch' and 'caffe' styles are accepted.
    with pytest.raises(AssertionError):
        BottleneckS(64, 64, radix=2, reduction_factor=4, style='tensorflow')

    # Structural checks on a strided block.
    bottleneck = BottleneckS(
        2, 4, radix=2, reduction_factor=4, stride=2, style='pytorch')
    assert bottleneck.avd_layer.stride == 2
    assert bottleneck.conv2.channels == 4

    # Forward pass preserves the spatial size when stride is 1.
    bottleneck = BottleneckS(16, 4, radix=2, reduction_factor=4)
    inputs = torch.randn(2, 16, 56, 56)
    outputs = bottleneck(inputs)
    assert outputs.shape == torch.Size([2, 16, 56, 56])
def test_resnest_backbone():
    """Build a small ResNeSt-50 and verify depth validation and shapes."""
    # Unsupported depths must be rejected.
    with pytest.raises(KeyError):
        ResNeSt(depth=18)

    model = ResNeSt(
        depth=50,
        base_channels=4,
        radix=2,
        reduction_factor=4,
        out_indices=(0, 1, 2, 3))
    model.train()

    inputs = torch.randn(2, 3, 32, 32)
    outs = model(inputs)
    assert len(outs) == 4
    expected_shapes = [(2, 16, 8, 8), (2, 32, 4, 4), (2, 64, 2, 2),
                       (2, 128, 1, 1)]
    for out, shape in zip(outs, expected_shapes):
        assert out.shape == torch.Size(shape)
| 1,473 | 29.708333 | 76 | py |
DSLA-DSLA | DSLA-DSLA/tests/test_models/test_backbones/test_regnet.py | # Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmdet.models.backbones import RegNet
# Per-architecture RegNet cases used by the parametrized test below:
# (registered model name, design-space parameter dict, expected per-stage
# output channels). The dict keys follow the RegNet convention
# (w0/wa/wm = width generation, group_w = group width, depth, bot_mul =
# bottleneck multiplier).
regnet_test_data = [
    ('regnetx_400mf',
     dict(w0=24, wa=24.48, wm=2.54, group_w=16, depth=22,
          bot_mul=1.0), [32, 64, 160, 384]),
    ('regnetx_800mf',
     dict(w0=56, wa=35.73, wm=2.28, group_w=16, depth=16,
          bot_mul=1.0), [64, 128, 288, 672]),
    ('regnetx_1.6gf',
     dict(w0=80, wa=34.01, wm=2.25, group_w=24, depth=18,
          bot_mul=1.0), [72, 168, 408, 912]),
    ('regnetx_3.2gf',
     dict(w0=88, wa=26.31, wm=2.25, group_w=48, depth=25,
          bot_mul=1.0), [96, 192, 432, 1008]),
    ('regnetx_4.0gf',
     dict(w0=96, wa=38.65, wm=2.43, group_w=40, depth=23,
          bot_mul=1.0), [80, 240, 560, 1360]),
    ('regnetx_6.4gf',
     dict(w0=184, wa=60.83, wm=2.07, group_w=56, depth=17,
          bot_mul=1.0), [168, 392, 784, 1624]),
    ('regnetx_8.0gf',
     dict(w0=80, wa=49.56, wm=2.88, group_w=120, depth=23,
          bot_mul=1.0), [80, 240, 720, 1920]),
    ('regnetx_12gf',
     dict(w0=168, wa=73.36, wm=2.37, group_w=112, depth=19,
          bot_mul=1.0), [224, 448, 896, 2240]),
]
@pytest.mark.parametrize('arch_name,arch,out_channels', regnet_test_data)
def test_regnet_backbone(arch_name, arch, out_channels):
    """Build RegNet both from a registered name and from an explicit
    parameter dict, checking per-stage output shapes.

    Args:
        arch_name (str): Registered architecture name, e.g. 'regnetx_400mf'.
        arch (dict): Design-space parameters describing the same model.
        out_channels (list[int]): Expected channel count of the 4 stages.
    """
    with pytest.raises(AssertionError):
        # An unknown architecture name must be rejected.
        RegNet(arch_name + '233')

    # Test RegNet built from the registered name.
    model = RegNet(arch_name)
    model.train()

    imgs = torch.randn(1, 3, 32, 32)
    feat = model(imgs)
    assert len(feat) == 4
    assert feat[0].shape == torch.Size([1, out_channels[0], 8, 8])
    assert feat[1].shape == torch.Size([1, out_channels[1], 4, 4])
    assert feat[2].shape == torch.Size([1, out_channels[2], 2, 2])
    assert feat[3].shape == torch.Size([1, out_channels[3], 1, 1])

    # Test RegNet built from the explicit parameter dict.
    # BUG FIX: the original never ran a forward pass here and re-asserted
    # the stale `feat` produced by the named-arch model above, so the
    # dict-built model was never actually exercised.
    model = RegNet(arch)
    model.train()
    feat = model(imgs)
    assert len(feat) == 4
    assert feat[0].shape == torch.Size([1, out_channels[0], 8, 8])
    assert feat[1].shape == torch.Size([1, out_channels[1], 4, 4])
    assert feat[2].shape == torch.Size([1, out_channels[2], 2, 2])
    assert feat[3].shape == torch.Size([1, out_channels[3], 1, 1])
| 2,177 | 35.915254 | 73 | py |
DSLA-DSLA | DSLA-DSLA/tests/test_models/test_backbones/test_detectors_resnet.py | # Copyright (c) OpenMMLab. All rights reserved.
import pytest
from mmdet.models.backbones import DetectoRS_ResNet
def test_detectorrs_resnet_backbone():
    """Validate pretrained/init_cfg argument checking of DetectoRS_ResNet."""
    base_cfg = dict(
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        conv_cfg=dict(type='ConvAWS'),
        sac=dict(type='SAC', use_deform=True),
        stage_with_sac=(False, True, True, True),
        output_img=True)

    # Each invalid (pretrained, init_cfg) combination and the error it
    # must raise at construction time.
    bad_init_cases = [
        # pretrained and init_cfg cannot be specified at the same time
        (AssertionError, dict(pretrained='Pretrained',
                              init_cfg='Pretrained')),
        # init_cfg must be a dict
        (AssertionError, dict(pretrained=None, init_cfg=['Pretrained'])),
        # init_cfg must contain the key `type`
        (KeyError, dict(pretrained=None,
                        init_cfg=dict(checkpoint='Pretrained'))),
        # init_cfg only supports the pretrained-model initialization way
        (AssertionError, dict(pretrained=None,
                              init_cfg=dict(type='Trained'))),
    ]
    for expected_exc, init_kwargs in bad_init_cases:
        with pytest.raises(expected_exc):
            DetectoRS_ResNet(**base_cfg, **init_kwargs)

    # pretrained must be a str or None; a list fails in init_weights().
    with pytest.raises(TypeError):
        model = DetectoRS_ResNet(
            **base_cfg, pretrained=['Pretrained'], init_cfg=None)
        model.init_weights()
| 1,611 | 32.583333 | 77 | py |
DSLA-DSLA | DSLA-DSLA/tests/test_models/test_utils/test_position_encoding.py | # Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmdet.models.utils import (LearnedPositionalEncoding,
SinePositionalEncoding)
def test_sine_positional_encoding(num_feats=16, batch_size=2):
    """Sine positional encoding: invalid scale type and output shapes."""
    # ``scale`` must be a number, not a tuple.
    with pytest.raises(AssertionError):
        SinePositionalEncoding(num_feats, scale=(3., ), normalize=True)

    h, w = 10, 6
    mask = (torch.rand(batch_size, h, w) > 0.5).to(torch.int)

    # The default module performs no normalization.
    pos_enc = SinePositionalEncoding(num_feats)
    assert not pos_enc.normalize
    assert pos_enc(mask).shape == (batch_size, num_feats * 2, h, w)

    # The normalized variant produces the same output shape.
    pos_enc = SinePositionalEncoding(num_feats, normalize=True)
    assert pos_enc.normalize
    assert pos_enc(mask).shape == (batch_size, num_feats * 2, h, w)
def test_learned_positional_encoding(num_feats=16,
                                     row_num_embed=10,
                                     col_num_embed=10,
                                     batch_size=2):
    """Learned positional encoding: embedding tables and output shape."""
    pos_enc = LearnedPositionalEncoding(num_feats, row_num_embed,
                                        col_num_embed)
    # One learned embedding vector per row / column index.
    assert pos_enc.row_embed.weight.shape == (row_num_embed, num_feats)
    assert pos_enc.col_embed.weight.shape == (col_num_embed, num_feats)

    h, w = 10, 6
    mask = torch.rand(batch_size, h, w) > 0.5
    assert pos_enc(mask).shape == (batch_size, num_feats * 2, h, w)
| 1,437 | 34.95 | 79 | py |
DSLA-DSLA | DSLA-DSLA/tests/test_models/test_utils/test_model_misc.py | # Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import torch
from mmdet.models.utils import interpolate_as
def test_interpolate_as():
    """``interpolate_as`` resizes ``source`` to ``target``'s spatial size."""
    source = torch.rand((1, 5, 4, 4))
    target = torch.rand((1, 1, 16, 16))

    # 4D source + 4D target keeps the batch dimension.
    assert interpolate_as(source, target).shape == torch.Size((1, 5, 16, 16))
    # A 3D target is also a valid size reference.
    assert interpolate_as(source, target.squeeze(0)).shape == torch.Size(
        (1, 5, 16, 16))
    # A 3D source yields a 3D result.
    assert interpolate_as(source.squeeze(0), target).shape == torch.Size(
        (5, 16, 16))
    # ``target`` may be a plain numpy array as well.
    np_target = np.random.rand(16, 16)
    assert interpolate_as(source.squeeze(0), np_target).shape == torch.Size(
        (5, 16, 16))
| 805 | 27.785714 | 54 | py |
DSLA-DSLA | DSLA-DSLA/tests/test_models/test_utils/test_se_layer.py | # Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmdet.models.utils import SELayer
def test_se_layer():
    """SELayer act_cfg validation and forward shape preservation."""
    # An act_cfg sequence must contain exactly two entries...
    with pytest.raises(AssertionError):
        SELayer(channels=32, act_cfg=(dict(type='ReLU'), ))
    # ...and must be a tuple of dicts, not a list.
    with pytest.raises(AssertionError):
        SELayer(channels=32, act_cfg=[dict(type='ReLU'), dict(type='ReLU')])

    # The forward pass keeps the input shape unchanged.
    se = SELayer(channels=32)
    se.init_weights()
    se.train()
    inputs = torch.randn((1, 32, 10, 10))
    assert se(inputs).shape == torch.Size((1, 32, 10, 10))
| 674 | 26 | 76 | py |
DSLA-DSLA | DSLA-DSLA/tests/test_models/test_utils/test_inverted_residual.py | # Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmcv.cnn import is_norm
from torch.nn.modules import GroupNorm
from mmdet.models.utils import InvertedResidual, SELayer
def test_inverted_residual():
    """Config validation and forward behaviour of ``InvertedResidual``.

    Covers: invalid configs (stride, se_cfg type, expand-conv constraint),
    the residual shortcut for stride 1 vs 2, the optional SE layer,
    skipping the expand conv, GroupNorm / HSigmoid configs, and gradient
    checkpointing.
    """
    with pytest.raises(AssertionError):
        # stride must be in [1, 2]
        InvertedResidual(16, 16, 32, stride=3)
    with pytest.raises(AssertionError):
        # se_cfg must be None or dict
        InvertedResidual(16, 16, 32, se_cfg=list())
    with pytest.raises(AssertionError):
        # in_channels and mid_channels must be the same if
        # with_expand_conv is False
        InvertedResidual(16, 16, 32, with_expand_conv=False)
    # Test InvertedResidual forward, stride=1: residual shortcut is used
    # and the spatial size is preserved.
    block = InvertedResidual(16, 16, 32, stride=1)
    x = torch.randn(1, 16, 56, 56)
    x_out = block(x)
    assert getattr(block, 'se', None) is None
    assert block.with_res_shortcut
    assert x_out.shape == torch.Size((1, 16, 56, 56))
    # Test InvertedResidual forward, stride=2: no shortcut, spatial size
    # is halved.
    block = InvertedResidual(16, 16, 32, stride=2)
    x = torch.randn(1, 16, 56, 56)
    x_out = block(x)
    assert not block.with_res_shortcut
    assert x_out.shape == torch.Size((1, 16, 28, 28))
    # Test InvertedResidual forward with se layer
    se_cfg = dict(channels=32)
    block = InvertedResidual(16, 16, 32, stride=1, se_cfg=se_cfg)
    x = torch.randn(1, 16, 56, 56)
    x_out = block(x)
    assert isinstance(block.se, SELayer)
    assert x_out.shape == torch.Size((1, 16, 56, 56))
    # Test InvertedResidual forward, with_expand_conv=False (requires
    # in_channels == mid_channels, hence 32 in / 32 mid here).
    block = InvertedResidual(32, 16, 32, with_expand_conv=False)
    x = torch.randn(1, 32, 56, 56)
    x_out = block(x)
    assert getattr(block, 'expand_conv', None) is None
    assert x_out.shape == torch.Size((1, 16, 56, 56))
    # Test InvertedResidual forward with GroupNorm: every norm layer in
    # the block must be a GroupNorm instance.
    block = InvertedResidual(
        16, 16, 32, norm_cfg=dict(type='GN', num_groups=2))
    x = torch.randn(1, 16, 56, 56)
    x_out = block(x)
    for m in block.modules():
        if is_norm(m):
            assert isinstance(m, GroupNorm)
    assert x_out.shape == torch.Size((1, 16, 56, 56))
    # Test InvertedResidual forward with HSigmoid
    block = InvertedResidual(16, 16, 32, act_cfg=dict(type='HSigmoid'))
    x = torch.randn(1, 16, 56, 56)
    x_out = block(x)
    assert x_out.shape == torch.Size((1, 16, 56, 56))
    # Test InvertedResidual forward with checkpoint (gradient
    # checkpointing enabled via with_cp).
    block = InvertedResidual(16, 16, 32, with_cp=True)
    x = torch.randn(1, 16, 56, 56)
    x_out = block(x)
    assert block.with_cp
    assert x_out.shape == torch.Size((1, 16, 56, 56))
| 2,635 | 33.233766 | 71 | py |
DSLA-DSLA | DSLA-DSLA/tests/test_models/test_utils/test_conv_upsample.py | # Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmdet.models.utils import ConvUpsample
@pytest.mark.parametrize('num_layers', [0, 1, 2])
def test_conv_upsample(num_layers):
    """ConvUpsample doubles the spatial size once per upsample step."""
    # With 0 requested layers there is no upsampling, but the module
    # still needs at least one conv layer internally.
    num_upsample = max(num_layers, 0)
    num_layers = max(num_layers, 1)
    module = ConvUpsample(
        10,
        5,
        num_layers=num_layers,
        num_upsample=num_upsample,
        conv_cfg=None,
        norm_cfg=None)

    in_size = 5
    feat = module(torch.randn((1, 10, in_size, in_size)))
    expected_size = in_size * 2**num_upsample
    assert feat.shape[-2:] == (expected_size, expected_size)
| 628 | 24.16 | 54 | py |
DSLA-DSLA | DSLA-DSLA/tests/test_models/test_utils/test_transformer.py | # Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmcv.utils import ConfigDict
from mmdet.models.utils.transformer import (AdaptivePadding,
DetrTransformerDecoder,
DetrTransformerEncoder, PatchEmbed,
PatchMerging, Transformer)
def test_adaptive_padding():
    """AdaptivePadding pads inputs so conv/pool output sizes come out
    whole for both supported modes ('same' pads evenly, 'corner' pads
    bottom-right); other padding values are rejected.
    """
    for padding in ('same', 'corner'):
        kernel_size = 16
        stride = 16
        dilation = 1
        input = torch.rand(1, 1, 15, 17)
        pool = AdaptivePadding(
            kernel_size=kernel_size,
            stride=stride,
            dilation=dilation,
            padding=padding)
        out = pool(input)
        # padding to divisible by 16
        assert (out.shape[2], out.shape[3]) == (16, 32)
        input = torch.rand(1, 1, 16, 17)
        out = pool(input)
        # padding to divisible by 16
        assert (out.shape[2], out.shape[3]) == (16, 32)
        kernel_size = (2, 2)
        stride = (2, 2)
        dilation = (1, 1)
        adap_pad = AdaptivePadding(
            kernel_size=kernel_size,
            stride=stride,
            dilation=dilation,
            padding=padding)
        input = torch.rand(1, 1, 11, 13)
        out = adap_pad(input)
        # padding to divisible by 2
        assert (out.shape[2], out.shape[3]) == (12, 14)
        kernel_size = (2, 2)
        stride = (10, 10)
        dilation = (1, 1)
        adap_pad = AdaptivePadding(
            kernel_size=kernel_size,
            stride=stride,
            dilation=dilation,
            padding=padding)
        input = torch.rand(1, 1, 10, 13)
        out = adap_pad(input)
        # no padding: the stride already covers the input exactly
        assert (out.shape[2], out.shape[3]) == (10, 13)
        kernel_size = (11, 11)
        adap_pad = AdaptivePadding(
            kernel_size=kernel_size,
            stride=stride,
            dilation=dilation,
            padding=padding)
        input = torch.rand(1, 1, 11, 13)
        out = adap_pad(input)
        # all padding: kernel as large as the input forces padding on
        # both spatial dims
        assert (out.shape[2], out.shape[3]) == (21, 21)
        # test padding as kernel is (7,9)
        input = torch.rand(1, 1, 11, 13)
        stride = (3, 4)
        kernel_size = (4, 5)
        dilation = (2, 2)
        # actually (7, 9): effective kernel = (k - 1) * dilation + 1
        adap_pad = AdaptivePadding(
            kernel_size=kernel_size,
            stride=stride,
            dilation=dilation,
            padding=padding)
        dilation_out = adap_pad(input)
        assert (dilation_out.shape[2], dilation_out.shape[3]) == (16, 21)
        kernel_size = (7, 9)
        dilation = (1, 1)
        adap_pad = AdaptivePadding(
            kernel_size=kernel_size,
            stride=stride,
            dilation=dilation,
            padding=padding)
        kernel79_out = adap_pad(input)
        assert (kernel79_out.shape[2], kernel79_out.shape[3]) == (16, 21)
        # a dilated (4,5) kernel behaves exactly like a plain (7,9) one
        assert kernel79_out.shape == dilation_out.shape
    # assert only support "same" "corner"
    with pytest.raises(AssertionError):
        AdaptivePadding(
            kernel_size=kernel_size,
            stride=stride,
            dilation=dilation,
            padding=1)
def test_patch_embed():
    """PatchEmbed: output shape/size bookkeeping for various kernel,
    stride, dilation and adaptive-padding configurations.

    Throughout, the module returns ``(tokens, out_size)`` where the token
    count equals ``out_size[0] * out_size[1]``.
    """
    B = 2
    H = 3
    W = 4
    C = 3
    embed_dims = 10
    kernel_size = 3
    stride = 1
    dummy_input = torch.rand(B, C, H, W)
    patch_merge_1 = PatchEmbed(
        in_channels=C,
        embed_dims=embed_dims,
        kernel_size=kernel_size,
        stride=stride,
        padding=0,
        dilation=1,
        norm_cfg=None)
    x1, shape = patch_merge_1(dummy_input)
    # test out shape
    assert x1.shape == (2, 2, 10)
    # test outsize is correct
    assert shape == (1, 2)
    # test L = out_h * out_w
    assert shape[0] * shape[1] == x1.shape[1]

    B = 2
    H = 10
    W = 10
    C = 3
    embed_dims = 10
    kernel_size = 5
    stride = 2
    dummy_input = torch.rand(B, C, H, W)
    # test dilation
    patch_merge_2 = PatchEmbed(
        in_channels=C,
        embed_dims=embed_dims,
        kernel_size=kernel_size,
        stride=stride,
        padding=0,
        dilation=2,
        norm_cfg=None,
    )
    x2, shape = patch_merge_2(dummy_input)
    # test out shape
    assert x2.shape == (2, 1, 10)
    # test outsize is correct
    assert shape == (1, 1)
    # test L = out_h * out_w
    assert shape[0] * shape[1] == x2.shape[1]

    stride = 2
    input_size = (10, 10)
    dummy_input = torch.rand(B, C, H, W)
    # test stride and norm
    patch_merge_3 = PatchEmbed(
        in_channels=C,
        embed_dims=embed_dims,
        kernel_size=kernel_size,
        stride=stride,
        padding=0,
        dilation=2,
        norm_cfg=dict(type='LN'),
        input_size=input_size)
    x3, shape = patch_merge_3(dummy_input)
    # test out shape
    assert x3.shape == (2, 1, 10)
    # test outsize is correct
    assert shape == (1, 1)
    # test L = out_h * out_w
    assert shape[0] * shape[1] == x3.shape[1]
    # test the init_out_size with nn.Unfold
    # (2 * 4 is the effective dilated-kernel reach: dilation * (k - 1))
    assert patch_merge_3.init_out_size[1] == (input_size[0] - 2 * 4 -
                                              1) // 2 + 1
    assert patch_merge_3.init_out_size[0] == (input_size[0] - 2 * 4 -
                                              1) // 2 + 1

    H = 11
    W = 12
    input_size = (H, W)
    dummy_input = torch.rand(B, C, H, W)
    # test stride and norm
    patch_merge_3 = PatchEmbed(
        in_channels=C,
        embed_dims=embed_dims,
        kernel_size=kernel_size,
        stride=stride,
        padding=0,
        dilation=2,
        norm_cfg=dict(type='LN'),
        input_size=input_size)
    _, shape = patch_merge_3(dummy_input)
    # when input_size equal to real input
    # the out_size should be equal to `init_out_size`
    assert shape == patch_merge_3.init_out_size

    # NOTE(review): this block is an exact duplicate of the one above —
    # presumably a copy-paste leftover; it adds no extra coverage.
    input_size = (H, W)
    dummy_input = torch.rand(B, C, H, W)
    # test stride and norm
    patch_merge_3 = PatchEmbed(
        in_channels=C,
        embed_dims=embed_dims,
        kernel_size=kernel_size,
        stride=stride,
        padding=0,
        dilation=2,
        norm_cfg=dict(type='LN'),
        input_size=input_size)
    _, shape = patch_merge_3(dummy_input)
    # when input_size equal to real input
    # the out_size should be equal to `init_out_size`
    assert shape == patch_merge_3.init_out_size

    # test adap padding
    for padding in ('same', 'corner'):
        in_c = 2
        embed_dims = 3
        B = 2

        # test stride is 1
        input_size = (5, 5)
        kernel_size = (5, 5)
        stride = (1, 1)
        dilation = 1
        bias = False
        x = torch.rand(B, in_c, *input_size)
        patch_embed = PatchEmbed(
            in_channels=in_c,
            embed_dims=embed_dims,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            bias=bias)
        x_out, out_size = patch_embed(x)
        assert x_out.size() == (B, 25, 3)
        assert out_size == (5, 5)
        assert x_out.size(1) == out_size[0] * out_size[1]

        # test kernel_size == stride
        input_size = (5, 5)
        kernel_size = (5, 5)
        stride = (5, 5)
        dilation = 1
        bias = False
        x = torch.rand(B, in_c, *input_size)
        patch_embed = PatchEmbed(
            in_channels=in_c,
            embed_dims=embed_dims,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            bias=bias)
        x_out, out_size = patch_embed(x)
        assert x_out.size() == (B, 1, 3)
        assert out_size == (1, 1)
        assert x_out.size(1) == out_size[0] * out_size[1]

        # test kernel_size == stride, input not divisible by the kernel
        input_size = (6, 5)
        kernel_size = (5, 5)
        stride = (5, 5)
        dilation = 1
        bias = False
        x = torch.rand(B, in_c, *input_size)
        patch_embed = PatchEmbed(
            in_channels=in_c,
            embed_dims=embed_dims,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            bias=bias)
        x_out, out_size = patch_embed(x)
        assert x_out.size() == (B, 2, 3)
        assert out_size == (2, 1)
        assert x_out.size(1) == out_size[0] * out_size[1]

        # test different kernel_size with different stride
        input_size = (6, 5)
        kernel_size = (6, 2)
        stride = (6, 2)
        dilation = 1
        bias = False
        x = torch.rand(B, in_c, *input_size)
        patch_embed = PatchEmbed(
            in_channels=in_c,
            embed_dims=embed_dims,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            bias=bias)
        x_out, out_size = patch_embed(x)
        assert x_out.size() == (B, 3, 3)
        assert out_size == (1, 3)
        assert x_out.size(1) == out_size[0] * out_size[1]
def test_patch_merging():
    """PatchMerging: token downsampling shapes for int padding and both
    adaptive padding modes.

    Inputs are token sequences of shape ``(B, L, C)`` with an explicit
    2-D ``input_size``; outputs are ``(tokens, out_size)`` with the token
    count equal to ``out_size[0] * out_size[1]``.
    """
    # Test the model with int padding
    in_c = 3
    out_c = 4
    kernel_size = 3
    stride = 3
    padding = 1
    dilation = 1
    bias = False
    # test the case `pad_to_stride` is False
    patch_merge = PatchMerging(
        in_channels=in_c,
        out_channels=out_c,
        kernel_size=kernel_size,
        stride=stride,
        padding=padding,
        dilation=dilation,
        bias=bias)
    B, L, C = 1, 100, 3
    input_size = (10, 10)
    x = torch.rand(B, L, C)
    x_out, out_size = patch_merge(x, input_size)
    assert x_out.size() == (1, 16, 4)
    assert out_size == (4, 4)
    # assert out size is consistent with real output
    assert x_out.size(1) == out_size[0] * out_size[1]

    in_c = 4
    out_c = 5
    kernel_size = 6
    stride = 3
    padding = 2
    dilation = 2
    bias = False
    patch_merge = PatchMerging(
        in_channels=in_c,
        out_channels=out_c,
        kernel_size=kernel_size,
        stride=stride,
        padding=padding,
        dilation=dilation,
        bias=bias)
    B, L, C = 1, 100, 4
    input_size = (10, 10)
    x = torch.rand(B, L, C)
    x_out, out_size = patch_merge(x, input_size)
    assert x_out.size() == (1, 4, 5)
    assert out_size == (2, 2)
    # assert out size is consistent with real output
    assert x_out.size(1) == out_size[0] * out_size[1]

    # Test with adaptive padding
    for padding in ('same', 'corner'):
        in_c = 2
        out_c = 3
        B = 2

        # test stride is 1
        input_size = (5, 5)
        kernel_size = (5, 5)
        stride = (1, 1)
        dilation = 1
        bias = False
        L = input_size[0] * input_size[1]
        x = torch.rand(B, L, in_c)
        patch_merge = PatchMerging(
            in_channels=in_c,
            out_channels=out_c,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            bias=bias)
        x_out, out_size = patch_merge(x, input_size)
        assert x_out.size() == (B, 25, 3)
        assert out_size == (5, 5)
        assert x_out.size(1) == out_size[0] * out_size[1]

        # test kernel_size == stride
        input_size = (5, 5)
        kernel_size = (5, 5)
        stride = (5, 5)
        dilation = 1
        bias = False
        L = input_size[0] * input_size[1]
        x = torch.rand(B, L, in_c)
        patch_merge = PatchMerging(
            in_channels=in_c,
            out_channels=out_c,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            bias=bias)
        x_out, out_size = patch_merge(x, input_size)
        assert x_out.size() == (B, 1, 3)
        assert out_size == (1, 1)
        assert x_out.size(1) == out_size[0] * out_size[1]

        # test kernel_size == stride, input not divisible by the kernel
        input_size = (6, 5)
        kernel_size = (5, 5)
        stride = (5, 5)
        dilation = 1
        bias = False
        L = input_size[0] * input_size[1]
        x = torch.rand(B, L, in_c)
        patch_merge = PatchMerging(
            in_channels=in_c,
            out_channels=out_c,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            bias=bias)
        x_out, out_size = patch_merge(x, input_size)
        assert x_out.size() == (B, 2, 3)
        assert out_size == (2, 1)
        assert x_out.size(1) == out_size[0] * out_size[1]

        # test different kernel_size with different stride
        input_size = (6, 5)
        kernel_size = (6, 2)
        stride = (6, 2)
        dilation = 1
        bias = False
        L = input_size[0] * input_size[1]
        x = torch.rand(B, L, in_c)
        patch_merge = PatchMerging(
            in_channels=in_c,
            out_channels=out_c,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            bias=bias)
        x_out, out_size = patch_merge(x, input_size)
        assert x_out.size() == (B, 3, 3)
        assert out_size == (1, 3)
        assert x_out.size(1) == out_size[0] * out_size[1]
def test_detr_transformer_dencoder_encoder_layer():
    """DETR decoder/encoder layer config handling.

    NOTE(review): "dencoder" in the test name looks like a typo for
    "decoder"; the name is kept so pytest selection strings keep working.
    """
    # A leading 'norm' in operation_order means pre-norm layers.
    config = ConfigDict(
        dict(
            return_intermediate=True,
            num_layers=6,
            transformerlayers=dict(
                type='DetrTransformerDecoderLayer',
                attn_cfgs=dict(
                    type='MultiheadAttention',
                    embed_dims=256,
                    num_heads=8,
                    dropout=0.1),
                feedforward_channels=2048,
                ffn_dropout=0.1,
                operation_order=(
                    'norm',
                    'self_attn',
                    'norm',
                    'cross_attn',
                    'norm',
                    'ffn',
                ))))
    assert DetrTransformerDecoder(**config).layers[0].pre_norm
    assert len(DetrTransformerDecoder(**config).layers) == 6
    DetrTransformerDecoder(**config)

    # A transformerlayers list must have exactly num_layers entries;
    # 5 entries for num_layers=6 must raise.
    with pytest.raises(AssertionError):
        config = ConfigDict(
            dict(
                return_intermediate=True,
                num_layers=6,
                transformerlayers=[
                    dict(
                        type='DetrTransformerDecoderLayer',
                        attn_cfgs=dict(
                            type='MultiheadAttention',
                            embed_dims=256,
                            num_heads=8,
                            dropout=0.1),
                        feedforward_channels=2048,
                        ffn_dropout=0.1,
                        operation_order=('self_attn', 'norm', 'cross_attn',
                                         'norm', 'ffn', 'norm'))
                ] * 5))
        DetrTransformerDecoder(**config)

    config = ConfigDict(
        dict(
            num_layers=6,
            transformerlayers=dict(
                type='DetrTransformerDecoderLayer',
                attn_cfgs=dict(
                    type='MultiheadAttention',
                    embed_dims=256,
                    num_heads=8,
                    dropout=0.1),
                feedforward_channels=2048,
                ffn_dropout=0.1,
                operation_order=('norm', 'self_attn', 'norm', 'cross_attn',
                                 'norm', 'ffn', 'norm'))))
    with pytest.raises(AssertionError):
        # len(operation_order) == 6
        DetrTransformerEncoder(**config)
def test_transformer():
    """Smoke test: a full DETR-style Transformer (6 encoder + 6 decoder
    layers) can be built from config and its weights initialized."""
    config = ConfigDict(
        dict(
            encoder=dict(
                type='DetrTransformerEncoder',
                num_layers=6,
                transformerlayers=dict(
                    type='BaseTransformerLayer',
                    attn_cfgs=[
                        dict(
                            type='MultiheadAttention',
                            embed_dims=256,
                            num_heads=8,
                            dropout=0.1)
                    ],
                    feedforward_channels=2048,
                    ffn_dropout=0.1,
                    operation_order=('self_attn', 'norm', 'ffn', 'norm'))),
            decoder=dict(
                type='DetrTransformerDecoder',
                return_intermediate=True,
                num_layers=6,
                transformerlayers=dict(
                    type='DetrTransformerDecoderLayer',
                    attn_cfgs=dict(
                        type='MultiheadAttention',
                        embed_dims=256,
                        num_heads=8,
                        dropout=0.1),
                    feedforward_channels=2048,
                    ffn_dropout=0.1,
                    operation_order=('self_attn', 'norm', 'cross_attn', 'norm',
                                     'ffn', 'norm')),
            )))
    transformer = Transformer(**config)
    transformer.init_weights()
| 16,994 | 28.815789 | 79 | py |
DSLA-DSLA | DSLA-DSLA/tests/test_models/test_utils/test_brick_wrappers.py | from unittest.mock import patch
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmdet.models.utils import AdaptiveAvgPool2d, adaptive_avg_pool2d
# Version string used to monkey-patch ``torch.__version__`` in the tests
# below: keep 'parrots' as-is (an alternative PyTorch-compatible backend),
# pretend any regular PyTorch build is version '1.7'.
if torch.__version__ != 'parrots':
    torch_version = '1.7'
else:
    torch_version = 'parrots'
@patch('torch.__version__', torch_version)
def test_adaptive_avg_pool2d():
    """Functional wrapper handles empty batches and matches ``F.*``
    exactly on regular inputs, for both output-size spellings."""
    x_empty = torch.randn(0, 3, 4, 5)
    x_normal = torch.randn(3, 3, 4, 5)
    # 1. tuple[int, int]  2. int
    for output_size in [(2, 2), 2]:
        # Empty batch: only the resulting shape can be checked.
        assert adaptive_avg_pool2d(x_empty, output_size).shape == (0, 3, 2, 2)
        # Regular batch: result must equal torch's functional op exactly.
        out = adaptive_avg_pool2d(x_normal, output_size)
        assert out.shape == (3, 3, 2, 2)
        assert torch.equal(out, F.adaptive_avg_pool2d(x_normal, output_size))
@patch('torch.__version__', torch_version)
def test_AdaptiveAvgPool2d():
    """Module wrapper supports empty batches and agrees with
    ``nn.AdaptiveAvgPool2d`` on regular inputs for all four output-size
    spellings: (h, w), int, (None, w) and (h, None)."""
    x_empty = torch.randn(0, 3, 4, 5)
    x_normal = torch.randn(3, 3, 4, 5)
    # (output_size, expected empty-batch shape, expected normal shape)
    cases = [
        ((2, 2), (0, 3, 2, 2), (3, 3, 2, 2)),
        (2, (0, 3, 2, 2), (3, 3, 2, 2)),
        ((None, 2), (0, 3, 4, 2), (3, 3, 4, 2)),
        ((2, None), (0, 3, 2, 5), (3, 3, 2, 5)),
    ]
    for output_size, empty_shape, normal_shape in cases:
        wrapper = AdaptiveAvgPool2d(output_size)
        # Empty batch: only the shape can be verified.
        assert wrapper(x_empty).shape == empty_shape
        # Regular batch: output must be identical to torch's own module.
        ref = nn.AdaptiveAvgPool2d(output_size)
        out = wrapper(x_normal)
        assert out.shape == normal_shape
        assert torch.equal(out, ref(x_normal))
| 2,931 | 30.191489 | 69 | py |
DSLA-DSLA | DSLA-DSLA/tests/test_models/test_dense_heads/test_lad_head.py | # Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import numpy as np
import torch
from mmdet.models.dense_heads import LADHead, lad_head
from mmdet.models.dense_heads.lad_head import levels_to_images
def test_lad_head_loss():
    """Tests lad head loss when truth is empty and non-empty."""

    class mock_skm:
        """Stand-in for sklearn.mixture so the GMM-based label assignment
        runs without a sklearn dependency: one component, random scores."""

        def GaussianMixture(self, *args, **kwargs):
            return self

        def fit(self, loss):
            pass

        def predict(self, loss):
            # Assign every sample to component 0.
            # BUG FIX: ``np.long`` was an alias removed in NumPy >= 1.24;
            # use the explicit ``np.int64`` dtype instead.
            components = np.zeros_like(loss, dtype=np.int64)
            return components.reshape(-1)

        def score_samples(self, loss):
            scores = np.random.random(len(loss))
            return scores

    lad_head.skm = mock_skm()

    s = 256
    img_metas = [{
        'img_shape': (s, s, 3),
        'scale_factor': 1,
        'pad_shape': (s, s, 3)
    }]
    train_cfg = mmcv.Config(
        dict(
            assigner=dict(
                type='MaxIoUAssigner',
                pos_iou_thr=0.1,
                neg_iou_thr=0.1,
                min_pos_iou=0,
                ignore_iof_thr=-1),
            allowed_border=-1,
            pos_weight=-1,
            debug=False))
    # since Focal Loss is not supported on CPU
    self = LADHead(
        num_classes=4,
        in_channels=1,
        train_cfg=train_cfg,
        loss_cls=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
        loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
        loss_centerness=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5))

    teacher_model = LADHead(
        num_classes=4,
        in_channels=1,
        train_cfg=train_cfg,
        loss_cls=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
        loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
        loss_centerness=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5))

    # One feature map per FPN level.
    feat = [
        torch.rand(1, 1, s // feat_size, s // feat_size)
        for feat_size in [4, 8, 16, 32, 64]
    ]
    self.init_weights()
    teacher_model.init_weights()

    # Test that empty ground truth encourages the network to predict
    # background.
    gt_bboxes = [torch.empty((0, 4))]
    gt_labels = [torch.LongTensor([])]
    gt_bboxes_ignore = None

    outs_teacher = teacher_model(feat)
    label_assignment_results = teacher_model.get_label_assignment(
        *outs_teacher, gt_bboxes, gt_labels, img_metas, gt_bboxes_ignore)

    # NOTE(review): the student's loss is computed on the teacher's raw
    # outputs here; ``self(feat)`` may have been intended — confirm.
    outs = teacher_model(feat)
    empty_gt_losses = self.loss(*outs, gt_bboxes, gt_labels, img_metas,
                                gt_bboxes_ignore, label_assignment_results)
    # When there is no truth, the cls loss should be nonzero but there should
    # be no box loss.
    empty_cls_loss = empty_gt_losses['loss_cls']
    empty_box_loss = empty_gt_losses['loss_bbox']
    empty_iou_loss = empty_gt_losses['loss_iou']
    assert empty_cls_loss.item() > 0, 'cls loss should be non-zero'
    assert empty_box_loss.item() == 0, (
        'there should be no box loss when there are no true boxes')
    assert empty_iou_loss.item() == 0, (
        'there should be no box loss when there are no true boxes')

    # When truth is non-empty then both cls and box loss should be nonzero for
    # random inputs
    gt_bboxes = [
        torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
    ]
    gt_labels = [torch.LongTensor([2])]
    label_assignment_results = teacher_model.get_label_assignment(
        *outs_teacher, gt_bboxes, gt_labels, img_metas, gt_bboxes_ignore)
    one_gt_losses = self.loss(*outs, gt_bboxes, gt_labels, img_metas,
                              gt_bboxes_ignore, label_assignment_results)
    onegt_cls_loss = one_gt_losses['loss_cls']
    onegt_box_loss = one_gt_losses['loss_bbox']
    onegt_iou_loss = one_gt_losses['loss_iou']
    assert onegt_cls_loss.item() > 0, 'cls loss should be non-zero'
    assert onegt_box_loss.item() > 0, 'box loss should be non-zero'
    assert onegt_iou_loss.item() > 0, 'box loss should be non-zero'

    # levels_to_images flattens per-level maps into per-image token lists.
    n, c, h, w = 10, 4, 20, 20
    mlvl_tensor = [torch.ones(n, c, h, w) for i in range(5)]
    results = levels_to_images(mlvl_tensor)
    assert len(results) == n
    assert results[0].size() == (h * w * 5, c)

    assert self.with_score_voting

    # Single-level head used to smoke-test get_bboxes.
    self = LADHead(
        num_classes=4,
        in_channels=1,
        train_cfg=train_cfg,
        anchor_generator=dict(
            type='AnchorGenerator',
            ratios=[1.0],
            octave_base_scale=8,
            scales_per_octave=1,
            strides=[8]),
        loss_cls=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
        loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
        loss_centerness=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5))
    cls_scores = [torch.ones(2, 4, 5, 5)]
    bbox_preds = [torch.ones(2, 4, 5, 5)]
    iou_preds = [torch.ones(2, 1, 5, 5)]
    cfg = mmcv.Config(
        dict(
            nms_pre=1000,
            min_bbox_size=0,
            score_thr=0.05,
            nms=dict(type='nms', iou_threshold=0.6),
            max_per_img=100))
    rescale = False
    self.get_bboxes(
        cls_scores, bbox_preds, iou_preds, img_metas, cfg, rescale=rescale)
| 5,294 | 34.3 | 79 | py |
DSLA-DSLA | DSLA-DSLA/tests/test_models/test_dense_heads/test_anchor_head.py | # Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
from mmdet.models.dense_heads import AnchorHead
def test_anchor_head_loss():
    """Tests anchor head loss when truth is empty and non-empty."""
    s = 256
    img_metas = [{
        'img_shape': (s, s, 3),
        'scale_factor': 1,
        'pad_shape': (s, s, 3)
    }]

    train_cfg = mmcv.Config(
        dict(
            assigner=dict(
                type='MaxIoUAssigner',
                pos_iou_thr=0.7,
                neg_iou_thr=0.3,
                min_pos_iou=0.3,
                match_low_quality=True,
                ignore_iof_thr=-1),
            sampler=dict(
                type='RandomSampler',
                num=256,
                pos_fraction=0.5,
                neg_pos_ub=-1,
                add_gt_as_proposals=False),
            allowed_border=0,
            pos_weight=-1,
            debug=False))
    head = AnchorHead(num_classes=4, in_channels=1, train_cfg=train_cfg)

    # One feature map per anchor-generator stride.
    feats = [
        torch.rand(1, 1, s // (2**(lvl + 2)), s // (2**(lvl + 2)))
        for lvl in range(len(head.anchor_generator.strides))
    ]
    cls_scores, bbox_preds = head.forward(feats)

    # Empty ground truth: everything becomes background, so the cls loss
    # is positive while nothing is regressed.
    empty_losses = head.loss(cls_scores, bbox_preds, [torch.empty((0, 4))],
                             [torch.LongTensor([])], img_metas, None)
    assert sum(empty_losses['loss_cls']).item() > 0, \
        'cls loss should be non-zero'
    assert sum(empty_losses['loss_bbox']).item() == 0, (
        'there should be no box loss when there are no true boxes')

    # A real box makes both loss terms non-zero.
    gt_bboxes = [torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]])]
    gt_labels = [torch.LongTensor([2])]
    gt_losses = head.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
                          img_metas, None)
    assert sum(gt_losses['loss_cls']).item() > 0, 'cls loss should be non-zero'
    assert sum(gt_losses['loss_bbox']).item() > 0, 'box loss should be non-zero'
| 2,548 | 34.901408 | 79 | py |
DSLA-DSLA | DSLA-DSLA/tests/test_models/test_dense_heads/test_centernet_head.py | # Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import torch
from mmcv import ConfigDict
from mmdet.models.dense_heads import CenterNetHead
def test_center_head_loss():
    """Tests center head loss when truth is empty and non-empty."""
    s = 256
    img_metas = [{
        'img_shape': (s, s, 3),
        'scale_factor': 1,
        'pad_shape': (s, s, 3)
    }]
    head = CenterNetHead(
        num_classes=4,
        in_channel=1,
        feat_channel=4,
        test_cfg=dict(topK=100, max_per_img=100))
    center_out, wh_out, offset_out = head.forward([torch.rand(1, 1, s, s)])

    # Empty ground truth: the heatmap loss still pushes predictions
    # towards background, but size/offset have nothing to regress.
    losses = head.loss(center_out, wh_out, offset_out, [torch.empty((0, 4))],
                       [torch.LongTensor([])], img_metas, None)
    assert losses['loss_center_heatmap'].item() > 0, \
        'loss_center should be non-zero'
    assert losses['loss_wh'].item() == 0, (
        'there should be no loss_wh when there are no true boxes')
    assert losses['loss_offset'].item() == 0, (
        'there should be no loss_offset when there are no true boxes')

    # A real box makes every loss term non-zero.
    gt_bboxes = [torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]])]
    gt_labels = [torch.LongTensor([2])]
    losses = head.loss(center_out, wh_out, offset_out, gt_bboxes, gt_labels,
                       img_metas, None)
    assert losses['loss_center_heatmap'].item() > 0, \
        'loss_center should be non-zero'
    assert losses['loss_wh'].item() > 0, 'loss_wh should be non-zero'
    assert losses['loss_offset'].item() > 0, 'loss_offset should be non-zero'
def test_centernet_head_get_bboxes():
    """Tests center head generating and decoding the heatmap."""
    s = 256
    img_metas = [{
        'img_shape': (s, s, 3),
        'scale_factor': np.array([1., 1., 1., 1.]),
        'pad_shape': (s, s, 3),
        'batch_input_shape': (s, s),
        'border': (0, 0, 0, 0),
        'flip': False
    }]
    gt_bboxes = [
        torch.Tensor([[10, 20, 200, 240], [40, 50, 100, 200],
                      [10, 20, 100, 240]])
    ]
    gt_labels = [torch.LongTensor([1, 1, 2])]
    head = CenterNetHead(
        num_classes=4,
        in_channel=1,
        feat_channel=4,
        test_cfg=ConfigDict(
            dict(topk=100, local_maximum_kernel=3, max_per_img=100)))
    head.feat_shape = (1, 1, s // 4, s // 4)
    targets, _ = head.get_targets(gt_bboxes, gt_labels, head.feat_shape,
                                  img_metas[0]['pad_shape'])
    center_target = targets['center_heatmap_target']
    wh_target = targets['wh_target']
    offset_target = targets['offset_target']
    # Every gt box must be encoded at its (4x-downsampled) center cell:
    # a heatmap peak of 1, its width/height, and the sub-pixel offset.
    for box, label in zip(gt_bboxes[0], gt_labels[0]):
        box = box / 4
        ctx = sum(box[0::2]) / 2
        cty = sum(box[1::2]) / 2
        col, row = int(ctx), int(cty)
        assert center_target[0, label, row, col] == 1
        assert wh_target[0, 0, row, col] == box[2] - box[0]
        assert wh_target[0, 1, row, col] == box[3] - box[1]
        assert offset_target[0, 0, row, col] == ctx - col
        assert offset_target[0, 1, row, col] == cty - row
    # Decoding the perfect targets must reproduce the gt boxes exactly.
    detections = head.get_bboxes([center_target], [wh_target],
                                 [offset_target],
                                 img_metas,
                                 rescale=True,
                                 with_nms=False)
    out_bboxes = detections[0][0][:3]
    out_clses = detections[0][1][:3]
    for bbox, cls in zip(out_bboxes, out_clses):
        matched = any(
            bool((bbox[:4] == gt_bbox[:4]).all()) for gt_bbox in gt_bboxes[0])
        assert matched, 'get_bboxes is wrong'
| 4,385 | 39.611111 | 79 | py |
DSLA-DSLA | DSLA-DSLA/tests/test_models/test_dense_heads/test_ga_anchor_head.py | # Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
from mmdet.models.dense_heads import GuidedAnchorHead
def test_ga_anchor_head_loss():
    """Tests anchor head loss when truth is empty and non-empty."""
    s = 256
    img_metas = [{
        'img_shape': (s, s, 3),
        'scale_factor': 1,
        'pad_shape': (s, s, 3)
    }]
    cfg = mmcv.Config(
        dict(
            assigner=dict(
                type='MaxIoUAssigner',
                pos_iou_thr=0.7,
                neg_iou_thr=0.3,
                min_pos_iou=0.3,
                match_low_quality=True,
                ignore_iof_thr=-1),
            sampler=dict(
                type='RandomSampler',
                num=256,
                pos_fraction=0.5,
                neg_pos_ub=-1,
                add_gt_as_proposals=False),
            ga_assigner=dict(
                type='ApproxMaxIoUAssigner',
                pos_iou_thr=0.7,
                neg_iou_thr=0.3,
                min_pos_iou=0.3,
                ignore_iof_thr=-1),
            ga_sampler=dict(
                type='RandomSampler',
                num=256,
                pos_fraction=0.5,
                neg_pos_ub=-1,
                add_gt_as_proposals=False),
            allowed_border=-1,
            center_ratio=0.2,
            ignore_ratio=0.5,
            pos_weight=-1,
            debug=False))
    ga_head = GuidedAnchorHead(num_classes=4, in_channels=4, train_cfg=cfg)
    # Guided anchoring relies on GPU-only ops, so the forward/loss part of
    # the test only runs when CUDA is available.
    if torch.cuda.is_available():
        ga_head.cuda()
        # One feature map per pyramid level, strides 4, 8, 16, ...
        num_levels = len(ga_head.approx_anchor_generator.base_anchors)
        feat = [
            torch.rand(1, 4, s // (2**(lvl + 2)), s // (2**(lvl + 2))).cuda()
            for lvl in range(num_levels)
        ]
        cls_scores, bbox_preds, shape_preds, loc_preds = ga_head.forward(feat)

        # Empty ground truth: classification loss only, no box loss.
        gt_bboxes = [torch.empty((0, 4)).cuda()]
        gt_labels = [torch.LongTensor([]).cuda()]
        empty_gt_losses = ga_head.loss(cls_scores, bbox_preds, shape_preds,
                                       loc_preds, gt_bboxes, gt_labels,
                                       img_metas, None)
        assert sum(empty_gt_losses['loss_cls']).item() > 0, \
            'cls loss should be non-zero'
        assert sum(empty_gt_losses['loss_bbox']).item() == 0, (
            'there should be no box loss when there are no true boxes')

        # One ground-truth box: both loss terms should become positive.
        gt_bboxes = [
            torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]).cuda(),
        ]
        gt_labels = [torch.LongTensor([2]).cuda()]
        one_gt_losses = ga_head.loss(cls_scores, bbox_preds, shape_preds,
                                     loc_preds, gt_bboxes, gt_labels,
                                     img_metas, None)
        assert sum(one_gt_losses['loss_cls']).item() > 0, \
            'cls loss should be non-zero'
        assert sum(one_gt_losses['loss_bbox']).item() > 0, \
            'box loss should be non-zero'
| 3,410 | 36.076087 | 79 | py |
DSLA-DSLA | DSLA-DSLA/tests/test_models/test_dense_heads/test_yolof_head.py | # Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
from mmdet.models.dense_heads import YOLOFHead
def test_yolof_head_loss():
    """Tests yolof head loss when truth is empty and non-empty."""
    s = 256
    img_metas = [{
        'img_shape': (s, s, 3),
        'scale_factor': 1,
        'pad_shape': (s, s, 3)
    }]
    train_cfg = mmcv.Config(
        dict(
            assigner=dict(
                type='UniformAssigner',
                pos_ignore_thr=0.15,
                neg_ignore_thr=0.7),
            allowed_border=-1,
            pos_weight=-1,
            debug=False))
    yolof_head = YOLOFHead(
        num_classes=4,
        in_channels=1,
        reg_decoded_bbox=True,
        train_cfg=train_cfg,
        anchor_generator=dict(
            type='AnchorGenerator',
            ratios=[1.0],
            scales=[1, 2, 4, 8, 16],
            strides=[32]),
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[1., 1., 1., 1.],
            add_ctr_clamp=True,
            ctr_clamp=32),
        loss_cls=dict(
            type='FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=1.0),
        loss_bbox=dict(type='GIoULoss', loss_weight=1.0))
    # YOLOF uses a single feature level with stride 32.
    feat = [torch.rand(1, 1, s // 32, s // 32)]
    cls_scores, bbox_preds = yolof_head.forward(feat)

    # Empty ground truth: only the classification loss should be positive.
    empty_gt_losses = yolof_head.loss(cls_scores, bbox_preds,
                                      [torch.empty((0, 4))],
                                      [torch.LongTensor([])], img_metas, None)
    assert empty_gt_losses['loss_cls'].item() > 0, \
        'cls loss should be non-zero'
    assert empty_gt_losses['loss_bbox'].item() == 0, (
        'there should be no box loss when there are no true boxes')

    # Non-empty ground truth: both loss terms should be positive for
    # random inputs.
    gt_bboxes = [torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]])]
    gt_labels = [torch.LongTensor([2])]
    one_gt_losses = yolof_head.loss(cls_scores, bbox_preds, gt_bboxes,
                                    gt_labels, img_metas, None)
    assert one_gt_losses['loss_cls'].item() > 0, 'cls loss should be non-zero'
    assert one_gt_losses['loss_bbox'].item() > 0, 'box loss should be non-zero'
| 2,716 | 34.285714 | 79 | py |
DSLA-DSLA | DSLA-DSLA/tests/test_models/test_dense_heads/test_vfnet_head.py | # Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
from mmdet.models.dense_heads import VFNetHead
def test_vfnet_head_loss():
    """Tests vfnet head loss when truth is empty and non-empty.

    VarifocalLoss is not supported on CPU, so the forward/loss part of the
    test runs only when CUDA is available (as the other GPU-only head tests
    do); otherwise the test degrades to a construction check.
    """
    s = 256
    img_metas = [{
        'img_shape': (s, s, 3),
        'scale_factor': 1,
        'pad_shape': (s, s, 3)
    }]
    train_cfg = mmcv.Config(
        dict(
            assigner=dict(type='ATSSAssigner', topk=9),
            allowed_border=-1,
            pos_weight=-1,
            debug=False))
    # since Focal Loss is not supported on CPU
    self = VFNetHead(
        num_classes=4,
        in_channels=1,
        train_cfg=train_cfg,
        loss_cls=dict(type='VarifocalLoss', use_sigmoid=True, loss_weight=1.0))
    # Fix: previously only `self.cuda()` was guarded while the tensor
    # `.cuda()` calls below ran unconditionally, crashing on CPU-only
    # machines. Keep the entire CUDA-dependent section inside the guard.
    if torch.cuda.is_available():
        self.cuda()
        feat = [
            torch.rand(1, 1, s // feat_size, s // feat_size).cuda()
            for feat_size in [4, 8, 16, 32, 64]
        ]
        cls_scores, bbox_preds, bbox_preds_refine = self.forward(feat)
        # Test that empty ground truth encourages the network to predict
        # background
        gt_bboxes = [torch.empty((0, 4)).cuda()]
        gt_labels = [torch.LongTensor([]).cuda()]
        gt_bboxes_ignore = None
        empty_gt_losses = self.loss(cls_scores, bbox_preds, bbox_preds_refine,
                                    gt_bboxes, gt_labels, img_metas,
                                    gt_bboxes_ignore)
        # When there is no truth, the cls loss should be nonzero but there
        # should be no box loss.
        empty_cls_loss = empty_gt_losses['loss_cls']
        empty_box_loss = empty_gt_losses['loss_bbox']
        assert empty_cls_loss.item() > 0, 'cls loss should be non-zero'
        assert empty_box_loss.item() == 0, (
            'there should be no box loss when there are no true boxes')
        # When truth is non-empty then both cls and box loss should be
        # nonzero for random inputs
        gt_bboxes = [
            torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]).cuda(),
        ]
        gt_labels = [torch.LongTensor([2]).cuda()]
        one_gt_losses = self.loss(cls_scores, bbox_preds, bbox_preds_refine,
                                  gt_bboxes, gt_labels, img_metas,
                                  gt_bboxes_ignore)
        onegt_cls_loss = one_gt_losses['loss_cls']
        onegt_box_loss = one_gt_losses['loss_bbox']
        assert onegt_cls_loss.item() > 0, 'cls loss should be non-zero'
        assert onegt_box_loss.item() > 0, 'box loss should be non-zero'
| 2,561 | 39.03125 | 79 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.